Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 "API:

   - Crypto self-tests can now be disabled at boot/run time.
   - Add async support to algif_aead.

  Algorithms:

   - A large number of fixes to MPI from Nicolai Stange.
   - Performance improvement for HMAC DRBG.

  Drivers:

   - Use generic crypto engine in omap-des.
   - Merge ppc4xx-rng and crypto4xx drivers.
   - Fix lockups in sun4i-ss driver by disabling IRQs.
   - Add DMA engine support to ccp.
   - Reenable talitos hash algorithms.
   - Add support for Hisilicon SoC RNG.
   - Add basic crypto driver for the MXC SCC.

  Others:

   - Do not allocate crypto hash tfm in NORECLAIM context in ecryptfs"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (77 commits)
  crypto: qat - change the adf_ctl_stop_devices to void
  crypto: caam - fix caam_jr_alloc() ret code
  crypto: vmx - comply with ABIs that specify vrsave as reserved.
  crypto: testmgr - Add a flag allowing the self-tests to be disabled at runtime.
  crypto: ccp - constify ccp_actions structure
  crypto: marvell/cesa - Use dma_pool_zalloc
  crypto: qat - make adf_vf_isr.c dependant on IOV config
  crypto: qat - Fix typo in comments
  lib: asn1_decoder - add MODULE_LICENSE("GPL")
  crypto: omap-sham - Use dma_request_chan() for requesting DMA channel
  crypto: omap-des - Use dma_request_chan() for requesting DMA channel
  crypto: omap-aes - Use dma_request_chan() for requesting DMA channel
  crypto: omap-des - Integrate with the crypto engine framework
  crypto: s5p-sss - fix incorrect usage of scatterlists api
  crypto: s5p-sss - Fix missed interrupts when working with 8 kB blocks
  crypto: s5p-sss - Use common BIT macro
  crypto: mxc-scc - fix unwinding in mxc_scc_crypto_register()
  crypto: mxc-scc - signedness bugs in mxc_scc_ablkcipher_req_init()
  crypto: talitos - fix ahash algorithms registration
  crypto: ccp - Ensure all dependencies are specified
  ...
commit 9a07a79684
Merged by Linus Torvalds on 2016-05-17 09:33:39 -07:00
76 changed files with 3092 additions and 778 deletions

View File

@@ -1936,9 +1936,9 @@ static int test_skcipher(void)
 	}
 	req = skcipher_request_alloc(skcipher, GFP_KERNEL);
-	if (IS_ERR(req)) {
-		pr_info("could not allocate request queue\n");
-		ret = PTR_ERR(req);
+	if (!req) {
+		pr_info("could not allocate skcipher request\n");
+		ret = -ENOMEM;
 		goto out;
 	}
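
The rewritten hunk matters because the two allocation calls fail differently: crypto_alloc_skcipher() reports errors through ERR_PTR(), while skcipher_request_alloc() simply returns NULL, so PTR_ERR() on a failed request would have returned "success". A minimal sketch of the corrected pattern (the "cbc(aes)" name is just an illustrative choice):

#include <crypto/skcipher.h>

static int skcipher_alloc_example(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))		/* tfm allocation uses ERR_PTR() */
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {			/* request allocation returns NULL */
		crypto_free_skcipher(tfm);
		return -ENOMEM;
	}

	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return 0;
}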

View File

@@ -0,0 +1,21 @@
Freescale Security Controller (SCC)
Required properties:
- compatible : Should be "fsl,imx25-scc".
- reg : Should contain register location and length.
- interrupts : Should contain interrupt numbers for SCM IRQ and SMN IRQ.
- interrupt-names : Should specify the names "scm" and "smn" for the
SCM IRQ and SMN IRQ.
- clocks: Should contain the clock driving the SCC core.
- clock-names: Should be set to "ipg".
Example:
scc: crypto@53fac000 {
compatible = "fsl,imx25-scc";
reg = <0x53fac000 0x4000>;
clocks = <&clks 111>;
clock-names = "ipg";
interrupts = <49>, <50>;
interrupt-names = "scm", "smn";
};

View File

@@ -23,10 +23,8 @@ Required properties:
   - "samsung,exynos4210-secss" for Exynos4210, Exynos4212, Exynos4412, Exynos5250,
 		Exynos5260 and Exynos5420 SoCs.
 - reg : Offset and length of the register set for the module
-- interrupts : interrupt specifiers of SSS module interrupts, should contain
-		following entries:
-		- first : feed control interrupt (required for all variants),
-		- second : hash interrupt (required only for samsung,s5pv210-secss).
+- interrupts : interrupt specifiers of SSS module interrupts (one feed
+		control interrupt).
 - clocks : list of clock phandle and specifier pairs for all clocks listed in
 		clock-names property.

View File

@@ -0,0 +1,12 @@
Hisilicon Random Number Generator
Required properties:
- compatible : Should be "hisilicon,hip04-rng" or "hisilicon,hip05-rng"
- reg : Offset and length of the register set of this block
Example:
rng@d1010000 {
compatible = "hisilicon,hip05-rng";
reg = <0xd1010000 0x100>;
};

View File

@@ -838,6 +838,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			It will be ignored when crashkernel=X,high is not used
 			or memory reserved is below 4G.

+	cryptomgr.notests
+			[KNL] Disable crypto self-tests
+
 	cs89x0_dma=	[HW,NET]
 			Format: <dma>

View File

@@ -627,6 +627,7 @@ F:	include/linux/altera_jtaguart.h
 AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
 M:	Tom Lendacky <thomas.lendacky@amd.com>
+M:	Gary Hook <gary.hook@amd.com>
 L:	linux-crypto@vger.kernel.org
 S:	Supported
 F:	drivers/crypto/ccp/

View File

@@ -420,6 +420,15 @@ pwm3: pwm@53fa8000 {
 				interrupts = <41>;
 			};

+			scc: crypto@53fac000 {
+				compatible = "fsl,imx25-scc";
+				reg = <0x53fac000 0x4000>;
+				clocks = <&clks 111>;
+				clock-names = "ipg";
+				interrupts = <49>, <50>;
+				interrupt-names = "scm", "smn";
+			};
+
 			esdhc1: esdhc@53fb4000 {
 				compatible = "fsl,imx25-esdhc";
 				reg = <0x53fb4000 0x4000>;

View File

@@ -13,7 +13,7 @@
  * any later version.
  */

-#include <crypto/aead.h>
+#include <crypto/internal/aead.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/if_alg.h>
 #include <linux/init.h>
@@ -29,15 +29,24 @@ struct aead_sg_list {
 	struct scatterlist sg[ALG_MAX_PAGES];
 };

+struct aead_async_rsgl {
+	struct af_alg_sgl sgl;
+	struct list_head list;
+};
+
+struct aead_async_req {
+	struct scatterlist *tsgl;
+	struct aead_async_rsgl first_rsgl;
+	struct list_head list;
+	struct kiocb *iocb;
+	unsigned int tsgls;
+	char iv[];
+};
+
 struct aead_ctx {
 	struct aead_sg_list tsgl;
-	/*
-	 * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
-	 * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
-	 * pages
-	 */
-#define RSGL_MAX_ENTRIES ALG_MAX_PAGES
-	struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];
+	struct aead_async_rsgl first_rsgl;
+	struct list_head list;

 	void *iv;
@@ -75,6 +84,17 @@ static inline bool aead_sufficient_data(struct aead_ctx *ctx)
 	return ctx->used >= ctx->aead_assoclen + as;
 }

+static void aead_reset_ctx(struct aead_ctx *ctx)
+{
+	struct aead_sg_list *sgl = &ctx->tsgl;
+
+	sg_init_table(sgl->sg, ALG_MAX_PAGES);
+	sgl->cur = 0;
+	ctx->used = 0;
+	ctx->more = 0;
+	ctx->merge = 0;
+}
+
 static void aead_put_sgl(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
@@ -90,11 +110,7 @@ static void aead_put_sgl(struct sock *sk)
 		put_page(sg_page(sg + i));
 		sg_assign_page(sg + i, NULL);
 	}
-	sg_init_table(sg, ALG_MAX_PAGES);
-	sgl->cur = 0;
-	ctx->used = 0;
-	ctx->more = 0;
-	ctx->merge = 0;
+	aead_reset_ctx(ctx);
 }

 static void aead_wmem_wakeup(struct sock *sk)
@@ -349,23 +365,188 @@ static ssize_t aead_sendpage(struct socket *sock, struct page *page,
 	return err ?: size;
 }

-static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored, int flags)
+#define GET_ASYM_REQ(req, tfm) (struct aead_async_req *) \
+		((char *)req + sizeof(struct aead_request) + \
+		 crypto_aead_reqsize(tfm))
+
+#define GET_REQ_SIZE(tfm) sizeof(struct aead_async_req) + \
+	crypto_aead_reqsize(tfm) + crypto_aead_ivsize(tfm) + \
+	sizeof(struct aead_request)
+
+static void aead_async_cb(struct crypto_async_request *_req, int err)
+{
+	struct sock *sk = _req->data;
+	struct alg_sock *ask = alg_sk(sk);
+	struct aead_ctx *ctx = ask->private;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
+	struct aead_request *req = aead_request_cast(_req);
+	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
+	struct scatterlist *sg = areq->tsgl;
+	struct aead_async_rsgl *rsgl;
+	struct kiocb *iocb = areq->iocb;
+	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
+
+	list_for_each_entry(rsgl, &areq->list, list) {
+		af_alg_free_sg(&rsgl->sgl);
+		if (rsgl != &areq->first_rsgl)
+			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+	}
+
+	for (i = 0; i < areq->tsgls; i++)
+		put_page(sg_page(sg + i));
+
+	sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
+	sock_kfree_s(sk, req, reqlen);
+	__sock_put(sk);
+	iocb->ki_complete(iocb, err, err);
+}
+
+static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
+			      int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct aead_ctx *ctx = ask->private;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
+	struct aead_async_req *areq;
+	struct aead_request *req = NULL;
+	struct aead_sg_list *sgl = &ctx->tsgl;
+	struct aead_async_rsgl *last_rsgl = NULL, *rsgl;
+	unsigned int as = crypto_aead_authsize(tfm);
+	unsigned int i, reqlen = GET_REQ_SIZE(tfm);
+	int err = -ENOMEM;
+	unsigned long used;
+	size_t outlen;
+	size_t usedpages = 0;
+
+	lock_sock(sk);
+	if (ctx->more) {
+		err = aead_wait_for_data(sk, flags);
+		if (err)
+			goto unlock;
+	}
+
+	used = ctx->used;
+	outlen = used;
+
+	if (!aead_sufficient_data(ctx))
+		goto unlock;
+
+	req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
+	if (unlikely(!req))
+		goto unlock;
+
+	areq = GET_ASYM_REQ(req, tfm);
+	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
+	INIT_LIST_HEAD(&areq->list);
+	areq->iocb = msg->msg_iocb;
+	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
+	aead_request_set_tfm(req, tfm);
+	aead_request_set_ad(req, ctx->aead_assoclen);
+	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				  aead_async_cb, sk);
+	used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
+
+	/* take over all tx sgls from ctx */
+	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
+				  GFP_KERNEL);
+	if (unlikely(!areq->tsgl))
+		goto free;
+
+	sg_init_table(areq->tsgl, sgl->cur);
+	for (i = 0; i < sgl->cur; i++)
+		sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
+			    sgl->sg[i].length, sgl->sg[i].offset);
+
+	areq->tsgls = sgl->cur;
+
+	/* create rx sgls */
+	while (iov_iter_count(&msg->msg_iter)) {
+		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
+				      (outlen - usedpages));
+
+		if (list_empty(&areq->list)) {
+			rsgl = &areq->first_rsgl;
+		} else {
+			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
+			if (unlikely(!rsgl)) {
+				err = -ENOMEM;
+				goto free;
+			}
+		}
+		rsgl->sgl.npages = 0;
+		list_add_tail(&rsgl->list, &areq->list);
+
+		/* make one iovec available as scatterlist */
+		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
+		if (err < 0)
+			goto free;
+
+		usedpages += err;
+
+		/* chain the new scatterlist with previous one */
+		if (last_rsgl)
+			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
+
+		last_rsgl = rsgl;
+
+		/* we do not need more iovecs as we have sufficient memory */
+		if (outlen <= usedpages)
+			break;
+
+		iov_iter_advance(&msg->msg_iter, err);
+	}
+	err = -EINVAL;
+	/* ensure output buffer is sufficiently large */
+	if (usedpages < outlen)
+		goto free;
+
+	aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
+			       areq->iv);
+	err = ctx->enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+	if (err) {
+		if (err == -EINPROGRESS) {
+			sock_hold(sk);
+			err = -EIOCBQUEUED;
+			aead_reset_ctx(ctx);
+			goto unlock;
+		} else if (err == -EBADMSG) {
+			aead_put_sgl(sk);
+		}
+		goto free;
+	}
+	aead_put_sgl(sk);
+
+free:
+	list_for_each_entry(rsgl, &areq->list, list) {
+		af_alg_free_sg(&rsgl->sgl);
+		if (rsgl != &areq->first_rsgl)
+			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+	}
+	if (areq->tsgl)
+		sock_kfree_s(sk, areq->tsgl, sizeof(*areq->tsgl) * areq->tsgls);
+	if (req)
+		sock_kfree_s(sk, req, reqlen);
+unlock:
+	aead_wmem_wakeup(sk);
+	release_sock(sk);
+	return err ? err : outlen;
+}
+
+static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
 	struct aead_ctx *ctx = ask->private;
 	unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
 	struct aead_sg_list *sgl = &ctx->tsgl;
-	unsigned int i = 0;
+	struct aead_async_rsgl *last_rsgl = NULL;
+	struct aead_async_rsgl *rsgl, *tmp;
 	int err = -EINVAL;
 	unsigned long used = 0;
 	size_t outlen = 0;
 	size_t usedpages = 0;
-	unsigned int cnt = 0;
-
-	/* Limit number of IOV blocks to be accessed below */
-	if (msg->msg_iter.nr_segs > RSGL_MAX_ENTRIES)
-		return -ENOMSG;

 	lock_sock(sk);
@@ -417,21 +598,33 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
 		size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
 				      (outlen - usedpages));

+		if (list_empty(&ctx->list)) {
+			rsgl = &ctx->first_rsgl;
+		} else {
+			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
+			if (unlikely(!rsgl)) {
+				err = -ENOMEM;
+				goto unlock;
+			}
+		}
+		rsgl->sgl.npages = 0;
+		list_add_tail(&rsgl->list, &ctx->list);
+
 		/* make one iovec available as scatterlist */
-		err = af_alg_make_sg(&ctx->rsgl[cnt], &msg->msg_iter,
-				     seglen);
+		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
 		if (err < 0)
 			goto unlock;
 		usedpages += err;
 		/* chain the new scatterlist with previous one */
-		if (cnt)
-			af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);
+		if (last_rsgl)
+			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
+
+		last_rsgl = rsgl;

 		/* we do not need more iovecs as we have sufficient memory */
 		if (outlen <= usedpages)
 			break;

 		iov_iter_advance(&msg->msg_iter, err);
-		cnt++;
 	}

 	err = -EINVAL;
@@ -440,8 +633,7 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
 		goto unlock;

 	sg_mark_end(sgl->sg + sgl->cur - 1);
-	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->rsgl[0].sg,
+	aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
 			       used, ctx->iv);
 	aead_request_set_ad(&ctx->aead_req, ctx->aead_assoclen);
@@ -454,23 +646,35 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
 		/* EBADMSG implies a valid cipher operation took place */
 		if (err == -EBADMSG)
 			aead_put_sgl(sk);
 		goto unlock;
 	}

 	aead_put_sgl(sk);
 	err = 0;

 unlock:
-	for (i = 0; i < cnt; i++)
-		af_alg_free_sg(&ctx->rsgl[i]);
+	list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
+		af_alg_free_sg(&rsgl->sgl);
+		if (rsgl != &ctx->first_rsgl)
+			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
+		list_del(&rsgl->list);
+	}
+	INIT_LIST_HEAD(&ctx->list);
 	aead_wmem_wakeup(sk);
 	release_sock(sk);

 	return err ? err : outlen;
 }

+static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
+			int flags)
+{
+	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
+		aead_recvmsg_async(sock, msg, flags) :
+		aead_recvmsg_sync(sock, msg, flags);
+}
+
 static unsigned int aead_poll(struct file *file, struct socket *sock,
 			      poll_table *wait)
 {
@@ -540,6 +744,7 @@ static void aead_sock_destruct(struct sock *sk)
 	unsigned int ivlen = crypto_aead_ivsize(
 				crypto_aead_reqtfm(&ctx->aead_req));

+	WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
 	aead_put_sgl(sk);
 	sock_kzfree_s(sk, ctx->iv, ivlen);
 	sock_kfree_s(sk, ctx, ctx->len);
@@ -574,6 +779,7 @@ static int aead_accept_parent(void *private, struct sock *sk)
 	ctx->aead_assoclen = 0;
 	af_alg_init_completion(&ctx->completion);
 	sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES);
+	INIT_LIST_HEAD(&ctx->list);

 	ask->private = ctx;
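
What triggers the new path? The aead_recvmsg() wrapper added at the end of the hunk dispatches to aead_recvmsg_async() only when msg->msg_iocb is set and is not a synchronous kiocb, i.e. when the read on the op socket is driven by Linux-native AIO rather than a plain read()/recvmsg(). A rough userspace sketch of that flow (build with -laio; error checking elided; "gcm(aes)" and the buffer sizing are illustrative assumptions based on the sizing logic above, where the bytes sent must cover AD, plaintext, and room for the tag):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>
#include <libaio.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",	/* assumed to be available */
	};
	static char key[16];			/* all-zero demo key */
	char in[32] = "0123456789abcdef";	/* 16B PT + 16B room for the tag */
	char out[32];				/* receives CT || tag */
	char cbuf[CMSG_SPACE(4) + CMSG_SPACE(sizeof(struct af_alg_iv) + 12)] = { 0 };
	struct iovec iov = { .iov_base = in, .iov_len = sizeof(in) };
	struct msghdr msg = { .msg_control = cbuf,
			      .msg_controllen = sizeof(cbuf),
			      .msg_iov = &iov, .msg_iovlen = 1 };
	struct cmsghdr *cmsg;
	struct af_alg_iv *iv;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	io_context_t aio_ctx = 0;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	cmsg = CMSG_FIRSTHDR(&msg);		/* select encryption */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(unsigned int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);		/* 96-bit GCM IV */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(*iv) + 12);
	iv = (void *)CMSG_DATA(cmsg);
	iv->ivlen = 12;
	memset(iv->iv, 0, 12);

	sendmsg(opfd, &msg, 0);			/* queue the input (AD = 0 here) */

	io_setup(1, &aio_ctx);
	io_prep_pread(&cb, opfd, out, sizeof(out), 0);
	io_submit(aio_ctx, 1, cbs);		/* -> aead_recvmsg_async() */
	io_getevents(aio_ctx, 1, 1, &ev, NULL);	/* completed via ki_complete() */

	printf("async AEAD returned %ld bytes\n", (long)ev.res);
	close(opfd);
	close(tfmfd);
	return 0;
}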

View File

@@ -237,6 +237,7 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen,
 		break;
 	case OID_sha224:
 		ctx->sinfo->sig.hash_algo = "sha224";
+		break;
 	default:
 		printk("Unsupported digest algo: %u\n", ctx->last_oid);
 		return -ENOPKG;

View File

@@ -592,8 +592,10 @@ static const struct drbg_state_ops drbg_ctr_ops = {
 ******************************************************************/

 #if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
-static int drbg_kcapi_hash(struct drbg_state *drbg, const unsigned char *key,
-			   unsigned char *outval, const struct list_head *in);
+static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
+			   const struct list_head *in);
+static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg,
+				  const unsigned char *key);
 static int drbg_init_hash_kernel(struct drbg_state *drbg);
 static int drbg_fini_hash_kernel(struct drbg_state *drbg);
 #endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
@@ -619,9 +621,11 @@ static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed,
 	LIST_HEAD(seedlist);
 	LIST_HEAD(vdatalist);

-	if (!reseed)
+	if (!reseed) {
 		/* 10.1.2.3 step 2 -- memset(0) of C is implicit with kzalloc */
 		memset(drbg->V, 1, drbg_statelen(drbg));
+		drbg_kcapi_hmacsetkey(drbg, drbg->C);
+	}

 	drbg_string_fill(&seed1, drbg->V, drbg_statelen(drbg));
 	list_add_tail(&seed1.list, &seedlist);
@@ -641,12 +645,13 @@
 		prefix = DRBG_PREFIX1;
 		/* 10.1.2.2 step 1 and 4 -- concatenation and HMAC for key */
 		seed2.buf = &prefix;
-		ret = drbg_kcapi_hash(drbg, drbg->C, drbg->C, &seedlist);
+		ret = drbg_kcapi_hash(drbg, drbg->C, &seedlist);
 		if (ret)
 			return ret;
+		drbg_kcapi_hmacsetkey(drbg, drbg->C);

 		/* 10.1.2.2 step 2 and 5 -- HMAC for V */
-		ret = drbg_kcapi_hash(drbg, drbg->C, drbg->V, &vdatalist);
+		ret = drbg_kcapi_hash(drbg, drbg->V, &vdatalist);
 		if (ret)
 			return ret;
@@ -681,7 +686,7 @@ static int drbg_hmac_generate(struct drbg_state *drbg,
 	while (len < buflen) {
 		unsigned int outlen = 0;
 		/* 10.1.2.5 step 4.1 */
-		ret = drbg_kcapi_hash(drbg, drbg->C, drbg->V, &datalist);
+		ret = drbg_kcapi_hash(drbg, drbg->V, &datalist);
 		if (ret)
 			return ret;
 		outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
@@ -796,7 +801,7 @@ static int drbg_hash_df(struct drbg_state *drbg,
 	while (len < outlen) {
 		short blocklen = 0;
 		/* 10.4.1 step 4.1 */
-		ret = drbg_kcapi_hash(drbg, NULL, tmp, entropylist);
+		ret = drbg_kcapi_hash(drbg, tmp, entropylist);
 		if (ret)
 			goto out;
 		/* 10.4.1 step 4.2 */
@@ -874,7 +879,7 @@ static int drbg_hash_process_addtl(struct drbg_state *drbg,
 	list_add_tail(&data1.list, &datalist);
 	list_add_tail(&data2.list, &datalist);
 	list_splice_tail(addtl, &datalist);
-	ret = drbg_kcapi_hash(drbg, NULL, drbg->scratchpad, &datalist);
+	ret = drbg_kcapi_hash(drbg, drbg->scratchpad, &datalist);
 	if (ret)
 		goto out;
@@ -907,7 +912,7 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
 	while (len < buflen) {
 		unsigned int outlen = 0;
 		/* 10.1.1.4 step hashgen 4.1 */
-		ret = drbg_kcapi_hash(drbg, NULL, dst, &datalist);
+		ret = drbg_kcapi_hash(drbg, dst, &datalist);
 		if (ret) {
 			len = ret;
 			goto out;
@@ -956,7 +961,7 @@ static int drbg_hash_generate(struct drbg_state *drbg,
 	list_add_tail(&data1.list, &datalist);
 	drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
 	list_add_tail(&data2.list, &datalist);
-	ret = drbg_kcapi_hash(drbg, NULL, drbg->scratchpad, &datalist);
+	ret = drbg_kcapi_hash(drbg, drbg->scratchpad, &datalist);
 	if (ret) {
 		len = ret;
 		goto out;
@@ -1600,14 +1605,20 @@ static int drbg_fini_hash_kernel(struct drbg_state *drbg)
 	return 0;
 }

-static int drbg_kcapi_hash(struct drbg_state *drbg, const unsigned char *key,
-			   unsigned char *outval, const struct list_head *in)
+static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg,
+				  const unsigned char *key)
+{
+	struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
+
+	crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg));
+}
+
+static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
+			   const struct list_head *in)
 {
 	struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
 	struct drbg_string *input = NULL;

-	if (key)
-		crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg));
 	crypto_shash_init(&sdesc->shash);
 	list_for_each_entry(input, in, list)
 		crypto_shash_update(&sdesc->shash, input->buf, input->len);
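
The HMAC DRBG performance win comes from moving crypto_shash_setkey() out of drbg_kcapi_hash(): the HMAC key lives in the tfm, so it only needs to be (re)programmed when drbg->C actually changes, not once per hash invocation. A hedged, self-contained sketch of the same pattern (hypothetical function; "hmac(sha256)" chosen for illustration):

#include <crypto/hash.h>
#include <linux/err.h>

/* Sketch (not the DRBG code itself): program the HMAC key once, then
 * reuse the cached key schedule for many digests. */
static int hmac_digest_many(const u8 *key, unsigned int keylen,
			    const u8 *msg, unsigned int len, u8 *out, int n)
{
	struct crypto_shash *tfm;
	int i, ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The key is tfm state; set it only when it changes. */
	ret = crypto_shash_setkey(tfm, key, keylen);
	if (!ret) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;	/* field present in this kernel generation */

		/* Every digest below reuses the cached key schedule. */
		for (i = 0; !ret && i < n; i++)
			ret = crypto_shash_digest(desc, msg, len,
						  out + i * 32 /* SHA-256 */);
	}

	crypto_free_shash(tfm);
	return ret;
}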

View File

@@ -32,7 +32,7 @@ static int lzo_init(struct crypto_tfm *tfm)
 	struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);

 	ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS,
-				    GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+				    GFP_KERNEL | __GFP_NOWARN);
 	if (!ctx->lzo_comp_mem)
 		ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
 	if (!ctx->lzo_comp_mem)

View File

@@ -35,6 +35,10 @@
 #include "internal.h"

+static bool notests;
+module_param(notests, bool, 0644);
+MODULE_PARM_DESC(notests, "disable crypto self-tests");
+
 #ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS

 /* a perfect nop */
@@ -3885,6 +3889,11 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
 	int j;
 	int rc;

+	if (!fips_enabled && notests) {
+		printk_once(KERN_INFO "alg: self-tests disabled\n");
+		return 0;
+	}
+
 	alg_test_descs_check_order();

 	if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {

View File

@@ -268,19 +268,6 @@ config HW_RANDOM_NOMADIK
 	  If unsure, say Y.

-config HW_RANDOM_PPC4XX
-	tristate "PowerPC 4xx generic true random number generator support"
-	depends on PPC && 4xx
-	default HW_RANDOM
-	---help---
-	  This driver provides the kernel-side support for the TRNG hardware
-	  found in the security function of some PowerPC 4xx SoCs.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called ppc4xx-rng.
-
-	  If unsure, say N.
-
 config HW_RANDOM_PSERIES
 	tristate "pSeries HW Random Number Generator support"
 	depends on PPC64 && IBMVIO
@@ -309,7 +296,8 @@ config HW_RANDOM_POWERNV

 config HW_RANDOM_EXYNOS
 	tristate "EXYNOS HW random number generator support"
-	depends on ARCH_EXYNOS
+	depends on ARCH_EXYNOS || COMPILE_TEST
+	depends on HAS_IOMEM
 	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
@@ -333,6 +321,19 @@ config HW_RANDOM_TPM
 	  If unsure, say Y.

+config HW_RANDOM_HISI
+	tristate "Hisilicon Random Number Generator support"
+	depends on HW_RANDOM && ARCH_HISI
+	default HW_RANDOM
+	---help---
+	  This driver provides kernel-side support for the Random Number
+	  Generator hardware found on Hisilicon Hip04 and Hip05 SoC.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called hisi-rng.
+
+	  If unsure, say Y.
+
 config HW_RANDOM_MSM
 	tristate "Qualcomm SoCs Random Number Generator support"
 	depends on HW_RANDOM && ARCH_QCOM

View File

@@ -22,10 +22,10 @@ obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
 obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
 obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
 obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
-obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
 obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
 obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
 obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
+obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
 obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
 obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o

View File

@@ -2,7 +2,7 @@
  * exynos-rng.c - Random Number Generator driver for the exynos
  *
  * Copyright (C) 2012 Samsung Electronics
- * Jonghwa Lee <jonghwa3.lee@smasung.com>
+ * Jonghwa Lee <jonghwa3.lee@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -77,7 +77,8 @@ static int exynos_init(struct hwrng *rng)
 	pm_runtime_get_sync(exynos_rng->dev);
 	ret = exynos_rng_configure(exynos_rng);
-	pm_runtime_put_noidle(exynos_rng->dev);
+	pm_runtime_mark_last_busy(exynos_rng->dev);
+	pm_runtime_put_autosuspend(exynos_rng->dev);

 	return ret;
 }
@@ -89,6 +90,7 @@ static int exynos_read(struct hwrng *rng, void *buf,
 			struct exynos_rng, rng);
 	u32 *data = buf;
 	int retry = 100;
+	int ret = 4;

 	pm_runtime_get_sync(exynos_rng->dev);
@@ -97,23 +99,27 @@ static int exynos_read(struct hwrng *rng, void *buf,
 	while (!(exynos_rng_readl(exynos_rng,
 			EXYNOS_PRNG_STATUS_OFFSET) & PRNG_DONE) && --retry)
 		cpu_relax();
-	if (!retry)
-		return -ETIMEDOUT;
+	if (!retry) {
+		ret = -ETIMEDOUT;
+		goto out;
+	}

 	exynos_rng_writel(exynos_rng, PRNG_DONE, EXYNOS_PRNG_STATUS_OFFSET);

 	*data = exynos_rng_readl(exynos_rng, EXYNOS_PRNG_OUT1_OFFSET);

+out:
 	pm_runtime_mark_last_busy(exynos_rng->dev);
 	pm_runtime_put_sync_autosuspend(exynos_rng->dev);

-	return 4;
+	return ret;
 }

 static int exynos_rng_probe(struct platform_device *pdev)
 {
 	struct exynos_rng *exynos_rng;
 	struct resource *res;
+	int ret;

 	exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
 					GFP_KERNEL);
@@ -141,7 +147,21 @@ static int exynos_rng_probe(struct platform_device *pdev)
 	pm_runtime_use_autosuspend(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);

-	return devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
+	ret = devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
+	if (ret) {
+		pm_runtime_dont_use_autosuspend(&pdev->dev);
+		pm_runtime_disable(&pdev->dev);
+	}
+
+	return ret;
+}
+
+static int exynos_rng_remove(struct platform_device *pdev)
+{
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
 }

 static int __maybe_unused exynos_rng_runtime_suspend(struct device *dev)
@@ -201,6 +221,7 @@ static struct platform_driver exynos_rng_driver = {
 		.of_match_table = exynos_rng_dt_match,
 	},
 	.probe		= exynos_rng_probe,
+	.remove		= exynos_rng_remove,
 };

 module_platform_driver(exynos_rng_driver);

View File

@@ -0,0 +1,126 @@
/*
* Copyright (C) 2016 HiSilicon Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#define RNG_SEED 0x0
#define RNG_CTRL 0x4
#define RNG_SEED_SEL BIT(2)
#define RNG_RING_EN BIT(1)
#define RNG_EN BIT(0)
#define RNG_RAN_NUM 0x10
#define RNG_PHY_SEED 0x14
#define to_hisi_rng(p) container_of(p, struct hisi_rng, rng)
static int seed_sel;
module_param(seed_sel, int, S_IRUGO);
MODULE_PARM_DESC(seed_sel, "Auto reload seed. 0, use LFSR(default); 1, use ring oscillator.");
struct hisi_rng {
void __iomem *base;
struct hwrng rng;
};
static int hisi_rng_init(struct hwrng *rng)
{
struct hisi_rng *hrng = to_hisi_rng(rng);
int val = RNG_EN;
u32 seed;
/* get a random number as initial seed */
get_random_bytes(&seed, sizeof(seed));
writel_relaxed(seed, hrng->base + RNG_SEED);
/**
* The seed is reload periodically, there are two choice
* of seeds, default seed using the value from LFSR, or
* will use seed generated by ring oscillator.
*/
if (seed_sel == 1)
val |= RNG_RING_EN | RNG_SEED_SEL;
writel_relaxed(val, hrng->base + RNG_CTRL);
return 0;
}
static void hisi_rng_cleanup(struct hwrng *rng)
{
struct hisi_rng *hrng = to_hisi_rng(rng);
writel_relaxed(0, hrng->base + RNG_CTRL);
}
static int hisi_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
struct hisi_rng *hrng = to_hisi_rng(rng);
u32 *data = buf;
*data = readl_relaxed(hrng->base + RNG_RAN_NUM);
return 4;
}
static int hisi_rng_probe(struct platform_device *pdev)
{
struct hisi_rng *rng;
struct resource *res;
int ret;
rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
if (!rng)
return -ENOMEM;
platform_set_drvdata(pdev, rng);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rng->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(rng->base))
return PTR_ERR(rng->base);
rng->rng.name = pdev->name;
rng->rng.init = hisi_rng_init;
rng->rng.cleanup = hisi_rng_cleanup;
rng->rng.read = hisi_rng_read;
ret = devm_hwrng_register(&pdev->dev, &rng->rng);
if (ret) {
dev_err(&pdev->dev, "failed to register hwrng\n");
return ret;
}
return 0;
}
static const struct of_device_id hisi_rng_dt_ids[] = {
{ .compatible = "hisilicon,hip04-rng" },
{ .compatible = "hisilicon,hip05-rng" },
{ }
};
MODULE_DEVICE_TABLE(of, hisi_rng_dt_ids);
static struct platform_driver hisi_rng_driver = {
.probe = hisi_rng_probe,
.driver = {
.name = "hisi-rng",
.of_match_table = of_match_ptr(hisi_rng_dt_ids),
},
};
module_platform_driver(hisi_rng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kefeng Wang <wangkefeng.wang@huawei>");
MODULE_DESCRIPTION("Hisilicon random number generator driver");
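
Once this driver binds, the entropy it produces reaches user space through the hw_random core rather than any driver-specific node. A minimal hedged reader, assuming the device has been selected as the current source in /sys/class/misc/hw_random/rng_current:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/hwrng", O_RDONLY);
	ssize_t n;

	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));	/* serviced by hisi_rng_read() */
	if (n > 0) {
		for (int i = 0; i < (int)n; i++)
			printf("%02x", buf[i]);
		putchar('\n');
	}
	close(fd);
	return 0;
}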

View File

@@ -1,147 +0,0 @@
/*
* Generic PowerPC 44x RNG driver
*
* Copyright 2011 IBM Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/io.h>
#define PPC4XX_TRNG_DEV_CTRL 0x60080
#define PPC4XX_TRNGE 0x00020000
#define PPC4XX_TRNG_CTRL 0x0008
#define PPC4XX_TRNG_CTRL_DALM 0x20
#define PPC4XX_TRNG_STAT 0x0004
#define PPC4XX_TRNG_STAT_B 0x1
#define PPC4XX_TRNG_DATA 0x0000
#define MODULE_NAME "ppc4xx_rng"
static int ppc4xx_rng_data_present(struct hwrng *rng, int wait)
{
void __iomem *rng_regs = (void __iomem *) rng->priv;
int busy, i, present = 0;
for (i = 0; i < 20; i++) {
busy = (in_le32(rng_regs + PPC4XX_TRNG_STAT) & PPC4XX_TRNG_STAT_B);
if (!busy || !wait) {
present = 1;
break;
}
udelay(10);
}
return present;
}
static int ppc4xx_rng_data_read(struct hwrng *rng, u32 *data)
{
void __iomem *rng_regs = (void __iomem *) rng->priv;
*data = in_le32(rng_regs + PPC4XX_TRNG_DATA);
return 4;
}
static int ppc4xx_rng_enable(int enable)
{
struct device_node *ctrl;
void __iomem *ctrl_reg;
int err = 0;
u32 val;
/* Find the main crypto device node and map it to turn the TRNG on */
ctrl = of_find_compatible_node(NULL, NULL, "amcc,ppc4xx-crypto");
if (!ctrl)
return -ENODEV;
ctrl_reg = of_iomap(ctrl, 0);
if (!ctrl_reg) {
err = -ENODEV;
goto out;
}
val = in_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL);
if (enable)
val |= PPC4XX_TRNGE;
else
val = val & ~PPC4XX_TRNGE;
out_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL, val);
iounmap(ctrl_reg);
out:
of_node_put(ctrl);
return err;
}
static struct hwrng ppc4xx_rng = {
.name = MODULE_NAME,
.data_present = ppc4xx_rng_data_present,
.data_read = ppc4xx_rng_data_read,
};
static int ppc4xx_rng_probe(struct platform_device *dev)
{
void __iomem *rng_regs;
int err = 0;
rng_regs = of_iomap(dev->dev.of_node, 0);
if (!rng_regs)
return -ENODEV;
err = ppc4xx_rng_enable(1);
if (err)
return err;
out_le32(rng_regs + PPC4XX_TRNG_CTRL, PPC4XX_TRNG_CTRL_DALM);
ppc4xx_rng.priv = (unsigned long) rng_regs;
err = hwrng_register(&ppc4xx_rng);
return err;
}
static int ppc4xx_rng_remove(struct platform_device *dev)
{
void __iomem *rng_regs = (void __iomem *) ppc4xx_rng.priv;
hwrng_unregister(&ppc4xx_rng);
ppc4xx_rng_enable(0);
iounmap(rng_regs);
return 0;
}
static const struct of_device_id ppc4xx_rng_match[] = {
{ .compatible = "ppc4xx-rng", },
{ .compatible = "amcc,ppc460ex-rng", },
{ .compatible = "amcc,ppc440epx-rng", },
{},
};
MODULE_DEVICE_TABLE(of, ppc4xx_rng_match);
static struct platform_driver ppc4xx_rng_driver = {
.driver = {
.name = MODULE_NAME,
.of_match_table = ppc4xx_rng_match,
},
.probe = ppc4xx_rng_probe,
.remove = ppc4xx_rng_remove,
};
module_platform_driver(ppc4xx_rng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Josh Boyer <jwboyer@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("HW RNG driver for PPC 4xx processors");

View File

@@ -279,6 +279,14 @@ config CRYPTO_DEV_PPC4XX
 	help
 	  This option allows you to have support for AMCC crypto acceleration.

+config HW_RANDOM_PPC4XX
+	bool "PowerPC 4xx generic true random number generator support"
+	depends on CRYPTO_DEV_PPC4XX && HW_RANDOM
+	default y
+	---help---
+	 This option provides the kernel-side support for the TRNG hardware
+	 found in the security function of some PowerPC 4xx SoCs.
+
 config CRYPTO_DEV_OMAP_SHAM
 	tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
 	depends on ARCH_OMAP2PLUS
@@ -302,15 +310,16 @@
 	  want to use the OMAP module for AES algorithms.

 config CRYPTO_DEV_OMAP_DES
-	tristate "Support for OMAP DES3DES hw engine"
+	tristate "Support for OMAP DES/3DES hw engine"
 	depends on ARCH_OMAP2PLUS
 	select CRYPTO_DES
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_ENGINE
 	help
 	  OMAP processors have DES/3DES module accelerator. Select this if you
 	  want to use the OMAP module for DES and 3DES algorithms. Currently
-	  the ECB and CBC modes of operation supported by the driver. Also
-	  accesses made on unaligned boundaries are also supported.
+	  the ECB and CBC modes of operation are supported by the driver. Also
+	  accesses made on unaligned boundaries are supported.

 config CRYPTO_DEV_PICOXCELL
 	tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
@@ -340,9 +349,19 @@
 	  This option enables support for the SAHARA HW crypto accelerator
 	  found in some Freescale i.MX chips.

+config CRYPTO_DEV_MXC_SCC
+	tristate "Support for Freescale Security Controller (SCC)"
+	depends on ARCH_MXC && OF
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_DES
+	help
+	  This option enables support for the Security Controller (SCC)
+	  found in Freescale i.MX25 chips.
+
 config CRYPTO_DEV_S5P
 	tristate "Support for Samsung S5PV210/Exynos crypto accelerator"
-	depends on ARCH_S5PV210 || ARCH_EXYNOS
+	depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+	depends on HAS_IOMEM && HAS_DMA
 	select CRYPTO_AES
 	select CRYPTO_BLKCIPHER
 	help

View File

@@ -23,6 +23,7 @@ obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
 obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
+obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/

View File

@@ -1,2 +1,3 @@
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
 crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
+crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o

View File

@@ -40,6 +40,7 @@
 #include "crypto4xx_reg_def.h"
 #include "crypto4xx_core.h"
 #include "crypto4xx_sa.h"
+#include "crypto4xx_trng.h"

 #define PPC4XX_SEC_VERSION_STR			"0.5"
@@ -1225,6 +1226,7 @@
 	if (rc)
 		goto err_start_dev;

+	ppc4xx_trng_probe(core_dev);
 	return 0;

 err_start_dev:
@@ -1252,6 +1254,8 @@
 	struct device *dev = &ofdev->dev;
 	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

+	ppc4xx_trng_remove(core_dev);
+
 	free_irq(core_dev->irq, dev);
 	irq_dispose_mapping(core_dev->irq);
@@ -1272,7 +1276,7 @@ MODULE_DEVICE_TABLE(of, crypto4xx_match);
 static struct platform_driver crypto4xx_driver = {
 	.driver = {
-		.name = "crypto4xx",
+		.name = MODULE_NAME,
 		.of_match_table = crypto4xx_match,
 	},
 	.probe		= crypto4xx_probe,
@@ -1284,4 +1288,3 @@ module_platform_driver(crypto4xx_driver);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
 MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");
-

View File

@@ -24,6 +24,8 @@
 #include <crypto/internal/hash.h>

+#define MODULE_NAME "crypto4xx"
+
 #define PPC460SX_SDR0_SRST			0x201
 #define PPC405EX_SDR0_SRST			0x200
 #define PPC460EX_SDR0_SRST			0x201
@@ -72,6 +74,7 @@ struct crypto4xx_device {
 	char *name;
 	u64  ce_phy_address;
 	void __iomem *ce_base;
+	void __iomem *trng_base;

 	void *pdr;	/* base address of packet
 			   descriptor ring */
@@ -106,6 +109,7 @@ struct crypto4xx_core_device {
 	struct device *device;
 	struct platform_device *ofdev;
 	struct crypto4xx_device *dev;
+	struct hwrng *trng;
 	u32 int_status;
 	u32 irq;
 	struct tasklet_struct tasklet;

View File

@@ -125,6 +125,7 @@
 #define PPC4XX_INTERRUPT_CLR			0x3ffff
 #define PPC4XX_PRNG_CTRL_AUTO_EN		0x3
 #define PPC4XX_DC_3DES_EN			1
+#define PPC4XX_TRNG_EN				0x00020000
 #define PPC4XX_INT_DESCR_CNT			4
 #define PPC4XX_INT_TIMEOUT_CNT			0
 #define PPC4XX_INT_CFG				1

View File

@@ -0,0 +1,131 @@
/*
* Generic PowerPC 44x RNG driver
*
* Copyright 2011 IBM Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/io.h>
#include "crypto4xx_core.h"
#include "crypto4xx_trng.h"
#include "crypto4xx_reg_def.h"
#define PPC4XX_TRNG_CTRL 0x0008
#define PPC4XX_TRNG_CTRL_DALM 0x20
#define PPC4XX_TRNG_STAT 0x0004
#define PPC4XX_TRNG_STAT_B 0x1
#define PPC4XX_TRNG_DATA 0x0000
static int ppc4xx_trng_data_present(struct hwrng *rng, int wait)
{
struct crypto4xx_device *dev = (void *)rng->priv;
int busy, i, present = 0;
for (i = 0; i < 20; i++) {
busy = (in_le32(dev->trng_base + PPC4XX_TRNG_STAT) &
PPC4XX_TRNG_STAT_B);
if (!busy || !wait) {
present = 1;
break;
}
udelay(10);
}
return present;
}
static int ppc4xx_trng_data_read(struct hwrng *rng, u32 *data)
{
struct crypto4xx_device *dev = (void *)rng->priv;
*data = in_le32(dev->trng_base + PPC4XX_TRNG_DATA);
return 4;
}
static void ppc4xx_trng_enable(struct crypto4xx_device *dev, bool enable)
{
u32 device_ctrl;
device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
if (enable)
device_ctrl |= PPC4XX_TRNG_EN;
else
device_ctrl &= ~PPC4XX_TRNG_EN;
writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
}
static const struct of_device_id ppc4xx_trng_match[] = {
{ .compatible = "ppc4xx-rng", },
{ .compatible = "amcc,ppc460ex-rng", },
{ .compatible = "amcc,ppc440epx-rng", },
{},
};
void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
{
struct crypto4xx_device *dev = core_dev->dev;
struct device_node *trng = NULL;
struct hwrng *rng = NULL;
int err;
/* Find the TRNG device node and map it */
trng = of_find_matching_node(NULL, ppc4xx_trng_match);
if (!trng || !of_device_is_available(trng))
return;
dev->trng_base = of_iomap(trng, 0);
of_node_put(trng);
if (!dev->trng_base)
goto err_out;
rng = kzalloc(sizeof(*rng), GFP_KERNEL);
if (!rng)
goto err_out;
rng->name = MODULE_NAME;
rng->data_present = ppc4xx_trng_data_present;
rng->data_read = ppc4xx_trng_data_read;
rng->priv = (unsigned long) dev;
core_dev->trng = rng;
ppc4xx_trng_enable(dev, true);
out_le32(dev->trng_base + PPC4XX_TRNG_CTRL, PPC4XX_TRNG_CTRL_DALM);
err = devm_hwrng_register(core_dev->device, core_dev->trng);
if (err) {
ppc4xx_trng_enable(dev, false);
dev_err(core_dev->device, "failed to register hwrng (%d).\n",
err);
goto err_out;
}
return;
err_out:
of_node_put(trng);
iounmap(dev->trng_base);
kfree(rng);
dev->trng_base = NULL;
core_dev->trng = NULL;
}
void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev)
{
if (core_dev && core_dev->trng) {
struct crypto4xx_device *dev = core_dev->dev;
devm_hwrng_unregister(core_dev->device, core_dev->trng);
ppc4xx_trng_enable(dev, false);
iounmap(dev->trng_base);
kfree(core_dev->trng);
}
}
MODULE_ALIAS("ppc4xx_rng");

View File

@@ -0,0 +1,34 @@
/**
* AMCC SoC PPC4xx Crypto Driver
*
* Copyright (c) 2008 Applied Micro Circuits Corporation.
* All rights reserved. James Hsiao <jhsiao@amcc.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This file defines the security context
* associate format.
*/
#ifndef __CRYPTO4XX_TRNG_H__
#define __CRYPTO4XX_TRNG_H__
#ifdef CONFIG_HW_RANDOM_PPC4XX
void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev);
void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev);
#else
static inline void ppc4xx_trng_probe(
struct crypto4xx_device *dev __maybe_unused) { }
static inline void ppc4xx_trng_remove(
struct crypto4xx_device *dev __maybe_unused) { }
#endif
#endif

View File

@@ -248,7 +248,7 @@ static void caam_jr_dequeue(unsigned long devarg)
 struct device *caam_jr_alloc(void)
 {
 	struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
-	struct device *dev = NULL;
+	struct device *dev = ERR_PTR(-ENODEV);
 	int min_tfm_cnt	= INT_MAX;
 	int tfm_cnt;
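
A knock-on effect of returning ERR_PTR(-ENODEV) instead of NULL: callers can no longer test the result against NULL and must use the ERR_PTR() idiom. Roughly what a caller (modeled on the caamalg users) now looks like:

	struct device *jrdev = caam_jr_alloc();

	if (IS_ERR(jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(jrdev);
	}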

View File

@@ -3,6 +3,8 @@ config CRYPTO_DEV_CCP_DD
 	depends on CRYPTO_DEV_CCP
 	default m
 	select HW_RANDOM
+	select DMA_ENGINE
+	select DMADEVICES
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
 	help

View File

@@ -1,5 +1,9 @@
 obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
-ccp-objs := ccp-dev.o ccp-ops.o ccp-dev-v3.o ccp-platform.o
+ccp-objs := ccp-dev.o \
+	    ccp-ops.o \
+	    ccp-dev-v3.o \
+	    ccp-platform.o \
+	    ccp-dmaengine.o
 ccp-$(CONFIG_PCI) += ccp-pci.o

 obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o

View File

@@ -406,6 +406,11 @@ static int ccp_init(struct ccp_device *ccp)
 		goto e_kthread;
 	}

+	/* Register the DMA engine support */
+	ret = ccp_dmaengine_register(ccp);
+	if (ret)
+		goto e_hwrng;
+
 	ccp_add_device(ccp);

 	/* Enable interrupts */
@@ -413,6 +418,9 @@
 	return 0;

+e_hwrng:
+	hwrng_unregister(&ccp->hwrng);
+
 e_kthread:
 	for (i = 0; i < ccp->cmd_q_count; i++)
 		if (ccp->cmd_q[i].kthread)
@@ -436,6 +444,9 @@ static void ccp_destroy(struct ccp_device *ccp)
 	/* Remove this device from the list of available units first */
 	ccp_del_device(ccp);

+	/* Unregister the DMA engine */
+	ccp_dmaengine_unregister(ccp);
+
 	/* Unregister the RNG */
 	hwrng_unregister(&ccp->hwrng);
@@ -515,7 +526,7 @@ static irqreturn_t ccp_irq_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }

-static struct ccp_actions ccp3_actions = {
+static const struct ccp_actions ccp3_actions = {
 	.perform_aes = ccp_perform_aes,
 	.perform_xts_aes = ccp_perform_xts_aes,
 	.perform_sha = ccp_perform_sha,

View File

@@ -16,7 +16,7 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
-#include <linux/rwlock_types.h>
+#include <linux/spinlock_types.h>
 #include <linux/types.h>
 #include <linux/mutex.h>
 #include <linux/delay.h>

View File

@@ -22,6 +22,9 @@
 #include <linux/dmapool.h>
 #include <linux/hw_random.h>
 #include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/dmaengine.h>

 #define MAX_CCP_NAME_LEN		16
 #define MAX_DMAPOOL_NAME_LEN		32
@@ -159,7 +162,7 @@
 /* Structure to hold CCP version-specific values */
 struct ccp_vdata {
 	unsigned int version;
-	struct ccp_actions *perform;
+	const struct ccp_actions *perform;
 };

 extern struct ccp_vdata ccpv3;
@@ -167,6 +170,39 @@
 struct ccp_device;
 struct ccp_cmd;

+struct ccp_dma_cmd {
+	struct list_head entry;
+
+	struct ccp_cmd ccp_cmd;
+};
+
+struct ccp_dma_desc {
+	struct list_head entry;
+
+	struct ccp_device *ccp;
+
+	struct list_head pending;
+	struct list_head active;
+
+	enum dma_status status;
+	struct dma_async_tx_descriptor tx_desc;
+	size_t len;
+};
+
+struct ccp_dma_chan {
+	struct ccp_device *ccp;
+
+	spinlock_t lock;
+	struct list_head pending;
+	struct list_head active;
+	struct list_head complete;
+
+	struct tasklet_struct cleanup_tasklet;
+	enum dma_status status;
+	struct dma_chan dma_chan;
+};
+
 struct ccp_cmd_queue {
 	struct ccp_device *ccp;
@@ -260,6 +296,14 @@ struct ccp_device {
 	struct hwrng hwrng;
 	unsigned int hwrng_retries;

+	/*
+	 * Support for the CCP DMA capabilities
+	 */
+	struct dma_device dma_dev;
+	struct ccp_dma_chan *ccp_dma_chan;
+	struct kmem_cache *dma_cmd_cache;
+	struct kmem_cache *dma_desc_cache;
+
 	/*
 	 * A counter used to generate job-ids for cmds submitted to the CCP
 	 */
@@ -418,4 +462,7 @@ int ccp_cmd_queue_thread(void *data);

 int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);

+int ccp_dmaengine_register(struct ccp_device *ccp);
+void ccp_dmaengine_unregister(struct ccp_device *ccp);
+
 #endif
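
Because ccp_dmaengine_register() (declared above) plugs the CCP queues into the generic dmaengine framework as channels of a standard struct dma_device, any in-kernel client can consume them with no CCP-specific calls. A hedged sketch of a memcpy offload through the standard client API (assumes the device advertises DMA_MEMCPY and that dst/src are already DMA-mapped):

#include <linux/dmaengine.h>

static int dma_memcpy_example(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	dma_cap_mask_t mask;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Grab any channel with memcpy capability (could be the CCP's). */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT |
						  DMA_CTRL_ACK);
	if (!tx) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = dmaengine_submit(tx);		/* lands in ccp_tx_submit() here */
	dma_async_issue_pending(chan);

	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;
out:
	dma_release_channel(chan);
	return ret;
}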

View File

@@ -0,0 +1,727 @@
/*
* AMD Cryptographic Coprocessor (CCP) driver
*
* Copyright (C) 2016 Advanced Micro Devices, Inc.
*
* Author: Gary R Hook <gary.hook@amd.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>
#include "ccp-dev.h"
#include "../../dma/dmaengine.h"
#define CCP_DMA_WIDTH(_mask) \
({ \
u64 mask = _mask + 1; \
(mask == 0) ? 64 : fls64(mask); \
})
static void ccp_free_cmd_resources(struct ccp_device *ccp,
struct list_head *list)
{
struct ccp_dma_cmd *cmd, *ctmp;
list_for_each_entry_safe(cmd, ctmp, list, entry) {
list_del(&cmd->entry);
kmem_cache_free(ccp->dma_cmd_cache, cmd);
}
}
static void ccp_free_desc_resources(struct ccp_device *ccp,
struct list_head *list)
{
struct ccp_dma_desc *desc, *dtmp;
list_for_each_entry_safe(desc, dtmp, list, entry) {
ccp_free_cmd_resources(ccp, &desc->active);
ccp_free_cmd_resources(ccp, &desc->pending);
list_del(&desc->entry);
kmem_cache_free(ccp->dma_desc_cache, desc);
}
}
static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
dma_chan);
unsigned long flags;
dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);
spin_lock_irqsave(&chan->lock, flags);
ccp_free_desc_resources(chan->ccp, &chan->complete);
ccp_free_desc_resources(chan->ccp, &chan->active);
ccp_free_desc_resources(chan->ccp, &chan->pending);
spin_unlock_irqrestore(&chan->lock, flags);
}
static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
struct list_head *list)
{
struct ccp_dma_desc *desc, *dtmp;
list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
if (!async_tx_test_ack(&desc->tx_desc))
continue;
dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);
ccp_free_cmd_resources(ccp, &desc->active);
ccp_free_cmd_resources(ccp, &desc->pending);
list_del(&desc->entry);
kmem_cache_free(ccp->dma_desc_cache, desc);
}
}
static void ccp_do_cleanup(unsigned long data)
{
struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
unsigned long flags;
dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
dma_chan_name(&chan->dma_chan));
spin_lock_irqsave(&chan->lock, flags);
ccp_cleanup_desc_resources(chan->ccp, &chan->complete);
spin_unlock_irqrestore(&chan->lock, flags);
}
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
struct ccp_dma_cmd *cmd;
int ret;
cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
list_move(&cmd->entry, &desc->active);
dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
desc->tx_desc.cookie, cmd);
ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
return 0;
dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
ret, desc->tx_desc.cookie, cmd);
return ret;
}
static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
struct ccp_dma_cmd *cmd;
cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
entry);
if (!cmd)
return;
dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
__func__, desc->tx_desc.cookie, cmd);
list_del(&cmd->entry);
kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}
static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
struct ccp_dma_desc *desc)
{
/* Move current DMA descriptor to the complete list */
if (desc)
list_move(&desc->entry, &chan->complete);
/* Get the next DMA descriptor on the active list */
desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
entry);
return desc;
}
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
struct ccp_dma_desc *desc)
{
struct dma_async_tx_descriptor *tx_desc;
unsigned long flags;
/* Loop over descriptors until one is found with commands */
do {
if (desc) {
/* Remove the DMA command from the list and free it */
ccp_free_active_cmd(desc);
if (!list_empty(&desc->pending)) {
/* No errors, keep going */
if (desc->status != DMA_ERROR)
return desc;
/* Error, free remaining commands and move on */
ccp_free_cmd_resources(desc->ccp,
&desc->pending);
}
tx_desc = &desc->tx_desc;
} else {
tx_desc = NULL;
}
spin_lock_irqsave(&chan->lock, flags);
if (desc) {
if (desc->status != DMA_ERROR)
desc->status = DMA_COMPLETE;
dev_dbg(desc->ccp->dev,
"%s - tx %d complete, status=%u\n", __func__,
desc->tx_desc.cookie, desc->status);
dma_cookie_complete(tx_desc);
}
desc = __ccp_next_dma_desc(chan, desc);
spin_unlock_irqrestore(&chan->lock, flags);
if (tx_desc) {
if (tx_desc->callback &&
(tx_desc->flags & DMA_PREP_INTERRUPT))
tx_desc->callback(tx_desc->callback_param);
dma_run_dependencies(tx_desc);
}
} while (desc);
return NULL;
}
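/*
 * Splice the entire pending list onto the tail of the active list.
 * Returns the first spliced descriptor only when the active list was
 * empty, telling the caller that processing must be (re)started.
 */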
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
struct ccp_dma_desc *desc;
if (list_empty(&chan->pending))
return NULL;
desc = list_empty(&chan->active)
? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
: NULL;
list_splice_tail_init(&chan->pending, &chan->active);
return desc;
}
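/*
 * Completion callback run for every CCP command: retire finished work
 * through ccp_handle_active_desc(), then issue the next pending command.
 * A successful issue ends the loop (this callback fires again on its
 * completion); a failed issue marks the descriptor DMA_ERROR and keeps
 * unwinding. Freeing of acked descriptors is left to the cleanup tasklet.
 */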
static void ccp_cmd_callback(void *data, int err)
{
struct ccp_dma_desc *desc = data;
struct ccp_dma_chan *chan;
int ret;
if (err == -EINPROGRESS)
return;
chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
dma_chan);
dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
__func__, desc->tx_desc.cookie, err);
if (err)
desc->status = DMA_ERROR;
while (true) {
/* Check for DMA descriptor completion */
desc = ccp_handle_active_desc(chan, desc);
/* Don't submit cmd if no descriptor or DMA is paused */
if (!desc || (chan->status == DMA_PAUSED))
break;
ret = ccp_issue_next_cmd(desc);
if (!ret)
break;
desc->status = DMA_ERROR;
}
tasklet_schedule(&chan->cleanup_tasklet);
}
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
tx_desc);
struct ccp_dma_chan *chan;
dma_cookie_t cookie;
unsigned long flags;
chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);
spin_lock_irqsave(&chan->lock, flags);
cookie = dma_cookie_assign(tx_desc);
list_add_tail(&desc->entry, &chan->pending);
spin_unlock_irqrestore(&chan->lock, flags);
dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
__func__, cookie);
return cookie;
}
static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
struct ccp_dma_cmd *cmd;
cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
if (cmd)
memset(cmd, 0, sizeof(*cmd));
return cmd;
}
static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
unsigned long flags)
{
struct ccp_dma_desc *desc;
desc = kmem_cache_alloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
if (!desc)
return NULL;
memset(desc, 0, sizeof(*desc));
dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
desc->tx_desc.flags = flags;
desc->tx_desc.tx_submit = ccp_tx_submit;
desc->ccp = chan->ccp;
INIT_LIST_HEAD(&desc->pending);
INIT_LIST_HEAD(&desc->active);
desc->status = DMA_IN_PROGRESS;
return desc;
}
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
struct scatterlist *dst_sg,
unsigned int dst_nents,
struct scatterlist *src_sg,
unsigned int src_nents,
unsigned long flags)
{
struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
dma_chan);
struct ccp_device *ccp = chan->ccp;
struct ccp_dma_desc *desc;
struct ccp_dma_cmd *cmd;
struct ccp_cmd *ccp_cmd;
struct ccp_passthru_nomap_engine *ccp_pt;
unsigned int src_offset, src_len;
unsigned int dst_offset, dst_len;
unsigned int len;
unsigned long sflags;
size_t total_len;
if (!dst_sg || !src_sg)
return NULL;
if (!dst_nents || !src_nents)
return NULL;
desc = ccp_alloc_dma_desc(chan, flags);
if (!desc)
return NULL;
total_len = 0;
src_len = sg_dma_len(src_sg);
src_offset = 0;
dst_len = sg_dma_len(dst_sg);
dst_offset = 0;
while (true) {
if (!src_len) {
src_nents--;
if (!src_nents)
break;
src_sg = sg_next(src_sg);
if (!src_sg)
break;
src_len = sg_dma_len(src_sg);
src_offset = 0;
continue;
}
if (!dst_len) {
dst_nents--;
if (!dst_nents)
break;
dst_sg = sg_next(dst_sg);
if (!dst_sg)
break;
dst_len = sg_dma_len(dst_sg);
dst_offset = 0;
continue;
}
len = min(dst_len, src_len);
cmd = ccp_alloc_dma_cmd(chan);
if (!cmd)
goto err;
ccp_cmd = &cmd->ccp_cmd;
ccp_pt = &ccp_cmd->u.passthru_nomap;
ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
ccp_pt->src_len = len;
ccp_pt->final = 1;
ccp_cmd->callback = ccp_cmd_callback;
ccp_cmd->data = desc;
list_add_tail(&cmd->entry, &desc->pending);
dev_dbg(ccp->dev,
"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
cmd, &ccp_pt->src_dma,
&ccp_pt->dst_dma, ccp_pt->src_len);
total_len += len;
src_len -= len;
src_offset += len;
dst_len -= len;
dst_offset += len;
}
desc->len = total_len;
if (list_empty(&desc->pending))
goto err;
dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);
spin_lock_irqsave(&chan->lock, sflags);
list_add_tail(&desc->entry, &chan->pending);
spin_unlock_irqrestore(&chan->lock, sflags);
return desc;
err:
ccp_free_cmd_resources(ccp, &desc->pending);
kmem_cache_free(ccp->dma_desc_cache, desc);
return NULL;
}
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
unsigned long flags)
{
struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
dma_chan);
struct ccp_dma_desc *desc;
struct scatterlist dst_sg, src_sg;
dev_dbg(chan->ccp->dev,
"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
__func__, &src, &dst, len, flags);
sg_init_table(&dst_sg, 1);
sg_dma_address(&dst_sg) = dst;
sg_dma_len(&dst_sg) = len;
sg_init_table(&src_sg, 1);
sg_dma_address(&src_sg) = src;
sg_dma_len(&src_sg) = len;
desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
if (!desc)
return NULL;
return &desc->tx_desc;
}
static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
struct dma_chan *dma_chan, struct scatterlist *dst_sg,
unsigned int dst_nents, struct scatterlist *src_sg,
unsigned int src_nents, unsigned long flags)
{
struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
dma_chan);
struct ccp_dma_desc *desc;
dev_dbg(chan->ccp->dev,
"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
__func__, src_sg, src_nents, dst_sg, dst_nents, flags);
desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
flags);
if (!desc)
return NULL;
return &desc->tx_desc;
}
static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
struct dma_chan *dma_chan, unsigned long flags)
{
struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
dma_chan);
struct ccp_dma_desc *desc;
desc = ccp_alloc_dma_desc(chan, flags);
if (!desc)
return NULL;
return &desc->tx_desc;
}
static void ccp_issue_pending(struct dma_chan *dma_chan)
{
struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
dma_chan);
struct ccp_dma_desc *desc;
unsigned long flags;
dev_dbg(chan->ccp->dev, "%s\n", __func__);
spin_lock_irqsave(&chan->lock, flags);
desc = __ccp_pending_to_active(chan);
spin_unlock_irqrestore(&chan->lock, flags);
/* If there was nothing active, start processing */
if (desc)
ccp_cmd_callback(desc, 0);
}
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
dma_cookie_t cookie,
struct dma_tx_state *state)
{
struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
dma_chan);
struct ccp_dma_desc *desc;
enum dma_status ret;
unsigned long flags;
if (chan->status == DMA_PAUSED) {
ret = DMA_PAUSED;
goto out;
}
ret = dma_cookie_status(dma_chan, cookie, state);
if (ret == DMA_COMPLETE) {
spin_lock_irqsave(&chan->lock, flags);
/* Get status from complete chain, if still there */
list_for_each_entry(desc, &chan->complete, entry) {
if (desc->tx_desc.cookie != cookie)
continue;
ret = desc->status;
break;
}
spin_unlock_irqrestore(&chan->lock, flags);
}
out:
dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);
return ret;
}
static int ccp_pause(struct dma_chan *dma_chan)
{
struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
dma_chan);
chan->status = DMA_PAUSED;
/*TODO: Wait for active DMA to complete before returning? */
return 0;
}
static int ccp_resume(struct dma_chan *dma_chan)
{
struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
dma_chan);
struct ccp_dma_desc *desc;
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
entry);
spin_unlock_irqrestore(&chan->lock, flags);
/* Indicate the channel is running again */
chan->status = DMA_IN_PROGRESS;
/* If there was something active, re-start */
if (desc)
ccp_cmd_callback(desc, 0);
return 0;
}
static int ccp_terminate_all(struct dma_chan *dma_chan)
{
struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
dma_chan);
unsigned long flags;
dev_dbg(chan->ccp->dev, "%s\n", __func__);
/*TODO: Wait for active DMA to complete before continuing */
spin_lock_irqsave(&chan->lock, flags);
/*TODO: Purge the complete list? */
ccp_free_desc_resources(chan->ccp, &chan->active);
ccp_free_desc_resources(chan->ccp, &chan->pending);
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
int ccp_dmaengine_register(struct ccp_device *ccp)
{
struct ccp_dma_chan *chan;
struct dma_device *dma_dev = &ccp->dma_dev;
struct dma_chan *dma_chan;
char *dma_cmd_cache_name;
char *dma_desc_cache_name;
unsigned int i;
int ret;
ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
sizeof(*(ccp->ccp_dma_chan)),
GFP_KERNEL);
if (!ccp->ccp_dma_chan)
return -ENOMEM;
dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
"%s-dmaengine-cmd-cache",
ccp->name);
if (!dma_cmd_cache_name)
return -ENOMEM;
ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
sizeof(struct ccp_dma_cmd),
sizeof(void *),
SLAB_HWCACHE_ALIGN, NULL);
if (!ccp->dma_cmd_cache)
return -ENOMEM;
dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
"%s-dmaengine-desc-cache",
ccp->name);
if (!dma_desc_cache_name) {
ret = -ENOMEM;
goto err_cache;
}
ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
sizeof(struct ccp_dma_desc),
sizeof(void *),
SLAB_HWCACHE_ALIGN, NULL);
if (!ccp->dma_desc_cache) {
ret = -ENOMEM;
goto err_cache;
}
dma_dev->dev = ccp->dev;
dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
dma_dev->directions = DMA_MEM_TO_MEM;
dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
dma_cap_set(DMA_SG, dma_dev->cap_mask);
dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
INIT_LIST_HEAD(&dma_dev->channels);
for (i = 0; i < ccp->cmd_q_count; i++) {
chan = ccp->ccp_dma_chan + i;
dma_chan = &chan->dma_chan;
chan->ccp = ccp;
spin_lock_init(&chan->lock);
INIT_LIST_HEAD(&chan->pending);
INIT_LIST_HEAD(&chan->active);
INIT_LIST_HEAD(&chan->complete);
tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
(unsigned long)chan);
dma_chan->device = dma_dev;
dma_cookie_init(dma_chan);
list_add_tail(&dma_chan->device_node, &dma_dev->channels);
}
dma_dev->device_free_chan_resources = ccp_free_chan_resources;
dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
dma_dev->device_issue_pending = ccp_issue_pending;
dma_dev->device_tx_status = ccp_tx_status;
dma_dev->device_pause = ccp_pause;
dma_dev->device_resume = ccp_resume;
dma_dev->device_terminate_all = ccp_terminate_all;
ret = dma_async_device_register(dma_dev);
if (ret)
goto err_reg;
return 0;
err_reg:
kmem_cache_destroy(ccp->dma_desc_cache);
err_cache:
kmem_cache_destroy(ccp->dma_cmd_cache);
return ret;
}
void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
struct dma_device *dma_dev = &ccp->dma_dev;
dma_async_device_unregister(dma_dev);
kmem_cache_destroy(ccp->dma_desc_cache);
kmem_cache_destroy(ccp->dma_cmd_cache);
}
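
For reference, nothing CCP-specific is needed on the consumer side: a dmaengine client drives the channels registered above through the generic API. The sketch below is a minimal, hypothetical consumer of the DMA_MEMCPY capability; the pre-mapped src/dst addresses are assumptions and error handling is reduced to the essentials.

#include <linux/dmaengine.h>

/* Hypothetical client of the CCP's DMA_MEMCPY capability; "src" and
 * "dst" must already be DMA-mapped and at least "len" bytes long.
 */
static int ccp_memcpy_example(dma_addr_t dst, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Any memcpy-capable channel will do; the channels added by
	 * ccp_dmaengine_register() above qualify.
	 */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		ret = -EIO;
		goto out;
	}

	cookie = dmaengine_submit(tx);	/* lands in ccp_tx_submit() */
	dma_async_issue_pending(chan);	/* kicks ccp_issue_pending() */

	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;
out:
	dma_release_channel(chan);
	return ret;
}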

View File

@ -1427,6 +1427,70 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
return ret; return ret;
} }
static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
struct ccp_cmd *cmd)
{
struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
struct ccp_dm_workarea mask;
struct ccp_op op;
int ret;
if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
return -EINVAL;
if (!pt->src_dma || !pt->dst_dma)
return -EINVAL;
if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
return -EINVAL;
if (!pt->mask)
return -EINVAL;
}
BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);
memset(&op, 0, sizeof(op));
op.cmd_q = cmd_q;
op.jobid = ccp_gen_jobid(cmd_q->ccp);
if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
/* Load the mask */
op.ksb_key = cmd_q->ksb_key;
mask.length = pt->mask_len;
mask.dma.address = pt->mask;
mask.dma.length = pt->mask_len;
ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
CCP_PASSTHRU_BYTESWAP_NOOP);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
return ret;
}
}
/* Send data to the CCP Passthru engine */
op.eom = 1;
op.soc = 1;
op.src.type = CCP_MEMTYPE_SYSTEM;
op.src.u.dma.address = pt->src_dma;
op.src.u.dma.offset = 0;
op.src.u.dma.length = pt->src_len;
op.dst.type = CCP_MEMTYPE_SYSTEM;
op.dst.u.dma.address = pt->dst_dma;
op.dst.u.dma.offset = 0;
op.dst.u.dma.length = pt->src_len;
ret = cmd_q->ccp->vdata->perform->perform_passthru(&op);
if (ret)
cmd->engine_error = cmd_q->cmd_error;
return ret;
}
static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{ {
struct ccp_ecc_engine *ecc = &cmd->u.ecc; struct ccp_ecc_engine *ecc = &cmd->u.ecc;
@ -1762,6 +1826,9 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ret = ccp_run_rsa_cmd(cmd_q, cmd); ret = ccp_run_rsa_cmd(cmd_q, cmd);
break; break;
case CCP_ENGINE_PASSTHRU: case CCP_ENGINE_PASSTHRU:
if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
else
ret = ccp_run_passthru_cmd(cmd_q, cmd); ret = ccp_run_passthru_cmd(cmd_q, cmd);
break; break;
case CCP_ENGINE_ECC: case CCP_ENGINE_ECC:
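
The no-map passthru path above trusts the caller's DMA addresses outright, so the mapping burden moves to the caller. A hedged sketch of such a caller follows; the function name and the pre-mapped addresses are illustrative, while the engine, flags and field names come straight from the hunk above. Since ccp_enqueue_cmd() is asynchronous, the ccp_cmd must stay allocated until the callback has run.

#include <linux/ccp.h>
#include <linux/string.h>

/* Illustrative caller of the CCP_CMD_PASSTHRU_NO_DMA_MAP path; "cmd"
 * must live until "done" is invoked.
 */
static int ccp_nomap_copy_example(struct ccp_cmd *cmd, dma_addr_t src,
				  dma_addr_t dst, u64 len,
				  void (*done)(void *data, int err),
				  void *data)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->engine = CCP_ENGINE_PASSTHRU;
	cmd->flags = CCP_CMD_MAY_BACKLOG | CCP_CMD_PASSTHRU_NO_DMA_MAP;
	cmd->u.passthru_nomap.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
	cmd->u.passthru_nomap.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
	cmd->u.passthru_nomap.src_dma = src;
	cmd->u.passthru_nomap.dst_dma = dst;
	cmd->u.passthru_nomap.src_len = len;
	cmd->u.passthru_nomap.final = 1;
	cmd->callback = done;
	cmd->data = data;

	/* 0, -EINPROGRESS and -EBUSY all mean the callback will run */
	return ccp_enqueue_cmd(cmd);
}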

View File

@ -475,18 +475,18 @@ static int mv_cesa_probe(struct platform_device *pdev)
engine->regs = cesa->regs + CESA_ENGINE_OFF(i); engine->regs = cesa->regs + CESA_ENGINE_OFF(i);
if (dram && cesa->caps->has_tdma) if (dram && cesa->caps->has_tdma)
mv_cesa_conf_mbus_windows(&cesa->engines[i], dram); mv_cesa_conf_mbus_windows(engine, dram);
writel(0, cesa->engines[i].regs + CESA_SA_INT_STATUS); writel(0, engine->regs + CESA_SA_INT_STATUS);
writel(CESA_SA_CFG_STOP_DIG_ERR, writel(CESA_SA_CFG_STOP_DIG_ERR,
cesa->engines[i].regs + CESA_SA_CFG); engine->regs + CESA_SA_CFG);
writel(engine->sram_dma & CESA_SA_SRAM_MSK, writel(engine->sram_dma & CESA_SA_SRAM_MSK,
cesa->engines[i].regs + CESA_SA_DESC_P0); engine->regs + CESA_SA_DESC_P0);
ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int, ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
IRQF_ONESHOT, IRQF_ONESHOT,
dev_name(&pdev->dev), dev_name(&pdev->dev),
&cesa->engines[i]); engine);
if (ret) if (ret)
goto err_cleanup; goto err_cleanup;
} }

View File

@ -768,7 +768,6 @@ static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
*len = creq->len; *len = creq->len;
memcpy(hash, creq->state, digsize); memcpy(hash, creq->state, digsize);
memset(cache, 0, blocksize); memset(cache, 0, blocksize);
if (creq->cache)
memcpy(cache, creq->cache, creq->cache_ptr); memcpy(cache, creq->cache, creq->cache_ptr);
return 0; return 0;

View File

@ -99,12 +99,11 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
struct mv_cesa_tdma_desc *new_tdma = NULL; struct mv_cesa_tdma_desc *new_tdma = NULL;
dma_addr_t dma_handle; dma_addr_t dma_handle;
new_tdma = dma_pool_alloc(cesa_dev->dma->tdma_desc_pool, flags, new_tdma = dma_pool_zalloc(cesa_dev->dma->tdma_desc_pool, flags,
&dma_handle); &dma_handle);
if (!new_tdma) if (!new_tdma)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
memset(new_tdma, 0, sizeof(*new_tdma));
new_tdma->cur_dma = dma_handle; new_tdma->cur_dma = dma_handle;
if (chain->last) { if (chain->last) {
chain->last->next_dma = cpu_to_le32(dma_handle); chain->last->next_dma = cpu_to_le32(dma_handle);

drivers/crypto/mxc-scc.c (new file, 765 lines)
View File

@ -0,0 +1,765 @@
/*
* Copyright (C) 2016 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
*
* The driver is based on information gathered from
* drivers/mxc/security/mxc_scc.c which can be found in
* the Freescale linux-2.6-imx.git in the imx_2.6.35_maintain branch.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
/* Secure Memory (SCM) registers */
#define SCC_SCM_RED_START 0x0000
#define SCC_SCM_BLACK_START 0x0004
#define SCC_SCM_LENGTH 0x0008
#define SCC_SCM_CTRL 0x000C
#define SCC_SCM_STATUS 0x0010
#define SCC_SCM_ERROR_STATUS 0x0014
#define SCC_SCM_INTR_CTRL 0x0018
#define SCC_SCM_CFG 0x001C
#define SCC_SCM_INIT_VECTOR_0 0x0020
#define SCC_SCM_INIT_VECTOR_1 0x0024
#define SCC_SCM_RED_MEMORY 0x0400
#define SCC_SCM_BLACK_MEMORY 0x0800
/* Security Monitor (SMN) Registers */
#define SCC_SMN_STATUS 0x1000
#define SCC_SMN_COMMAND 0x1004
#define SCC_SMN_SEQ_START 0x1008
#define SCC_SMN_SEQ_END 0x100C
#define SCC_SMN_SEQ_CHECK 0x1010
#define SCC_SMN_BIT_COUNT 0x1014
#define SCC_SMN_BITBANK_INC_SIZE 0x1018
#define SCC_SMN_BITBANK_DECREMENT 0x101C
#define SCC_SMN_COMPARE_SIZE 0x1020
#define SCC_SMN_PLAINTEXT_CHECK 0x1024
#define SCC_SMN_CIPHERTEXT_CHECK 0x1028
#define SCC_SMN_TIMER_IV 0x102C
#define SCC_SMN_TIMER_CONTROL 0x1030
#define SCC_SMN_DEBUG_DETECT_STAT 0x1034
#define SCC_SMN_TIMER 0x1038
#define SCC_SCM_CTRL_START_CIPHER BIT(2)
#define SCC_SCM_CTRL_CBC_MODE BIT(1)
#define SCC_SCM_CTRL_DECRYPT_MODE BIT(0)
#define SCC_SCM_STATUS_LEN_ERR BIT(12)
#define SCC_SCM_STATUS_SMN_UNBLOCKED BIT(11)
#define SCC_SCM_STATUS_CIPHERING_DONE BIT(10)
#define SCC_SCM_STATUS_ZEROIZING_DONE BIT(9)
#define SCC_SCM_STATUS_INTR_STATUS BIT(8)
#define SCC_SCM_STATUS_SEC_KEY BIT(7)
#define SCC_SCM_STATUS_INTERNAL_ERR BIT(6)
#define SCC_SCM_STATUS_BAD_SEC_KEY BIT(5)
#define SCC_SCM_STATUS_ZEROIZE_FAIL BIT(4)
#define SCC_SCM_STATUS_SMN_BLOCKED BIT(3)
#define SCC_SCM_STATUS_CIPHERING BIT(2)
#define SCC_SCM_STATUS_ZEROIZING BIT(1)
#define SCC_SCM_STATUS_BUSY BIT(0)
#define SCC_SMN_STATUS_STATE_MASK 0x0000001F
#define SCC_SMN_STATE_START 0x0
/* The SMN is zeroizing its RAM during reset */
#define SCC_SMN_STATE_ZEROIZE_RAM 0x5
/* SMN has passed internal checks */
#define SCC_SMN_STATE_HEALTH_CHECK 0x6
/* Fatal Security Violation. SMN is locked, SCM is inoperative. */
#define SCC_SMN_STATE_FAIL 0x9
/* SCC is in secure state. SCM is using secret key. */
#define SCC_SMN_STATE_SECURE 0xA
/* SCC is not secure. SCM is using default key. */
#define SCC_SMN_STATE_NON_SECURE 0xC
#define SCC_SCM_INTR_CTRL_ZEROIZE_MEM BIT(2)
#define SCC_SCM_INTR_CTRL_CLR_INTR BIT(1)
#define SCC_SCM_INTR_CTRL_MASK_INTR BIT(0)
/* Size, in blocks, of Black memory. */
#define SCC_SCM_CFG_BLACK_SIZE_MASK 0x07fe0000
#define SCC_SCM_CFG_BLACK_SIZE_SHIFT 17
/* Size, in blocks, of Red memory. */
#define SCC_SCM_CFG_RED_SIZE_MASK 0x0001ff80
#define SCC_SCM_CFG_RED_SIZE_SHIFT 7
/* Number of bytes per block. */
#define SCC_SCM_CFG_BLOCK_SIZE_MASK 0x0000007f
#define SCC_SMN_COMMAND_TAMPER_LOCK BIT(4)
#define SCC_SMN_COMMAND_CLR_INTR BIT(3)
#define SCC_SMN_COMMAND_CLR_BIT_BANK BIT(2)
#define SCC_SMN_COMMAND_EN_INTR BIT(1)
#define SCC_SMN_COMMAND_SET_SOFTWARE_ALARM BIT(0)
#define SCC_KEY_SLOTS 20
#define SCC_MAX_KEY_SIZE 32
#define SCC_KEY_SLOT_SIZE 32
#define SCC_CRC_CCITT_START 0xFFFF
/*
* Offset into each RAM of the base of the area which is not
* used for Stored Keys.
*/
#define SCC_NON_RESERVED_OFFSET (SCC_KEY_SLOTS * SCC_KEY_SLOT_SIZE)
/* Fixed padding for appending to plaintext to fill out a block */
static char scc_block_padding[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0 };
enum mxc_scc_state {
SCC_STATE_OK,
SCC_STATE_UNIMPLEMENTED,
SCC_STATE_FAILED
};
struct mxc_scc {
struct device *dev;
void __iomem *base;
struct clk *clk;
bool hw_busy;
spinlock_t lock;
struct crypto_queue queue;
struct crypto_async_request *req;
int block_size_bytes;
int black_ram_size_blocks;
int memory_size_bytes;
int bytes_remaining;
void __iomem *red_memory;
void __iomem *black_memory;
};
struct mxc_scc_ctx {
struct mxc_scc *scc;
struct scatterlist *sg_src;
size_t src_nents;
struct scatterlist *sg_dst;
size_t dst_nents;
unsigned int offset;
unsigned int size;
unsigned int ctrl;
};
struct mxc_scc_crypto_tmpl {
struct mxc_scc *scc;
struct crypto_alg alg;
};
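/*
 * Copy the result of the previous operation back to the request's
 * destination scatterlist: from red memory after a decryption, from
 * black memory after an encryption. Returns -EINPROGRESS while part
 * of the request still remains to be processed.
 */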
static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
struct crypto_async_request *req)
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mxc_scc *scc = ctx->scc;
size_t len;
void __iomem *from;
if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
from = scc->red_memory;
else
from = scc->black_memory;
dev_dbg(scc->dev, "pcopy: from 0x%p %d bytes\n", from,
ctx->dst_nents * 8);
len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
from, ctx->size, ctx->offset);
if (!len) {
dev_err(scc->dev, "pcopy err from 0x%p (len=%d)\n", from, len);
return -EINVAL;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"red memory@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
scc->red_memory, ctx->size, 1);
print_hex_dump(KERN_ERR,
"black memory@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
scc->black_memory, ctx->size, 1);
#endif
ctx->offset += len;
if (ctx->offset < ablkreq->nbytes)
return -EINPROGRESS;
return 0;
}
static int mxc_scc_ablkcipher_req_init(struct ablkcipher_request *req,
struct mxc_scc_ctx *ctx)
{
struct mxc_scc *scc = ctx->scc;
int nents;
nents = sg_nents_for_len(req->src, req->nbytes);
if (nents < 0) {
dev_err(scc->dev, "Invalid number of src SC");
return nents;
}
ctx->src_nents = nents;
nents = sg_nents_for_len(req->dst, req->nbytes);
if (nents < 0) {
dev_err(scc->dev, "Invalid number of dst SC");
return nents;
}
ctx->dst_nents = nents;
ctx->size = 0;
ctx->offset = 0;
return 0;
}
static int mxc_scc_ablkcipher_req_complete(struct crypto_async_request *req,
struct mxc_scc_ctx *ctx,
int result)
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mxc_scc *scc = ctx->scc;
scc->req = NULL;
scc->bytes_remaining = scc->memory_size_bytes;
if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE)
memcpy(ablkreq->info, scc->base + SCC_SCM_INIT_VECTOR_0,
scc->block_size_bytes);
req->complete(req, result);
scc->hw_busy = false;
return 0;
}
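/*
 * Stage the next chunk of the request in on-chip memory: ciphertext
 * goes to black memory for decryption, plaintext to red memory for
 * encryption, and a trailing partial block is padded out with the
 * fixed padding pattern before the cipher is started.
 */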
static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
struct ablkcipher_request *req)
{
u8 padding_buffer[sizeof(u16) + sizeof(scc_block_padding)];
size_t len = min_t(size_t, req->nbytes - ctx->offset,
ctx->scc->bytes_remaining);
unsigned int padding_byte_count = 0;
struct mxc_scc *scc = ctx->scc;
void __iomem *to;
if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
to = scc->black_memory;
else
to = scc->red_memory;
if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE && req->info)
memcpy(scc->base + SCC_SCM_INIT_VECTOR_0, req->info,
scc->block_size_bytes);
len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
to, len, ctx->offset);
if (!len) {
dev_err(scc->dev, "pcopy err to 0x%p (len=%d)\n", to, len);
return -EINVAL;
}
ctx->size = len;
#ifdef DEBUG
dev_dbg(scc->dev, "copied %d bytes to 0x%p\n", len, to);
print_hex_dump(KERN_ERR,
"init vector0@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
scc->base + SCC_SCM_INIT_VECTOR_0, scc->block_size_bytes,
1);
print_hex_dump(KERN_ERR,
"red memory@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
scc->red_memory, ctx->size, 1);
print_hex_dump(KERN_ERR,
"black memory@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
scc->black_memory, ctx->size, 1);
#endif
scc->bytes_remaining -= len;
padding_byte_count = len % scc->block_size_bytes;
if (padding_byte_count) {
/* fill the trailing partial block up to the block size */
padding_byte_count = scc->block_size_bytes - padding_byte_count;
memcpy(padding_buffer, scc_block_padding, padding_byte_count);
memcpy(to + len, padding_buffer, padding_byte_count);
ctx->size += padding_byte_count;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"data to encrypt@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
to, ctx->size, 1);
#endif
return 0;
}
static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
struct crypto_async_request *req)
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mxc_scc *scc = ctx->scc;
int err;
dev_dbg(scc->dev, "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
ablkreq->nbytes, ablkreq->src, ablkreq->dst);
writel(0, scc->base + SCC_SCM_ERROR_STATUS);
err = mxc_scc_put_data(ctx, ablkreq);
if (err) {
mxc_scc_ablkcipher_req_complete(req, ctx, err);
return;
}
dev_dbg(scc->dev, "Start encryption (0x%p/0x%p)\n",
(void *)readl(scc->base + SCC_SCM_RED_START),
(void *)readl(scc->base + SCC_SCM_BLACK_START));
/* clear interrupt control registers */
writel(SCC_SCM_INTR_CTRL_CLR_INTR,
scc->base + SCC_SCM_INTR_CTRL);
writel((ctx->size / ctx->scc->block_size_bytes) - 1,
scc->base + SCC_SCM_LENGTH);
dev_dbg(scc->dev, "Process %d block(s) in 0x%p\n",
ctx->size / ctx->scc->block_size_bytes,
(ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE) ? scc->black_memory :
scc->red_memory);
writel(ctx->ctrl, scc->base + SCC_SCM_CTRL);
}
static irqreturn_t mxc_scc_int(int irq, void *priv)
{
struct crypto_async_request *req;
struct mxc_scc_ctx *ctx;
struct mxc_scc *scc = priv;
int status;
int ret;
status = readl(scc->base + SCC_SCM_STATUS);
/* clear interrupt control registers */
writel(SCC_SCM_INTR_CTRL_CLR_INTR, scc->base + SCC_SCM_INTR_CTRL);
if (status & SCC_SCM_STATUS_BUSY)
return IRQ_NONE;
req = scc->req;
if (req) {
ctx = crypto_tfm_ctx(req->tfm);
ret = mxc_scc_get_data(ctx, req);
if (ret != -EINPROGRESS)
mxc_scc_ablkcipher_req_complete(req, ctx, ret);
else
mxc_scc_ablkcipher_next(ctx, req);
}
return IRQ_HANDLED;
}
static int mxc_scc_cra_init(struct crypto_tfm *tfm)
{
struct mxc_scc_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_alg *alg = tfm->__crt_alg;
struct mxc_scc_crypto_tmpl *algt;
algt = container_of(alg, struct mxc_scc_crypto_tmpl, alg);
ctx->scc = algt->scc;
return 0;
}
static void mxc_scc_dequeue_req_unlocked(struct mxc_scc_ctx *ctx)
{
struct crypto_async_request *req, *backlog;
if (ctx->scc->hw_busy)
return;
spin_lock_bh(&ctx->scc->lock);
backlog = crypto_get_backlog(&ctx->scc->queue);
req = crypto_dequeue_request(&ctx->scc->queue);
ctx->scc->req = req;
ctx->scc->hw_busy = true;
spin_unlock_bh(&ctx->scc->lock);
if (!req)
return;
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
mxc_scc_ablkcipher_next(ctx, req);
}
static int mxc_scc_queue_req(struct mxc_scc_ctx *ctx,
struct crypto_async_request *req)
{
int ret;
spin_lock_bh(&ctx->scc->lock);
ret = crypto_enqueue_request(&ctx->scc->queue, req);
spin_unlock_bh(&ctx->scc->lock);
if (ret != -EINPROGRESS)
return ret;
mxc_scc_dequeue_req_unlocked(ctx);
return -EINPROGRESS;
}
static int mxc_scc_des3_op(struct mxc_scc_ctx *ctx,
struct ablkcipher_request *req)
{
int err;
err = mxc_scc_ablkcipher_req_init(req, ctx);
if (err)
return err;
return mxc_scc_queue_req(ctx, &req->base);
}
static int mxc_scc_ecb_des_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
return mxc_scc_des3_op(ctx, req);
}
static int mxc_scc_ecb_des_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
return mxc_scc_des3_op(ctx, req);
}
static int mxc_scc_cbc_des_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
return mxc_scc_des3_op(ctx, req);
}
static int mxc_scc_cbc_des_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
return mxc_scc_des3_op(ctx, req);
}
static void mxc_scc_hw_init(struct mxc_scc *scc)
{
int offset;
offset = SCC_NON_RESERVED_OFFSET / scc->block_size_bytes;
/* Fill the RED_START register */
writel(offset, scc->base + SCC_SCM_RED_START);
/* Fill the BLACK_START register */
writel(offset, scc->base + SCC_SCM_BLACK_START);
scc->red_memory = scc->base + SCC_SCM_RED_MEMORY +
SCC_NON_RESERVED_OFFSET;
scc->black_memory = scc->base + SCC_SCM_BLACK_MEMORY +
SCC_NON_RESERVED_OFFSET;
scc->bytes_remaining = scc->memory_size_bytes;
}
static int mxc_scc_get_config(struct mxc_scc *scc)
{
int config;
config = readl(scc->base + SCC_SCM_CFG);
scc->block_size_bytes = config & SCC_SCM_CFG_BLOCK_SIZE_MASK;
scc->black_ram_size_blocks = config & SCC_SCM_CFG_BLACK_SIZE_MASK;
scc->memory_size_bytes = (scc->block_size_bytes *
scc->black_ram_size_blocks) -
SCC_NON_RESERVED_OFFSET;
return 0;
}
static enum mxc_scc_state mxc_scc_get_state(struct mxc_scc *scc)
{
enum mxc_scc_state state;
int status;
status = readl(scc->base + SCC_SMN_STATUS) &
SCC_SMN_STATUS_STATE_MASK;
/* If in Health Check state, try to bring it up to the secure state */
if (status & SCC_SMN_STATE_HEALTH_CHECK) {
/*
* Write a simple algorithm to the Algorithm Sequence
* Checker (ASC)
*/
writel(0xaaaa, scc->base + SCC_SMN_SEQ_START);
writel(0x5555, scc->base + SCC_SMN_SEQ_END);
writel(0x5555, scc->base + SCC_SMN_SEQ_CHECK);
status = readl(scc->base + SCC_SMN_STATUS) &
SCC_SMN_STATUS_STATE_MASK;
}
switch (status) {
case SCC_SMN_STATE_NON_SECURE:
case SCC_SMN_STATE_SECURE:
state = SCC_STATE_OK;
break;
case SCC_SMN_STATE_FAIL:
state = SCC_STATE_FAILED;
break;
default:
state = SCC_STATE_UNIMPLEMENTED;
break;
}
return state;
}
static struct mxc_scc_crypto_tmpl scc_ecb_des = {
.alg = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-scc",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mxc_scc_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = mxc_scc_cra_init,
.cra_u.ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.encrypt = mxc_scc_ecb_des_encrypt,
.decrypt = mxc_scc_ecb_des_decrypt,
}
}
};
static struct mxc_scc_crypto_tmpl scc_cbc_des = {
.alg = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3-scc",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct mxc_scc_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = mxc_scc_cra_init,
.cra_u.ablkcipher = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.encrypt = mxc_scc_cbc_des_encrypt,
.decrypt = mxc_scc_cbc_des_decrypt,
}
}
};
static struct mxc_scc_crypto_tmpl *scc_crypto_algs[] = {
&scc_ecb_des,
&scc_cbc_des,
};
static int mxc_scc_crypto_register(struct mxc_scc *scc)
{
int i;
int err = 0;
for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++) {
scc_crypto_algs[i]->scc = scc;
err = crypto_register_alg(&scc_crypto_algs[i]->alg);
if (err)
goto err_out;
}
return 0;
err_out:
while (--i >= 0)
crypto_unregister_alg(&scc_crypto_algs[i]->alg);
return err;
}
static void mxc_scc_crypto_unregister(void)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++)
crypto_unregister_alg(&scc_crypto_algs[i]->alg);
}
static int mxc_scc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct mxc_scc *scc;
enum mxc_scc_state state;
int irq;
int ret;
int i;
scc = devm_kzalloc(dev, sizeof(*scc), GFP_KERNEL);
if (!scc)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
scc->base = devm_ioremap_resource(dev, res);
if (IS_ERR(scc->base))
return PTR_ERR(scc->base);
scc->clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(scc->clk)) {
dev_err(dev, "Could not get ipg clock\n");
return PTR_ERR(scc->clk);
}
clk_prepare_enable(scc->clk);
/* clear error status register */
writel(0x0, scc->base + SCC_SCM_ERROR_STATUS);
/* clear interrupt control registers */
writel(SCC_SCM_INTR_CTRL_CLR_INTR |
SCC_SCM_INTR_CTRL_MASK_INTR,
scc->base + SCC_SCM_INTR_CTRL);
writel(SCC_SMN_COMMAND_CLR_INTR |
SCC_SMN_COMMAND_EN_INTR,
scc->base + SCC_SMN_COMMAND);
scc->dev = dev;
platform_set_drvdata(pdev, scc);
ret = mxc_scc_get_config(scc);
if (ret)
goto err_out;
state = mxc_scc_get_state(scc);
if (state != SCC_STATE_OK) {
dev_err(dev, "SCC in unusable state %d\n", state);
ret = -EINVAL;
goto err_out;
}
mxc_scc_hw_init(scc);
spin_lock_init(&scc->lock);
/* FIXME: calculate queue from RAM slots */
crypto_init_queue(&scc->queue, 50);
for (i = 0; i < 2; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
dev_err(dev, "failed to get irq resource\n");
ret = -EINVAL;
goto err_out;
}
ret = devm_request_threaded_irq(dev, irq, NULL, mxc_scc_int,
IRQF_ONESHOT, dev_name(dev), scc);
if (ret)
goto err_out;
}
ret = mxc_scc_crypto_register(scc);
if (ret) {
dev_err(dev, "could not register algorithms");
goto err_out;
}
dev_info(dev, "registered successfully.\n");
return 0;
err_out:
clk_disable_unprepare(scc->clk);
return ret;
}
static int mxc_scc_remove(struct platform_device *pdev)
{
struct mxc_scc *scc = platform_get_drvdata(pdev);
mxc_scc_crypto_unregister();
clk_disable_unprepare(scc->clk);
return 0;
}
static const struct of_device_id mxc_scc_dt_ids[] = {
{ .compatible = "fsl,imx25-scc", .data = NULL, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxc_scc_dt_ids);
static struct platform_driver mxc_scc_driver = {
.probe = mxc_scc_probe,
.remove = mxc_scc_remove,
.driver = {
.name = "mxc-scc",
.of_match_table = mxc_scc_dt_ids,
},
};
module_platform_driver(mxc_scc_driver);
MODULE_AUTHOR("Steffen Trumtrar <kernel@pengutronix.de>");
MODULE_DESCRIPTION("Freescale i.MX25 SCC Crypto driver");
MODULE_LICENSE("GPL v2");

View File

@ -1598,7 +1598,7 @@ static void *new_queue(unsigned long q_type)
static void free_queue(void *p, unsigned long q_type) static void free_queue(void *p, unsigned long q_type)
{ {
return kmem_cache_free(queue_cache[q_type - 1], p); kmem_cache_free(queue_cache[q_type - 1], p);
} }
static int queue_cache_init(void) static int queue_cache_init(void)

View File

@ -26,7 +26,6 @@
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_device.h> #include <linux/of_device.h>
@ -176,9 +175,7 @@ struct omap_aes_dev {
struct scatter_walk in_walk; struct scatter_walk in_walk;
struct scatter_walk out_walk; struct scatter_walk out_walk;
int dma_in;
struct dma_chan *dma_lch_in; struct dma_chan *dma_lch_in;
int dma_out;
struct dma_chan *dma_lch_out; struct dma_chan *dma_lch_out;
int in_sg_len; int in_sg_len;
int out_sg_len; int out_sg_len;
@ -351,30 +348,21 @@ static void omap_aes_dma_out_callback(void *data)
static int omap_aes_dma_init(struct omap_aes_dev *dd) static int omap_aes_dma_init(struct omap_aes_dev *dd)
{ {
int err = -ENOMEM; int err;
dma_cap_mask_t mask;
dd->dma_lch_out = NULL; dd->dma_lch_out = NULL;
dd->dma_lch_in = NULL; dd->dma_lch_in = NULL;
dma_cap_zero(mask); dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
dma_cap_set(DMA_SLAVE, mask); if (IS_ERR(dd->dma_lch_in)) {
dd->dma_lch_in = dma_request_slave_channel_compat(mask,
omap_dma_filter_fn,
&dd->dma_in,
dd->dev, "rx");
if (!dd->dma_lch_in) {
dev_err(dd->dev, "Unable to request in DMA channel\n"); dev_err(dd->dev, "Unable to request in DMA channel\n");
goto err_dma_in; return PTR_ERR(dd->dma_lch_in);
} }
dd->dma_lch_out = dma_request_slave_channel_compat(mask, dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
omap_dma_filter_fn, if (IS_ERR(dd->dma_lch_out)) {
&dd->dma_out,
dd->dev, "tx");
if (!dd->dma_lch_out) {
dev_err(dd->dev, "Unable to request out DMA channel\n"); dev_err(dd->dev, "Unable to request out DMA channel\n");
err = PTR_ERR(dd->dma_lch_out);
goto err_dma_out; goto err_dma_out;
} }
@ -382,14 +370,15 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
err_dma_out: err_dma_out:
dma_release_channel(dd->dma_lch_in); dma_release_channel(dd->dma_lch_in);
err_dma_in:
if (err)
pr_err("error: %d\n", err);
return err; return err;
} }
static void omap_aes_dma_cleanup(struct omap_aes_dev *dd) static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{ {
if (dd->pio_only)
return;
dma_release_channel(dd->dma_lch_out); dma_release_channel(dd->dma_lch_out);
dma_release_channel(dd->dma_lch_in); dma_release_channel(dd->dma_lch_in);
} }
@ -1080,9 +1069,6 @@ static int omap_aes_get_res_of(struct omap_aes_dev *dd,
goto err; goto err;
} }
dd->dma_out = -1; /* Dummy value that's unused */
dd->dma_in = -1; /* Dummy value that's unused */
dd->pdata = match->data; dd->pdata = match->data;
err: err:
@ -1116,24 +1102,6 @@ static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
} }
memcpy(res, r, sizeof(*res)); memcpy(res, r, sizeof(*res));
/* Get the DMA out channel */
r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!r) {
dev_err(dev, "no DMA out resource info\n");
err = -ENODEV;
goto err;
}
dd->dma_out = r->start;
/* Get the DMA in channel */
r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (!r) {
dev_err(dev, "no DMA in resource info\n");
err = -ENODEV;
goto err;
}
dd->dma_in = r->start;
/* Only OMAP2/3 can be non-DT */ /* Only OMAP2/3 can be non-DT */
dd->pdata = &omap_aes_pdata_omap2; dd->pdata = &omap_aes_pdata_omap2;
@ -1191,7 +1159,9 @@ static int omap_aes_probe(struct platform_device *pdev)
tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd); tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
err = omap_aes_dma_init(dd); err = omap_aes_dma_init(dd);
if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) { if (err == -EPROBE_DEFER) {
goto err_irq;
} else if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
dd->pio_only = 1; dd->pio_only = 1;
irq = platform_get_irq(pdev, 0); irq = platform_get_irq(pdev, 0);
@ -1248,7 +1218,7 @@ static int omap_aes_probe(struct platform_device *pdev)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
crypto_unregister_alg( crypto_unregister_alg(
&dd->pdata->algs_info[i].algs_list[j]); &dd->pdata->algs_info[i].algs_list[j]);
if (!dd->pio_only)
omap_aes_dma_cleanup(dd); omap_aes_dma_cleanup(dd);
err_irq: err_irq:
tasklet_kill(&dd->done_task); tasklet_kill(&dd->done_task);

View File

@ -29,7 +29,6 @@
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_device.h> #include <linux/of_device.h>
@ -39,6 +38,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <crypto/scatterwalk.h> #include <crypto/scatterwalk.h>
#include <crypto/des.h> #include <crypto/des.h>
#include <crypto/algapi.h>
#define DST_MAXBURST 2 #define DST_MAXBURST 2
@ -132,14 +132,10 @@ struct omap_des_dev {
unsigned long flags; unsigned long flags;
int err; int err;
/* spinlock used for queues */
spinlock_t lock;
struct crypto_queue queue;
struct tasklet_struct done_task; struct tasklet_struct done_task;
struct tasklet_struct queue_task;
struct ablkcipher_request *req; struct ablkcipher_request *req;
struct crypto_engine *engine;
/* /*
* total is used by PIO mode for book keeping so introduce * total is used by PIO mode for book keeping so introduce
* variable total_save as need it to calc page_order * variable total_save as need it to calc page_order
@ -158,9 +154,7 @@ struct omap_des_dev {
struct scatter_walk in_walk; struct scatter_walk in_walk;
struct scatter_walk out_walk; struct scatter_walk out_walk;
int dma_in;
struct dma_chan *dma_lch_in; struct dma_chan *dma_lch_in;
int dma_out;
struct dma_chan *dma_lch_out; struct dma_chan *dma_lch_out;
int in_sg_len; int in_sg_len;
int out_sg_len; int out_sg_len;
@ -340,30 +334,21 @@ static void omap_des_dma_out_callback(void *data)
static int omap_des_dma_init(struct omap_des_dev *dd) static int omap_des_dma_init(struct omap_des_dev *dd)
{ {
int err = -ENOMEM; int err;
dma_cap_mask_t mask;
dd->dma_lch_out = NULL; dd->dma_lch_out = NULL;
dd->dma_lch_in = NULL; dd->dma_lch_in = NULL;
dma_cap_zero(mask); dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
dma_cap_set(DMA_SLAVE, mask); if (IS_ERR(dd->dma_lch_in)) {
dd->dma_lch_in = dma_request_slave_channel_compat(mask,
omap_dma_filter_fn,
&dd->dma_in,
dd->dev, "rx");
if (!dd->dma_lch_in) {
dev_err(dd->dev, "Unable to request in DMA channel\n"); dev_err(dd->dev, "Unable to request in DMA channel\n");
goto err_dma_in; return PTR_ERR(dd->dma_lch_in);
} }
dd->dma_lch_out = dma_request_slave_channel_compat(mask, dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
omap_dma_filter_fn, if (IS_ERR(dd->dma_lch_out)) {
&dd->dma_out,
dd->dev, "tx");
if (!dd->dma_lch_out) {
dev_err(dd->dev, "Unable to request out DMA channel\n"); dev_err(dd->dev, "Unable to request out DMA channel\n");
err = PTR_ERR(dd->dma_lch_out);
goto err_dma_out; goto err_dma_out;
} }
@ -371,14 +356,15 @@ static int omap_des_dma_init(struct omap_des_dev *dd)
err_dma_out: err_dma_out:
dma_release_channel(dd->dma_lch_in); dma_release_channel(dd->dma_lch_in);
err_dma_in:
if (err)
pr_err("error: %d\n", err);
return err; return err;
} }
static void omap_des_dma_cleanup(struct omap_des_dev *dd) static void omap_des_dma_cleanup(struct omap_des_dev *dd)
{ {
if (dd->pio_only)
return;
dma_release_channel(dd->dma_lch_out); dma_release_channel(dd->dma_lch_out);
dma_release_channel(dd->dma_lch_in); dma_release_channel(dd->dma_lch_in);
} }
@ -520,9 +506,7 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err)
pr_debug("err: %d\n", err); pr_debug("err: %d\n", err);
pm_runtime_put(dd->dev); pm_runtime_put(dd->dev);
dd->flags &= ~FLAGS_BUSY; crypto_finalize_request(dd->engine, req, err);
req->base.complete(&req->base, err);
} }
static int omap_des_crypt_dma_stop(struct omap_des_dev *dd) static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
@ -587,32 +571,22 @@ static int omap_des_copy_sgs(struct omap_des_dev *dd)
static int omap_des_handle_queue(struct omap_des_dev *dd, static int omap_des_handle_queue(struct omap_des_dev *dd,
struct ablkcipher_request *req) struct ablkcipher_request *req)
{ {
struct crypto_async_request *async_req, *backlog;
struct omap_des_ctx *ctx;
struct omap_des_reqctx *rctx;
unsigned long flags;
int err, ret = 0;
spin_lock_irqsave(&dd->lock, flags);
if (req) if (req)
ret = ablkcipher_enqueue_request(&dd->queue, req); return crypto_transfer_request_to_engine(dd->engine, req);
if (dd->flags & FLAGS_BUSY) {
spin_unlock_irqrestore(&dd->lock, flags);
return ret;
}
backlog = crypto_get_backlog(&dd->queue);
async_req = crypto_dequeue_request(&dd->queue);
if (async_req)
dd->flags |= FLAGS_BUSY;
spin_unlock_irqrestore(&dd->lock, flags);
if (!async_req) return 0;
return ret; }
if (backlog) static int omap_des_prepare_req(struct crypto_engine *engine,
backlog->complete(backlog, -EINPROGRESS); struct ablkcipher_request *req)
{
struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
struct omap_des_dev *dd = omap_des_find_dev(ctx);
struct omap_des_reqctx *rctx;
req = ablkcipher_request_cast(async_req); if (!dd)
return -ENODEV;
/* assign new request to device */ /* assign new request to device */
dd->req = req; dd->req = req;
@ -642,16 +616,20 @@ static int omap_des_handle_queue(struct omap_des_dev *dd,
dd->ctx = ctx; dd->ctx = ctx;
ctx->dd = dd; ctx->dd = dd;
err = omap_des_write_ctrl(dd); return omap_des_write_ctrl(dd);
if (!err) }
err = omap_des_crypt_dma_start(dd);
if (err) {
/* des_task will not finish it, so do it here */
omap_des_finish_req(dd, err);
tasklet_schedule(&dd->queue_task);
}
return ret; /* return ret, which is enqueue return value */ static int omap_des_crypt_req(struct crypto_engine *engine,
struct ablkcipher_request *req)
{
struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
crypto_ablkcipher_reqtfm(req));
struct omap_des_dev *dd = omap_des_find_dev(ctx);
if (!dd)
return -ENODEV;
return omap_des_crypt_dma_start(dd);
} }
static void omap_des_done_task(unsigned long data) static void omap_des_done_task(unsigned long data)
@ -683,18 +661,10 @@ static void omap_des_done_task(unsigned long data)
} }
omap_des_finish_req(dd, 0); omap_des_finish_req(dd, 0);
omap_des_handle_queue(dd, NULL);
pr_debug("exit\n"); pr_debug("exit\n");
} }
static void omap_des_queue_task(unsigned long data)
{
struct omap_des_dev *dd = (struct omap_des_dev *)data;
omap_des_handle_queue(dd, NULL);
}
static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode) static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode)
{ {
struct omap_des_ctx *ctx = crypto_ablkcipher_ctx( struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
@ -999,8 +969,6 @@ static int omap_des_get_of(struct omap_des_dev *dd,
return -EINVAL; return -EINVAL;
} }
dd->dma_out = -1; /* Dummy value that's unused */
dd->dma_in = -1; /* Dummy value that's unused */
dd->pdata = match->data; dd->pdata = match->data;
return 0; return 0;
@ -1016,33 +984,10 @@ static int omap_des_get_of(struct omap_des_dev *dd,
static int omap_des_get_pdev(struct omap_des_dev *dd, static int omap_des_get_pdev(struct omap_des_dev *dd,
struct platform_device *pdev) struct platform_device *pdev)
{ {
struct device *dev = &pdev->dev;
struct resource *r;
int err = 0;
/* Get the DMA out channel */
r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!r) {
dev_err(dev, "no DMA out resource info\n");
err = -ENODEV;
goto err;
}
dd->dma_out = r->start;
/* Get the DMA in channel */
r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (!r) {
dev_err(dev, "no DMA in resource info\n");
err = -ENODEV;
goto err;
}
dd->dma_in = r->start;
/* non-DT devices get pdata from pdev */ /* non-DT devices get pdata from pdev */
dd->pdata = pdev->dev.platform_data; dd->pdata = pdev->dev.platform_data;
err: return 0;
return err;
} }
static int omap_des_probe(struct platform_device *pdev) static int omap_des_probe(struct platform_device *pdev)
@ -1062,9 +1007,6 @@ static int omap_des_probe(struct platform_device *pdev)
dd->dev = dev; dd->dev = dev;
platform_set_drvdata(pdev, dd); platform_set_drvdata(pdev, dd);
spin_lock_init(&dd->lock);
crypto_init_queue(&dd->queue, OMAP_DES_QUEUE_LENGTH);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) { if (!res) {
dev_err(dev, "no MEM resource info\n"); dev_err(dev, "no MEM resource info\n");
@ -1103,10 +1045,11 @@ static int omap_des_probe(struct platform_device *pdev)
(reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift); (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
tasklet_init(&dd->done_task, omap_des_done_task, (unsigned long)dd); tasklet_init(&dd->done_task, omap_des_done_task, (unsigned long)dd);
tasklet_init(&dd->queue_task, omap_des_queue_task, (unsigned long)dd);
err = omap_des_dma_init(dd); err = omap_des_dma_init(dd);
if (err && DES_REG_IRQ_STATUS(dd) && DES_REG_IRQ_ENABLE(dd)) { if (err == -EPROBE_DEFER) {
goto err_irq;
} else if (err && DES_REG_IRQ_STATUS(dd) && DES_REG_IRQ_ENABLE(dd)) {
dd->pio_only = 1; dd->pio_only = 1;
irq = platform_get_irq(pdev, 0); irq = platform_get_irq(pdev, 0);
@ -1144,17 +1087,30 @@ static int omap_des_probe(struct platform_device *pdev)
} }
} }
/* Initialize des crypto engine */
dd->engine = crypto_engine_alloc_init(dev, 1);
if (!dd->engine)
goto err_algs;
dd->engine->prepare_request = omap_des_prepare_req;
dd->engine->crypt_one_request = omap_des_crypt_req;
err = crypto_engine_start(dd->engine);
if (err)
goto err_engine;
return 0; return 0;
err_engine:
crypto_engine_exit(dd->engine);
err_algs: err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
crypto_unregister_alg( crypto_unregister_alg(
&dd->pdata->algs_info[i].algs_list[j]); &dd->pdata->algs_info[i].algs_list[j]);
if (!dd->pio_only)
omap_des_dma_cleanup(dd); omap_des_dma_cleanup(dd);
err_irq: err_irq:
tasklet_kill(&dd->done_task); tasklet_kill(&dd->done_task);
tasklet_kill(&dd->queue_task);
err_get: err_get:
pm_runtime_disable(dev); pm_runtime_disable(dev);
err_res: err_res:
@ -1182,7 +1138,6 @@ static int omap_des_remove(struct platform_device *pdev)
&dd->pdata->algs_info[i].algs_list[j]); &dd->pdata->algs_info[i].algs_list[j]);
tasklet_kill(&dd->done_task); tasklet_kill(&dd->done_task);
tasklet_kill(&dd->queue_task);
omap_des_dma_cleanup(dd); omap_des_dma_cleanup(dd);
pm_runtime_disable(dd->dev); pm_runtime_disable(dd->dev);
dd = NULL; dd = NULL;
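
The omap-des hunks above drop the driver-private queue and tasklet in favor of the generic crypto engine: requests are handed to the engine, which calls back into prepare_request and crypt_one_request from its own kthread. A hedged sketch of that wiring under the 4.7-era engine API; the hook names and calls match the diff, while my_prepare/my_crypt stand in for the driver callbacks.

#include <crypto/engine.h>
#include <linux/device.h>

/* Stand-ins for a driver's prepare/crypt hooks */
static int my_prepare(struct crypto_engine *engine,
		      struct ablkcipher_request *req);
static int my_crypt(struct crypto_engine *engine,
		    struct ablkcipher_request *req);

static struct crypto_engine *my_engine_setup(struct device *dev)
{
	struct crypto_engine *engine;

	/* second argument: run the engine kthread at realtime priority */
	engine = crypto_engine_alloc_init(dev, 1);
	if (!engine)
		return NULL;

	engine->prepare_request = my_prepare;
	engine->crypt_one_request = my_crypt;

	if (crypto_engine_start(engine)) {
		crypto_engine_exit(engine);
		return NULL;
	}

	return engine;
}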

View File

@ -29,7 +29,6 @@
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_device.h> #include <linux/of_device.h>
@ -219,7 +218,6 @@ struct omap_sham_dev {
int irq; int irq;
spinlock_t lock; spinlock_t lock;
int err; int err;
unsigned int dma;
struct dma_chan *dma_lch; struct dma_chan *dma_lch;
struct tasklet_struct done_task; struct tasklet_struct done_task;
u8 polling_mode; u8 polling_mode;
@ -1842,7 +1840,6 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd,
goto err; goto err;
} }
dd->dma = -1; /* Dummy value that's unused */
dd->pdata = match->data; dd->pdata = match->data;
err: err:
@ -1884,15 +1881,6 @@ static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
goto err; goto err;
} }
/* Get the DMA */
r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!r) {
dev_err(dev, "no DMA resource info\n");
err = -ENODEV;
goto err;
}
dd->dma = r->start;
/* Only OMAP2/3 can be non-DT */ /* Only OMAP2/3 can be non-DT */
dd->pdata = &omap_sham_pdata_omap2; dd->pdata = &omap_sham_pdata_omap2;
@ -1946,9 +1934,12 @@ static int omap_sham_probe(struct platform_device *pdev)
dma_cap_zero(mask); dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask); dma_cap_set(DMA_SLAVE, mask);
dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn, dd->dma_lch = dma_request_chan(dev, "rx");
&dd->dma, dev, "rx"); if (IS_ERR(dd->dma_lch)) {
if (!dd->dma_lch) { err = PTR_ERR(dd->dma_lch);
if (err == -EPROBE_DEFER)
goto data_err;
dd->polling_mode = 1; dd->polling_mode = 1;
dev_dbg(dev, "using polling mode instead of dma\n"); dev_dbg(dev, "using polling mode instead of dma\n");
} }
@ -1995,7 +1986,7 @@ static int omap_sham_probe(struct platform_device *pdev)
&dd->pdata->algs_info[i].algs_list[j]); &dd->pdata->algs_info[i].algs_list[j]);
err_pm: err_pm:
pm_runtime_disable(dev); pm_runtime_disable(dev);
if (dd->dma_lch) if (!dd->polling_mode)
dma_release_channel(dd->dma_lch); dma_release_channel(dd->dma_lch);
data_err: data_err:
dev_err(dev, "initialization failed.\n"); dev_err(dev, "initialization failed.\n");
@ -2021,7 +2012,7 @@ static int omap_sham_remove(struct platform_device *pdev)
tasklet_kill(&dd->done_task); tasklet_kill(&dd->done_task);
pm_runtime_disable(&pdev->dev); pm_runtime_disable(&pdev->dev);
if (dd->dma_lch) if (!dd->polling_mode)
dma_release_channel(dd->dma_lch); dma_release_channel(dd->dma_lch);
return 0; return 0;

View File

@ -300,9 +300,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n"); pr_err("QAT: Driver removal failed\n");
return; return;
} }
if (adf_dev_stop(accel_dev)) adf_dev_stop(accel_dev);
dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
adf_dev_shutdown(accel_dev); adf_dev_shutdown(accel_dev);
adf_disable_aer(accel_dev); adf_disable_aer(accel_dev);
adf_cleanup_accel(accel_dev); adf_cleanup_accel(accel_dev);

View File

@ -109,29 +109,6 @@ static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{ {
} }
static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
{
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
if (adf_iov_putmsg(accel_dev, msg, 0)) {
dev_err(&GET_DEV(accel_dev),
"Failed to send Init event to PF\n");
return -EFAULT;
}
return 0;
}
static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
{
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
if (adf_iov_putmsg(accel_dev, msg, 0))
dev_err(&GET_DEV(accel_dev),
"Failed to send Shutdown event to PF\n");
}
void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data) void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
{ {
hw_data->dev_class = &c3xxxiov_class; hw_data->dev_class = &c3xxxiov_class;

View File

@ -238,6 +238,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret) if (ret)
goto out_err_free_reg; goto out_err_free_reg;
set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
ret = adf_dev_init(accel_dev); ret = adf_dev_init(accel_dev);
if (ret) if (ret)
goto out_err_dev_shutdown; goto out_err_dev_shutdown;
@ -270,9 +272,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n"); pr_err("QAT: Driver removal failed\n");
return; return;
} }
if (adf_dev_stop(accel_dev)) adf_dev_stop(accel_dev);
dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
adf_dev_shutdown(accel_dev); adf_dev_shutdown(accel_dev);
adf_cleanup_accel(accel_dev); adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev); adf_cleanup_pci_dev(accel_dev);

View File

@ -300,9 +300,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n"); pr_err("QAT: Driver removal failed\n");
return; return;
} }
if (adf_dev_stop(accel_dev)) adf_dev_stop(accel_dev);
dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
adf_dev_shutdown(accel_dev); adf_dev_shutdown(accel_dev);
adf_disable_aer(accel_dev); adf_disable_aer(accel_dev);
adf_cleanup_accel(accel_dev); adf_cleanup_accel(accel_dev);

View File

@ -109,29 +109,6 @@ static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{ {
} }
static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
{
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
if (adf_iov_putmsg(accel_dev, msg, 0)) {
dev_err(&GET_DEV(accel_dev),
"Failed to send Init event to PF\n");
return -EFAULT;
}
return 0;
}
static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
{
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
if (adf_iov_putmsg(accel_dev, msg, 0))
dev_err(&GET_DEV(accel_dev),
"Failed to send Shutdown event to PF\n");
}
void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data) void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
{ {
hw_data->dev_class = &c62xiov_class; hw_data->dev_class = &c62xiov_class;

View File

@@ -238,6 +238,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret)
 		goto out_err_free_reg;
 
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
 	ret = adf_dev_init(accel_dev);
 	if (ret)
 		goto out_err_dev_shutdown;
@@ -270,9 +272,7 @@ static void adf_remove(struct pci_dev *pdev)
 		pr_err("QAT: Driver removal failed\n");
 		return;
 	}
-	if (adf_dev_stop(accel_dev))
-		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+	adf_dev_stop(accel_dev);
 	adf_dev_shutdown(accel_dev);
 	adf_cleanup_accel(accel_dev);
 	adf_cleanup_pci_dev(accel_dev);


@@ -9,7 +9,6 @@ clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
 intel_qat-objs := adf_cfg.o \
 	adf_isr.o \
-	adf_vf_isr.o \
 	adf_ctl_drv.o \
 	adf_dev_mgr.o \
 	adf_init.o \
@@ -27,4 +26,5 @@ intel_qat-objs := adf_cfg.o \
 	qat_hal.o
 
 intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
-intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o \
+			       adf_vf2pf_msg.o adf_vf_isr.o


@@ -61,7 +61,7 @@
 #define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
 #define ADF_ADMINMSG_LEN 32
 
-static const u8 const_tab[1024] = {
+static const u8 const_tab[1024] __aligned(1024) = {
 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,


@@ -57,10 +57,8 @@
 #define ADF_RING_DC_SIZE "NumConcurrentRequests"
 #define ADF_RING_ASYM_TX "RingAsymTx"
 #define ADF_RING_SYM_TX "RingSymTx"
-#define ADF_RING_RND_TX "RingNrbgTx"
 #define ADF_RING_ASYM_RX "RingAsymRx"
 #define ADF_RING_SYM_RX "RingSymRx"
-#define ADF_RING_RND_RX "RingNrbgRx"
 #define ADF_RING_DC_TX "RingTx"
 #define ADF_RING_DC_RX "RingRx"
 #define ADF_ETRMGR_BANK "Bank"


@@ -67,7 +67,7 @@
 #define ADF_STATUS_AE_INITIALISED 4
 #define ADF_STATUS_AE_UCODE_LOADED 5
 #define ADF_STATUS_AE_STARTED 6
-#define ADF_STATUS_ORPHAN_TH_RUNNING 7
+#define ADF_STATUS_PF_RUNNING 7
 #define ADF_STATUS_IRQ_ALLOCATED 8
 
 enum adf_dev_reset_mode {
@@ -103,7 +103,7 @@ int adf_service_unregister(struct service_hndl *service);
 
 int adf_dev_init(struct adf_accel_dev *accel_dev);
 int adf_dev_start(struct adf_accel_dev *accel_dev);
-int adf_dev_stop(struct adf_accel_dev *accel_dev);
+void adf_dev_stop(struct adf_accel_dev *accel_dev);
 void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
 
 int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
@@ -236,8 +236,13 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
 				 uint32_t vf_mask);
 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
 int adf_init_pf_wq(void);
 void adf_exit_pf_wq(void);
+int adf_init_vf_wq(void);
+void adf_exit_vf_wq(void);
 #else
 static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 {
@@ -256,6 +261,15 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 {
 }
 
+static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+
 static inline int adf_init_pf_wq(void)
 {
 	return 0;
@@ -264,5 +278,15 @@ static inline int adf_init_pf_wq(void)
 static inline void adf_exit_pf_wq(void)
 {
 }
+
+static inline int adf_init_vf_wq(void)
+{
+	return 0;
+}
+
+static inline void adf_exit_vf_wq(void)
+{
+}
+
 #endif
 #endif


@@ -270,26 +270,33 @@ static int adf_ctl_is_device_in_use(int id)
 	return 0;
 }
 
-static int adf_ctl_stop_devices(uint32_t id)
+static void adf_ctl_stop_devices(uint32_t id)
 {
 	struct adf_accel_dev *accel_dev;
-	int ret = 0;
 
-	list_for_each_entry_reverse(accel_dev, adf_devmgr_get_head(), list) {
+	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
 		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
 			if (!adf_dev_started(accel_dev))
 				continue;
 
-			if (adf_dev_stop(accel_dev)) {
-				dev_err(&GET_DEV(accel_dev),
-					"Failed to stop qat_dev%d\n", id);
-				ret = -EFAULT;
-			} else {
-				adf_dev_shutdown(accel_dev);
-			}
+			/* First stop all VFs */
+			if (!accel_dev->is_vf)
+				continue;
+
+			adf_dev_stop(accel_dev);
+			adf_dev_shutdown(accel_dev);
+		}
+	}
+
+	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
+		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+			if (!adf_dev_started(accel_dev))
+				continue;
+
+			adf_dev_stop(accel_dev);
+			adf_dev_shutdown(accel_dev);
 		}
 	}
-	return ret;
 }
 
 static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
@@ -318,9 +325,8 @@ static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
 		pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
 			ctl_data->device_id);
 
-	ret = adf_ctl_stop_devices(ctl_data->device_id);
-	if (ret)
-		pr_err("QAT: failed to stop device.\n");
+	adf_ctl_stop_devices(ctl_data->device_id);
+
 out:
 	kfree(ctl_data);
 	return ret;
@@ -465,12 +471,17 @@ static int __init adf_register_ctl_device_driver(void)
 	if (adf_init_pf_wq())
 		goto err_pf_wq;
 
+	if (adf_init_vf_wq())
+		goto err_vf_wq;
+
 	if (qat_crypto_register())
 		goto err_crypto_register;
 
 	return 0;
 
 err_crypto_register:
+	adf_exit_vf_wq();
+err_vf_wq:
 	adf_exit_pf_wq();
 err_pf_wq:
 	adf_exit_aer();
@@ -485,6 +496,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
 {
 	adf_chr_drv_destroy();
 	adf_exit_aer();
+	adf_exit_vf_wq();
 	adf_exit_pf_wq();
 	qat_crypto_unregister();
 	adf_clean_vf_map(false);
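The rewritten adf_ctl_stop_devices() makes two passes over the device list so that every virtual function is quiesced before any physical function goes down; stopping a PF while its VFs are still running would leave them talking to dead hardware. The ordering idiom in miniature, as a hedged sketch (dev, devs and stop() are hypothetical names, not from this patch):

/* Hedged sketch: two-pass teardown, dependents before providers. */
list_for_each_entry(dev, &devs, list)
	if (dev->started && dev->is_vf)
		stop(dev);		/* pass 1: virtual functions only */
list_for_each_entry(dev, &devs, list)
	if (dev->started)
		stop(dev);		/* pass 2: whatever remains (the PFs) */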


@@ -236,9 +236,9 @@ EXPORT_SYMBOL_GPL(adf_dev_start);
  * is shutting down.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code otherwise.
+ * Return: void
  */
-int adf_dev_stop(struct adf_accel_dev *accel_dev)
+void adf_dev_stop(struct adf_accel_dev *accel_dev)
 {
 	struct service_hndl *service;
 	struct list_head *list_itr;
@@ -246,9 +246,9 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
 	int ret;
 
 	if (!adf_dev_started(accel_dev) &&
-	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
-		return 0;
-	}
+	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
+		return;
 
 	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
 	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
@@ -279,8 +279,6 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
 		else
 			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
 	}
-
-	return 0;
 }
 EXPORT_SYMBOL_GPL(adf_dev_stop);
 
@@ -329,6 +327,8 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
 			clear_bit(accel_dev->accel_id, &service->init_status);
 	}
 
+	hw_data->disable_iov(accel_dev);
+
 	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
 		hw_data->free_irq(accel_dev);
 		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
@@ -344,7 +344,6 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
 	if (hw_data->exit_admin_comms)
 		hw_data->exit_admin_comms(accel_dev);
 
-	hw_data->disable_iov(accel_dev);
 	adf_cleanup_etr_data(accel_dev);
 	adf_dev_restore(accel_dev);
 }


@@ -302,7 +302,7 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
 }
 
 /**
- * adf_vf_isr_resource_free() - Free IRQ for acceleration device
+ * adf_isr_resource_free() - Free IRQ for acceleration device
  * @accel_dev:  Pointer to acceleration device.
  *
  * Function frees interrupts for acceleration device.
@@ -317,7 +317,7 @@ void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
 EXPORT_SYMBOL_GPL(adf_isr_resource_free);
 
 /**
- * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
  * @accel_dev:  Pointer to acceleration device.
  *
  * Function allocates interrupts for acceleration device.


@@ -249,13 +249,7 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 			return -EBUSY;
 		}
 
-		if (adf_dev_stop(accel_dev)) {
-			dev_err(&GET_DEV(accel_dev),
-				"Failed to stop qat_dev%d\n",
-				accel_dev->accel_id);
-			return -EFAULT;
-		}
-
+		adf_dev_stop(accel_dev);
 		adf_dev_shutdown(accel_dev);
 	}


@@ -0,0 +1,92 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+/**
+ * adf_vf2pf_init() - send init msg to PF
+ * @accel_dev:  Pointer to acceleration VF device.
+ *
+ * Function sends an init message from the VF to a PF
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+		(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
+
+	if (adf_iov_putmsg(accel_dev, msg, 0)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send Init event to PF\n");
+		return -EFAULT;
+	}
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_init);
+
+/**
+ * adf_vf2pf_shutdown() - send shutdown msg to PF
+ * @accel_dev:  Pointer to acceleration VF device.
+ *
+ * Function sends a shutdown message from the VF to a PF
+ *
+ * Return: void
+ */
+void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+		(ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
+
+	if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
+		if (adf_iov_putmsg(accel_dev, msg, 0))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to send Shutdown event to PF\n");
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
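With the VF-to-PF messaging exported from this one file, each VF's hw_data setup can point its hooks at the shared helpers instead of carrying the private copies deleted from the three VF hw_data files in this series. A hedged sketch of that wiring, assuming adf_hw_device_data member names along these lines:

/* Hedged sketch: illustrative hook assignment in a VF hw_data init.
 * The member names are an assumption, not taken from this patch. */
void adf_init_hw_data_examplevf(struct adf_hw_device_data *hw_data)
{
	hw_data->send_admin_init = adf_vf2pf_init;	/* tell the PF we started */
	hw_data->disable_iov = adf_vf2pf_shutdown;	/* tell the PF we stopped */
}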


@@ -51,6 +51,7 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
+#include <linux/workqueue.h>
 #include "adf_accel_devices.h"
 #include "adf_common_drv.h"
 #include "adf_cfg.h"
@@ -64,6 +65,13 @@
 #define ADF_VINTSOU_BUN		BIT(0)
 #define ADF_VINTSOU_PF2VF	BIT(1)
 
+static struct workqueue_struct *adf_vf_stop_wq;
+
+struct adf_vf_stop_data {
+	struct adf_accel_dev *accel_dev;
+	struct work_struct work;
+};
+
 static int adf_enable_msi(struct adf_accel_dev *accel_dev)
 {
 	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
@@ -90,6 +98,20 @@ static void adf_disable_msi(struct adf_accel_dev *accel_dev)
 	pci_disable_msi(pdev);
 }
 
+static void adf_dev_stop_async(struct work_struct *work)
+{
+	struct adf_vf_stop_data *stop_data =
+		container_of(work, struct adf_vf_stop_data, work);
+	struct adf_accel_dev *accel_dev = stop_data->accel_dev;
+
+	adf_dev_stop(accel_dev);
+	adf_dev_shutdown(accel_dev);
+
+	/* Re-enable PF2VF interrupts */
+	adf_enable_pf2vf_interrupts(accel_dev);
+	kfree(stop_data);
+}
+
 static void adf_pf2vf_bh_handler(void *data)
 {
 	struct adf_accel_dev *accel_dev = data;
@@ -107,11 +129,29 @@ static void adf_pf2vf_bh_handler(void *data)
 		goto err;
 
 	switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
-	case ADF_PF2VF_MSGTYPE_RESTARTING:
+	case ADF_PF2VF_MSGTYPE_RESTARTING: {
+		struct adf_vf_stop_data *stop_data;
+
 		dev_dbg(&GET_DEV(accel_dev),
 			"Restarting msg received from PF 0x%x\n", msg);
-		adf_dev_stop(accel_dev);
-		break;
+
+		clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
+		stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
+		if (!stop_data) {
+			dev_err(&GET_DEV(accel_dev),
+				"Couldn't schedule stop for vf_%d\n",
+				accel_dev->accel_id);
+			return;
+		}
+		stop_data->accel_dev = accel_dev;
+		INIT_WORK(&stop_data->work, adf_dev_stop_async);
+		queue_work(adf_vf_stop_wq, &stop_data->work);
+		/* To ack, clear the PF2VFINT bit */
+		msg &= ~BIT(0);
+		ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
+		return;
+	}
 	case ADF_PF2VF_MSGTYPE_VERSION_RESP:
 		dev_dbg(&GET_DEV(accel_dev),
 			"Version resp received from PF 0x%x\n", msg);
@@ -278,3 +318,18 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
 	return -EFAULT;
 }
 EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
+
+int __init adf_init_vf_wq(void)
+{
+	adf_vf_stop_wq = create_workqueue("adf_vf_stop_wq");
+
+	return !adf_vf_stop_wq ? -EFAULT : 0;
+}
+
+void adf_exit_vf_wq(void)
+{
+	if (adf_vf_stop_wq)
+		destroy_workqueue(adf_vf_stop_wq);
+
+	adf_vf_stop_wq = NULL;
+}
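The bottom half above runs in tasklet (atomic) context, where adf_dev_stop() cannot be called directly because it sleeps; the RESTARTING handler therefore allocates with GFP_ATOMIC and defers the actual stop to the workqueue. The general pattern, as a hedged sketch with hypothetical my_* names:

/* Hedged sketch: deferring sleeping work out of atomic context.
 * All my_* identifiers are hypothetical. */
struct my_stop_work {
	struct my_dev *dev;
	struct work_struct work;
};

static void my_stop_fn(struct work_struct *work)
{
	struct my_stop_work *w = container_of(work, struct my_stop_work, work);

	my_dev_stop(w->dev);	/* may sleep: legal here, not in a tasklet */
	kfree(w);
}

static void my_tasklet_handler(struct my_dev *dev)
{
	/* GFP_ATOMIC because a tasklet must not sleep in the allocator */
	struct my_stop_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return;
	w->dev = dev;
	INIT_WORK(&w->work, my_stop_fn);
	queue_work(my_stop_wq, &w->work);	/* runs later in process context */
}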


@@ -593,7 +593,7 @@ int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
 
 	ret = -ENOMEM;
 	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
-	if (!ctx->n)
+	if (!ctx->d)
 		goto err;
 
 	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
@@ -711,7 +711,7 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
 	}
 	qat_crypto_put_instance(ctx->inst);
 	ctx->n = NULL;
-	ctx->d = NULL;
+	ctx->e = NULL;
 	ctx->d = NULL;
 }


@@ -302,9 +302,7 @@ static void adf_remove(struct pci_dev *pdev)
 		pr_err("QAT: Driver removal failed\n");
 		return;
 	}
-	if (adf_dev_stop(accel_dev))
-		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+	adf_dev_stop(accel_dev);
 	adf_dev_shutdown(accel_dev);
 	adf_disable_aer(accel_dev);
 	adf_cleanup_accel(accel_dev);


@@ -109,29 +109,6 @@ static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
 {
 }
 
-static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
-{
-	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
-		(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
-
-	if (adf_iov_putmsg(accel_dev, msg, 0)) {
-		dev_err(&GET_DEV(accel_dev),
-			"Failed to send Init event to PF\n");
-		return -EFAULT;
-	}
-	return 0;
-}
-
-static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
-{
-	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
-		(ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
-
-	if (adf_iov_putmsg(accel_dev, msg, 0))
-		dev_err(&GET_DEV(accel_dev),
-			"Failed to send Shutdown event to PF\n");
-}
-
 void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
 {
 	hw_data->dev_class = &dh895xcciov_class;


@@ -238,6 +238,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret)
 		goto out_err_free_reg;
 
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
 	ret = adf_dev_init(accel_dev);
 	if (ret)
 		goto out_err_dev_shutdown;
@@ -270,9 +272,7 @@ static void adf_remove(struct pci_dev *pdev)
 		pr_err("QAT: Driver removal failed\n");
 		return;
 	}
-	if (adf_dev_stop(accel_dev))
-		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+	adf_dev_stop(accel_dev);
 	adf_dev_shutdown(accel_dev);
 	adf_cleanup_accel(accel_dev);
 	adf_cleanup_pci_dev(accel_dev);


@@ -11,65 +11,64 @@
  *
  */
 
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
 #include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/crypto.h>
-#include <linux/interrupt.h>
 
-#include <crypto/algapi.h>
-#include <crypto/aes.h>
 #include <crypto/ctr.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
 
 #define _SBF(s, v)			((v) << (s))
-#define _BIT(b)				_SBF(b, 1)
 
 /* Feed control registers */
 #define SSS_REG_FCINTSTAT		0x0000
-#define SSS_FCINTSTAT_BRDMAINT		_BIT(3)
-#define SSS_FCINTSTAT_BTDMAINT		_BIT(2)
-#define SSS_FCINTSTAT_HRDMAINT		_BIT(1)
-#define SSS_FCINTSTAT_PKDMAINT		_BIT(0)
+#define SSS_FCINTSTAT_BRDMAINT		BIT(3)
+#define SSS_FCINTSTAT_BTDMAINT		BIT(2)
+#define SSS_FCINTSTAT_HRDMAINT		BIT(1)
+#define SSS_FCINTSTAT_PKDMAINT		BIT(0)
 
 #define SSS_REG_FCINTENSET		0x0004
-#define SSS_FCINTENSET_BRDMAINTENSET	_BIT(3)
-#define SSS_FCINTENSET_BTDMAINTENSET	_BIT(2)
-#define SSS_FCINTENSET_HRDMAINTENSET	_BIT(1)
-#define SSS_FCINTENSET_PKDMAINTENSET	_BIT(0)
+#define SSS_FCINTENSET_BRDMAINTENSET	BIT(3)
+#define SSS_FCINTENSET_BTDMAINTENSET	BIT(2)
+#define SSS_FCINTENSET_HRDMAINTENSET	BIT(1)
+#define SSS_FCINTENSET_PKDMAINTENSET	BIT(0)
 
 #define SSS_REG_FCINTENCLR		0x0008
-#define SSS_FCINTENCLR_BRDMAINTENCLR	_BIT(3)
-#define SSS_FCINTENCLR_BTDMAINTENCLR	_BIT(2)
-#define SSS_FCINTENCLR_HRDMAINTENCLR	_BIT(1)
-#define SSS_FCINTENCLR_PKDMAINTENCLR	_BIT(0)
+#define SSS_FCINTENCLR_BRDMAINTENCLR	BIT(3)
+#define SSS_FCINTENCLR_BTDMAINTENCLR	BIT(2)
+#define SSS_FCINTENCLR_HRDMAINTENCLR	BIT(1)
+#define SSS_FCINTENCLR_PKDMAINTENCLR	BIT(0)
 
 #define SSS_REG_FCINTPEND		0x000C
-#define SSS_FCINTPEND_BRDMAINTP		_BIT(3)
-#define SSS_FCINTPEND_BTDMAINTP		_BIT(2)
-#define SSS_FCINTPEND_HRDMAINTP		_BIT(1)
-#define SSS_FCINTPEND_PKDMAINTP		_BIT(0)
+#define SSS_FCINTPEND_BRDMAINTP		BIT(3)
+#define SSS_FCINTPEND_BTDMAINTP		BIT(2)
+#define SSS_FCINTPEND_HRDMAINTP		BIT(1)
+#define SSS_FCINTPEND_PKDMAINTP		BIT(0)
 
 #define SSS_REG_FCFIFOSTAT		0x0010
-#define SSS_FCFIFOSTAT_BRFIFOFUL	_BIT(7)
-#define SSS_FCFIFOSTAT_BRFIFOEMP	_BIT(6)
-#define SSS_FCFIFOSTAT_BTFIFOFUL	_BIT(5)
-#define SSS_FCFIFOSTAT_BTFIFOEMP	_BIT(4)
-#define SSS_FCFIFOSTAT_HRFIFOFUL	_BIT(3)
-#define SSS_FCFIFOSTAT_HRFIFOEMP	_BIT(2)
-#define SSS_FCFIFOSTAT_PKFIFOFUL	_BIT(1)
-#define SSS_FCFIFOSTAT_PKFIFOEMP	_BIT(0)
+#define SSS_FCFIFOSTAT_BRFIFOFUL	BIT(7)
+#define SSS_FCFIFOSTAT_BRFIFOEMP	BIT(6)
+#define SSS_FCFIFOSTAT_BTFIFOFUL	BIT(5)
+#define SSS_FCFIFOSTAT_BTFIFOEMP	BIT(4)
+#define SSS_FCFIFOSTAT_HRFIFOFUL	BIT(3)
+#define SSS_FCFIFOSTAT_HRFIFOEMP	BIT(2)
+#define SSS_FCFIFOSTAT_PKFIFOFUL	BIT(1)
+#define SSS_FCFIFOSTAT_PKFIFOEMP	BIT(0)
 
 #define SSS_REG_FCFIFOCTRL		0x0014
-#define SSS_FCFIFOCTRL_DESSEL		_BIT(2)
+#define SSS_FCFIFOCTRL_DESSEL		BIT(2)
 #define SSS_HASHIN_INDEPENDENT		_SBF(0, 0x00)
 #define SSS_HASHIN_CIPHER_INPUT		_SBF(0, 0x01)
 #define SSS_HASHIN_CIPHER_OUTPUT	_SBF(0, 0x02)
@@ -77,52 +76,52 @@
 #define SSS_REG_FCBRDMAS		0x0020
 #define SSS_REG_FCBRDMAL		0x0024
 #define SSS_REG_FCBRDMAC		0x0028
-#define SSS_FCBRDMAC_BYTESWAP		_BIT(1)
-#define SSS_FCBRDMAC_FLUSH		_BIT(0)
+#define SSS_FCBRDMAC_BYTESWAP		BIT(1)
+#define SSS_FCBRDMAC_FLUSH		BIT(0)
 
 #define SSS_REG_FCBTDMAS		0x0030
 #define SSS_REG_FCBTDMAL		0x0034
 #define SSS_REG_FCBTDMAC		0x0038
-#define SSS_FCBTDMAC_BYTESWAP		_BIT(1)
-#define SSS_FCBTDMAC_FLUSH		_BIT(0)
+#define SSS_FCBTDMAC_BYTESWAP		BIT(1)
+#define SSS_FCBTDMAC_FLUSH		BIT(0)
 
 #define SSS_REG_FCHRDMAS		0x0040
 #define SSS_REG_FCHRDMAL		0x0044
 #define SSS_REG_FCHRDMAC		0x0048
-#define SSS_FCHRDMAC_BYTESWAP		_BIT(1)
-#define SSS_FCHRDMAC_FLUSH		_BIT(0)
+#define SSS_FCHRDMAC_BYTESWAP		BIT(1)
+#define SSS_FCHRDMAC_FLUSH		BIT(0)
 
 #define SSS_REG_FCPKDMAS		0x0050
 #define SSS_REG_FCPKDMAL		0x0054
 #define SSS_REG_FCPKDMAC		0x0058
-#define SSS_FCPKDMAC_BYTESWAP		_BIT(3)
-#define SSS_FCPKDMAC_DESCEND		_BIT(2)
-#define SSS_FCPKDMAC_TRANSMIT		_BIT(1)
-#define SSS_FCPKDMAC_FLUSH		_BIT(0)
+#define SSS_FCPKDMAC_BYTESWAP		BIT(3)
+#define SSS_FCPKDMAC_DESCEND		BIT(2)
+#define SSS_FCPKDMAC_TRANSMIT		BIT(1)
+#define SSS_FCPKDMAC_FLUSH		BIT(0)
 
 #define SSS_REG_FCPKDMAO		0x005C
 
 /* AES registers */
 #define SSS_REG_AES_CONTROL		0x00
-#define SSS_AES_BYTESWAP_DI		_BIT(11)
-#define SSS_AES_BYTESWAP_DO		_BIT(10)
-#define SSS_AES_BYTESWAP_IV		_BIT(9)
-#define SSS_AES_BYTESWAP_CNT		_BIT(8)
-#define SSS_AES_BYTESWAP_KEY		_BIT(7)
-#define SSS_AES_KEY_CHANGE_MODE		_BIT(6)
+#define SSS_AES_BYTESWAP_DI		BIT(11)
+#define SSS_AES_BYTESWAP_DO		BIT(10)
+#define SSS_AES_BYTESWAP_IV		BIT(9)
+#define SSS_AES_BYTESWAP_CNT		BIT(8)
+#define SSS_AES_BYTESWAP_KEY		BIT(7)
+#define SSS_AES_KEY_CHANGE_MODE		BIT(6)
 #define SSS_AES_KEY_SIZE_128		_SBF(4, 0x00)
 #define SSS_AES_KEY_SIZE_192		_SBF(4, 0x01)
 #define SSS_AES_KEY_SIZE_256		_SBF(4, 0x02)
-#define SSS_AES_FIFO_MODE		_BIT(3)
+#define SSS_AES_FIFO_MODE		BIT(3)
 #define SSS_AES_CHAIN_MODE_ECB		_SBF(1, 0x00)
 #define SSS_AES_CHAIN_MODE_CBC		_SBF(1, 0x01)
 #define SSS_AES_CHAIN_MODE_CTR		_SBF(1, 0x02)
-#define SSS_AES_MODE_DECRYPT		_BIT(0)
+#define SSS_AES_MODE_DECRYPT		BIT(0)
 
 #define SSS_REG_AES_STATUS		0x04
-#define SSS_AES_BUSY			_BIT(2)
-#define SSS_AES_INPUT_READY		_BIT(1)
-#define SSS_AES_OUTPUT_READY		_BIT(0)
+#define SSS_AES_BUSY			BIT(2)
+#define SSS_AES_INPUT_READY		BIT(1)
+#define SSS_AES_OUTPUT_READY		BIT(0)
 
 #define SSS_REG_AES_IN_DATA(s)		(0x10 + (s << 2))
 #define SSS_REG_AES_OUT_DATA(s)		(0x20 + (s << 2))
@@ -139,7 +138,7 @@
 			SSS_AES_REG(dev, reg))
 
 /* HW engine modes */
-#define FLAGS_AES_DECRYPT		_BIT(0)
+#define FLAGS_AES_DECRYPT		BIT(0)
 #define FLAGS_AES_MODE_MASK		_SBF(1, 0x03)
 #define FLAGS_AES_CBC			_SBF(1, 0x01)
 #define FLAGS_AES_CTR			_SBF(1, 0x02)
@@ -149,7 +148,6 @@
 
 /**
  * struct samsung_aes_variant - platform specific SSS driver data
- * @has_hash_irq: true if SSS module uses hash interrupt, false otherwise
  * @aes_offset: AES register offset from SSS module's base.
  *
  * Specifies platform specific configuration of SSS module.
@@ -157,7 +155,6 @@
  * expansion of its usage.
  */
 struct samsung_aes_variant {
-	bool			has_hash_irq;
 	unsigned int		aes_offset;
 };
 
@@ -178,7 +175,6 @@ struct s5p_aes_dev {
 	struct clk		*clk;
 	void __iomem		*ioaddr;
 	void __iomem		*aes_ioaddr;
-	int			irq_hash;
 	int			irq_fc;
 
 	struct ablkcipher_request	*req;
@@ -186,6 +182,10 @@ struct s5p_aes_dev {
 	struct scatterlist		*sg_src;
 	struct scatterlist		*sg_dst;
 
+	/* In case of unaligned access: */
+	struct scatterlist		*sg_src_cpy;
+	struct scatterlist		*sg_dst_cpy;
+
 	struct tasklet_struct		tasklet;
 	struct crypto_queue		queue;
 	bool				busy;
@@ -197,12 +197,10 @@ struct s5p_aes_dev {
 static struct s5p_aes_dev *s5p_dev;
 
 static const struct samsung_aes_variant s5p_aes_data = {
-	.has_hash_irq	= true,
 	.aes_offset	= 0x4000,
 };
 
 static const struct samsung_aes_variant exynos_aes_data = {
-	.has_hash_irq	= false,
 	.aes_offset	= 0x200,
 };
 
@@ -245,8 +243,45 @@ static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
 	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
 }
 
+static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
+{
+	int len;
+
+	if (!*sg)
+		return;
+
+	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+	free_pages((unsigned long)sg_virt(*sg), get_order(len));
+
+	kfree(*sg);
+	*sg = NULL;
+}
+
+static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
+			    unsigned int nbytes, int out)
+{
+	struct scatter_walk walk;
+
+	if (!nbytes)
+		return;
+
+	scatterwalk_start(&walk, sg);
+	scatterwalk_copychunks(buf, &walk, nbytes, out);
+	scatterwalk_done(&walk, out, 0);
+}
+
 static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
 {
+	if (dev->sg_dst_cpy) {
+		dev_dbg(dev->dev,
+			"Copying %d bytes of output data back to original place\n",
+			dev->req->nbytes);
+		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
+				dev->req->nbytes, 1);
+	}
+	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
+	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+
 	/* holding a lock outside */
 	dev->req->base.complete(&dev->req->base, err);
 	dev->busy = false;
@@ -262,15 +297,37 @@ static void s5p_unset_indata(struct s5p_aes_dev *dev)
 	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
 }
 
+static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
+			   struct scatterlist **dst)
+{
+	void *pages;
+	int len;
+
+	*dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
+	if (!*dst)
+		return -ENOMEM;
+
+	len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+	pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
+	if (!pages) {
+		kfree(*dst);
+		*dst = NULL;
+		return -ENOMEM;
+	}
+
+	s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
+
+	sg_init_table(*dst, 1);
+	sg_set_buf(*dst, pages, len);
+
+	return 0;
+}
+
 static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
 {
 	int err;
 
-	if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
-		err = -EINVAL;
-		goto exit;
-	}
-	if (!sg_dma_len(sg)) {
+	if (!sg->length) {
 		err = -EINVAL;
 		goto exit;
 	}
@@ -284,7 +341,7 @@ static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
 	dev->sg_dst = sg;
 	err = 0;
 
- exit:
+exit:
 	return err;
 }
 
@@ -292,11 +349,7 @@ static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
 {
 	int err;
 
-	if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
-		err = -EINVAL;
-		goto exit;
-	}
-	if (!sg_dma_len(sg)) {
+	if (!sg->length) {
 		err = -EINVAL;
 		goto exit;
 	}
@@ -310,47 +363,59 @@ static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
 	dev->sg_src = sg;
 	err = 0;
 
- exit:
+exit:
 	return err;
 }
 
-static void s5p_aes_tx(struct s5p_aes_dev *dev)
+/*
+ * Returns true if new transmitting (output) data is ready and its
+ * address+length have to be written to device (by calling
+ * s5p_set_dma_outdata()). False otherwise.
+ */
+static bool s5p_aes_tx(struct s5p_aes_dev *dev)
 {
 	int err = 0;
+	bool ret = false;
 
 	s5p_unset_outdata(dev);
 
 	if (!sg_is_last(dev->sg_dst)) {
 		err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
-		if (err) {
+		if (err)
 			s5p_aes_complete(dev, err);
-			return;
-		}
-
-		s5p_set_dma_outdata(dev, dev->sg_dst);
+		else
+			ret = true;
 	} else {
 		s5p_aes_complete(dev, err);
 
 		dev->busy = true;
 		tasklet_schedule(&dev->tasklet);
 	}
+
+	return ret;
 }
 
-static void s5p_aes_rx(struct s5p_aes_dev *dev)
+/*
+ * Returns true if new receiving (input) data is ready and its
+ * address+length have to be written to device (by calling
+ * s5p_set_dma_indata()). False otherwise.
+ */
+static bool s5p_aes_rx(struct s5p_aes_dev *dev)
 {
 	int err;
+	bool ret = false;
 
 	s5p_unset_indata(dev);
 
 	if (!sg_is_last(dev->sg_src)) {
 		err = s5p_set_indata(dev, sg_next(dev->sg_src));
-		if (err) {
+		if (err)
 			s5p_aes_complete(dev, err);
-			return;
-		}
-
-		s5p_set_dma_indata(dev, dev->sg_src);
+		else
+			ret = true;
 	}
+
+	return ret;
 }
 
 static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
@@ -359,18 +424,29 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
 	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
 	uint32_t status;
 	unsigned long flags;
+	bool set_dma_tx = false;
+	bool set_dma_rx = false;
 
 	spin_lock_irqsave(&dev->lock, flags);
 
-	if (irq == dev->irq_fc) {
-		status = SSS_READ(dev, FCINTSTAT);
-		if (status & SSS_FCINTSTAT_BRDMAINT)
-			s5p_aes_rx(dev);
-		if (status & SSS_FCINTSTAT_BTDMAINT)
-			s5p_aes_tx(dev);
-
-		SSS_WRITE(dev, FCINTPEND, status);
-	}
+	status = SSS_READ(dev, FCINTSTAT);
+	if (status & SSS_FCINTSTAT_BRDMAINT)
+		set_dma_rx = s5p_aes_rx(dev);
+	if (status & SSS_FCINTSTAT_BTDMAINT)
+		set_dma_tx = s5p_aes_tx(dev);
+
+	SSS_WRITE(dev, FCINTPEND, status);
+
+	/*
+	 * Writing length of DMA block (either receiving or transmitting)
+	 * will start the operation immediately, so this should be done
+	 * at the end (even after clearing pending interrupts to not miss the
+	 * interrupt).
+	 */
+	if (set_dma_tx)
+		s5p_set_dma_outdata(dev, dev->sg_dst);
+	if (set_dma_rx)
+		s5p_set_dma_indata(dev, dev->sg_src);
 
 	spin_unlock_irqrestore(&dev->lock, flags);
 
@@ -395,6 +471,71 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
 	memcpy_toio(keystart, key, keylen);
 }
 
+static bool s5p_is_sg_aligned(struct scatterlist *sg)
+{
+	while (sg) {
+		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+			return false;
+		sg = sg_next(sg);
+	}
+
+	return true;
+}
+
+static int s5p_set_indata_start(struct s5p_aes_dev *dev,
+				struct ablkcipher_request *req)
+{
+	struct scatterlist *sg;
+	int err;
+
+	dev->sg_src_cpy = NULL;
+	sg = req->src;
+	if (!s5p_is_sg_aligned(sg)) {
+		dev_dbg(dev->dev,
+			"At least one unaligned source scatter list, making a copy\n");
+		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
+		if (err)
+			return err;
+
+		sg = dev->sg_src_cpy;
+	}
+
+	err = s5p_set_indata(dev, sg);
+	if (err) {
+		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
+		return err;
+	}
+
+	return 0;
+}
+
+static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
+				 struct ablkcipher_request *req)
+{
+	struct scatterlist *sg;
+	int err;
+
+	dev->sg_dst_cpy = NULL;
+	sg = req->dst;
+	if (!s5p_is_sg_aligned(sg)) {
+		dev_dbg(dev->dev,
+			"At least one unaligned dest scatter list, making a copy\n");
+		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
+		if (err)
+			return err;
+
+		sg = dev->sg_dst_cpy;
+	}
+
+	err = s5p_set_outdata(dev, sg);
+	if (err) {
+		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+		return err;
+	}
+
+	return 0;
+}
+
 static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 {
 	struct ablkcipher_request *req = dev->req;
@@ -431,19 +572,19 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
 	SSS_WRITE(dev, FCFIFOCTRL, 0x00);
 
-	err = s5p_set_indata(dev, req->src);
+	err = s5p_set_indata_start(dev, req);
 	if (err)
 		goto indata_error;
 
-	err = s5p_set_outdata(dev, req->dst);
+	err = s5p_set_outdata_start(dev, req);
 	if (err)
 		goto outdata_error;
 
 	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
 	s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
 
-	s5p_set_dma_indata(dev,  req->src);
-	s5p_set_dma_outdata(dev, req->dst);
+	s5p_set_dma_indata(dev,  dev->sg_src);
+	s5p_set_dma_outdata(dev, dev->sg_dst);
 
 	SSS_WRITE(dev, FCINTENSET,
 		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);
@@ -452,10 +593,10 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 
 	return;
 
- outdata_error:
+outdata_error:
 	s5p_unset_indata(dev);
 
- indata_error:
+indata_error:
 	s5p_aes_complete(dev, err);
 	spin_unlock_irqrestore(&dev->lock, flags);
 }
@@ -506,7 +647,7 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
 	tasklet_schedule(&dev->tasklet);
 
- exit:
+exit:
 	return err;
 }
 
@@ -671,21 +812,6 @@ static int s5p_aes_probe(struct platform_device *pdev)
 		goto err_irq;
 	}
 
-	if (variant->has_hash_irq) {
-		pdata->irq_hash = platform_get_irq(pdev, 1);
-		if (pdata->irq_hash < 0) {
-			err = pdata->irq_hash;
-			dev_warn(dev, "hash interrupt is not available.\n");
-			goto err_irq;
-		}
-		err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
-				       IRQF_SHARED, pdev->name, pdev);
-		if (err < 0) {
-			dev_warn(dev, "hash interrupt is not available.\n");
-			goto err_irq;
-		}
-	}
-
 	pdata->busy = false;
 	pdata->variant = variant;
 	pdata->dev = dev;
@@ -705,7 +831,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
 
 	return 0;
 
- err_algs:
+err_algs:
 	dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);
 
 	for (j = 0; j < i; j++)
@@ -713,7 +839,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
 
 	tasklet_kill(&pdata->tasklet);
 
- err_irq:
+err_irq:
 	clk_disable_unprepare(pdata->clk);
 
 	s5p_dev = NULL;


@@ -35,6 +35,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
 	unsigned int todo;
 	struct sg_mapping_iter mi, mo;
 	unsigned int oi, oo; /* offset for in and out */
+	unsigned long flags;
 
 	if (areq->nbytes == 0)
 		return 0;
@@ -49,7 +50,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
 		return -EINVAL;
 	}
 
-	spin_lock_bh(&ss->slock);
+	spin_lock_irqsave(&ss->slock, flags);
 
 	for (i = 0; i < op->keylen; i += 4)
 		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
@@ -117,7 +118,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
 	sg_miter_stop(&mi);
 	sg_miter_stop(&mo);
 	writel(0, ss->base + SS_CTL);
-	spin_unlock_bh(&ss->slock);
+	spin_unlock_irqrestore(&ss->slock, flags);
 	return err;
 }
 
@@ -149,6 +150,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
 	unsigned int ob = 0;	/* offset in buf */
 	unsigned int obo = 0;	/* offset in bufo*/
 	unsigned int obl = 0;	/* length of data in bufo */
+	unsigned long flags;
 
 	if (areq->nbytes == 0)
 		return 0;
@@ -181,7 +183,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
 	if (no_chunk == 1)
 		return sun4i_ss_opti_poll(areq);
 
-	spin_lock_bh(&ss->slock);
+	spin_lock_irqsave(&ss->slock, flags);
 
 	for (i = 0; i < op->keylen; i += 4)
 		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
@@ -307,7 +309,7 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
 	sg_miter_stop(&mi);
 	sg_miter_stop(&mo);
 	writel(0, ss->base + SS_CTL);
-	spin_unlock_bh(&ss->slock);
+	spin_unlock_irqrestore(&ss->slock, flags);
 	return err;
 }
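The switch from spin_lock_bh() to spin_lock_irqsave() follows the standard locking rule: _bh only masks softirqs, so if the same lock can also be taken on a hardware-interrupt path, the non-IRQ path must disable interrupts outright or it can deadlock against its own IRQ handler. The rule as a hedged sketch (register access illustrative):

/* Hedged sketch: a lock shared with a hard-IRQ path needs irqsave. */
unsigned long flags;

spin_lock_irqsave(&ss->slock, flags);	/* masks local IRQs, saves prior state */
writel(0, ss->base + SS_CTL);		/* illustrative device access */
spin_unlock_irqrestore(&ss->slock, flags);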


@@ -835,6 +835,16 @@ struct talitos_ahash_req_ctx {
 	struct scatterlist *psrc;
 };
 
+struct talitos_export_state {
+	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
+	u8 buf[HASH_MAX_BLOCK_SIZE];
+	unsigned int swinit;
+	unsigned int first;
+	unsigned int last;
+	unsigned int to_hash_later;
+	unsigned int nbuf;
+};
+
 static int aead_setkey(struct crypto_aead *authenc,
 		       const u8 *key, unsigned int keylen)
 {
@@ -1981,6 +1991,46 @@ static int ahash_digest(struct ahash_request *areq)
 	return ahash_process_req(areq, areq->nbytes);
 }
 
+static int ahash_export(struct ahash_request *areq, void *out)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct talitos_export_state *export = out;
+
+	memcpy(export->hw_context, req_ctx->hw_context,
+	       req_ctx->hw_context_size);
+	memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
+	export->swinit = req_ctx->swinit;
+	export->first = req_ctx->first;
+	export->last = req_ctx->last;
+	export->to_hash_later = req_ctx->to_hash_later;
+	export->nbuf = req_ctx->nbuf;
+
+	return 0;
+}
+
+static int ahash_import(struct ahash_request *areq, const void *in)
+{
+	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	const struct talitos_export_state *export = in;
+
+	memset(req_ctx, 0, sizeof(*req_ctx));
+	req_ctx->hw_context_size =
+		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
+			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
+			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
+	memcpy(req_ctx->hw_context, export->hw_context,
+	       req_ctx->hw_context_size);
+	memcpy(req_ctx->buf, export->buf, export->nbuf);
+	req_ctx->swinit = export->swinit;
+	req_ctx->first = export->first;
+	req_ctx->last = export->last;
+	req_ctx->to_hash_later = export->to_hash_later;
+	req_ctx->nbuf = export->nbuf;
+
+	return 0;
+}
+
 struct keyhash_result {
 	struct completion completion;
 	int err;
@@ -2458,6 +2508,7 @@ static struct talitos_alg_template driver_algs[] = {
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = MD5_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "md5",
 				.cra_driver_name = "md5-talitos",
@@ -2473,6 +2524,7 @@ static struct talitos_alg_template driver_algs[] = {
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA1_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "sha1",
 				.cra_driver_name = "sha1-talitos",
@@ -2488,6 +2540,7 @@ static struct talitos_alg_template driver_algs[] = {
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA224_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "sha224",
 				.cra_driver_name = "sha224-talitos",
@@ -2503,6 +2556,7 @@ static struct talitos_alg_template driver_algs[] = {
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA256_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "sha256",
 				.cra_driver_name = "sha256-talitos",
@@ -2518,6 +2572,7 @@ static struct talitos_alg_template driver_algs[] = {
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA384_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "sha384",
 				.cra_driver_name = "sha384-talitos",
@@ -2533,6 +2588,7 @@ static struct talitos_alg_template driver_algs[] = {
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA512_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "sha512",
 				.cra_driver_name = "sha512-talitos",
@@ -2548,6 +2604,7 @@ static struct talitos_alg_template driver_algs[] = {
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = MD5_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "hmac(md5)",
 				.cra_driver_name = "hmac-md5-talitos",
@@ -2563,6 +2620,7 @@ static struct talitos_alg_template driver_algs[] = {
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA1_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "hmac(sha1)",
 				.cra_driver_name = "hmac-sha1-talitos",
@@ -2578,6 +2636,7 @@ static struct talitos_alg_template driver_algs[] = {
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA224_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "hmac(sha224)",
 				.cra_driver_name = "hmac-sha224-talitos",
@@ -2593,6 +2652,7 @@ static struct talitos_alg_template driver_algs[] = {
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA256_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "hmac(sha256)",
 				.cra_driver_name = "hmac-sha256-talitos",
@@ -2608,6 +2668,7 @@ static struct talitos_alg_template driver_algs[] = {
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA384_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "hmac(sha384)",
 				.cra_driver_name = "hmac-sha384-talitos",
@@ -2623,6 +2684,7 @@ static struct talitos_alg_template driver_algs[] = {
 	{	.type = CRYPTO_ALG_TYPE_AHASH,
 		.alg.hash = {
 			.halg.digestsize = SHA512_DIGEST_SIZE,
+			.halg.statesize = sizeof(struct talitos_export_state),
 			.halg.base = {
 				.cra_name = "hmac(sha512)",
 				.cra_driver_name = "hmac-sha512-talitos",
@@ -2814,6 +2876,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
 		t_alg->algt.alg.hash.finup = ahash_finup;
 		t_alg->algt.alg.hash.digest = ahash_digest;
 		t_alg->algt.alg.hash.setkey = ahash_setkey;
+		t_alg->algt.alg.hash.import = ahash_import;
+		t_alg->algt.alg.hash.export = ahash_export;
 
 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
 		    !strncmp(alg->cra_name, "hmac", 4)) {
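With statesize and the export/import callbacks in place, talitos hashes can again go through the generic partial-state API, which the self-tests and users of async hashing exercise. A hedged usage sketch through the standard crypto_ahash calls (assumes req1/req2 were prepared with ahash_request_set_crypt() beforehand; async-completion handling is elided):

/* Hedged sketch: save/restore partial hash state via the ahash API. */
static int example_split_hash(struct crypto_ahash *tfm,
			      struct ahash_request *req1,
			      struct ahash_request *req2)
{
	void *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	int err;

	if (!state)
		return -ENOMEM;

	err = crypto_ahash_init(req1);			/* start hashing on req1 */
	if (!err)
		err = crypto_ahash_update(req1);	/* hash the first chunk */
	if (!err)
		err = crypto_ahash_export(req1, state);	/* ahash_export() above */
	if (!err)
		err = crypto_ahash_import(req2, state);	/* ahash_import() above */
	if (!err)
		err = crypto_ahash_final(req2);		/* fills req2's result buffer */

	kfree(state);
	return err;
}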


@@ -139,6 +139,26 @@ my $vmr = sub {
     "	vor	$vx,$vy,$vy";
 };
 
+# Some ABIs specify vrsave, special-purpose register #256, as reserved
+# for system use.
+my $no_vrsave = ($flavour =~ /aix|linux64le/);
+my $mtspr = sub {
+    my ($f,$idx,$ra) = @_;
+    if ($idx == 256 && $no_vrsave) {
+	"	or	$ra,$ra,$ra";
+    } else {
+	"	mtspr	$idx,$ra";
+    }
+};
+my $mfspr = sub {
+    my ($f,$rd,$idx) = @_;
+    if ($idx == 256 && $no_vrsave) {
+	"	li	$rd,-1";
+    } else {
+	"	mfspr	$rd,$idx";
+    }
+};
+
 # PowerISA 2.06 stuff
 sub vsxmem_op {
     my ($f, $vrt, $ra, $rb, $op) = @_;


@@ -105,19 +105,7 @@ static int ecryptfs_calculate_md5(char *dst,
 	struct crypto_shash *tfm;
 	int rc = 0;
 
-	mutex_lock(&crypt_stat->cs_hash_tfm_mutex);
 	tfm = crypt_stat->hash_tfm;
-	if (!tfm) {
-		tfm = crypto_alloc_shash(ECRYPTFS_DEFAULT_HASH, 0, 0);
-		if (IS_ERR(tfm)) {
-			rc = PTR_ERR(tfm);
-			ecryptfs_printk(KERN_ERR, "Error attempting to "
-					"allocate crypto context; rc = [%d]\n",
-					rc);
-			goto out;
-		}
-		crypt_stat->hash_tfm = tfm;
-	}
 	rc = ecryptfs_hash_digest(tfm, src, len, dst);
 	if (rc) {
 		printk(KERN_ERR
@@ -126,7 +114,6 @@ static int ecryptfs_calculate_md5(char *dst,
 		goto out;
 	}
 out:
-	mutex_unlock(&crypt_stat->cs_hash_tfm_mutex);
 	return rc;
 }
 
@@ -207,16 +194,29 @@ int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
  *
  * Initialize the crypt_stat structure.
  */
-void
-ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
+int ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
 {
+	struct crypto_shash *tfm;
+	int rc;
+
+	tfm = crypto_alloc_shash(ECRYPTFS_DEFAULT_HASH, 0, 0);
+	if (IS_ERR(tfm)) {
+		rc = PTR_ERR(tfm);
+		ecryptfs_printk(KERN_ERR, "Error attempting to "
+				"allocate crypto context; rc = [%d]\n",
+				rc);
+		return rc;
+	}
+
 	memset((void *)crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat));
 	INIT_LIST_HEAD(&crypt_stat->keysig_list);
 	mutex_init(&crypt_stat->keysig_list_mutex);
 	mutex_init(&crypt_stat->cs_mutex);
 	mutex_init(&crypt_stat->cs_tfm_mutex);
-	mutex_init(&crypt_stat->cs_hash_tfm_mutex);
+	crypt_stat->hash_tfm = tfm;
 	crypt_stat->flags |= ECRYPTFS_STRUCT_INITIALIZED;
+
+	return 0;
 }
 
 /**
/** /**


@@ -242,7 +242,6 @@ struct ecryptfs_crypt_stat {
 	struct list_head keysig_list;
 	struct mutex keysig_list_mutex;
 	struct mutex cs_tfm_mutex;
-	struct mutex cs_hash_tfm_mutex;
 	struct mutex cs_mutex;
 };
 
@@ -577,7 +576,7 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
 			int sg_size);
 int ecryptfs_compute_root_iv(struct ecryptfs_crypt_stat *crypt_stat);
 void ecryptfs_rotate_iv(unsigned char *iv);
-void ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat);
+int ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat);
 void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat);
 void ecryptfs_destroy_mount_crypt_stat(
 	struct ecryptfs_mount_crypt_stat *mount_crypt_stat);


@@ -898,8 +898,11 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
 		struct ecryptfs_crypt_stat *crypt_stat;
 
 		crypt_stat = &ecryptfs_inode_to_private(d_inode(dentry))->crypt_stat;
-		if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
-			ecryptfs_init_crypt_stat(crypt_stat);
+		if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)) {
+			rc = ecryptfs_init_crypt_stat(crypt_stat);
+			if (rc)
+				return rc;
+		}
 		inode = d_inode(dentry);
 		lower_inode = ecryptfs_inode_to_lower(inode);
 		lower_dentry = ecryptfs_dentry_to_lower(dentry);


@@ -55,7 +55,10 @@ static struct inode *ecryptfs_alloc_inode(struct super_block *sb)
 	inode_info = kmem_cache_alloc(ecryptfs_inode_info_cache, GFP_KERNEL);
 	if (unlikely(!inode_info))
 		goto out;
-	ecryptfs_init_crypt_stat(&inode_info->crypt_stat);
+	if (ecryptfs_init_crypt_stat(&inode_info->crypt_stat)) {
+		kmem_cache_free(ecryptfs_inode_info_cache, inode_info);
+		goto out;
+	}
 	mutex_init(&inode_info->lower_file_mutex);
 	atomic_set(&inode_info->lower_file_count, 0);
 	inode_info->lower_file = NULL;


@@ -405,8 +405,7 @@ static inline void aead_request_set_tfm(struct aead_request *req,
  * encrypt and decrypt API calls. During the allocation, the provided aead
  * handle is registered in the request data structure.
  *
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
  */
 static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
						       gfp_t gfp)
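Since these allocators are thin kzalloc() wrappers, failure is reported as a NULL pointer; a caller that applies IS_ERR()/PTR_ERR() to the result will never see the failure. The corrected calling convention, as a short sketch (the same pattern applies to the ahash, skcipher and ablkcipher variants whose kernel-doc is updated identically below):

/* Hedged sketch: request allocation fails with NULL, not an ERR_PTR. */
req = aead_request_alloc(tfm, GFP_KERNEL);
if (!req)
	return -ENOMEM;	/* IS_ERR(req) would wrongly report success here */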


@@ -547,8 +547,7 @@ static inline void ahash_request_set_tfm(struct ahash_request *req,
  * the allocation, the provided ahash handle
  * is registered in the request data structure.
  *
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
  */
 static inline struct ahash_request *ahash_request_alloc(
	struct crypto_ahash *tfm, gfp_t gfp)


@@ -425,8 +425,7 @@ static inline struct skcipher_request *skcipher_request_cast(
  * encrypt and decrypt API calls. During the allocation, the provided skcipher
  * handle is registered in the request data structure.
  *
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
  */
 static inline struct skcipher_request *skcipher_request_alloc(
	struct crypto_skcipher *tfm, gfp_t gfp)

View File

@@ -1,9 +1,10 @@
 /*
  * AMD Cryptographic Coprocessor (CCP) driver
  *
- * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
  *
  * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -381,6 +382,35 @@ struct ccp_passthru_engine {
 	u32 final;
 };
 
+/**
+ * struct ccp_passthru_nomap_engine - CCP pass-through operation
+ *   without performing DMA mapping
+ * @bit_mod: bitwise operation to perform
+ * @byte_swap: byteswap operation to perform
+ * @mask: mask to be applied to data
+ * @mask_len: length in bytes of mask
+ * @src: data to be used for this operation
+ * @dst: data produced by this operation
+ * @src_len: length in bytes of data used for this operation
+ * @final: indicate final pass-through operation
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ *   - bit_mod, byte_swap, src, dst, src_len
+ *   - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP
+ */
+struct ccp_passthru_nomap_engine {
+	enum ccp_passthru_bitwise bit_mod;
+	enum ccp_passthru_byteswap byte_swap;
+
+	dma_addr_t mask;
+	u32 mask_len;		/* In bytes */
+
+	dma_addr_t src_dma, dst_dma;
+	u64 src_len;		/* In bytes */
+
+	u32 final;
+};
+
 /***** ECC engine *****/
 #define CCP_ECC_MODULUS_BYTES	48	/* 384-bits */
 #define CCP_ECC_MAX_OPERANDS	6
@@ -523,6 +553,7 @@ enum ccp_engine {
 
 /* Flag values for flags member of ccp_cmd */
 #define CCP_CMD_MAY_BACKLOG	0x00000001
+#define CCP_CMD_PASSTHRU_NO_DMA_MAP	0x00000002
 
 /**
  * struct ccp_cmd - CPP operation request
@@ -562,6 +593,7 @@ struct ccp_cmd {
 		struct ccp_sha_engine sha;
 		struct ccp_rsa_engine rsa;
 		struct ccp_passthru_engine passthru;
+		struct ccp_passthru_nomap_engine passthru_nomap;
 		struct ccp_ecc_engine ecc;
 	} u;
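
A hedged sketch of queueing a pass-through operation via the new union member;
src_dma, dst_dma and len stand for caller-supplied, already-DMA-mapped
addresses and a length, and are assumptions rather than part of this header:

	struct ccp_cmd cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.engine = CCP_ENGINE_PASSTHRU;
	cmd.flags = CCP_CMD_PASSTHRU_NO_DMA_MAP;  /* select u.passthru_nomap */
	cmd.u.passthru_nomap.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
	cmd.u.passthru_nomap.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
	cmd.u.passthru_nomap.src_dma = src_dma;	/* already mapped by caller */
	cmd.u.passthru_nomap.dst_dma = dst_dma;	/* already mapped by caller */
	cmd.u.passthru_nomap.src_len = len;
	cmd.u.passthru_nomap.final = 1;

	ret = ccp_enqueue_cmd(&cmd);	/* asynchronous; may return -EINPROGRESS */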

View File

@@ -948,8 +948,7 @@ static inline struct ablkcipher_request *ablkcipher_request_cast(
  * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
  * handle is registered in the request data structure.
  *
- * Return: allocated request handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
+ * Return: allocated request handle in case of success, or NULL if out of memory
  */
 static inline struct ablkcipher_request *ablkcipher_request_alloc(
 	struct crypto_ablkcipher *tfm, gfp_t gfp)

View File

@@ -12,6 +12,7 @@
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
+#include <linux/module.h>
 #include <linux/asn1_decoder.h>
 #include <linux/asn1_ber_bytecode.h>
 
@@ -506,3 +507,5 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
 	return -EBADMSG;
 }
 EXPORT_SYMBOL_GPL(asn1_ber_decoder);
+
+MODULE_LICENSE("GPL");

View File

@@ -20,6 +20,8 @@
 #include <linux/bitops.h>
 #include <linux/count_zeros.h>
+#include <linux/byteorder/generic.h>
+#include <linux/string.h>
 
 #include "mpi-internal.h"
 
 #define MAX_EXTERN_MPI_BITS 16384
@@ -163,7 +165,13 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
 		    int *sign)
 {
 	uint8_t *p;
-	mpi_limb_t alimb;
+#if BYTES_PER_MPI_LIMB == 4
+	__be32 alimb;
+#elif BYTES_PER_MPI_LIMB == 8
+	__be64 alimb;
+#else
+#error please implement for this limb size.
+#endif
 	unsigned int n = mpi_get_size(a);
 	int i, lzeros;
@@ -183,38 +191,19 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
 	p = buf;
 	*nbytes = n - lzeros;
 
-	for (i = a->nlimbs - 1; i >= 0; i--) {
-		alimb = a->d[i];
+	for (i = a->nlimbs - 1 - lzeros / BYTES_PER_MPI_LIMB,
+			lzeros %= BYTES_PER_MPI_LIMB;
+		i >= 0; i--) {
 #if BYTES_PER_MPI_LIMB == 4
-		*p++ = alimb >> 24;
-		*p++ = alimb >> 16;
-		*p++ = alimb >> 8;
-		*p++ = alimb;
+		alimb = cpu_to_be32(a->d[i]);
 #elif BYTES_PER_MPI_LIMB == 8
-		*p++ = alimb >> 56;
-		*p++ = alimb >> 48;
-		*p++ = alimb >> 40;
-		*p++ = alimb >> 32;
-		*p++ = alimb >> 24;
-		*p++ = alimb >> 16;
-		*p++ = alimb >> 8;
-		*p++ = alimb;
+		alimb = cpu_to_be64(a->d[i]);
 #else
 #error please implement for this limb size.
 #endif
-
-		if (lzeros > 0) {
-			if (lzeros >= sizeof(alimb)) {
-				p -= sizeof(alimb);
-			} else {
-				mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
-				mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
-							+ lzeros;
-				*limb1 = *limb2;
-				p -= lzeros;
-			}
-			lzeros -= sizeof(alimb);
-		}
+		memcpy(p, (u8 *)&alimb + lzeros, BYTES_PER_MPI_LIMB - lzeros);
+		p += BYTES_PER_MPI_LIMB - lzeros;
+		lzeros = 0;
 	}
 	return 0;
 }
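
The rewritten loop replaces the per-byte shifts with one endianness conversion
per limb followed by a memcpy that skips any leading zero bytes. A standalone
illustration of the idea, with made-up values:

	u64 limb = 0x0000010203040506ULL;	/* one 8-byte limb */
	__be64 be = cpu_to_be64(limb);		/* big-endian image of the limb */
	u8 out[8];
	unsigned int lzeros = 2;		/* leading zero bytes to strip */

	memcpy(out, (u8 *)&be + lzeros, sizeof(be) - lzeros);
	/* out now holds 01 02 03 04 05 06 */
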
@@ -359,7 +348,13 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
 		     int *sign)
 {
 	u8 *p, *p2;
-	mpi_limb_t alimb, alimb2;
+#if BYTES_PER_MPI_LIMB == 4
+	__be32 alimb;
+#elif BYTES_PER_MPI_LIMB == 8
+	__be64 alimb;
+#else
+#error please implement for this limb size.
+#endif
 	unsigned int n = mpi_get_size(a);
 	int i, x, y = 0, lzeros, buf_len;
@@ -380,42 +375,22 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
 	buf_len = sgl->length;
 	p2 = sg_virt(sgl);
 
-	for (i = a->nlimbs - 1; i >= 0; i--) {
-		alimb = a->d[i];
-		p = (u8 *)&alimb2;
+	for (i = a->nlimbs - 1 - lzeros / BYTES_PER_MPI_LIMB,
+			lzeros %= BYTES_PER_MPI_LIMB;
+		i >= 0; i--) {
 #if BYTES_PER_MPI_LIMB == 4
-		*p++ = alimb >> 24;
-		*p++ = alimb >> 16;
-		*p++ = alimb >> 8;
-		*p++ = alimb;
+		alimb = cpu_to_be32(a->d[i]);
 #elif BYTES_PER_MPI_LIMB == 8
-		*p++ = alimb >> 56;
-		*p++ = alimb >> 48;
-		*p++ = alimb >> 40;
-		*p++ = alimb >> 32;
-		*p++ = alimb >> 24;
-		*p++ = alimb >> 16;
-		*p++ = alimb >> 8;
-		*p++ = alimb;
+		alimb = cpu_to_be64(a->d[i]);
 #else
 #error please implement for this limb size.
 #endif
-		if (lzeros > 0) {
-			if (lzeros >= sizeof(alimb)) {
-				p -= sizeof(alimb);
-				continue;
-			} else {
-				mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
-				mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
-							+ lzeros;
-				*limb1 = *limb2;
-				p -= lzeros;
-				y = lzeros;
-			}
-			lzeros -= sizeof(alimb);
+		if (lzeros) {
+			y = lzeros;
+			lzeros = 0;
 		}
 
-		p = p - (sizeof(alimb) - y);
+		p = (u8 *)&alimb + y;
 
 		for (x = 0; x < sizeof(alimb) - y; x++) {
 			if (!buf_len) {
@@ -443,15 +418,15 @@ EXPORT_SYMBOL_GPL(mpi_write_to_sgl);
  * a new MPI and reads the content of the sgl to the MPI.
  *
  * @sgl:	scatterlist to read from
- * @len:	number of bytes to read
+ * @nbytes:	number of bytes to read
  *
  * Return:	Pointer to a new MPI or NULL on error
  */
-MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len)
+MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 {
 	struct scatterlist *sg;
 	int x, i, j, z, lzeros, ents;
-	unsigned int nbits, nlimbs, nbytes;
+	unsigned int nbits, nlimbs;
 	mpi_limb_t a;
 	MPI val = NULL;
@@ -472,16 +447,12 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len)
 			break;
 
 		ents--;
+		nbytes -= lzeros;
 		lzeros = 0;
 	}
 
 	sgl = sg;
-	nbytes -= lzeros;
-	if (!ents)
-		nbytes = 0;
-	else
-		nbytes = len - lzeros;
 
 	nbits = nbytes * 8;
 	if (nbits > MAX_EXTERN_MPI_BITS) {
 		pr_info("MPI: mpi too large (%u bits)\n", nbits);
@@ -489,9 +460,8 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len)
 	}
 
 	if (nbytes > 0)
-		nbits -= count_leading_zeros(*(u8 *)(sg_virt(sgl) + lzeros));
-	else
-		nbits = 0;
+		nbits -= count_leading_zeros(*(u8 *)(sg_virt(sgl) + lzeros)) -
+			(BITS_PER_LONG - 8);
 
 	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
 	val = mpi_alloc(nlimbs);
@@ -507,19 +477,14 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len)
 	j = nlimbs - 1;
 	a = 0;
-	z = 0;
-	x = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
-	x %= BYTES_PER_MPI_LIMB;
+	z = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
+	z %= BYTES_PER_MPI_LIMB;
 
 	for_each_sg(sgl, sg, ents, i) {
 		const u8 *buffer = sg_virt(sg) + lzeros;
 		int len = sg->length - lzeros;
-		int buf_shift = x;
 
-		if (sg_is_last(sg) && (len % BYTES_PER_MPI_LIMB))
-			len += BYTES_PER_MPI_LIMB - (len % BYTES_PER_MPI_LIMB);
-
-		for (; x < len + buf_shift; x++) {
+		for (x = 0; x < len; x++) {
 			a <<= 8;
 			a |= *buffer++;
 			if (((z + x + 1) % BYTES_PER_MPI_LIMB) == 0) {
@@ -528,7 +493,6 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len)
 			}
 		}
 		z += x;
-		x = 0;
 		lzeros = 0;
 	}
 	return val;
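
Taken together, these fixes make the sgl and flat-buffer paths agree on nbytes
and leading-zero handling. A hedged round-trip sketch, with illustrative buffer
contents and abbreviated error paths:

	static const u8 in[8] = { 0x00, 0x00, 0x01, 0x02,
				  0x03, 0x04, 0x05, 0x06 };
	u8 out[8];
	unsigned int nbytes;
	int sign;
	struct scatterlist sg;
	MPI m;

	sg_init_one(&sg, in, sizeof(in));
	m = mpi_read_raw_from_sgl(&sg, sizeof(in));	/* NULL on error */
	if (!m)
		return -ENOMEM;

	/* Leading zero bytes are stripped, so nbytes should come back as 6. */
	if (mpi_read_buffer(m, out, sizeof(out), &nbytes, &sign)) {
		mpi_free(m);
		return -EINVAL;
	}
	mpi_free(m);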