crypto: inside-secure - move request dequeueing into a workqueue
This patch moves request dequeueing into a workqueue to improve interrupt coalescing when sending requests to the engine: the engine can raise a single interrupt for n submitted requests, and deferring the dequeue to a workqueue allows more requests to be sent at once.

Suggested-by: Ofer Heifetz <oferh@marvell.com>
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit 8472e77810
parent 5290ad6e9a
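For readers skimming the diff below, this is the standard deferred-work pattern: the IRQ handler (and the crypto entry points) no longer dequeue inline, they only kick per-ring work items, and the workqueue thread drains the software queue in batches. The following is a minimal, self-contained sketch of that pattern under hypothetical `demo_*` names (not the driver's actual symbols, which appear in the diff):

```c
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct demo_ring {
	struct workqueue_struct *wq;
	struct work_struct dequeue_work;	/* pushes queued requests to the engine */
	struct work_struct result_work;		/* reaps completed result descriptors */
	spinlock_t queue_lock;
	/* ... software request queue would live here ... */
};

static void demo_dequeue_work(struct work_struct *work)
{
	struct demo_ring *ring = container_of(work, struct demo_ring,
					      dequeue_work);

	/* Drain up to a batch of requests under queue_lock and notify the
	 * hardware once for the whole batch (placeholder body). */
	(void)ring;
}

static void demo_result_work(struct work_struct *work)
{
	struct demo_ring *ring = container_of(work, struct demo_ring,
					      result_work);

	/* Walk the result ring and complete finished requests (placeholder). */
	(void)ring;
}

static int demo_ring_init(struct demo_ring *ring)
{
	spin_lock_init(&ring->queue_lock);
	INIT_WORK(&ring->dequeue_work, demo_dequeue_work);
	INIT_WORK(&ring->result_work, demo_result_work);

	ring->wq = create_singlethread_workqueue("demo_ring");
	return ring->wq ? 0 : -ENOMEM;
}

/* Enqueue paths and the ring interrupt handler then only do:
 *
 *	queue_work(ring->wq, &ring->dequeue_work);
 *	queue_work(ring->wq, &ring->result_work);
 *
 * so one worker wakeup can cover several requests.
 */
```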
drivers/crypto/inside-secure/safexcel.c
@@ -429,8 +429,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
         struct safexcel_request *request;
         int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
 
-        priv->ring[ring].need_dequeue = false;
-
         do {
                 spin_lock_bh(&priv->ring[ring].queue_lock);
                 backlog = crypto_get_backlog(&priv->ring[ring].queue);
@@ -445,8 +443,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
                         spin_lock_bh(&priv->ring[ring].queue_lock);
                         crypto_enqueue_request(&priv->ring[ring].queue, req);
                         spin_unlock_bh(&priv->ring[ring].queue_lock);
-
-                        priv->ring[ring].need_dequeue = true;
                         goto finalize;
                 }
 
@@ -455,7 +451,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
                 if (ret) {
                         kfree(request);
                         req->complete(req, ret);
-                        priv->ring[ring].need_dequeue = true;
                         goto finalize;
                 }
 
@@ -471,9 +466,7 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
         } while (nreq++ < EIP197_MAX_BATCH_SZ);
 
 finalize:
-        if (nreq == EIP197_MAX_BATCH_SZ)
-                priv->ring[ring].need_dequeue = true;
-        else if (!nreq)
+        if (!nreq)
                 return;
 
         spin_lock_bh(&priv->ring[ring].lock);
@@ -628,12 +621,17 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
 static void safexcel_handle_result_work(struct work_struct *work)
 {
         struct safexcel_work_data *data =
-                        container_of(work, struct safexcel_work_data, work);
+                        container_of(work, struct safexcel_work_data, result_work);
         struct safexcel_crypto_priv *priv = data->priv;
 
         safexcel_handle_result_descriptor(priv, data->ring);
+}
 
-        if (priv->ring[data->ring].need_dequeue)
-                safexcel_dequeue(data->priv, data->ring);
+static void safexcel_dequeue_work(struct work_struct *work)
+{
+        struct safexcel_work_data *data =
+                        container_of(work, struct safexcel_work_data, work);
+
+        safexcel_dequeue(data->priv, data->ring);
 }
 
@@ -665,7 +663,10 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
                          */
                         dev_err(priv->dev, "RDR: fatal error.");
                 } else if (likely(stat & EIP197_xDR_THRESH)) {
-                        queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work);
+                        queue_work(priv->ring[ring].workqueue,
+                                   &priv->ring[ring].work_data.result_work);
+                        queue_work(priv->ring[ring].workqueue,
+                                   &priv->ring[ring].work_data.work);
                 }
 
                 /* ACK the interrupts */
@@ -846,7 +847,9 @@ static int safexcel_probe(struct platform_device *pdev)
 
                 priv->ring[i].work_data.priv = priv;
                 priv->ring[i].work_data.ring = i;
-                INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work);
+                INIT_WORK(&priv->ring[i].work_data.result_work,
+                          safexcel_handle_result_work);
+                INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);
 
                 snprintf(wq_name, 9, "wq_ring%d", i);
                 priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
drivers/crypto/inside-secure/safexcel.h
@@ -459,6 +459,7 @@ struct safexcel_config {
 
 struct safexcel_work_data {
         struct work_struct work;
+        struct work_struct result_work;
         struct safexcel_crypto_priv *priv;
         int ring;
 };
@@ -489,7 +490,6 @@ struct safexcel_crypto_priv {
                 /* queue */
                 struct crypto_queue queue;
                 spinlock_t queue_lock;
-                bool need_dequeue;
         } ring[EIP197_MAX_RINGS];
 };
 
drivers/crypto/inside-secure/safexcel_cipher.c
@@ -358,8 +358,8 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
         if (enq_ret != -EINPROGRESS)
                 *ret = enq_ret;
 
-        if (!priv->ring[ring].need_dequeue)
-                safexcel_dequeue(priv, ring);
+        queue_work(priv->ring[ring].workqueue,
+                   &priv->ring[ring].work_data.work);
 
         *should_complete = false;
 
@@ -448,8 +448,8 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
         crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
         spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-        if (!priv->ring[ring].need_dequeue)
-                safexcel_dequeue(priv, ring);
+        queue_work(priv->ring[ring].workqueue,
+                   &priv->ring[ring].work_data.work);
 
         wait_for_completion_interruptible(&result.completion);
 
@@ -495,8 +495,8 @@ static int safexcel_aes(struct skcipher_request *req,
         ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
         spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-        if (!priv->ring[ring].need_dequeue)
-                safexcel_dequeue(priv, ring);
+        queue_work(priv->ring[ring].workqueue,
+                   &priv->ring[ring].work_data.work);
 
         return ret;
 }
drivers/crypto/inside-secure/safexcel_hash.c
@@ -381,8 +381,8 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
         if (enq_ret != -EINPROGRESS)
                 *ret = enq_ret;
 
-        if (!priv->ring[ring].need_dequeue)
-                safexcel_dequeue(priv, ring);
+        queue_work(priv->ring[ring].workqueue,
+                   &priv->ring[ring].work_data.work);
 
         *should_complete = false;
 
@@ -470,8 +470,8 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
         crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
         spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-        if (!priv->ring[ring].need_dequeue)
-                safexcel_dequeue(priv, ring);
+        queue_work(priv->ring[ring].workqueue,
+                   &priv->ring[ring].work_data.work);
 
         wait_for_completion_interruptible(&result.completion);
 
@@ -556,8 +556,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
         ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
         spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-        if (!priv->ring[ring].need_dequeue)
-                safexcel_dequeue(priv, ring);
+        queue_work(priv->ring[ring].workqueue,
+                   &priv->ring[ring].work_data.work);
 
         return ret;
 }