mirror of https://gitee.com/openkylin/linux.git
x86/amd-iommu: Cleanup completion-wait handling
This patch cleans up the implementation of completion-wait command sending. It also switches the completion indicator from the MMIO status bit to a memory store, which can be checked without taking the IOMMU lock. As a side effect, this patch makes the __iommu_queue_command function obsolete, so it is removed as well.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent 11b6402c66
commit 815b33fdc2
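Before the diff itself, here is a minimal, hedged sketch of the pattern the patch moves to: instead of polling an MMIO status bit under the IOMMU lock, the driver passes the address of a local variable ("sem") in the COMPLETION_WAIT command and the hardware stores a non-zero value there once all earlier commands have finished; the driver then busy-waits on that memory location with a bounded delay loop. The sketch is userspace-only and purely illustrative: fake_iommu(), the pthread stand-in for the hardware store, and usleep() in place of udelay() are assumptions for the example and do not exist in the driver. The real kernel change follows in the diff below.

/*
 * Illustrative userspace sketch of the memory-store completion-wait
 * pattern.  A helper thread stands in for the IOMMU hardware writing
 * the semaphore; the main thread polls it without holding any lock,
 * mirroring the shape of the new iommu_completion_wait().
 * Build (hypothetical file name): cc -pthread completion_wait_sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define LOOP_TIMEOUT 100000	/* same bound as the new driver constant */

static void *fake_iommu(void *arg)
{
	volatile unsigned long long *sem = arg;

	usleep(50);	/* pretend the command queue drains */
	*sem = 1;	/* the hardware's memory store on completion */
	return NULL;
}

int main(void)
{
	volatile unsigned long long sem = 0;	/* completion indicator; volatile forces a re-read each poll */
	pthread_t hw;
	int i = 0;

	pthread_create(&hw, NULL, fake_iommu, (void *)&sem);

	/* Poll the semaphore with a 1-microsecond delay per iteration. */
	while (sem == 0 && i < LOOP_TIMEOUT) {
		usleep(1);
		i += 1;
	}

	if (i == LOOP_TIMEOUT)
		puts("Completion-Wait loop timed out");
	else
		puts("Completion-Wait finished");

	pthread_join(hw, NULL);
	return 0;
}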
@@ -25,6 +25,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/iommu-helper.h>
 #include <linux/iommu.h>
+#include <linux/delay.h>
 #include <asm/proto.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -34,7 +35,7 @@
 
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
-#define EXIT_LOOP_COUNT 10000000
+#define LOOP_TIMEOUT	100000
 
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
@@ -383,10 +384,14 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
  *
  ****************************************************************************/
 
-static void build_completion_wait(struct iommu_cmd *cmd)
+static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
 {
+	WARN_ON(address & 0x7ULL);
+
 	memset(cmd, 0, sizeof(*cmd));
-	cmd->data[0] = CMD_COMPL_WAIT_INT_MASK;
+	cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
+	cmd->data[1] = upper_32_bits(__pa(address));
+	cmd->data[2] = 1;
 	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
 }
 
@@ -432,12 +437,14 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
  * Writes the command to the IOMMUs command buffer and informs the
  * hardware about the new command. Must be called with iommu->lock held.
  */
-static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 {
+	unsigned long flags;
 	u32 tail, head;
 	u8 *target;
 
 	WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
+	spin_lock_irqsave(&iommu->lock, flags);
 	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 	target = iommu->cmd_buf + tail;
 	memcpy_toio(target, cmd, sizeof(*cmd));
@@ -446,99 +453,41 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 	if (tail == head)
 		return -ENOMEM;
 	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
-
-	return 0;
-}
-
-/*
- * General queuing function for commands. Takes iommu->lock and calls
- * __iommu_queue_command().
- */
-static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
-{
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	ret = __iommu_queue_command(iommu, cmd);
-	if (!ret)
-		iommu->need_sync = true;
+	iommu->need_sync = true;
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	return ret;
-}
-
-/*
- * This function waits until an IOMMU has completed a completion
- * wait command
- */
-static void __iommu_wait_for_completion(struct amd_iommu *iommu)
-{
-	int ready = 0;
-	unsigned status = 0;
-	unsigned long i = 0;
-
-	INC_STATS_COUNTER(compl_wait);
-
-	while (!ready && (i < EXIT_LOOP_COUNT)) {
-		++i;
-		/* wait for the bit to become one */
-		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
-		ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
-	}
-
-	/* set bit back to zero */
-	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
-	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
-	if (unlikely(i == EXIT_LOOP_COUNT))
-		iommu->reset_in_progress = true;
+	return 0;
 }
 
 /*
  * This function queues a completion wait command into the command
  * buffer of an IOMMU
  */
-static int __iommu_completion_wait(struct amd_iommu *iommu)
-{
-	struct iommu_cmd cmd;
-
-	build_completion_wait(&cmd);
-
-	return __iommu_queue_command(iommu, &cmd);
-}
-
-/*
- * This function is called whenever we need to ensure that the IOMMU has
- * completed execution of all commands we sent. It sends a
- * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
- * us about that by writing a value to a physical address we pass with
- * the command.
- */
 static int iommu_completion_wait(struct amd_iommu *iommu)
 {
-	int ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&iommu->lock, flags);
+	struct iommu_cmd cmd;
+	volatile u64 sem = 0;
+	int ret, i = 0;
 
 	if (!iommu->need_sync)
-		goto out;
+		return 0;
 
-	ret = __iommu_completion_wait(iommu);
+	build_completion_wait(&cmd, (u64)&sem);
 
-	iommu->need_sync = false;
-
+	ret = iommu_queue_command(iommu, &cmd);
 	if (ret)
-		goto out;
+		return ret;
 
-	__iommu_wait_for_completion(iommu);
+	while (sem == 0 && i < LOOP_TIMEOUT) {
+		udelay(1);
+		i += 1;
+	}
 
-out:
-	spin_unlock_irqrestore(&iommu->lock, flags);
-
-	if (iommu->reset_in_progress)
+	if (i == LOOP_TIMEOUT) {
+		pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
+		iommu->reset_in_progress = true;
 		reset_iommu_command_buffer(iommu);
+	}
 
 	return 0;
 }