author    Joerg Roedel <joerg.roedel@amd.com>  2009-11-26 15:35:33 +0100
committer Joerg Roedel <joerg.roedel@amd.com>  2009-11-27 14:20:36 +0100
commit    b00d3bcff4d996f65e337d404b0df5dc201a01ab (patch)
tree      b15d2bc22e3c8938078875c4e1b62d70eaea5a7f
parent    3fa43655d81d471d47c44b0db4e2be1f8af32207 (diff)
x86/amd-iommu: Cleanup DTE flushing code
This patch cleans up the code to flush device table entries in the IOMMU. With this change the driver can get rid of the iommu_queue_inv_dev_entry() function.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h |   1
-rw-r--r--  arch/x86/kernel/amd_iommu.c            | 100
2 files changed, 35 insertions(+), 66 deletions(-)
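For orientation, a minimal before/after sketch of the call pattern this patch changes (a hypothetical call site, condensed from the hunks below): previously every caller had to resolve the device ID and the responsible IOMMU by hand before queueing the invalidation; after the patch, iommu_flush_device() encapsulates that lookup.

	/* before this patch: caller does the lookup itself */
	devid = get_device_id(dev);
	iommu = amd_iommu_rlookup_table[devid];
	iommu_queue_inv_dev_entry(iommu, devid);

	/* after this patch: pass the struct device directly */
	iommu_flush_device(dev);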
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 93953d1..f92d1b3 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -253,6 +253,7 @@ struct protection_domain {
*/
struct iommu_dev_data {
struct list_head list; /* For domain->dev_list */
+ struct device *dev; /* Device this data belongs to */
struct device *alias; /* The Alias Device */
struct protection_domain *domain; /* Domain the device is bound to */
atomic_t bind; /* Domain attach reference count */
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 41c4ebe..0eafca5 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -146,6 +146,8 @@ static int iommu_init_device(struct device *dev)
if (!dev_data)
return -ENOMEM;
+ dev_data->dev = dev;
+
devid = get_device_id(dev);
alias = amd_iommu_alias_table[devid];
pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
@@ -478,31 +480,21 @@ static void iommu_flush_complete(struct protection_domain *domain)
/*
* Command send function for invalidating a device table entry
*/
-static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
-{
- struct iommu_cmd cmd;
- int ret;
-
- BUG_ON(iommu == NULL);
-
- memset(&cmd, 0, sizeof(cmd));
- CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
- cmd.data[0] = devid;
-
- ret = iommu_queue_command(iommu, &cmd);
-
- return ret;
-}
-
static int iommu_flush_device(struct device *dev)
{
struct amd_iommu *iommu;
+ struct iommu_cmd cmd;
u16 devid;
devid = get_device_id(dev);
iommu = amd_iommu_rlookup_table[devid];
- return iommu_queue_inv_dev_entry(iommu, devid);
+ /* Build command */
+ memset(&cmd, 0, sizeof(cmd));
+ CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
+ cmd.data[0] = devid;
+
+ return iommu_queue_command(iommu, &cmd);
}
static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
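A sketch of a typical call sequence after this refactoring (set_dte_entry() and iommu_flush_complete() are existing helpers in this driver; the exact call sites vary):

	/* rewrite the in-memory device table entry ... */
	set_dte_entry(devid, domain);
	/* ... invalidate the IOMMU's cached copy of it ... */
	iommu_flush_device(dev);
	/* ... and wait until the flush command has completed */
	iommu_flush_complete(domain);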
@@ -592,30 +584,43 @@ static void iommu_flush_tlb_pde(struct protection_domain *domain)
__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}
+
/*
- * This function flushes all domains that have devices on the given IOMMU
* This function flushes the DTEs for all devices in the domain
*/
-static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
+static void iommu_flush_domain_devices(struct protection_domain *domain)
+{
+ struct iommu_dev_data *dev_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ list_for_each_entry(dev_data, &domain->dev_list, list)
+ iommu_flush_device(dev_data->dev);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+static void iommu_flush_all_domain_devices(void)
{
- u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
struct protection_domain *domain;
unsigned long flags;
spin_lock_irqsave(&amd_iommu_pd_lock, flags);
list_for_each_entry(domain, &amd_iommu_pd_list, list) {
- if (domain->dev_iommu[iommu->index] == 0)
- continue;
-
- spin_lock(&domain->lock);
- iommu_queue_inv_iommu_pages(iommu, address, domain->id, 1, 1);
+ iommu_flush_domain_devices(domain);
iommu_flush_complete(domain);
- spin_unlock(&domain->lock);
}
spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}
+void amd_iommu_flush_all_devices(void)
+{
+ iommu_flush_all_domain_devices();
+}
+
/*
* This function uses heavy locking and may disable irqs for some time. But
* this is no issue because it is only called during resume.
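To illustrate why the heavy locking is tolerable, a condensed sketch of the resume path (simplified; the real code lives in the driver's suspend/resume handling):

	/* on resume the IOMMU's caches are cold or stale, so
	 * rebuild them wholesale: DTEs first, then the IO/TLBs */
	amd_iommu_flush_all_devices();
	amd_iommu_flush_all_domains();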
@@ -637,38 +642,6 @@ void amd_iommu_flush_all_domains(void)
spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
}
-static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
-{
- int i;
-
- for (i = 0; i <= amd_iommu_last_bdf; ++i) {
- if (iommu != amd_iommu_rlookup_table[i])
- continue;
-
- iommu_queue_inv_dev_entry(iommu, i);
- iommu_completion_wait(iommu);
- }
-}
-
-static void flush_devices_by_domain(struct protection_domain *domain)
-{
- struct amd_iommu *iommu;
- int i;
-
- for (i = 0; i <= amd_iommu_last_bdf; ++i) {
- if ((domain == NULL && amd_iommu_pd_table[i] == NULL) ||
- (amd_iommu_pd_table[i] != domain))
- continue;
-
- iommu = amd_iommu_rlookup_table[i];
- if (!iommu)
- continue;
-
- iommu_queue_inv_dev_entry(iommu, i);
- iommu_completion_wait(iommu);
- }
-}
-
static void reset_iommu_command_buffer(struct amd_iommu *iommu)
{
pr_err("AMD-Vi: Resetting IOMMU command buffer\n");
@@ -679,17 +652,12 @@ static void reset_iommu_command_buffer(struct amd_iommu *iommu)
iommu->reset_in_progress = true;
amd_iommu_reset_cmd_buffer(iommu);
- flush_all_devices_for_iommu(iommu);
- flush_all_domains_on_iommu(iommu);
+ amd_iommu_flush_all_devices();
+ amd_iommu_flush_all_domains();
iommu->reset_in_progress = false;
}
-void amd_iommu_flush_all_devices(void)
-{
- flush_devices_by_domain(NULL);
-}
-
/****************************************************************************
*
* The functions below are used to create the page table mappings for
@@ -1692,7 +1660,7 @@ static void update_domain(struct protection_domain *domain)
return;
update_device_table(domain);
- flush_devices_by_domain(domain);
+ iommu_flush_domain_devices(domain);
iommu_flush_tlb_pde(domain);
domain->updated = false;
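Condensed, the domain update path now performs three ordered steps (a summary of the hunk above, not additional code):

	/* update_domain(), in outline:
	 * 1. update_device_table(domain)    - write the new DTE contents
	 * 2. iommu_flush_domain_devices()   - invalidate the cached DTEs
	 * 3. iommu_flush_tlb_pde(domain)    - drop stale translations
	 */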