authorJoerg Roedel <joerg.roedel@amd.com>2009-11-23 18:30:32 +0100
committerJoerg Roedel <joerg.roedel@amd.com>2009-11-27 14:16:18 +0100
commit6de8ad9b9ee0ec5b52ec8ec41401833e5e89186f (patch)
tree769a99c63c1c6cb72cf326dccecb51efae178868 /arch
parent0518a3a4585cb3eeeaf14ca57131f11d252130c6 (diff)
x86/amd-iommu: Make iommu_flush_pages aware of multiple IOMMUs
This patch extends the iommu_flush_pages function to flush the TLB entries on all IOMMUs the domain has devices on. This basically gives up the former assumption that dma_ops domains are only bound to one IOMMU in the system.

For dma_ops domains this is still true but not for IOMMU-API managed domains. Giving this assumption up for dma_ops domains too allows code simplification.

Further it splits out the main logic into a generic function which can be used by iommu_flush_tlb too.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
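The reuse the message alludes to is straightforward: because __iommu_flush_pages already escalates to a full-TLB invalidation when the range covers more than one page, a whole-domain flush can be expressed through it. A minimal sketch of such an iommu_flush_tlb, assuming the CMD_INV_IOMMU_ALL_PAGES_ADDRESS constant from amd_iommu_types.h (this follow-up is implied by the commit message, not contained in this patch):

	/*
	 * Sketch only: passing a size that spans more than one page
	 * forces the pages > 1 path in __iommu_flush_pages, which turns
	 * the request into a full-TLB invalidation on every IOMMU this
	 * domain has devices on.
	 */
	static void iommu_flush_tlb(struct protection_domain *domain)
	{
		__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
	}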
Diffstat (limited to 'arch')
 arch/x86/kernel/amd_iommu.c | 31 ++++++++++++++++++++++++-------
 1 file changed, 24 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 8fa5cc3..7c06e57 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -447,10 +447,10 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
* It invalidates a single PTE if the range to flush is within a single
* page. Otherwise it flushes the whole TLB of the IOMMU.
*/
-static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
- u64 address, size_t size)
+static void __iommu_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size, int pde)
{
- int s = 0;
+ int s = 0, i;
unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);

address &= PAGE_MASK;
@@ -464,9 +464,26 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
s = 1;
}
- iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);
- return 0;
+ for (i = 0; i < amd_iommus_present; ++i) {
+ if (!domain->dev_iommu[i])
+ continue;
+
+ /*
+ * Devices of this domain are behind this IOMMU
+ * We need a TLB flush
+ */
+ iommu_queue_inv_iommu_pages(amd_iommus[i], address,
+ domain->id, pde, s);
+ }
+
+ return;
+}
+
+static void iommu_flush_pages(struct protection_domain *domain,
+ u64 address, size_t size)
+{
+ __iommu_flush_pages(domain, address, size, 0);
}
/* Flush the whole IO/TLB for a given protection domain */
@@ -1683,7 +1700,7 @@ retry:
iommu_flush_tlb(iommu, dma_dom->domain.id);
dma_dom->need_flush = false;
} else if (unlikely(iommu_has_npcache(iommu)))
- iommu_flush_pages(iommu, dma_dom->domain.id, address, size);
+ iommu_flush_pages(&dma_dom->domain, address, size);
out:
return address;
@@ -1731,7 +1748,7 @@ static void __unmap_single(struct amd_iommu *iommu,
dma_ops_free_addresses(dma_dom, dma_addr, pages);
if (amd_iommu_unmap_flush || dma_dom->need_flush) {
- iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
+ iommu_flush_pages(&dma_dom->domain, dma_addr, size);
dma_dom->need_flush = false;
}
}
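The new loop keys off domain->dev_iommu[], the per-IOMMU device reference counts this patch series introduces: a nonzero count at index i means the domain has at least one device behind amd_iommus[i], so only those IOMMUs receive an INVALIDATE_IOMMU_PAGES command. A minimal sketch of how such counters would be maintained on attach and detach, assuming a hypothetical helper name and an index field in struct amd_iommu (neither is part of this diff):

	/*
	 * Illustrative sketch, not taken from this patch: attach bumps
	 * the count for the IOMMU the device sits behind, detach drops
	 * it, so the flush loop above can skip IOMMUs that hold no
	 * devices of this domain.
	 */
	static void domain_dev_iommu_ref(struct protection_domain *domain,
					 struct amd_iommu *iommu, int delta)
	{
		/* delta is +1 on device attach, -1 on device detach */
		domain->dev_iommu[iommu->index] += delta;
	}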