author	Joerg Roedel <joerg.roedel@amd.com>	2009-11-23 18:32:38 +0100
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-11-27 14:16:29 +0100
commit	318afd41d2eca3224de3fd85a3b9a27a3010a98d (patch)
tree	83434f1a39ee76536b4a987a25f4ff1153c41438 /arch
parent	09b4280439ef6fdc55f1353a9135034336eb5d26 (diff)
x86/amd-iommu: Make np-cache a global flag
The non-present cache flag was IOMMU-local until now, which doesn't make sense. Make it a global flag so we can remove the last user of 'struct iommu' in the map/unmap path.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
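In outline, the patch trades a per-IOMMU capability test for a single global flag: each IOMMU's capability word is inspected once at init time, and the hot map/unmap path only reads the flag, so it no longer needs a pointer to the IOMMU. The following standalone C sketch illustrates that pattern; the names, the capability bit position, and the flush stub are simplified stand-ins chosen for illustration, not the kernel implementation shown in the diff below.

#include <stdbool.h>
#include <stdio.h>

/* Assumed bit position for the non-present-cache capability (illustrative). */
#define CAP_NPCACHE_BIT 26

/* Global flag: true if any IOMMU caches non-present translation entries. */
static bool np_cache;

/* Called once per IOMMU at init time; folds its capability into the flag. */
static void init_one_iommu(unsigned long cap)
{
	if (cap & (1UL << CAP_NPCACHE_BIT))
		np_cache = true;
}

/* Map-path tail: no per-IOMMU lookup is needed to decide whether to flush. */
static void flush_after_map(unsigned long address, unsigned long size)
{
	if (np_cache)
		printf("flush pages at 0x%lx, size 0x%lx\n", address, size);
}

int main(void)
{
	init_one_iommu(1UL << CAP_NPCACHE_BIT);	/* one IOMMU with the cache */
	init_one_iommu(0);			/* one without */
	flush_after_map(0x100000, 0x1000);	/* flush runs: the flag is global */
	return 0;
}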
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/amd_iommu_types.h	3
-rw-r--r--	arch/x86/kernel/amd_iommu.c	8
-rw-r--r--	arch/x86/kernel/amd_iommu_init.c	6
3 files changed, 10 insertions, 7 deletions
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index b332b7f..4899f783 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -211,6 +211,9 @@ extern bool amd_iommu_dump;
printk(KERN_INFO "AMD-Vi: " format, ## arg); \
} while(0);
+/* global flag if IOMMUs cache non-present entries */
+extern bool amd_iommu_np_cache;
+
/*
* Make iterating over all IOMMUs easier
*/
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index a1bd99d..5ebd24e 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -131,12 +131,6 @@ static void amd_iommu_stats_init(void)
#endif
-/* returns !0 if the IOMMU is caching non-present entries in its TLB */
-static int iommu_has_npcache(struct amd_iommu *iommu)
-{
- return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
-}
-
/****************************************************************************
*
* Interrupt handling functions
@@ -1713,7 +1707,7 @@ retry:
if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
iommu_flush_tlb(&dma_dom->domain);
dma_dom->need_flush = false;
- } else if (unlikely(iommu_has_npcache(iommu)))
+ } else if (unlikely(amd_iommu_np_cache))
iommu_flush_pages(&dma_dom->domain, address, size);
out:
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 73d5173..fbe4c3c 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -141,6 +141,9 @@ LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the system */
struct amd_iommu *amd_iommus[MAX_IOMMUS];
int amd_iommus_present;
+/* IOMMUs have a non-present cache? */
+bool amd_iommu_np_cache __read_mostly;
+
/*
* List of protection domains - used during resume
*/
@@ -891,6 +894,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
init_iommu_from_acpi(iommu, h);
init_iommu_devices(iommu);
+ if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
+ amd_iommu_np_cache = true;
+
return pci_enable_device(iommu->dev);
}