path: root/drivers/pci/iova.c
author      Keshavamurthy, Anil S <anil.s.keshavamurthy@intel.com>    2007-10-21 16:41:58 -0700
committer   Linus Torvalds <torvalds@woody.linux-foundation.org>      2007-10-22 08:13:19 -0700
commit      f76aec76ec7f68829a66624d11a50ed6cb404185 (patch)
tree        e470670d6c26aa06ca1fa28e97b25717e80dfa8a /drivers/pci/iova.c
parent      49a0429e53f29109cbf1eadd89497286ba81f1ae (diff)
intel-iommu: optimize sg map/unmap calls
This patch adds PageSelectiveInvalidation support, replacing the existing DomainSelectiveInvalidation for intel_{map/unmap}_sg() calls, and also enables mapping one big contiguous DMA virtual address to a discontiguous physical address range for sg map/unmap calls.

"Domain selective invalidations" wipe out the IOMMU address translation cache based on domain ID, whereas "page selective invalidations" wipe out the IOMMU address translation cache only for the given address mask range, which is more cache friendly compared to domain selective invalidations.

Here is how it is done.

1) Changes to iova.c

alloc_iova() now takes a bool size_aligned argument which, when set, returns an io virtual address that is naturally aligned to 2 ^ x, where x is the order of the size requested. Returning an io virtual address which is naturally aligned helps the iommu do "page selective invalidations", which are IOMMU cache friendly compared to "domain selective invalidations".

2) Changes to drivers/pci/intel-iommu.c

Clean up the intel_{map/unmap}_{single/sg}() calls so that the s/g map/unmap calls no longer depend on intel_{map/unmap}_single().

intel_map_sg() now computes the total DMA virtual address required, allocates a size-aligned total DMA virtual address, and maps the discontiguous physical addresses to the allocated contiguous DMA virtual address.

In the intel_unmap_sg() case, since the DMA virtual address is contiguous and size aligned, PageSelectiveInvalidation is used, replacing the earlier DomainSelectiveInvalidations.

Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: Greg KH <greg@kroah.com>
Cc: Ashok Raj <ashok.raj@intel.com>
Cc: Suresh B <suresh.b.siddha@intel.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
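As background for the iova.c hunks below, here is a minimal, self-contained user-space sketch of the size-aligned allocation arithmetic the commit message describes. The helpers roundup_pow_of_two() and pad_size() are local stand-ins that mirror the kernel's __roundup_pow_of_two() and the patch's iova_get_pad_size(); the surrounding scaffolding and the example numbers are illustrative only, not taken from intel-iommu.c.

#include <assert.h>
#include <stdio.h>

/* Round size up to the next power of two, as alloc_iova() does when
 * size_aligned is set (stand-in for the kernel's __roundup_pow_of_two). */
static unsigned long roundup_pow_of_two(unsigned long size)
{
        unsigned long r = 1;
        while (r < size)
                r <<= 1;
        return r;
}

/* Padding needed below limit_pfn so that the allocated range starts on a
 * multiple of its (power-of-two) size -- same arithmetic as the patch's
 * iova_get_pad_size() for the non-zero-order case. */
static unsigned long pad_size(unsigned long size, unsigned long limit_pfn)
{
        return (limit_pfn + 1) % size;
}

int main(void)
{
        unsigned long size = 5;            /* page frames requested */
        unsigned long limit_pfn = 0xffffd; /* illustrative upper bound */

        size = roundup_pow_of_two(size);   /* 5 -> 8 */
        unsigned long pad = pad_size(size, limit_pfn);        /* 6 here */
        unsigned long pfn_lo = limit_pfn - (size + pad) + 1;  /* 0xffff0 */
        unsigned long pfn_hi = pfn_lo + size - 1;             /* 0xffff7 */

        /* pfn_lo is naturally aligned on the rounded-up size, so the whole
         * range can be flushed with one address+mask invalidation. */
        assert(pfn_lo % size == 0);
        printf("pfn_lo=0x%lx pfn_hi=0x%lx pad=%lu\n", pfn_lo, pfn_hi, pad);
        return 0;
}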
Diffstat (limited to 'drivers/pci/iova.c')
-rw-r--r--  drivers/pci/iova.c  |  63
1 file changed, 50 insertions(+), 13 deletions(-)
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 717fafa..a84571c 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -57,12 +57,28 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
iovad->cached32_node = rb_next(&free->node);
}
-static int __alloc_iova_range(struct iova_domain *iovad,
- unsigned long size, unsigned long limit_pfn, struct iova *new)
+/* Computes the padding size required to make the
+ * start address naturally aligned on its size
+ */
+static int
+iova_get_pad_size(int size, unsigned int limit_pfn)
+{
+ unsigned int pad_size = 0;
+ unsigned int order = ilog2(size);
+
+ if (order)
+ pad_size = (limit_pfn + 1) % (1 << order);
+
+ return pad_size;
+}
+
+static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
+ unsigned long limit_pfn, struct iova *new, bool size_aligned)
{
struct rb_node *curr = NULL;
unsigned long flags;
unsigned long saved_pfn;
+ unsigned int pad_size = 0;
/* Walk the tree backwards */
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
@@ -72,22 +88,32 @@ static int __alloc_iova_range(struct iova_domain *iovad,
struct iova *curr_iova = container_of(curr, struct iova, node);
if (limit_pfn < curr_iova->pfn_lo)
goto move_left;
- if (limit_pfn < curr_iova->pfn_hi)
+ else if (limit_pfn < curr_iova->pfn_hi)
goto adjust_limit_pfn;
- if ((curr_iova->pfn_hi + size) <= limit_pfn)
- break; /* found a free slot */
+ else {
+ if (size_aligned)
+ pad_size = iova_get_pad_size(size, limit_pfn);
+ if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
+ break; /* found a free slot */
+ }
adjust_limit_pfn:
limit_pfn = curr_iova->pfn_lo - 1;
move_left:
curr = rb_prev(curr);
}
- if ((!curr) && !(IOVA_START_PFN + size <= limit_pfn)) {
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- return -ENOMEM;
+ if (!curr) {
+ if (size_aligned)
+ pad_size = iova_get_pad_size(size, limit_pfn);
+ if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ return -ENOMEM;
+ }
}
- new->pfn_hi = limit_pfn;
- new->pfn_lo = limit_pfn - size + 1;
+
+ /* pfn_lo will point to size aligned address if size_aligned is set */
+ new->pfn_lo = limit_pfn - (size + pad_size) + 1;
+ new->pfn_hi = new->pfn_lo + size - 1;
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return 0;
@@ -119,12 +145,16 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
* @iovad - iova domain in question
* @size - size of page frames to allocate
* @limit_pfn - max limit address
+ * @size_aligned - set if size_aligned address range is required
* This function allocates an iova in the range limit_pfn to IOVA_START_PFN
- * looking from limit_pfn instead from IOVA_START_PFN.
+ * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned
+ * flag is set then the allocated address iova->pfn_lo will be naturally
+ * aligned on roundup_power_of_two(size).
*/
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
- unsigned long limit_pfn)
+ unsigned long limit_pfn,
+ bool size_aligned)
{
unsigned long flags;
struct iova *new_iova;
@@ -134,8 +164,15 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
if (!new_iova)
return NULL;
+ /* If size aligned is set then round the size
+ * to the next power of two.
+ */
+ if (size_aligned)
+ size = __roundup_pow_of_two(size);
+
spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
- ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova);
+ ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
+ size_aligned);
if (ret) {
spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);