author | David Woodhouse <David.Woodhouse@intel.com> | 2009-06-28 10:53:37 +0100 |
---|---|---|
committer | David Woodhouse <David.Woodhouse@intel.com> | 2009-06-29 13:33:11 +0100 |
commit | 1c5a46ed49e37f56f8aa9000bb1c2ac59670c372 (patch) | |
tree | 70d61d1fdb94824e000a12a7ea086281074823bc /drivers | |
parent | b026fd28ea23af24a3eea6e5be3f3d0193a8e87d (diff) | |
intel-iommu: Clean up address handling in domain_page_mapping()
No more masking and alignment; just use pfns.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
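For context, the patch replaces the exclusive, aligned-up end_pfn with an inclusive last_pfn. Below is a minimal standalone sketch (not part of the patch) showing that the two formulations cover the same page frames; VTD_PAGE_SHIFT and VTD_PAGE_ALIGN are redefined locally here, assuming the driver's 4KiB VT-d page size, and the hpa/size values are made-up examples.

```c
/*
 * Standalone illustration (not kernel code) of the pfn arithmetic in
 * this patch.  VTD_PAGE_SHIFT and VTD_PAGE_ALIGN are redefined locally,
 * assuming the driver's 4KiB VT-d page size; hpa and size are made-up
 * example values.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT  12
#define VTD_PAGE_SIZE   (1ULL << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(a) (((a) + VTD_PAGE_SIZE - 1) & ~(VTD_PAGE_SIZE - 1))

int main(void)
{
	uint64_t hpa  = 0x12345678;   /* not page-aligned on purpose */
	size_t   size = 0x3000;       /* 3 pages' worth of bytes      */

	/* Old scheme: align the end up, iterate with an exclusive end_pfn. */
	uint64_t start_pfn = hpa >> VTD_PAGE_SHIFT;
	uint64_t end_pfn   = VTD_PAGE_ALIGN(hpa + size) >> VTD_PAGE_SHIFT;

	/* New scheme: no alignment, iterate up to an inclusive last_pfn. */
	uint64_t last_pfn  = (hpa + size - 1) >> VTD_PAGE_SHIFT;

	/* Both describe the same set of page frames (4 here, because the
	 * unaligned start spills the 0x3000-byte range into a 4th page). */
	printf("old page count: %llu\n",
	       (unsigned long long)(end_pfn - start_pfn));
	printf("new page count: %llu\n",
	       (unsigned long long)(last_pfn - start_pfn + 1));
	return 0;
}
```

Dropping the VTD_PAGE_ALIGN() step (and the iova &= PAGE_MASK masking) is exactly what the new inclusive-bound loop makes unnecessary.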
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/pci/intel-iommu.c | 16 |
1 file changed, 7 insertions, 9 deletions
```diff
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index dfbabd1..f08d786 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1647,20 +1647,18 @@ static int
 domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 			u64 hpa, size_t size, int prot)
 {
-	u64 start_pfn, end_pfn;
+	unsigned long start_pfn = hpa >> VTD_PAGE_SHIFT;
+	unsigned long last_pfn = (hpa + size - 1) >> VTD_PAGE_SHIFT;
 	struct dma_pte *pte;
-	int index;
-	int addr_width = agaw_to_width(domain->agaw);
+	int index = 0;
+	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 
-	BUG_ON(hpa >> addr_width);
+	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
 
 	if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
 		return -EINVAL;
-	iova &= PAGE_MASK;
-	start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT;
-	end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT;
-	index = 0;
-	while (start_pfn < end_pfn) {
+
+	while (start_pfn <= last_pfn) {
 		pte = pfn_to_dma_pte(domain, (iova >> VTD_PAGE_SHIFT) + index);
 		if (!pte)
 			return -ENOMEM;
```
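One detail worth spelling out: the new BUG_ON() compares a pfn against addr_width measured in pfn bits (the AGAW width minus VTD_PAGE_SHIFT), and the addr_width < BITS_PER_LONG guard short-circuits the shift when the width is as wide as unsigned long itself, where the check could not fire and the shift would be out of range. A minimal userspace sketch of that logic, with a hard-coded example width standing in for agaw_to_width():

```c
/*
 * Userspace sketch of the reworked bounds check.  The width value 39 is
 * a made-up example standing in for agaw_to_width(domain->agaw), and
 * BITS_PER_LONG is derived locally; assert() stands in for BUG_ON().
 */
#include <assert.h>
#include <limits.h>

#define VTD_PAGE_SHIFT  12
#define BITS_PER_LONG   ((int)(sizeof(long) * CHAR_BIT))

static void check_last_pfn(unsigned long last_pfn, int agaw_width)
{
	/* Width of a valid pfn for this domain, in bits. */
	int addr_width = agaw_width - VTD_PAGE_SHIFT;

	/*
	 * Shifting an unsigned long by >= BITS_PER_LONG is undefined, so
	 * the shift is only evaluated when addr_width is narrower than
	 * the type; a wider pfn field can never be exceeded anyway.
	 */
	assert(!(addr_width < BITS_PER_LONG && last_pfn >> addr_width));
}

int main(void)
{
	/* 0x12348 fits comfortably in 39 - 12 = 27 bits, so this passes. */
	check_last_pfn(0x12348UL, 39);
	return 0;
}
```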