Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r-- | arch/powerpc/kernel/iommu.c | 115 |
1 files changed, 57 insertions, 58 deletions
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 550a193..1bfa706b 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -32,6 +32,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/bitops.h>
 #include <linux/iommu-helper.h>
+#include <linux/crash_dump.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/iommu.h>
@@ -51,17 +52,6 @@ static int protect4gb = 1;
 
 static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
 
-static inline unsigned long iommu_num_pages(unsigned long vaddr,
-                                            unsigned long slen)
-{
-        unsigned long npages;
-
-        npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
-        npages >>= IOMMU_PAGE_SHIFT;
-
-        return npages;
-}
-
 static int __init setup_protect4gb(char *str)
 {
         if (strcmp(str, "on") == 0)
@@ -325,7 +315,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                 }
                 /* Allocate iommu entries for that segment */
                 vaddr = (unsigned long) sg_virt(s);
-                npages = iommu_num_pages(vaddr, slen);
+                npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
                 align = 0;
                 if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
                     (vaddr & ~PAGE_MASK) == 0)
@@ -418,7 +408,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                         unsigned long vaddr, npages;
 
                         vaddr = s->dma_address & IOMMU_PAGE_MASK;
-                        npages = iommu_num_pages(s->dma_address, s->dma_length);
+                        npages = iommu_num_pages(s->dma_address, s->dma_length,
+                                                 IOMMU_PAGE_SIZE);
                         __iommu_free(tbl, vaddr, npages);
                         s->dma_address = DMA_ERROR_CODE;
                         s->dma_length = 0;
@@ -452,7 +443,8 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
                 if (sg->dma_length == 0)
                         break;
-                npages = iommu_num_pages(dma_handle, sg->dma_length);
+                npages = iommu_num_pages(dma_handle, sg->dma_length,
+                                         IOMMU_PAGE_SIZE);
                 __iommu_free(tbl, dma_handle, npages);
                 sg = sg_next(sg);
         }
@@ -467,6 +459,42 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
         spin_unlock_irqrestore(&(tbl->it_lock), flags);
 }
 
+static void iommu_table_clear(struct iommu_table *tbl)
+{
+        if (!is_kdump_kernel()) {
+                /* Clear the table in case firmware left allocations in it */
+                ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
+                return;
+        }
+
+#ifdef CONFIG_CRASH_DUMP
+        if (ppc_md.tce_get) {
+                unsigned long index, tceval, tcecount = 0;
+
+                /* Reserve the existing mappings left by the first kernel. */
+                for (index = 0; index < tbl->it_size; index++) {
+                        tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
+                        /*
+                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
+                         */
+                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
+                                __set_bit(index, tbl->it_map);
+                                tcecount++;
+                        }
+                }
+
+                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
+                        printk(KERN_WARNING "TCE table is full; freeing ");
+                        printk(KERN_WARNING "%d entries for the kdump boot\n",
+                               KDUMP_MIN_TCE_ENTRIES);
+                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
+                             index < tbl->it_size; index++)
+                                __clear_bit(index, tbl->it_map);
+                }
+        }
+#endif
+}
+
 /*
  * Build a iommu_table structure.  This contains a bit map which
  * is used to manage allocation of the tce space.
@@ -493,38 +521,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
         tbl->it_largehint = tbl->it_halfpoint;
         spin_lock_init(&tbl->it_lock);
 
-#ifdef CONFIG_CRASH_DUMP
-        if (ppc_md.tce_get) {
-                unsigned long index;
-                unsigned long tceval;
-                unsigned long tcecount = 0;
-
-                /*
-                 * Reserve the existing mappings left by the first kernel.
-                 */
-                for (index = 0; index < tbl->it_size; index++) {
-                        tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
-                        /*
-                         * Freed TCE entry contains 0x7fffffffffffffff on JS20
-                         */
-                        if (tceval && (tceval != 0x7fffffffffffffffUL)) {
-                                __set_bit(index, tbl->it_map);
-                                tcecount++;
-                        }
-                }
-                if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
-                        printk(KERN_WARNING "TCE table is full; ");
-                        printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
-                                KDUMP_MIN_TCE_ENTRIES);
-                        for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
-                                index < tbl->it_size; index++)
-                                __clear_bit(index, tbl->it_map);
-                }
-        }
-#else
-        /* Clear the hardware table in case firmware left allocations in it */
-        ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
-#endif
+        iommu_table_clear(tbl);
 
         if (!welcomed) {
                 printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
@@ -568,23 +565,25 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
 }
 
 /* Creates TCEs for a user provided buffer.  The user buffer must be
- * contiguous real kernel storage (not vmalloc).  The address of the buffer
- * passed here is the kernel (virtual) address of the buffer.  The buffer
- * need not be page aligned, the dma_addr_t returned will point to the same
- * byte within the page as vaddr.
+ * contiguous real kernel storage (not vmalloc).  The address passed here
+ * comprises a page address and offset into that page.  The dma_addr_t
+ * returned will point to the same byte within the page as was passed in.
  */
-dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
-                            void *vaddr, size_t size, unsigned long mask,
-                            enum dma_data_direction direction, struct dma_attrs *attrs)
+dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
+                          struct page *page, unsigned long offset, size_t size,
+                          unsigned long mask, enum dma_data_direction direction,
+                          struct dma_attrs *attrs)
 {
         dma_addr_t dma_handle = DMA_ERROR_CODE;
+        void *vaddr;
         unsigned long uaddr;
         unsigned int npages, align;
 
         BUG_ON(direction == DMA_NONE);
 
+        vaddr = page_address(page) + offset;
         uaddr = (unsigned long)vaddr;
-        npages = iommu_num_pages(uaddr, size);
+        npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
 
         if (tbl) {
                 align = 0;
@@ -608,16 +607,16 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
         return dma_handle;
 }
 
-void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
-                        size_t size, enum dma_data_direction direction,
-                        struct dma_attrs *attrs)
+void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
+                      size_t size, enum dma_data_direction direction,
+                      struct dma_attrs *attrs)
 {
         unsigned int npages;
 
         BUG_ON(direction == DMA_NONE);
 
         if (tbl) {
-                npages = iommu_num_pages(dma_handle, size);
+                npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
                 iommu_free(tbl, dma_handle, npages);
         }
 }
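
The powerpc-private iommu_num_pages() inline deleted above is replaced by the
generic helper declared in <linux/iommu-helper.h> (already included at the top
of this file), which takes the IO page size as an explicit third argument
instead of hard-coding IOMMU_PAGE_SHIFT. As a rough sketch, the generic
version (the in-tree one lives in lib/iommu-helper.c; exact formatting may
differ) computes the same value as the removed inline:

#include <linux/kernel.h>       /* DIV_ROUND_UP() */

/* Number of io_page_size-sized pages needed to map len bytes starting at
 * addr, counting the partially used pages at either end of the buffer. */
unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
                              unsigned long io_page_size)
{
        unsigned long size = (addr & (io_page_size - 1)) + len;

        return DIV_ROUND_UP(size, io_page_size);
}

Every powerpc call site now passes IOMMU_PAGE_SIZE, reproducing the old
IOMMU_PAGE_ALIGN()/IOMMU_PAGE_SHIFT arithmetic while letting other IOMMUs
count in their own IO page size.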
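Note that the new iommu_table_clear() also turns the old compile-time
#ifdef CONFIG_CRASH_DUMP choice into a runtime is_kdump_kernel() test: a
kernel built with CONFIG_CRASH_DUMP but booted normally now clears the table
like any other boot, and only an actual kdump capture kernel preserves the
first kernel's TCE entries. is_kdump_kernel() comes from the newly included
<linux/crash_dump.h>; roughly, it reduces to checking whether an ELF core
header from a crashed kernel was passed in (sketch, exact form may differ):

/* A capture kernel is identified by having been handed the address of
 * the crashed kernel's ELF core header (elfcorehdr= on the command line). */
static inline int is_kdump_kernel(void)
{
        return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}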
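Finally, the iommu_map_single()/iommu_unmap_single() pair becomes
iommu_map_page()/iommu_unmap_page(), taking a (page, offset) pair rather than
a kernel virtual address, matching the map_page/unmap_page style of DMA
mapping operations. A hypothetical call site (for illustration only) shows
the conversion; virt_to_page() and offset_in_page() recover the two halves
from an old-style vaddr:

/* Before: handle = iommu_map_single(dev, tbl, vaddr, size, mask,
 *                                   direction, NULL); */

/* After: pass the page and the offset within it; the returned
 * dma_addr_t still points at the same byte that vaddr did. */
handle = iommu_map_page(dev, tbl, virt_to_page(vaddr),
                        offset_in_page(vaddr), size, mask,
                        direction, NULL);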