author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2009-03-12 17:03:48 +0000
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-03-12 23:09:09 +0000
commit		1522ac3ec95ff0230e7aa516f86b674fdf72866c (patch)
tree		77444039536e70b3e9fbb38f686104cb5054aba3 /arch
parent		305b07680f6c6a7e59f996c5bd85f009caff5bb1 (diff)
download	op-kernel-dev-1522ac3ec95ff0230e7aa516f86b674fdf72866c.zip
		op-kernel-dev-1522ac3ec95ff0230e7aa516f86b674fdf72866c.tar.gz
[ARM] Fix virtual to physical translation macro corner cases
The current use of these macros works well when the conversion is
entirely linear.  In this case, we can be assured that the following
holds true:

	__va(p + s) - s = __va(p)

However, this is not always the case, especially when there is a
non-linear conversion (eg, when there is a 3.5GB hole in memory.)
In this case, if 's' is the size of the region (eg, PAGE_SIZE) and
'p' is the final page, the above is most definitely not true.

So, we must ensure that __va() and __pa() are only used with valid
kernel direct mapped RAM addresses.  This patch tweaks the code
to achieve this.

Tested-by: Charles Moschel <fred99@carolina.rr.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
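To make the corner case concrete, here is a small user-space sketch. The bank layout, offsets, and names (toy_va, BANK0_*, BANK1_*) are invented for illustration only; they do not correspond to any real ARM platform or to the kernel's actual __va()/__pa() macros. It models a piecewise phys-to-virt translation with a hole between two RAM banks and shows the identity above breaking on the final page of the first bank, while the "translate the last valid byte, then add one" pattern applied by the patch still produces a sensible one-past-the-end address.

/*
 * Hypothetical user-space model, not kernel code: two RAM banks with a
 * hole between them, folded together contiguously in the virtual
 * direct map, so phys->virt translation is piecewise rather than linear.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define TOY_PAGE_SIZE  0x1000u
#define BANK0_PHYS     0x00000000u
#define BANK0_END      0x08000000u               /* 128MB bank, then a hole */
#define BANK1_PHYS     0x10000000u               /* second bank, after the hole */
#define BANK0_VIRT     0xC0000000u
#define BANK1_VIRT     (BANK0_VIRT + BANK0_END)  /* banks virtually contiguous */

/* piecewise "__va()": addresses at or beyond the end of bank 0 are
 * translated with bank 1's offset */
static uint32_t toy_va(uint32_t phys)
{
	if (phys < BANK0_END)
		return BANK0_VIRT + (phys - BANK0_PHYS);
	return BANK1_VIRT + (phys - BANK1_PHYS);
}

int main(void)
{
	uint32_t p = BANK0_END - TOY_PAGE_SIZE;   /* final page of bank 0 */

	/* p is valid RAM, but p + TOY_PAGE_SIZE lies in the hole, so it is
	 * translated with the wrong bank's offset and the identity breaks */
	uint32_t direct  = toy_va(p);
	uint32_t derived = toy_va(p + TOY_PAGE_SIZE) - TOY_PAGE_SIZE;

	printf("toy_va(p)               = 0x%08" PRIx32 "\n", direct);
	printf("toy_va(p + size) - size = 0x%08" PRIx32 "\n", derived);

	/* the pattern used by the patch: translate the last valid byte,
	 * then step one past it, so toy_va() never sees the hole */
	uint32_t end_ok  = toy_va(BANK0_END - 1) + 1;
	uint32_t end_bad = toy_va(BANK0_END);

	printf("toy_va(end - 1) + 1     = 0x%08" PRIx32 "\n", end_ok);
	printf("toy_va(end)             = 0x%08" PRIx32 "\n", end_bad);
	return 0;
}

In this toy model, toy_va(end) lands on the wrong bank's mapping while toy_va(end - 1) + 1 gives the address one past the end of bank 0's virtual region, which is why the init.c hunk below computes high_memory from the last valid byte rather than from the exclusive end address.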
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/mm/dma-mapping.c	20
-rw-r--r--	arch/arm/mm/init.c		2
-rw-r--r--	arch/arm/mm/mmap.c		2
3 files changed, 14 insertions, 10 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 310e479..f1ef561 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -490,26 +490,30 @@ core_initcall(consistent_init);
  */
 void dma_cache_maint(const void *start, size_t size, int direction)
 {
-	const void *end = start + size;
+	void (*inner_op)(const void *, const void *);
+	void (*outer_op)(unsigned long, unsigned long);
 
-	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));
+	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		dmac_inv_range(start, end);
-		outer_inv_range(__pa(start), __pa(end));
+		inner_op = dmac_inv_range;
+		outer_op = outer_inv_range;
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		dmac_clean_range(start, end);
-		outer_clean_range(__pa(start), __pa(end));
+		inner_op = dmac_clean_range;
+		outer_op = outer_clean_range;
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		dmac_flush_range(start, end);
-		outer_flush_range(__pa(start), __pa(end));
+		inner_op = dmac_flush_range;
+		outer_op = outer_flush_range;
 		break;
 	default:
 		BUG();
 	}
+
+	inner_op(start, start + size);
+	outer_op(__pa(start), __pa(start) + size);
 }
 EXPORT_SYMBOL(dma_cache_maint);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 34df4d9..80fd3b6 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -382,7 +382,7 @@ void __init bootmem_init(void)
 	for_each_node(node)
 		bootmem_free_node(node, mi);
 
-	high_memory = __va(memend_pfn << PAGE_SHIFT);
+	high_memory = __va((memend_pfn << PAGE_SHIFT) - 1) + 1;
 
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 5358fcc..f7457fe 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -124,7 +124,7 @@ int valid_phys_addr_range(unsigned long addr, size_t size)
 {
 	if (addr < PHYS_OFFSET)
 		return 0;
-	if (addr + size > __pa(high_memory))
+	if (addr + size >= __pa(high_memory - 1))
 		return 0;
 
 	return 1;