commit ca219452c6b8a6cd1369b6a78b1cf069d0386865
tree   b2b7de70e9429cf33bbe50698dbbdfa25362fe2e
parent 0edfa8391664a4f795c67d0e07480fbe801a0e1d
Author:    Laura Abbott <labbott@redhat.com>	2016-09-21 15:25:04 -0700
Committer: Will Deacon <will.deacon@arm.com>	2016-09-22 10:17:22 +0100
arm64: Correctly bounds check virt_addr_valid
virt_addr_valid is supposed to return true if and only if virt_to_page
returns a valid page structure. The current macro does math on whatever
address is given and passes that to pfn_valid to verify. vmalloc and
module addresses can happen to generate a pfn that 'happens' to be
valid. Fix this by only performing the pfn_valid check on addresses
that have the potential to be valid.

Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
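For illustration, here is a minimal user-space sketch of the failure mode
(not part of the commit; PAGE_OFFSET, PHYS_OFFSET and the pfn_valid()
window below are made-up example values, and virt_to_pfn() is a stand-in
for the macro arithmetic). It shows how the old macro's bare arithmetic
can map a non-linear (e.g. vmalloc) address onto a pfn that happens to be
backed by RAM, while the new linear-map guard rejects it:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xffff800000000000UL	/* example linear-map base */
#define PHYS_OFFSET	0x80000000UL		/* example DRAM base */

/* Stand-in for the kernel's pfn_valid(): pretend 1GB of RAM at PHYS_OFFSET. */
static bool pfn_valid(uint64_t pfn)
{
	uint64_t first = PHYS_OFFSET >> PAGE_SHIFT;
	uint64_t last  = (PHYS_OFFSET + 0x40000000UL) >> PAGE_SHIFT;

	return pfn >= first && pfn < last;
}

/* The arithmetic the macro applies to whatever address it is handed. */
static uint64_t virt_to_pfn(uint64_t kaddr)
{
	return ((kaddr & ~PAGE_OFFSET) + PHYS_OFFSET) >> PAGE_SHIFT;
}

int main(void)
{
	uint64_t linear  = PAGE_OFFSET + 0x1000;	/* linear-map address */
	uint64_t vmalloc = 0xffff000008000000UL;	/* below PAGE_OFFSET */

	/* Old check: pure arithmetic, so the vmalloc address can also pass. */
	printf("old: linear=%d vmalloc=%d\n",
	       (int)pfn_valid(virt_to_pfn(linear)),
	       (int)pfn_valid(virt_to_pfn(vmalloc)));

	/* New check: reject anything outside the linear map up front. */
	printf("new: linear=%d vmalloc=%d\n",
	       (int)(linear >= PAGE_OFFSET && pfn_valid(virt_to_pfn(linear))),
	       (int)(vmalloc >= PAGE_OFFSET && pfn_valid(virt_to_pfn(vmalloc))));
	return 0;
}

With these example values the old check prints 1 for both addresses, while
the new check prints 1 for the linear address and 0 for the vmalloc one.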
Diffstat (limited to 'arch')
 arch/arm64/include/asm/memory.h | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 31b7322..ba62df8 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -214,7 +214,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define _virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 #else
 #define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
 #define __page_to_voff(kaddr)	(((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
@@ -222,11 +222,15 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
 #define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
 
-#define virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
+#define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
 					   + PHYS_OFFSET) >> PAGE_SHIFT)
 #endif
 #endif
 
+#define _virt_addr_is_linear(kaddr)	(((u64)(kaddr)) >= PAGE_OFFSET)
+#define virt_addr_valid(kaddr)		(_virt_addr_is_linear(kaddr) && \
+					   _virt_addr_valid(kaddr))
+
 #include <asm-generic/memory_model.h>
 
 #endif
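To observe the corrected behaviour at runtime, one could load a throwaway
test module along these lines (illustrative only, not part of this commit;
vav_test is a hypothetical name). virt_addr_valid() should report true for
a kmalloc address, which lives in the linear map, and false for a vmalloc
address, which the old macro could misreport as valid:

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static int __init vav_test_init(void)
{
	void *lin = kmalloc(PAGE_SIZE, GFP_KERNEL);	/* linear-map address */
	void *vm  = vmalloc(PAGE_SIZE);			/* non-linear address */

	if (lin)
		pr_info("kmalloc addr: virt_addr_valid=%d (expect 1)\n",
			virt_addr_valid(lin));
	if (vm)
		pr_info("vmalloc addr: virt_addr_valid=%d (expect 0)\n",
			virt_addr_valid(vm));

	kfree(lin);
	vfree(vm);
	return 0;
}

static void __exit vav_test_exit(void)
{
}

module_init(vav_test_init);
module_exit(vav_test_exit);
MODULE_LICENSE("GPL");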