author | Dave Kleikamp <shaggy@linux.vnet.ibm.com> | 2008-07-08 00:28:51 +1000 |
---|---|---|
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2008-07-09 16:30:45 +1000 |
commit | b845f313d78e4e259ec449909e3bbadf77b53a6d (patch) | |
tree | 03239e77dbc43f627ce112963736c8b4c53117e6 /include | |
parent | e5093ff05d36c64e8f36a9ddb26358256dc133ea (diff) | |
download | op-kernel-dev-b845f313d78e4e259ec449909e3bbadf77b53a6d.zip op-kernel-dev-b845f313d78e4e259ec449909e3bbadf77b53a6d.tar.gz |
mm: Allow architectures to define additional protection bits
This patch allows architectures to define functions to deal with
additional protection bits for mmap() and mprotect().
arch_calc_vm_prot_bits() maps additional protection bits to vm_flags
arch_vm_get_page_prot() maps additional vm_flags to the vma's vm_page_prot
arch_validate_prot() checks for valid values of the protection bits
Note: vm_get_page_prot() is now pretty ugly, but the generated code
should be identical for architectures that don't define additional
protection bits.
Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
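
To illustrate how the three hooks described above fit together, here is a minimal sketch of what an architecture's asm/mman.h could provide. Apart from the three hook names taken from this patch, every identifier here (PROT_ARCH_SPECIAL, VM_ARCH_SPECIAL, _PAGE_ARCH_SPECIAL) is hypothetical and not part of the commit; the sketch only shows the override pattern that the #ifndef guards in the diff below make possible.

```c
/*
 * Hypothetical <asm/mman.h> fragment -- a sketch of the override pattern,
 * not code from this patch.  PROT_ARCH_SPECIAL, VM_ARCH_SPECIAL and
 * _PAGE_ARCH_SPECIAL are made-up names for an architecture-specific
 * protection bit and its vm_flags/PTE counterparts.
 */
#define PROT_ARCH_SPECIAL	0x10		/* extra bit accepted by mmap()/mprotect() */
#define VM_ARCH_SPECIAL		0x01000000	/* matching vm_flags bit */

/* Map the extra PROT_ bit onto the matching VM_ flag. */
#define arch_calc_vm_prot_bits(prot) \
	(((prot) & PROT_ARCH_SPECIAL) ? VM_ARCH_SPECIAL : 0)

/* Map the extra VM_ flag onto an architecture page-protection bit. */
#define arch_vm_get_page_prot(vm_flags) \
	__pgprot(((vm_flags) & VM_ARCH_SPECIAL) ? _PAGE_ARCH_SPECIAL : 0)

/* Accept the extra bit on top of the generic ones. */
static inline int arch_validate_prot(unsigned long prot)
{
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM |
			 PROT_ARCH_SPECIAL)) == 0;
}
#define arch_validate_prot arch_validate_prot
```

Since linux/mman.h includes asm/mman.h before testing the #ifndef guards, an architecture that defines these names automatically replaces the no-op defaults added in the hunk below.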
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/mman.h | 29 |
1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/include/linux/mman.h b/include/linux/mman.h
index dab8892..30d1073 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -34,6 +34,32 @@ static inline void vm_unacct_memory(long pages)
 }
 
 /*
+ * Allow architectures to handle additional protection bits
+ */
+
+#ifndef arch_calc_vm_prot_bits
+#define arch_calc_vm_prot_bits(prot) 0
+#endif
+
+#ifndef arch_vm_get_page_prot
+#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
+#endif
+
+#ifndef arch_validate_prot
+/*
+ * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
+ * already been masked out.
+ *
+ * Returns true if the prot flags are valid
+ */
+static inline int arch_validate_prot(unsigned long prot)
+{
+	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
+}
+#define arch_validate_prot arch_validate_prot
+#endif
+
+/*
  * Optimisation macro.  It is equivalent to:
  *   (x & bit1) ? bit2 : 0
  * but this version is faster.
@@ -51,7 +77,8 @@ calc_vm_prot_bits(unsigned long prot)
 {
 	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
 	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
-	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC );
+	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
+	       arch_calc_vm_prot_bits(prot);
 }
 
 /*
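
The diffstat above is limited to include/, so the generic mm side of the change is not shown here. As the commit message notes, vm_get_page_prot() also has to fold in arch_vm_get_page_prot(); roughly, the mm/mmap.c side ends up looking like the sketch below, reconstructed from the description rather than quoted from the patch:

```c
/* mm/mmap.c -- sketch of how the new hook is folded in, not the literal hunk */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));
}
```

Having to go through pgprot_val()/__pgprot() to OR the two values together is the "pretty ugly" part the commit message mentions; when arch_vm_get_page_prot() is the default __pgprot(0), the extra term is a constant zero the compiler can drop, which is why the generated code stays identical for architectures that define no additional bits.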