author     Jeff Dike <jdike@addtoit.com>          2005-09-03 15:57:36 -0700
committer  Linus Torvalds <torvalds@evo.osdl.org> 2005-09-05 00:06:22 -0700
commit     c56004901fa5dcf55f92318f192ab3c0e87db2d1 (patch)
tree       ac53ded16ab9886ce05d4b2d424dfed80dce9e57 /arch/um/kernel/tlb.c
parent     77fa5adcda6d686d2f45a2b55dcb9a03e7d33fa1 (diff)
[PATCH] uml: TLB operation batching
This adds VM op batching to skas0.  Rather than having a context switch to and from the userspace stub for each address space change, we write a number of operations to the stub data page and invoke a different stub which loops over them and executes them all in one go.

The operations are stored as [ system call number, arg1, arg2, ... ] tuples.  The set is terminated by a system call number of 0.  Single operations, i.e. page faults, are handled in the old way, since that is slightly more efficient.

For a kernel build, a minority (~1/4) of the operations are part of a set.  These sets averaged ~100 in length, so for this quarter, the context switching overhead is greatly reduced.

Signed-off-by: Jeff Dike <jdike@addtoit.com>
Cc: Paolo Giarrusso <blaisorblade@yahoo.it>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
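The stub-side dispatch loop is not part of this file, but the encoding it consumes is simple to sketch.  The following userspace illustration (all names hypothetical; this is not the stub code added by the patch) walks a batch laid out as [ system call number, args... ] tuples and stops at the terminating 0:

#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical walker for the batch format described above: each
 * tuple is a system call number followed by its arguments, and a
 * number of 0 terminates the set.  Argument counts are assumed
 * fixed per call (mmap: 6, munmap: 2, mprotect: 3 on the host).
 */
static int batch_nargs(long nr)
{
	switch(nr){
	case SYS_mmap:     return 6;
	case SYS_munmap:   return 2;
	case SYS_mprotect: return 3;
	default:           return -1;
	}
}

static long run_batch(const long *data)
{
	long ret = 0;

	while(*data != 0){
		long nr = *data++;
		long args[6] = { 0 };
		int i, n = batch_nargs(nr);

		if(n < 0)
			return -1;
		for(i = 0; i < n; i++)
			args[i] = *data++;
		ret = syscall(nr, args[0], args[1], args[2],
			      args[3], args[4], args[5]);
		if(ret < 0)
			break;	/* report the first failing operation */
	}
	return ret;
}

Stopping at the first failure keeps the semantics close to issuing the operations one at a time.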
Diffstat (limited to 'arch/um/kernel/tlb.c')
-rw-r--r--  arch/um/kernel/tlb.c | 226
1 file changed, 116 insertions(+), 110 deletions(-)
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 83ec8d47..7d914bb 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -15,12 +15,116 @@
#include "mem_user.h"
#include "os.h"
+static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
+ int r, int w, int x, struct host_vm_op *ops, int index,
+ int last_filled, union mm_context *mmu, void **flush,
+ void *(*do_ops)(union mm_context *, struct host_vm_op *,
+ int, int, void *))
+{
+ __u64 offset;
+ struct host_vm_op *last;
+ int fd;
+
+ fd = phys_mapping(phys, &offset);
+ if(index != -1){
+ last = &ops[index];
+ if((last->type == MMAP) &&
+ (last->u.mmap.addr + last->u.mmap.len == virt) &&
+ (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
+ (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
+ (last->u.mmap.offset + last->u.mmap.len == offset)){
+ last->u.mmap.len += len;
+ return index;
+ }
+ }
+
+ if(index == last_filled){
+ *flush = (*do_ops)(mmu, ops, last_filled, 0, *flush);
+ index = -1;
+ }
+
+ ops[++index] = ((struct host_vm_op) { .type = MMAP,
+ .u = { .mmap = {
+ .addr = virt,
+ .len = len,
+ .r = r,
+ .w = w,
+ .x = x,
+ .fd = fd,
+ .offset = offset }
+ } });
+ return index;
+}
+
+static int add_munmap(unsigned long addr, unsigned long len,
+ struct host_vm_op *ops, int index, int last_filled,
+ union mm_context *mmu, void **flush,
+ void *(*do_ops)(union mm_context *, struct host_vm_op *,
+ int, int, void *))
+{
+ struct host_vm_op *last;
+
+ if(index != -1){
+ last = &ops[index];
+ if((last->type == MUNMAP) &&
+	   (last->u.munmap.addr + last->u.munmap.len == addr)){
+ last->u.munmap.len += len;
+ return index;
+ }
+ }
+
+ if(index == last_filled){
+ *flush = (*do_ops)(mmu, ops, last_filled, 0, *flush);
+ index = -1;
+ }
+
+ ops[++index] = ((struct host_vm_op) { .type = MUNMAP,
+ .u = { .munmap = {
+ .addr = addr,
+ .len = len } } });
+ return index;
+}
+
+static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
+ int x, struct host_vm_op *ops, int index,
+ int last_filled, union mm_context *mmu, void **flush,
+ void *(*do_ops)(union mm_context *,
+ struct host_vm_op *, int, int, void *))
+{
+ struct host_vm_op *last;
+
+ if(index != -1){
+ last = &ops[index];
+ if((last->type == MPROTECT) &&
+ (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
+ (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
+ (last->u.mprotect.x == x)){
+ last->u.mprotect.len += len;
+ return index;
+ }
+ }
+
+ if(index == last_filled){
+ *flush = (*do_ops)(mmu, ops, last_filled, 0, *flush);
+ index = -1;
+ }
+
+ ops[++index] = ((struct host_vm_op) { .type = MPROTECT,
+ .u = { .mprotect = {
+ .addr = addr,
+ .len = len,
+ .r = r,
+ .w = w,
+ .x = x } } });
+ return index;
+}
+
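All three helpers above share one pattern: extend the previous op when the new range is contiguous and compatible, flush the array through do_ops when it is full, and otherwise start a new entry.  A reduced standalone illustration of that pattern, with simplified types rather than the patch's own host_vm_op:

/* Reduced coalesce-or-flush pattern shared by add_mmap, add_munmap
 * and add_mprotect: merge with the previous op when the new range
 * starts where the last one ends, flush the filled slots when the
 * array is full, then start a new entry.
 */
struct range_op {
	unsigned long addr;
	unsigned long len;
};

static int add_range(unsigned long addr, unsigned long len,
		     struct range_op *ops, int index, int last_filled,
		     void (*flush)(struct range_op *, int))
{
	if(index != -1 && ops[index].addr + ops[index].len == addr){
		ops[index].len += len;	/* contiguous - extend last op */
		return index;
	}

	if(index == last_filled){	/* no free slot - flush first */
		(*flush)(ops, last_filled);
		index = -1;
	}

	ops[++index] = ((struct range_op) { .addr = addr, .len = len });
	return index;
}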
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force,
- void (*do_ops)(union mm_context *, struct host_vm_op *,
- int))
+ void *(*do_ops)(union mm_context *, struct host_vm_op *,
+ int, int, void *))
{
pgd_t *npgd;
pud_t *npud;
@@ -29,11 +133,13 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
union mm_context *mmu = &mm->context;
unsigned long addr, end;
int r, w, x;
- struct host_vm_op ops[16];
+ struct host_vm_op ops[1];
+ void *flush = NULL;
int op_index = -1, last_op = sizeof(ops) / sizeof(ops[0]) - 1;
if(mm == NULL) return;
+ ops[0].type = NONE;
for(addr = start_addr; addr < end_addr;){
npgd = pgd_offset(mm, addr);
if(!pgd_present(*npgd)){
@@ -43,7 +149,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
if(force || pgd_newpage(*npgd)){
op_index = add_munmap(addr, end - addr, ops,
op_index, last_op, mmu,
- do_ops);
+ &flush, do_ops);
pgd_mkuptodate(*npgd);
}
addr = end;
@@ -58,7 +164,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
if(force || pud_newpage(*npud)){
op_index = add_munmap(addr, end - addr, ops,
op_index, last_op, mmu,
- do_ops);
+ &flush, do_ops);
pud_mkuptodate(*npud);
}
addr = end;
@@ -73,7 +179,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
if(force || pmd_newpage(*npmd)){
op_index = add_munmap(addr, end - addr, ops,
op_index, last_op, mmu,
- do_ops);
+ &flush, do_ops);
pmd_mkuptodate(*npmd);
}
addr = end;
@@ -96,20 +202,20 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
pte_val(*npte) & PAGE_MASK,
PAGE_SIZE, r, w, x, ops,
op_index, last_op, mmu,
- do_ops);
+ &flush, do_ops);
else op_index = add_munmap(addr, PAGE_SIZE, ops,
op_index, last_op, mmu,
- do_ops);
+ &flush, do_ops);
}
else if(pte_newprot(*npte))
op_index = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
op_index, last_op, mmu,
- do_ops);
+ &flush, do_ops);
*npte = pte_mkuptodate(*npte);
addr += PAGE_SIZE;
}
- (*do_ops)(mmu, ops, op_index);
+ flush = (*do_ops)(mmu, ops, op_index, 1, flush);
}
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
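With the new signature, do_ops receives and returns an opaque flush cookie and takes a finished flag, so a backend can accumulate host work across calls and complete it on the last one.  A hedged sketch of a conforming callback, assuming the patch's host_vm_op definitions (the real skas implementations live elsewhere in the tree; this shows only the contract):

/* Hypothetical do_ops-style callback: process ops[0..last], carry
 * per-flush state in the opaque cookie, and finish up when the
 * final call of the flush passes finished == 1.
 */
static void *example_do_ops(union mm_context *mmu, struct host_vm_op *ops,
			    int last, int finished, void *flush)
{
	int i;

	for(i = 0; i <= last; i++){
		switch(ops[i].type){
		case MMAP:
			/* map ops[i].u.mmap into the host address space */
			break;
		case MUNMAP:
			/* unmap ops[i].u.munmap from the host */
			break;
		case MPROTECT:
			/* apply ops[i].u.mprotect protections */
			break;
		default:	/* NONE - nothing queued in this slot */
			break;
		}
	}

	if(finished){
		/* release any state carried in the flush cookie */
	}

	return flush;
}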
@@ -226,106 +332,6 @@ pte_t *addr_pte(struct task_struct *task, unsigned long addr)
return(pte_offset_map(pmd, addr));
}
-int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
- int r, int w, int x, struct host_vm_op *ops, int index,
- int last_filled, union mm_context *mmu,
- void (*do_ops)(union mm_context *, struct host_vm_op *, int))
-{
- __u64 offset;
- struct host_vm_op *last;
- int fd;
-
- fd = phys_mapping(phys, &offset);
- if(index != -1){
- last = &ops[index];
- if((last->type == MMAP) &&
- (last->u.mmap.addr + last->u.mmap.len == virt) &&
- (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
- (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
- (last->u.mmap.offset + last->u.mmap.len == offset)){
- last->u.mmap.len += len;
- return(index);
- }
- }
-
- if(index == last_filled){
- (*do_ops)(mmu, ops, last_filled);
- index = -1;
- }
-
- ops[++index] = ((struct host_vm_op) { .type = MMAP,
- .u = { .mmap = {
- .addr = virt,
- .len = len,
- .r = r,
- .w = w,
- .x = x,
- .fd = fd,
- .offset = offset }
- } });
- return(index);
-}
-
-int add_munmap(unsigned long addr, unsigned long len, struct host_vm_op *ops,
- int index, int last_filled, union mm_context *mmu,
- void (*do_ops)(union mm_context *, struct host_vm_op *, int))
-{
- struct host_vm_op *last;
-
- if(index != -1){
- last = &ops[index];
- if((last->type == MUNMAP) &&
- (last->u.munmap.addr + last->u.mmap.len == addr)){
- last->u.munmap.len += len;
- return(index);
- }
- }
-
- if(index == last_filled){
- (*do_ops)(mmu, ops, last_filled);
- index = -1;
- }
-
- ops[++index] = ((struct host_vm_op) { .type = MUNMAP,
- .u = { .munmap = {
- .addr = addr,
- .len = len } } });
- return(index);
-}
-
-int add_mprotect(unsigned long addr, unsigned long len, int r, int w, int x,
- struct host_vm_op *ops, int index, int last_filled,
- union mm_context *mmu,
- void (*do_ops)(union mm_context *, struct host_vm_op *, int))
-{
- struct host_vm_op *last;
-
- if(index != -1){
- last = &ops[index];
- if((last->type == MPROTECT) &&
- (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
- (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
- (last->u.mprotect.x == x)){
- last->u.mprotect.len += len;
- return(index);
- }
- }
-
- if(index == last_filled){
- (*do_ops)(mmu, ops, last_filled);
- index = -1;
- }
-
- ops[++index] = ((struct host_vm_op) { .type = MPROTECT,
- .u = { .mprotect = {
- .addr = addr,
- .len = len,
- .r = r,
- .w = w,
- .x = x } } });
- return(index);
-}
-
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
address &= PAGE_MASK;