author      Nick Piggin <npiggin@suse.de>  2009-01-06 14:40:44 +1100
committer   Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>  2009-01-06 14:40:44 +1100
commit      d2859751cd0bf586941ffa7308635a293f943c17 (patch)
tree        24f5f4ba78bf3722609e20a9346976226b95878a /fs/xfs
parent      195ec037ff8f6fa800616e0dad8d57a98b6fb37e (diff)
[XFS] remove old vmap cache
XFS's vmap batching simply defers a number (up to 64) of vunmaps, and keeps track of them in a list. To purge the batch, it just goes through the list and calls vunmap on each one. This is pretty poor: a global TLB flush is generally still performed on each vunmap, with the most expensive parts of the operation being the broadcast IPIs and locking involved in the SMP callouts, and the locking involved in the vmap management -- none of these are avoided by just batching up the calls. I'm actually surprised it ever made much difference. (Now that the lazy vmap allocator is upstream, this description is not quite right, but the vunmap batching still doesn't seem to do much.)

Rip all this logic out of XFS completely. I will improve vmap performance and scalability directly in a subsequent patch.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Reviewed-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
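For reference, below is a minimal, self-contained sketch (kernel-style C, not the XFS code itself) of the mapping path that remains once the batching is removed: a multi-page buffer is mapped with a plain vmap() and torn down with a direct vunmap(), with no deferred-free list in between. The function names (demo_map_pages, demo_unmap) are illustrative only; the actual change is in the diff below.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map a multi-page buffer into contiguous kernel virtual space. */
static void *demo_map_pages(struct page **pages, unsigned int count)
{
	return vmap(pages, count, VM_MAP, PAGE_KERNEL);
}

/*
 * Tear the mapping down directly.  Each vunmap() may still cost a
 * global TLB flush plus broadcast IPIs; the removed batching never
 * avoided that, which is why this patch rips it out.
 */
static void demo_unmap(void *addr)
{
	vunmap(addr);
}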
Diffstat (limited to 'fs/xfs')
-rw-r--r--    fs/xfs/linux-2.6/xfs_buf.c    75
1 file changed, 1 insertion(+), 74 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index cb329ed..0b2177a 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -166,75 +166,6 @@ test_page_region(
}
/*
- * Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
- void *vm_addr;
- struct a_list *next;
-} a_list_t;
-
-static a_list_t *as_free_head;
-static int as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- * Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
- void *addr)
-{
- a_list_t *aentry;
-
-#ifdef CONFIG_XEN
- /*
- * Xen needs to be able to make sure it can get an exclusive
- * RO mapping of pages it wants to turn into a pagetable. If
- * a newly allocated page is also still being vmap()ed by xfs,
- * it will cause pagetable construction to fail. This is a
- * quick workaround to always eagerly unmap pages so that Xen
- * is happy.
- */
- vunmap(addr);
- return;
-#endif
-
- aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
- if (likely(aentry)) {
- spin_lock(&as_lock);
- aentry->next = as_free_head;
- aentry->vm_addr = addr;
- as_free_head = aentry;
- as_list_len++;
- spin_unlock(&as_lock);
- } else {
- vunmap(addr);
- }
-}
-
-STATIC void
-purge_addresses(void)
-{
- a_list_t *aentry, *old;
-
- if (as_free_head == NULL)
- return;
-
- spin_lock(&as_lock);
- aentry = as_free_head;
- as_free_head = NULL;
- as_list_len = 0;
- spin_unlock(&as_lock);
-
- while ((old = aentry) != NULL) {
- vunmap(aentry->vm_addr);
- aentry = aentry->next;
- kfree(old);
- }
-}
-
-/*
* Internal xfs_buf_t object manipulation
*/
@@ -333,7 +264,7 @@ xfs_buf_free(
uint i;
if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
- free_address(bp->b_addr - bp->b_offset);
+ vunmap(bp->b_addr - bp->b_offset);
for (i = 0; i < bp->b_page_count; i++) {
struct page *page = bp->b_pages[i];
@@ -455,8 +386,6 @@ _xfs_buf_map_pages(
bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
bp->b_flags |= XBF_MAPPED;
} else if (flags & XBF_MAPPED) {
- if (as_list_len > 64)
- purge_addresses();
bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
VM_MAP, PAGE_KERNEL);
if (unlikely(bp->b_addr == NULL))
@@ -1743,8 +1672,6 @@ xfsbufd(
count++;
}
- if (as_list_len > 0)
- purge_addresses();
if (count)
blk_run_address_space(target->bt_mapping);