author     alc <alc@FreeBSD.org>    2007-07-14 21:21:17 +0000
committer  alc <alc@FreeBSD.org>    2007-07-14 21:21:17 +0000
commit     dc39b85c98a3f1fda07d9e48a4a05717dc0556a4 (patch)
tree       7c05eb9c9b736b3a1d9bc05dcb13efd1b7d91d99 /sys
parent     def1240ba27369b4e457dc3a8adf7d667aaa3744 (diff)
download   FreeBSD-src-dc39b85c98a3f1fda07d9e48a4a05717dc0556a4.zip
           FreeBSD-src-dc39b85c98a3f1fda07d9e48a4a05717dc0556a4.tar.gz
Eliminate two unused functions: vm_phys_alloc_pages() and
vm_phys_free_pages().

Rename vm_phys_alloc_pages_locked() to vm_phys_alloc_pages() and
vm_phys_free_pages_locked() to vm_phys_free_pages().

Add comments regarding the need for the free page queues lock to be
held by callers of these functions.

No functional changes.

Approved by: re (hrs)
Diffstat (limited to 'sys')
-rw-r--r--  sys/vm/vm_page.c   8
-rw-r--r--  sys/vm/vm_phys.c  47
-rw-r--r--  sys/vm/vm_phys.h   2
3 files changed, 17 insertions, 40 deletions
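Illustrative sketch (not part of the patch): the calling convention this commit documents. Every identifier below appears in the diff itself; after the rename, vm_phys_alloc_pages() and vm_phys_free_pages() assume the caller already holds the free page queues lock, vm_page_queue_free_mtx, as vm_phys_add_page() now does in the first hunk.

	/*
	 * Sketch of a caller: take the free page queues lock, allocate an
	 * order-0 page from the default pool, and return it to the free
	 * lists while the lock is still held.
	 */
	vm_page_t m;

	mtx_lock(&vm_page_queue_free_mtx);
	m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, 0);	/* lock held */
	if (m != NULL)
		vm_phys_free_pages(m, 0);			/* lock still held */
	mtx_unlock(&vm_page_queue_free_mtx);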
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 947e5d4..750e295 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -887,7 +887,7 @@ loop:
* Allocate from the free queue if the number of free pages
* exceeds the minimum for the request class.
*/
- m = vm_phys_alloc_pages_locked(object != NULL ?
+ m = vm_phys_alloc_pages(object != NULL ?
VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
} else if (page_req != VM_ALLOC_INTERRUPT) {
mtx_unlock(&vm_page_queue_free_mtx);
@@ -913,7 +913,7 @@ loop:
mtx_unlock(&vm_page_queue_free_mtx);
return (NULL);
}
- m = vm_phys_alloc_pages_locked(object != NULL ?
+ m = vm_phys_alloc_pages(object != NULL ?
VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
} else {
vm_page_unlock_queues();
@@ -1161,10 +1161,10 @@ vm_page_free_toq(vm_page_t m)
m->flags |= PG_FREE;
mtx_lock(&vm_page_queue_free_mtx);
if ((m->flags & PG_ZERO) != 0) {
- vm_phys_free_pages_locked(m, 0);
+ vm_phys_free_pages(m, 0);
++vm_page_zero_count;
} else {
- vm_phys_free_pages_locked(m, 0);
+ vm_phys_free_pages(m, 0);
vm_page_zero_idle_wakeup();
}
vm_page_free_wakeup();
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index f2f66f4..81d597c 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -285,30 +285,19 @@ vm_phys_add_page(vm_paddr_t pa)
m, m->order));
m->pool = VM_FREEPOOL_DEFAULT;
pmap_page_init(m);
- vm_phys_free_pages(m, 0);
-}
-
-/*
- * Allocate a contiguous, power of two-sized set of physical pages
- * from the free lists.
- */
-vm_page_t
-vm_phys_alloc_pages(int pool, int order)
-{
- vm_page_t m;
-
mtx_lock(&vm_page_queue_free_mtx);
- m = vm_phys_alloc_pages_locked(pool, order);
+ vm_phys_free_pages(m, 0);
mtx_unlock(&vm_page_queue_free_mtx);
- return (m);
}
/*
* Allocate a contiguous, power of two-sized set of physical pages
* from the free lists.
+ *
+ * The free page queues must be locked.
*/
vm_page_t
-vm_phys_alloc_pages_locked(int pool, int order)
+vm_phys_alloc_pages(int pool, int order)
{
struct vm_freelist *fl;
struct vm_freelist *alt;
@@ -316,9 +305,9 @@ vm_phys_alloc_pages_locked(int pool, int order)
vm_page_t m;
KASSERT(pool < VM_NFREEPOOL,
- ("vm_phys_alloc_pages_locked: pool %d is out of range", pool));
+ ("vm_phys_alloc_pages: pool %d is out of range", pool));
KASSERT(order < VM_NFREEORDER,
- ("vm_phys_alloc_pages_locked: order %d is out of range", order));
+ ("vm_phys_alloc_pages: order %d is out of range", order));
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
for (flind = 0; flind < vm_nfreelists; flind++) {
fl = vm_phys_free_queues[flind][pool];
@@ -417,35 +406,25 @@ vm_phys_paddr_to_segind(vm_paddr_t pa)
/*
* Free a contiguous, power of two-sized set of physical pages.
+ *
+ * The free page queues must be locked.
*/
void
vm_phys_free_pages(vm_page_t m, int order)
{
-
- mtx_lock(&vm_page_queue_free_mtx);
- vm_phys_free_pages_locked(m, order);
- mtx_unlock(&vm_page_queue_free_mtx);
-}
-
-/*
- * Free a contiguous, power of two-sized set of physical pages.
- */
-void
-vm_phys_free_pages_locked(vm_page_t m, int order)
-{
struct vm_freelist *fl;
struct vm_phys_seg *seg;
vm_paddr_t pa, pa_buddy;
vm_page_t m_buddy;
KASSERT(m->order == VM_NFREEORDER,
- ("vm_phys_free_pages_locked: page %p has unexpected order %d",
+ ("vm_phys_free_pages: page %p has unexpected order %d",
m, m->order));
KASSERT(m->pool < VM_NFREEPOOL,
- ("vm_phys_free_pages_locked: page %p has unexpected pool %d",
+ ("vm_phys_free_pages: page %p has unexpected pool %d",
m, m->pool));
KASSERT(order < VM_NFREEORDER,
- ("vm_phys_free_pages_locked: order %d is out of range", order));
+ ("vm_phys_free_pages: order %d is out of range", order));
mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
pa = VM_PAGE_TO_PHYS(m);
seg = &vm_phys_segs[m->segind];
@@ -518,7 +497,7 @@ vm_phys_zero_pages_idle(void)
}
cnt_prezero += zeroed;
mtx_lock(&vm_page_queue_free_mtx);
- vm_phys_free_pages_locked(m, q);
+ vm_phys_free_pages(m, q);
vm_page_zero_count += zeroed;
return (TRUE);
}
@@ -649,7 +628,7 @@ done:
KASSERT(m->order == VM_NFREEORDER,
("vm_phys_alloc_contig: page %p has unexpected order %d",
m, m->order));
- vm_phys_free_pages_locked(m, 0);
+ vm_phys_free_pages(m, 0);
}
mtx_unlock(&vm_page_queue_free_mtx);
return (m_ret);
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
index 4d34ccf..0debc01 100644
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -42,10 +42,8 @@ vm_page_t vm_phys_alloc_contig(unsigned long npages,
vm_paddr_t low, vm_paddr_t high,
unsigned long alignment, unsigned long boundary);
vm_page_t vm_phys_alloc_pages(int pool, int order);
-vm_page_t vm_phys_alloc_pages_locked(int pool, int order);
vm_paddr_t vm_phys_bootstrap_alloc(vm_size_t size, unsigned long alignment);
void vm_phys_free_pages(vm_page_t m, int order);
-void vm_phys_free_pages_locked(vm_page_t m, int order);
void vm_phys_init(void);
boolean_t vm_phys_zero_pages_idle(void);