author    rgrimes <rgrimes@FreeBSD.org>  1995-05-30 08:16:23 +0000
committer rgrimes <rgrimes@FreeBSD.org>  1995-05-30 08:16:23 +0000
commit    c86f0c7a71e7ade3e38b325c186a9cf374e0411e (patch)
tree      176f04f674860c7cfae9ac5d2ff4d4e1d73cb2b7 /sys/vm
parent    423ba8f9bc23d93bfc244aca9b12563b1c9de90d (diff)
Remove trailing whitespace.
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/device_pager.c     4
-rw-r--r--  sys/vm/kern_lock.c       34
-rw-r--r--  sys/vm/swap_pager.c       8
-rw-r--r--  sys/vm/vm_fault.c        26
-rw-r--r--  sys/vm/vm_glue.c          4
-rw-r--r--  sys/vm/vm_kern.c         20
-rw-r--r--  sys/vm/vm_map.c          64
-rw-r--r--  sys/vm/vm_mmap.c          6
-rw-r--r--  sys/vm/vm_object.c       38
-rw-r--r--  sys/vm/vm_page.c         34
-rw-r--r--  sys/vm/vm_pageout.c       8
-rw-r--r--  sys/vm/vm_pageout.h       4
-rw-r--r--  sys/vm/vm_swap.c          4
-rw-r--r--  sys/vm/vnode_pager.c      6
14 files changed, 130 insertions, 130 deletions
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index 5dad126..8ce0bc16 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)device_pager.c 8.1 (Berkeley) 6/11/93
- * $Id: device_pager.c,v 1.9 1995/05/10 18:56:01 davidg Exp $
+ * $Id: device_pager.c,v 1.10 1995/05/18 02:59:18 davidg Exp $
*/
/*
@@ -142,7 +142,7 @@ dev_pager_alloc(handle, size, prot, foff)
/*
* Check that the specified range of the device allows the desired
* protection.
- *
+ *
* XXX assumes VM_PROT_* == PROT_*
*/
npages = atop(round_page(size));
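The hunk above ends at the page-count computation npages = atop(round_page(size)). As a stand-alone illustration of that arithmetic, round_page() rounds a byte size up to a page boundary and atop() converts bytes to pages; the 4 KB page size and the macro bodies below are assumptions for the example, not taken from the kernel headers:

    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* 4 KB pages (assumed) */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define round_page(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
    #define atop(x)       ((x) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long size = 10000;         /* bytes in the mapping */
        unsigned long npages = atop(round_page(size));
        printf("%lu bytes -> %lu pages\n", size, npages);  /* -> 3 */
        return 0;
    }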
diff --git a/sys/vm/kern_lock.c b/sys/vm/kern_lock.c
index dbc13ae..30be676 100644
--- a/sys/vm/kern_lock.c
+++ b/sys/vm/kern_lock.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: kern_lock.c,v 1.4 1995/03/01 21:37:37 davidg Exp $
+ * $Id: kern_lock.c,v 1.5 1995/04/16 12:56:12 davidg Exp $
*/
/*
@@ -114,14 +114,14 @@ typedef int *thread_t;
* may only be used for exclusive locks.
*/
-void
+void
simple_lock_init(l)
simple_lock_t l;
{
*(boolean_t *) l = FALSE;
}
-void
+void
simple_lock(l)
simple_lock_t l;
{
@@ -129,14 +129,14 @@ simple_lock(l)
continue;
}
-void
+void
simple_unlock(l)
simple_lock_t l;
{
*(boolean_t *) l = FALSE;
}
-boolean_t
+boolean_t
simple_lock_try(l)
simple_lock_t l;
{
@@ -167,7 +167,7 @@ int lock_wait_time;
* variables and then initialize them, rather
* than getting a new one from this module.
*/
-void
+void
lock_init(l, can_sleep)
lock_t l;
boolean_t can_sleep;
@@ -182,7 +182,7 @@ lock_init(l, can_sleep)
l->recursion_depth = 0;
}
-void
+void
lock_sleepable(l, can_sleep)
lock_t l;
boolean_t can_sleep;
@@ -199,7 +199,7 @@ lock_sleepable(l, can_sleep)
* for the lock. These work on uniprocessor systems.
*/
-void
+void
lock_write(l)
register lock_t l;
{
@@ -252,7 +252,7 @@ lock_write(l)
simple_unlock(&l->interlock);
}
-void
+void
lock_done(l)
register lock_t l;
{
@@ -274,7 +274,7 @@ lock_done(l)
simple_unlock(&l->interlock);
}
-void
+void
lock_read(l)
register lock_t l;
{
@@ -318,7 +318,7 @@ lock_read(l)
*
* Returns TRUE if the upgrade *failed*.
*/
-boolean_t
+boolean_t
lock_read_to_write(l)
register lock_t l;
{
@@ -368,7 +368,7 @@ lock_read_to_write(l)
return (FALSE);
}
-void
+void
lock_write_to_read(l)
register lock_t l;
{
@@ -398,7 +398,7 @@ lock_write_to_read(l)
* Returns FALSE if the lock is not held on return.
*/
-boolean_t
+boolean_t
lock_try_write(l)
register lock_t l;
{
@@ -437,7 +437,7 @@ lock_try_write(l)
* Returns FALSE if the lock is not held on return.
*/
-boolean_t
+boolean_t
lock_try_read(l)
register lock_t l;
{
@@ -470,7 +470,7 @@ lock_try_read(l)
*
* Returns FALSE if the upgrade *failed*.
*/
-boolean_t
+boolean_t
lock_try_read_to_write(l)
register lock_t l;
{
@@ -507,7 +507,7 @@ lock_try_read_to_write(l)
* Allow a process that has a lock for write to acquire it
* recursively (for read, write, or update).
*/
-void
+void
lock_set_recursive(l)
lock_t l;
{
@@ -522,7 +522,7 @@ lock_set_recursive(l)
/*
* Prevent a lock from being re-acquired.
*/
-void
+void
lock_clear_recursive(l)
lock_t l;
{
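The kern_lock.c hunks above touch the uniprocessor simple-lock primitives: init and unlock store FALSE, lock spins, and try returns whether the lock was taken. A minimal modern restatement of that behavior using C11 atomics in place of the kernel's test-and-set; this is an illustrative sketch, not the FreeBSD implementation:

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef atomic_bool simple_lock_data_t;

    static void simple_lock_init(simple_lock_data_t *l)
    {
        atomic_store(l, false);             /* lock starts out free */
    }

    static void simple_lock(simple_lock_data_t *l)
    {
        while (atomic_exchange(l, true))    /* spin until we flip 0 -> 1 */
            continue;
    }

    static void simple_unlock(simple_lock_data_t *l)
    {
        atomic_store(l, false);
    }

    static bool simple_lock_try(simple_lock_data_t *l)
    {
        return !atomic_exchange(l, true);   /* true iff we took the lock */
    }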
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index e672bb6..16ec7bb 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.39 1995/05/14 03:00:08 davidg Exp $
+ * $Id: swap_pager.c,v 1.40 1995/05/18 02:59:20 davidg Exp $
*/
/*
@@ -135,7 +135,7 @@ int require_swap_init;
void swap_pager_finish();
int dmmin, dmmax;
-static inline void
+static inline void
swapsizecheck()
{
if (vm_swap_size < 128 * btodb(PAGE_SIZE)) {
@@ -793,7 +793,7 @@ swap_pager_putpage(pager, m, sync)
}
static inline int
-const
+const
swap_pager_block_index(swp, offset)
sw_pager_t swp;
vm_offset_t offset;
@@ -802,7 +802,7 @@ swap_pager_block_index(swp, offset)
}
static inline int
-const
+const
swap_pager_block_offset(swp, offset)
sw_pager_t swp;
vm_offset_t offset;
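swap_pager_block_index() and swap_pager_block_offset(), touched above, split a pager offset into a swap-block index and a position within that block. A simplified byte-granularity sketch of that decomposition; the SWB_NPAGES and PAGE_SIZE values are assumptions, and the real functions work in page and disk-block units:

    #define PAGE_SIZE  4096UL
    #define SWB_NPAGES 8UL                  /* pages per swap block (assumed) */

    /* Which swap block a pager offset falls in. */
    static unsigned long swp_block_index(unsigned long offset)
    {
        return offset / (SWB_NPAGES * PAGE_SIZE);
    }

    /* Byte offset of the data within that swap block. */
    static unsigned long swp_block_offset(unsigned long offset)
    {
        return offset % (SWB_NPAGES * PAGE_SIZE);
    }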
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index aba9ecf..c061c3e 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_fault.c,v 1.23 1995/04/16 14:12:12 davidg Exp $
+ * $Id: vm_fault.c,v 1.24 1995/05/18 02:59:22 davidg Exp $
*/
/*
@@ -221,27 +221,27 @@ RetryFault:;
/*
* INVARIANTS (through entire routine):
- *
+ *
* 1) At all times, we must either have the object lock or a busy
* page in some object to prevent some other thread from trying to
* bring in the same page.
- *
+ *
* Note that we cannot hold any locks during the pager access or when
* waiting for memory, so we use a busy page then.
- *
+ *
* Note also that we aren't as concerned about more than one thread
* attempting to pager_data_unlock the same page at once, so we don't
* hold the page as busy then, but do record the highest unlock value
* so far. [Unlock requests may also be delivered out of order.]
- *
+ *
* 2) Once we have a busy page, we must remove it from the pageout
* queues, so that the pageout daemon will not grab it away.
- *
+ *
* 3) To prevent another thread from racing us down the shadow chain
* and entering a new page in the top object before we do, we must
* keep a busy page in the top object while following the shadow
* chain.
- *
+ *
* 4) We must increment paging_in_progress on any object for which
* we have a busy page, to prevent vm_object_collapse from removing
* the busy page without our noticing.
@@ -381,7 +381,7 @@ readrest:
UNLOCK_AND_DEALLOCATE;
goto RetryFault;
}
-
+
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
m->valid = VM_PAGE_BITS_ALL;
hardfault++;
@@ -391,10 +391,10 @@ readrest:
* Remove the bogus page (which does not exist at this
* object/offset); before doing so, we must get back
* our object lock to preserve our invariant.
- *
+ *
* Also wake up any other thread that may want to bring
* in this page.
- *
+ *
* If this is the top-level object, we must leave the
* busy page to prevent another thread from rushing
* past us, and inserting the page in that object at
@@ -501,11 +501,11 @@ readrest:
* object locked). We can't unlock the bottom object,
* because the page we found may move (by collapse) if
* we do.
- *
+ *
* Instead, we first copy the page. Then, when we have
* no more use for the bottom object, we unlock it and
* try to collapse.
- *
+ *
* Note that we copy the page even if we didn't need
* to... that's the breaks.
*/
@@ -523,7 +523,7 @@ readrest:
* we have to flush all uses of the original page,
* since we can't distinguish those which want the
* original from those which need the new copy.
- *
+ *
* XXX If we know that only one map has access to this
* page, then we could avoid the pmap_page_protect()
* call.
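The invariants comment in the vm_fault.c hunk above describes the busy-page protocol: a faulting thread may not hold locks across pager I/O, so it marks the page busy first to keep other threads from bringing in the same page. A compilable toy sketch of that sequence, with stub lock and pager routines standing in for the kernel machinery (every name here is illustrative):

    #include <stdio.h>

    struct object { int locked; };
    struct page   { int busy; };

    /* Stubs standing in for the real lock and pager machinery. */
    static void object_lock(struct object *o)   { o->locked = 1; }
    static void object_unlock(struct object *o) { o->locked = 0; }
    static void pager_get(struct object *o, struct page *m) { (void)o; (void)m; }

    /* Invariant 1 above: hold either the object lock or a busy page. */
    static void fault_page_in(struct object *obj, struct page *m)
    {
        m->busy = 1;            /* claim the page; other faults must wait */
        object_unlock(obj);     /* no locks may be held across pager I/O  */
        pager_get(obj, m);      /* may sleep waiting for the disk         */
        object_lock(obj);
        m->busy = 0;            /* done; waiters would be woken up here   */
    }

    int main(void)
    {
        struct object obj = { 1 };
        struct page m = { 0 };
        fault_page_in(&obj, &m);
        printf("locked=%d busy=%d\n", obj.locked, m.busy);
        return 0;
    }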
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 973b867..02b1b70 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_glue.c,v 1.18 1995/03/28 07:58:53 bde Exp $
+ * $Id: vm_glue.c,v 1.19 1995/04/16 12:56:15 davidg Exp $
*/
#include <sys/param.h>
@@ -111,7 +111,7 @@ useracc(addr, len, rw)
/*
* XXX - check separately to disallow access to user area and user
* page tables - they are in the map.
- *
+ *
* XXX - VM_MAXUSER_ADDRESS is an end address, not a max. It was once
* only used (as an end address) in trap.c. Use it as an end address
* here too. This bogusness has spread. I just fixed where it was
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 47cbb91..07b4c49 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_kern.c,v 1.11 1995/02/21 01:22:45 davidg Exp $
+ * $Id: vm_kern.c,v 1.12 1995/03/15 07:52:06 davidg Exp $
*/
/*
@@ -99,7 +99,7 @@ extern int mb_map_full;
* map must be "kernel_map" below.
*/
-vm_offset_t
+vm_offset_t
kmem_alloc_pageable(map, size)
vm_map_t map;
register vm_size_t size;
@@ -127,7 +127,7 @@ kmem_alloc_pageable(map, size)
* Allocate wired-down memory in the kernel's address map
* or a submap.
*/
-vm_offset_t
+vm_offset_t
kmem_alloc(map, size)
register vm_map_t map;
register vm_size_t size;
@@ -162,7 +162,7 @@ kmem_alloc(map, size)
* Guarantee that there are pages already in this object before
* calling vm_map_pageable. This is to prevent the following
* scenario:
- *
+ *
* 1) Threads have swapped out, so that there is a pager for the
* kernel_object. 2) The kmsg zone is empty, and so we are
* kmem_allocing a new page for it. 3) vm_map_pageable calls vm_fault;
@@ -171,7 +171,7 @@ kmem_alloc(map, size)
* kmem_alloc. 4) goto 1 5) Even if the kmsg zone is not empty: when
* we get the data back from the pager, it will be (very stale)
* non-zero data. kmem_alloc is defined to return zero-filled memory.
- *
+ *
* We're intentionally not activating the pages we allocate to prevent a
* race with page-out. vm_map_pageable will wire the pages.
*/
@@ -212,7 +212,7 @@ kmem_alloc(map, size)
* with kmem_alloc, and return the physical pages
* associated with that region.
*/
-void
+void
kmem_free(map, addr, size)
vm_map_t map;
register vm_offset_t addr;
@@ -234,7 +234,7 @@ kmem_free(map, addr, size)
* min, max Returned endpoints of map
* pageable Can the region be paged
*/
-vm_map_t
+vm_map_t
kmem_suballoc(parent, min, max, size, pageable)
register vm_map_t parent;
vm_offset_t *min, *max;
@@ -397,7 +397,7 @@ kmem_malloc(map, size, waitflag)
* has no room, the caller sleeps waiting for more memory in the submap.
*
*/
-vm_offset_t
+vm_offset_t
kmem_alloc_wait(map, size)
vm_map_t map;
vm_size_t size;
@@ -434,7 +434,7 @@ kmem_alloc_wait(map, size)
* Returns memory to a submap of the kernel, and wakes up any threads
* waiting for memory in that map.
*/
-void
+void
kmem_free_wakeup(map, addr, size)
vm_map_t map;
vm_offset_t addr;
@@ -452,7 +452,7 @@ kmem_free_wakeup(map, addr, size)
* map the range between VM_MIN_KERNEL_ADDRESS and `start' as allocated, and
* the range between `start' and `end' as free.
*/
-void
+void
kmem_init(start, end)
vm_offset_t start, end;
{
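kmem_alloc_wait() and kmem_free_wakeup(), shown above, implement a sleep-until-space-frees allocator for kernel submaps. The same pattern translated to POSIX threads for illustration; the kernel uses map locks and tsleep()/wakeup() rather than pthreads, and returns an address rather than adjusting a byte count:

    #include <pthread.h>

    static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  map_cv   = PTHREAD_COND_INITIALIZER;
    static unsigned long   map_free;        /* bytes free in the submap */

    /* Block until the submap has room, then take `size` bytes of it. */
    static void alloc_wait(unsigned long size)
    {
        pthread_mutex_lock(&map_lock);
        while (map_free < size)             /* no room: sleep until a */
            pthread_cond_wait(&map_cv, &map_lock);  /* wakeup arrives */
        map_free -= size;
        pthread_mutex_unlock(&map_lock);
    }

    /* Return space and wake every thread waiting for room in the map. */
    static void free_wakeup(unsigned long size)
    {
        pthread_mutex_lock(&map_lock);
        map_free += size;
        pthread_cond_broadcast(&map_cv);
        pthread_mutex_unlock(&map_lock);
    }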
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index d7c1b1f..c87fe77 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.20 1995/03/25 17:36:57 davidg Exp $
+ * $Id: vm_map.c,v 1.21 1995/04/16 12:56:17 davidg Exp $
*/
/*
@@ -148,7 +148,7 @@ static int mapvmpgcnt;
static void _vm_map_clip_end __P((vm_map_t, vm_map_entry_t, vm_offset_t));
static void _vm_map_clip_start __P((vm_map_t, vm_map_entry_t, vm_offset_t));
-void
+void
vm_map_startup()
{
register int i;
@@ -245,7 +245,7 @@ vmspace_free(vm)
* the given physical map structure, and having
* the given lower and upper address bounds.
*/
-vm_map_t
+vm_map_t
vm_map_create(pmap, min, max, pageable)
pmap_t pmap;
vm_offset_t min, max;
@@ -429,7 +429,7 @@ vm_map_entry_dispose(map, entry)
* Creates another valid reference to the given map.
*
*/
-void
+void
vm_map_reference(map)
register vm_map_t map;
{
@@ -448,7 +448,7 @@ vm_map_reference(map)
* destroying it if no references remain.
* The map should not be locked.
*/
-void
+void
vm_map_deallocate(map)
register vm_map_t map;
{
@@ -626,7 +626,7 @@ vm_map_insert(map, object, offset, start, end)
* result indicates whether the address is
* actually contained in the map.
*/
-boolean_t
+boolean_t
vm_map_lookup_entry(map, address, entry)
register vm_map_t map;
register vm_offset_t address;
@@ -649,7 +649,7 @@ vm_map_lookup_entry(map, address, entry)
if (address >= cur->start) {
/*
* Go from hint to end of list.
- *
+ *
* But first, make a quick check to see if we are already looking
* at the entry we want (which is usually the case). Note also
* that we don't need to save the hint here... it is the same
@@ -804,7 +804,7 @@ vm_map_find(map, object, offset, addr, length, find_space)
* removing extra sharing maps
* [XXX maybe later] merging with a neighbor
*/
-void
+void
vm_map_simplify_entry(map, entry)
vm_map_t map;
vm_map_entry_t entry;
@@ -843,16 +843,16 @@ vm_map_simplify_entry(map, entry)
} else {
/*
* Try to merge with our neighbors.
- *
+ *
* Conditions for merge are:
- *
+ *
* 1. entries are adjacent. 2. both entries point to objects
* with null pagers.
- *
+ *
* If a merge is possible, we replace the two entries with a
* single entry, then merge the two objects into a single
* object.
- *
+ *
* Now, all that is left to do is write the code!
*/
}
@@ -875,7 +875,7 @@ vm_map_simplify_entry(map, entry)
* This routine is called only when it is known that
* the entry must be split.
*/
-static void
+static void
_vm_map_clip_start(map, entry, start)
register vm_map_t map;
register vm_map_entry_t entry;
@@ -928,7 +928,7 @@ _vm_map_clip_start(map, entry, start)
* This routine is called only when it is known that
* the entry must be split.
*/
-static void
+static void
_vm_map_clip_end(map, entry, end)
register vm_map_t map;
register vm_map_entry_t entry;
@@ -1279,17 +1279,17 @@ vm_map_pageable(map, start, end, new_pageable)
} else {
/*
* Wiring. We must do this in two passes:
- *
+ *
* 1. Holding the write lock, we create any shadow or zero-fill
* objects that need to be created. Then we clip each map
* entry to the region to be wired and increment its wiring
* count. We create objects before clipping the map entries
* to avoid object proliferation.
- *
+ *
* 2. We downgrade to a read lock, and call vm_fault_wire to
* fault in the pages for any newly wired area (wired_count is
* 1).
- *
+ *
* Downgrading to a read lock for vm_fault_wire avoids a possible
* deadlock with another thread that may have faulted on one
* of the pages to be wired (it would mark the page busy,
@@ -1313,7 +1313,7 @@ vm_map_pageable(map, start, end, new_pageable)
* the write lock on the map: create a shadow
* object for a copy-on-write region, or an
* object for a zero-fill region.
- *
+ *
* We don't have to do this for entries that
* point to sharing maps, because we won't
* hold the lock on the sharing map.
@@ -1366,14 +1366,14 @@ vm_map_pageable(map, start, end, new_pageable)
/*
* HACK HACK HACK HACK
- *
+ *
* If we are wiring in the kernel map or a submap of it, unlock
* the map to avoid deadlocks. We trust that the kernel
* threads are well-behaved, and therefore will not do
* anything destructive to this region of the map while we
* have it unlocked. We cannot trust user threads to do the
* same.
- *
+ *
* HACK HACK HACK HACK
*/
if (vm_map_pmap(map) == kernel_pmap) {
@@ -1391,7 +1391,7 @@ vm_map_pageable(map, start, end, new_pageable)
* what has been done. We decrement the wiring count
* for those pages which have not yet been wired (now)
* and unwire those that have (later).
- *
+ *
* XXX this violates the locking protocol on the map,
* needs to be fixed.
*/
@@ -1525,7 +1525,7 @@ vm_map_clean(map, start, end, syncio, invalidate)
* The map in question should be locked.
* [This is the reason for this routine's existence.]
*/
-void
+void
vm_map_entry_unwire(map, entry)
vm_map_t map;
register vm_map_entry_t entry;
@@ -1539,7 +1539,7 @@ vm_map_entry_unwire(map, entry)
*
* Deallocate the given entry from the target map.
*/
-void
+void
vm_map_entry_delete(map, entry)
register vm_map_t map;
register vm_map_entry_t entry;
@@ -1689,7 +1689,7 @@ vm_map_remove(map, start, end)
* privilege on the entire address region given.
* The entire region must be allocated.
*/
-boolean_t
+boolean_t
vm_map_check_protection(map, start, end, protection)
register vm_map_t map;
register vm_offset_t start;
@@ -1736,7 +1736,7 @@ vm_map_check_protection(map, start, end, protection)
* Copies the contents of the source entry to the destination
* entry. The entries *must* be aligned properly.
*/
-void
+void
vm_map_copy_entry(src_map, dst_map, src_entry, dst_entry)
vm_map_t src_map, dst_map;
register vm_map_entry_t src_entry, dst_entry;
@@ -1958,9 +1958,9 @@ vm_map_copy(dst_map, src_map,
}
/*
* Find the start entries and clip.
- *
+ *
* Note that checking protection asserts that the lookup cannot fail.
- *
+ *
* Also note that we wait to do the second lookup until we have done the
* first clip, as the clip may affect which entry we get!
*/
@@ -2014,7 +2014,7 @@ vm_map_copy(dst_map, src_map,
/*
* Both entries now match in size and relative endpoints.
- *
+ *
* If both entries refer to a VM object, we can deal with them
* now.
*/
@@ -2438,7 +2438,7 @@ RetryLookup:;
/*
* If we want to write the page, we may as well handle that
* now since we've got the sharing map locked.
- *
+ *
* If we don't need to write the page, we just demote the
* permissions allowed.
*/
@@ -2520,7 +2520,7 @@ RetryLookup:;
* (according to the handle returned by that lookup).
*/
-void
+void
vm_map_lookup_done(map, entry)
register vm_map_t map;
vm_map_entry_t entry;
@@ -2551,7 +2551,7 @@ vm_map_lookup_done(map, entry)
* at allocation time because the adjacent entry
* is often wired down.
*/
-void
+void
vm_map_simplify(map, start)
vm_map_t map;
vm_offset_t start;
@@ -2603,7 +2603,7 @@ vm_map_simplify(map, start)
/*
* vm_map_print: [ debug ]
*/
-void
+void
vm_map_print(map, full)
register vm_map_t map;
boolean_t full;
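The vm_map_lookup_entry() comment above ("But first, make a quick check to see if we are already looking at the entry we want") describes a hint-based lookup: cache the last entry found and start the scan there, since successive lookups tend to land nearby. A simplified sketch over a sorted singly linked list; the types are assumptions, and the real map is doubly linked and lock-protected:

    #include <stdbool.h>
    #include <stddef.h>

    struct entry {
        unsigned long start, end;       /* [start, end) address range */
        struct entry *next;             /* entries kept sorted by start */
    };

    static struct entry *hint;          /* last entry found */

    static bool lookup_entry(struct entry *head, unsigned long addr,
                             struct entry **out)
    {
        struct entry *cur = hint != NULL ? hint : head;

        /* Quick check: usually we are already at the right entry. */
        if (cur != NULL && addr >= cur->start && addr < cur->end) {
            *out = cur;
            return true;
        }
        /* Address precedes the hint: restart the scan from the head. */
        if (cur == NULL || addr < cur->start)
            cur = head;
        /* Go from the hint (or head) toward the end of the list. */
        for (; cur != NULL; cur = cur->next) {
            if (addr < cur->start)
                break;                  /* sorted: address is unmapped */
            if (addr < cur->end) {
                *out = hint = cur;      /* remember for the next lookup */
                return true;
            }
        }
        return false;
    }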
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index 381603e..1f9c041 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -38,7 +38,7 @@
* from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$
*
* @(#)vm_mmap.c 8.4 (Berkeley) 1/12/94
- * $Id: vm_mmap.c,v 1.22 1995/04/16 12:56:18 davidg Exp $
+ * $Id: vm_mmap.c,v 1.23 1995/05/18 02:59:24 davidg Exp $
*/
/*
@@ -181,7 +181,7 @@ mmap(p, uap, retval)
/*
* XXX if no hint provided for a non-fixed mapping place it after the
* end of the largest possible heap.
- *
+ *
* There should really be a pmap call to determine a reasonable location.
*/
if (addr == 0 && (flags & MAP_FIXED) == 0)
@@ -760,7 +760,7 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
vm_object_deallocate(user_object);
goto out;
}
-
+
/*
* this is a consistency check, gets the map entry, and should
* never fail
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 1d5ee25..7309034 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_object.c,v 1.45 1995/05/02 05:57:10 davidg Exp $
+ * $Id: vm_object.c,v 1.46 1995/05/21 21:39:30 davidg Exp $
*/
/*
@@ -171,7 +171,7 @@ vm_object_init(vm_offset_t nothing)
vm_object_count = 0;
simple_lock_init(&vm_cache_lock);
simple_lock_init(&vm_object_list_lock);
-
+
vm_object_cache_max = 84;
if (cnt.v_page_count > 1000)
vm_object_cache_max += (cnt.v_page_count - 1000) / 4;
@@ -592,7 +592,7 @@ startover:
TAILQ_INSERT_TAIL(&object->memq, p, listq);
pgcount--;
}
- if ((!allclean && (pass == 0)) ||
+ if ((!allclean && (pass == 0)) ||
(entireobj && (object->flags & OBJ_WRITEABLE))) {
pass = 1;
if (entireobj)
@@ -604,7 +604,7 @@ startover:
void
-vm_object_page_clean(object, start, end, syncio)
+vm_object_page_clean(object, start, end, syncio)
register vm_object_t object;
register vm_offset_t start;
register vm_offset_t end;
@@ -700,7 +700,7 @@ vm_object_cache_trim()
*
* The object must *not* be locked.
*/
-void
+void
vm_object_pmap_copy(object, start, end)
register vm_object_t object;
register vm_offset_t start;
@@ -772,7 +772,7 @@ again:
* May defer the copy until later if the object is not backed
* up by a non-default pager.
*/
-void
+void
vm_object_copy(src_object, src_offset, size,
dst_object, dst_offset, src_needs_copy)
register vm_object_t src_object;
@@ -840,7 +840,7 @@ vm_object_copy(src_object, src_offset, size,
/*
* If the object has a pager, the pager wants to see all of the
* changes. We need a copy-object for the changed pages.
- *
+ *
* If there is a copy-object, and it is empty, no changes have been made
* to the object since the copy-object was made. We can use the same
* copy-object.
@@ -882,7 +882,7 @@ Retry1:
* If the object has a pager, the pager wants to see all of the
* changes. We must make a copy-object and put the changed pages
* there.
- *
+ *
* The copy-object is always made large enough to completely shadow the
* original object, since it may have several users who want to shadow
* the original object at different points.
@@ -1013,7 +1013,7 @@ vm_object_shadow(object, offset, length)
* specified pager and paging id.
*/
-vm_object_t
+vm_object_t
vm_object_lookup(pager)
vm_pager_t pager;
{
@@ -1048,7 +1048,7 @@ vm_object_lookup(pager)
* the hash table.
*/
-void
+void
vm_object_enter(object, pager)
vm_object_t object;
vm_pager_t pager;
@@ -1206,7 +1206,7 @@ vm_object_collapse(object)
while (TRUE) {
/*
* Verify that the conditions are right for collapse:
- *
+ *
* The object exists and no pages in it are currently being paged
* out.
*/
@@ -1274,7 +1274,7 @@ vm_object_collapse(object)
backing_object->flags |= OBJ_DEAD;
/*
* We can collapse the backing object.
- *
+ *
* Move all in-memory pages from backing_object to the
* parent. Pages that have been paged out will be
* overwritten by any of the parent's pages that
@@ -1289,7 +1289,7 @@ vm_object_collapse(object)
* If the parent has a page here, or if this
* page falls outside the parent, dispose of
* it.
- *
+ *
* Otherwise, move it as planned.
*/
@@ -1373,7 +1373,7 @@ vm_object_collapse(object)
object->shadow_offset += backing_object->shadow_offset;
/*
* Discard backing_object.
- *
+ *
* Since the backing object has no pages, no pager left,
* and no object references within it, all that is
* necessary is to dispose of it.
@@ -1396,7 +1396,7 @@ vm_object_collapse(object)
* shadowed by the parent object, the parent object no
* longer has to shadow the backing object; it can
* shadow the next one in the chain.
- *
+ *
* The backing object must not be paged out - we'd have
* to check all of the paged-out pages, as well.
*/
@@ -1416,7 +1416,7 @@ vm_object_collapse(object)
/*
* If the parent has a page here, or if this
* page falls outside the parent, keep going.
- *
+ *
* Otherwise, the backing_object must be left in
* the chain.
*/
@@ -1586,7 +1586,7 @@ again:
* Conditions:
* The object must *not* be locked.
*/
-boolean_t
+boolean_t
vm_object_coalesce(prev_object, next_object,
prev_offset, next_offset,
prev_size, next_size)
@@ -1765,7 +1765,7 @@ vm_object_check() {
if( lsize > maxhash)
maxhash = lsize;
}
-
+
printf("maximum object hash queue size: %d\n", maxhash);
/*
@@ -1791,7 +1791,7 @@ vm_object_check() {
/*
* vm_object_print: [ debug ]
*/
-void
+void
vm_object_print(object, full)
vm_object_t object;
boolean_t full;
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 96537d0..30983e1 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
- * $Id: vm_page.c,v 1.30 1995/04/16 11:50:45 davidg Exp $
+ * $Id: vm_page.c,v 1.31 1995/04/16 12:56:21 davidg Exp $
*/
/*
@@ -126,7 +126,7 @@ static u_short vm_page_dev_bsize_chunks[] = {
*
* Sets page_shift and page_mask from cnt.v_page_size.
*/
-void
+void
vm_set_page_size()
{
@@ -216,11 +216,11 @@ vm_page_startup(starta, enda, vaddr)
/*
* Allocate (and initialize) the hash table buckets.
- *
+ *
* The number of buckets MUST BE a power of 2, and the actual value is
* the next power of 2 greater than the number of physical pages in
* the system.
- *
+ *
* Note: This computation can be tweaked if desired.
*/
vm_page_buckets = (struct pglist *) vaddr;
@@ -262,7 +262,7 @@ vm_page_startup(starta, enda, vaddr)
* kmem_map which must be initialized before malloc() will work
* (obviously). Also could include pager maps which would be
* allocated before kmeminit.
- *
+ *
* Allow some kernel map entries... this should be plenty since people
* shouldn't be cluttering up the kernel map (they should use their
* own maps).
@@ -375,7 +375,7 @@ vm_page_hash(object, offset)
* The object and page must be locked, and must be splhigh.
*/
-inline void
+inline void
vm_page_insert(mem, object, offset)
register vm_page_t mem;
register vm_object_t object;
@@ -428,7 +428,7 @@ vm_page_insert(mem, object, offset)
* The object and page must be locked, and at splhigh.
*/
-inline void
+inline void
vm_page_remove(mem)
register vm_page_t mem;
{
@@ -472,7 +472,7 @@ vm_page_remove(mem)
* The object must be locked. No side effects.
*/
-vm_page_t
+vm_page_t
vm_page_lookup(object, offset)
register vm_object_t object;
register vm_offset_t offset;
@@ -511,7 +511,7 @@ vm_page_lookup(object, offset)
*
* The object must be locked.
*/
-void
+void
vm_page_rename(mem, new_object, new_offset)
register vm_page_t mem;
register vm_object_t new_object;
@@ -586,7 +586,7 @@ vm_page_alloc(object, offset, page_req)
if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
page_req = VM_ALLOC_SYSTEM;
};
-
+
simple_lock(&vm_page_queue_free_lock);
s = splhigh();
@@ -763,7 +763,7 @@ again:
*
* Object and page must be locked prior to entry.
*/
-void
+void
vm_page_free(mem)
register vm_page_t mem;
{
@@ -836,7 +836,7 @@ vm_page_free(mem)
*
* The page queues must be locked.
*/
-void
+void
vm_page_wire(mem)
register vm_page_t mem;
{
@@ -861,7 +861,7 @@ vm_page_wire(mem)
*
* The page queues must be locked.
*/
-void
+void
vm_page_unwire(mem)
register vm_page_t mem;
{
@@ -889,7 +889,7 @@ vm_page_unwire(mem)
*
* The page queues must be locked.
*/
-void
+void
vm_page_activate(m)
register vm_page_t m;
{
@@ -937,7 +937,7 @@ vm_page_deactivate(m)
/*
* Only move active pages -- ignore locked or already inactive ones.
- *
+ *
* XXX: sometimes we get pages which aren't wired down or on any queue -
* we need to put them on the inactive queue also, otherwise we lose
* track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
@@ -963,7 +963,7 @@ vm_page_deactivate(m)
*
* Put the specified page onto the page cache queue (if appropriate).
*/
-void
+void
vm_page_cache(m)
register vm_page_t m;
{
@@ -1033,7 +1033,7 @@ vm_page_copy(src_m, dest_m)
* mapping function for valid bits or for dirty bits in
* a page
*/
-inline int
+inline int
vm_page_bits(int base, int size)
{
u_short chunk;
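vm_page_bits(), at the end of the vm_page.c hunk above, maps a (base, size) byte range within a page to a mask with one bit per DEV_BSIZE chunk, used for per-chunk valid/dirty tracking. A sketch of that mapping; the chunk size is an assumption, and the real routine uses the precomputed vm_page_dev_bsize_chunks table rather than a loop:

    #include <stdio.h>

    #define DEV_BSIZE 512                   /* chunk size (assumed) */

    static unsigned vm_page_bits(int base, int size)
    {
        if (size == 0)
            return 0;
        int first = base / DEV_BSIZE;
        int last  = (base + size - 1) / DEV_BSIZE;
        unsigned mask = 0;
        for (int i = first; i <= last; i++)
            mask |= 1u << i;                /* one bit per chunk covered */
        return mask;
    }

    int main(void)
    {
        /* Bytes 0-1023 cover chunks 0 and 1 of the page: mask 0x3. */
        printf("0x%x\n", vm_page_bits(0, 1024));
        return 0;
    }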
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 79d28c9..c4c351f 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.c,v 1.49 1995/05/10 18:56:06 davidg Exp $
+ * $Id: vm_pageout.c,v 1.50 1995/05/21 21:39:31 davidg Exp $
*/
/*
@@ -121,10 +121,10 @@ vm_pageout_clean(m, sync)
{
/*
* Clean the page and remove it from the laundry.
- *
+ *
* We set the busy bit to cause potential page faults on this page to
* block.
- *
+ *
* And we set pageout-in-progress to keep the object from disappearing
* during pageout. This guarantees that the page won't move from the
* inactive queue. (However, any other page on the inactive queue may
@@ -234,7 +234,7 @@ vm_pageout_clean(m, sync)
pageout_count += b_pageout_count;
}
}
-
+
/*
* we allow reads during pageouts...
*/
diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h
index 99a1559..d1da63c 100644
--- a/sys/vm/vm_pageout.h
+++ b/sys/vm/vm_pageout.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_pageout.h,v 1.10 1995/03/16 18:17:30 bde Exp $
+ * $Id: vm_pageout.h,v 1.11 1995/04/09 06:03:55 davidg Exp $
*/
#ifndef _VM_VM_PAGEOUT_H_
@@ -103,7 +103,7 @@ pagedaemon_wakeup()
#define VM_WAIT vm_wait()
-static inline void
+static inline void
vm_wait()
{
int s;
diff --git a/sys/vm/vm_swap.c b/sys/vm/vm_swap.c
index 010ba9d..9b90074 100644
--- a/sys/vm/vm_swap.c
+++ b/sys/vm/vm_swap.c
@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_swap.c 8.5 (Berkeley) 2/17/94
- * $Id: vm_swap.c,v 1.19 1995/05/19 03:27:08 davidg Exp $
+ * $Id: vm_swap.c,v 1.20 1995/05/25 03:38:11 davidg Exp $
*/
#include <sys/param.h>
@@ -225,7 +225,7 @@ swaponvp(p, vp, dev, nblks)
error = VOP_OPEN(vp, FREAD | FWRITE, p->p_ucred, p);
if (error)
return (error);
-
+
if (nblks == 0 && (bdevsw[major(dev)].d_psize == 0 ||
(nblks = (*bdevsw[major(dev)].d_psize) (dev)) == -1)) {
(void) VOP_CLOSE(vp, FREAD | FWRITE, p->p_ucred, p);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 759abde..6b93ec8 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -37,7 +37,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
- * $Id: vnode_pager.c,v 1.38 1995/05/10 18:56:09 davidg Exp $
+ * $Id: vnode_pager.c,v 1.39 1995/05/18 02:59:26 davidg Exp $
*/
/*
@@ -314,11 +314,11 @@ vnode_pager_haspage(pager, offset)
/*
* Read the index to find the disk block to read from. If there is no
* block, report that we don't have this data.
- *
+ *
* Assumes that the vnode has whole page or nothing.
*/
err = VOP_BMAP(vp, block, (struct vnode **) 0, &bn, 0);
- if (err)
+ if (err)
return (TRUE);
return ((long) bn < 0 ? FALSE : TRUE);
}
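vnode_pager_haspage() above asks VOP_BMAP to translate a file block to a disk block and treats a negative block number as a hole with no backing data. A simplified restatement with a fake bmap() stub; note that the real routine also reports TRUE when the bmap call itself fails, a detail this sketch omits:

    #include <stdbool.h>

    /* Stub bmap: even file blocks map to disk, odd ones are holes. */
    static long bmap(unsigned long fileblock)
    {
        return (fileblock % 2 == 0) ? (long)(fileblock * 8) : -1;
    }

    /* Does the pager have data backing this offset? */
    static bool haspage(unsigned long offset, unsigned long blocksize)
    {
        long bn = bmap(offset / blocksize);
        return bn >= 0;                     /* negative block == a hole */
    }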