summaryrefslogtreecommitdiffstats
path: root/sys
diff options
context:
space:
mode:
authortanimura <tanimura@FreeBSD.org>2000-12-13 10:01:00 +0000
committertanimura <tanimura@FreeBSD.org>2000-12-13 10:01:00 +0000
commita8dbeb3f7b850f093f2892563882b3afcecd5e8e (patch)
tree5cfdc93fb25655fb4c56646dcc64ce1e405c28e8 /sys
parent90d90d0c248fbcde27a7d443098c2bf514aaa199 (diff)
downloadFreeBSD-src-a8dbeb3f7b850f093f2892563882b3afcecd5e8e.zip
FreeBSD-src-a8dbeb3f7b850f093f2892563882b3afcecd5e8e.tar.gz
- If swap metadata does not fit into the KVM, reduce the number of
  struct swblock entries by dividing the number of the entries by 2
  until the swap metadata fits.

- Reject swapon(2) upon failure of swap_zone allocation.

This is just a temporary fix. Better solutions include:
(suggested by: dillon)

o reserving swap in SWAP_META_PAGES chunks, and

o swapping the swblock structures themselves.

Reviewed by:	alfred, dillon
Diffstat (limited to 'sys')
-rw-r--r--sys/amd64/amd64/machdep.c19
-rw-r--r--sys/i386/i386/machdep.c19
-rw-r--r--sys/vm/default_pager.c1
-rw-r--r--sys/vm/swap_pager.c35
-rw-r--r--sys/vm/swap_pager.h1
-rw-r--r--sys/vm/vm_kern.c1
-rw-r--r--sys/vm/vm_map.c2
-rw-r--r--sys/vm/vm_object.c2
-rw-r--r--sys/vm/vm_pageout.c1
-rw-r--r--sys/vm/vm_swap.c8
-rw-r--r--sys/vm/vm_zone.c14
11 files changed, 76 insertions, 27 deletions
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index a6f92b1..7b537ff 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -260,6 +260,7 @@ cpu_startup(dummy)
vm_size_t size = 0;
int firstaddr;
vm_offset_t minaddr;
+ int physmem_est;
if (boothowto & RB_VERBOSE)
bootverbose++;
@@ -329,6 +330,16 @@ again:
valloc(callwheel, struct callout_tailq, callwheelsize);
/*
+ * Discount the physical memory larger than the size of kernel_map
+ * to avoid eating up all of KVA space.
+ */
+ if (kernel_map->first_free == NULL) {
+ printf("Warning: no free entries in kernel_map.\n");
+ physmem_est = physmem;
+ } else
+ physmem_est = min(physmem, kernel_map->max_offset - kernel_map->min_offset);
+
+ /*
* The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
* For the first 64MB of ram nominally allocate sufficient buffers to
* cover 1/4 of our ram. Beyond the first 64MB allocate additional
@@ -340,10 +351,10 @@ again:
int factor = 4 * BKVASIZE / PAGE_SIZE;
nbuf = 50;
- if (physmem > 1024)
- nbuf += min((physmem - 1024) / factor, 16384 / factor);
- if (physmem > 16384)
- nbuf += (physmem - 16384) * 2 / (factor * 5);
+ if (physmem_est > 1024)
+ nbuf += min((physmem_est - 1024) / factor, 16384 / factor);
+ if (physmem_est > 16384)
+ nbuf += (physmem_est - 16384) * 2 / (factor * 5);
}
/*
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index a6f92b1..7b537ff 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -260,6 +260,7 @@ cpu_startup(dummy)
vm_size_t size = 0;
int firstaddr;
vm_offset_t minaddr;
+ int physmem_est;
if (boothowto & RB_VERBOSE)
bootverbose++;
@@ -329,6 +330,16 @@ again:
valloc(callwheel, struct callout_tailq, callwheelsize);
/*
+ * Discount the physical memory larger than the size of kernel_map
+ * to avoid eating up all of KVA space.
+ */
+ if (kernel_map->first_free == NULL) {
+ printf("Warning: no free entries in kernel_map.\n");
+ physmem_est = physmem;
+ } else
+ physmem_est = min(physmem, kernel_map->max_offset - kernel_map->min_offset);
+
+ /*
* The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
* For the first 64MB of ram nominally allocate sufficient buffers to
* cover 1/4 of our ram. Beyond the first 64MB allocate additional
@@ -340,10 +351,10 @@ again:
int factor = 4 * BKVASIZE / PAGE_SIZE;
nbuf = 50;
- if (physmem > 1024)
- nbuf += min((physmem - 1024) / factor, 16384 / factor);
- if (physmem > 16384)
- nbuf += (physmem - 16384) * 2 / (factor * 5);
+ if (physmem_est > 1024)
+ nbuf += min((physmem_est - 1024) / factor, 16384 / factor);
+ if (physmem_est > 16384)
+ nbuf += (physmem_est - 16384) * 2 / (factor * 5);
}
/*
diff --git a/sys/vm/default_pager.c b/sys/vm/default_pager.c
index 7ea507f..f5d88a5 100644
--- a/sys/vm/default_pager.c
+++ b/sys/vm/default_pager.c
@@ -46,6 +46,7 @@
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
+#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
static vm_object_t default_pager_alloc __P((void *, vm_ooffset_t, vm_prot_t,
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 0b88efd..9a0ca56 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -90,13 +90,16 @@
#include "opt_swap.h"
#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
+#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
-#include <vm/vm_zone.h>
#define SWM_FREE 0x02 /* free, period */
#define SWM_POP 0x04 /* pop out */
@@ -273,7 +276,7 @@ swap_pager_init()
void
swap_pager_swap_init()
{
- int n;
+ int n, n2;
/*
* Number of in-transit swap bp operations. Don't
@@ -311,15 +314,23 @@ swap_pager_swap_init()
* can hold 16 pages, so this is probably overkill.
*/
- n = cnt.v_page_count * 2;
-
- swap_zone = zinit(
- "SWAPMETA",
- sizeof(struct swblock),
- n,
- ZONE_INTERRUPT,
- 1
- );
+ n = min(cnt.v_page_count, (kernel_map->max_offset - kernel_map->min_offset) / PAGE_SIZE) * 2;
+ n2 = n;
+
+ while (n > 0
+ && (swap_zone = zinit(
+ "SWAPMETA",
+ sizeof(struct swblock),
+ n,
+ ZONE_INTERRUPT,
+ 1
+ )) == NULL)
+ n >>= 1;
+ if (swap_zone == NULL)
+ printf("WARNING: failed to init swap_zone!\n");
+ if (n2 != n)
+ printf("Swap zone entries reduced to %d.\n", n);
+ n2 = n;
/*
* Initialize our meta-data hash table. The swapper does not need to
@@ -330,7 +341,7 @@ swap_pager_swap_init()
* swhash_mask: hash table index mask
*/
- for (n = 1; n < cnt.v_page_count / 4; n <<= 1)
+ for (n = 1; n < n2 ; n <<= 1)
;
swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
diff --git a/sys/vm/swap_pager.h b/sys/vm/swap_pager.h
index ea57132..b0fbe3a 100644
--- a/sys/vm/swap_pager.h
+++ b/sys/vm/swap_pager.h
@@ -84,6 +84,7 @@ struct swblock {
extern struct pagerlst swap_pager_un_object_list;
extern int swap_pager_full;
extern struct blist *swapblist;
+extern vm_zone_t swap_zone;
void swap_pager_putpages __P((vm_object_t, vm_page_t *, int, boolean_t, int *));
boolean_t swap_pager_haspage __P((vm_object_t object, vm_pindex_t pindex, int *before, int *after));
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index ee9e7e4..14b9fbe 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -489,4 +489,3 @@ kmem_init(start, end)
/* ... and ending with the completion of the above `insert' */
vm_map_unlock(m);
}
-
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 8aa25e2..8cc61b1 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -86,8 +86,8 @@
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
-#include <vm/swap_pager.h>
#include <vm/vm_zone.h>
+#include <vm/swap_pager.h>
/*
* Virtual memory maps provide for the mapping, protection,
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index ddb9ab7..3eb5243 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -84,10 +84,10 @@
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
+#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
-#include <vm/vm_zone.h>
static void vm_object_qcollapse __P((vm_object_t object));
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 6a52166..dbea3d6 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -93,6 +93,7 @@
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
+#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
diff --git a/sys/vm/vm_swap.c b/sys/vm/vm_swap.c
index 8691c22..fada384 100644
--- a/sys/vm/vm_swap.c
+++ b/sys/vm/vm_swap.c
@@ -53,6 +53,7 @@
#include <sys/stat.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
+#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
/*
@@ -194,6 +195,13 @@ swapon(p, uap)
if (error)
return (error);
+ /*
+ * Swap metadata may not fit in the KVM if we have physical
+ * memory of >1GB.
+ */
+ if (swap_zone == NULL)
+ return (ENOMEM);
+
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, p);
error = namei(&nd);
if (error)
diff --git a/sys/vm/vm_zone.c b/sys/vm/vm_zone.c
index c592c91..b5ade30 100644
--- a/sys/vm/vm_zone.c
+++ b/sys/vm/vm_zone.c
@@ -80,8 +80,11 @@ int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
int nentries, int flags, int zalloc)
{
- int totsize;
+ int totsize, oldzflags;
+ vm_zone_t oldzlist;
+ oldzflags = z->zflags;
+ oldzlist = zlist;
if ((z->zflags & ZONE_BOOT) == 0) {
z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
simple_lock_init(&z->zlock);
@@ -112,8 +115,12 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
zone_kmem_kvaspace += totsize;
z->zkva = kmem_alloc_pageable(kernel_map, totsize);
- if (z->zkva == 0)
+ if (z->zkva == 0) {
+ /* Clean up the zlist in case we messed it. */
+ if ((oldzflags & ZONE_BOOT) == 0)
+ zlist = oldzlist;
return 0;
+ }
z->zpagemax = totsize / PAGE_SIZE;
if (obj == NULL) {
@@ -156,11 +163,10 @@ zinit(char *name, int size, int nentries, int flags, int zalloc)
{
vm_zone_t z;
- z = (vm_zone_t) malloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
+ z = (vm_zone_t) malloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT | M_ZERO);
if (z == NULL)
return NULL;
- z->zflags = 0;
if (zinitna(z, NULL, name, size, nentries, flags, zalloc) == 0) {
free(z, M_ZONE);
return NULL;
OpenPOWER on IntegriCloud