Diffstat (limited to 'sys')
-rw-r--r--  sys/alpha/alpha/machdep.c  | 15
-rw-r--r--  sys/alpha/alpha/pmap.c     |  2
-rw-r--r--  sys/amd64/amd64/machdep.c  | 17
-rw-r--r--  sys/amd64/amd64/pmap.c     |  2
-rw-r--r--  sys/i386/i386/machdep.c    | 17
-rw-r--r--  sys/i386/i386/pmap.c       |  2
-rw-r--r--  sys/kern/kern_timeout.c    | 49
-rw-r--r--  sys/kern/vfs_bio.c         | 68
-rw-r--r--  sys/sys/buf.h              |  2
-rw-r--r--  sys/sys/systm.h            |  2
-rw-r--r--  sys/vm/vm.h                | 17
-rw-r--r--  sys/vm/vm_init.c           | 89
12 files changed, 239 insertions(+), 43 deletions(-)
diff --git a/sys/alpha/alpha/machdep.c b/sys/alpha/alpha/machdep.c
index 34dcfe0..54c7b65 100644
--- a/sys/alpha/alpha/machdep.c
+++ b/sys/alpha/alpha/machdep.c
@@ -228,9 +228,7 @@ void osendsig(sig_t catcher, int sig, sigset_t *mask, u_long code);
static void identifycpu __P((void));
-static vm_offset_t buffer_sva, buffer_eva;
-vm_offset_t clean_sva, clean_eva;
-static vm_offset_t pager_sva, pager_eva;
+struct kva_md_info kmi;
/*
* Hooked into the shutdown chain; if the system is to be halted,
@@ -248,13 +246,6 @@ static void
cpu_startup(dummy)
void *dummy;
{
- register unsigned i;
- register caddr_t v;
- vm_offset_t maxaddr;
- vm_size_t size = 0;
- vm_offset_t firstaddr;
- vm_offset_t minaddr;
-
/*
* Good {morning,afternoon,evening,night}.
*/
@@ -281,6 +272,9 @@ cpu_startup(dummy)
}
}
+ vm_ksubmap_init(&kmi);
+
+#if 0
/*
* Calculate callout wheel size
*/
@@ -387,6 +381,7 @@ again:
}
mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
+#endif
#if defined(USERCONFIG)
#if defined(USERCONFIG_BOOT)
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index 7f2a6e8..8e4428a 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -774,7 +774,7 @@ pmap_get_asn(pmap_t pmap)
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va)
{
- if ((va < clean_sva) || (va >= clean_eva))
+ if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
return 1;
else
return 0;
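
The same one-line substitution recurs in the amd64 and i386 pmaps below:
the clean_sva/clean_eva globals have moved into the shared kva_md_info
structure, so machine-dependent code now reads them through kmi. The
predicate itself is unchanged; a compressed restatement of its logic (an
illustrative sketch, not part of the commit):

    /*
     * Track modified bits only for addresses outside the clean submap;
     * buffer cache pages mapped in [kmi.clean_sva, kmi.clean_eva) manage
     * their own dirty state, so the pmap layer skips them.
     */
    static PMAP_INLINE int
    pmap_track_modified(vm_offset_t va)
    {
            return (va < kmi.clean_sva || va >= kmi.clean_eva);
    }
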
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index f5c2694..95216bc 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -198,9 +198,8 @@ vm_offset_t phys_avail[10];
/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)
-static vm_offset_t buffer_sva, buffer_eva;
-vm_offset_t clean_sva, clean_eva;
-static vm_offset_t pager_sva, pager_eva;
+struct kva_md_info kmi;
+
static struct trapframe proc0_tf;
#ifndef SMP
static struct globaldata __globaldata;
@@ -213,14 +212,6 @@ static void
cpu_startup(dummy)
void *dummy;
{
- register unsigned i;
- register caddr_t v;
- vm_offset_t maxaddr;
- vm_size_t size = 0;
- int firstaddr;
- vm_offset_t minaddr;
- int physmem_est; /* in pages */
-
/*
* Good {morning,afternoon,evening,night}.
*/
@@ -250,6 +241,9 @@ cpu_startup(dummy)
}
}
+ vm_ksubmap_init(&kmi);
+
+#if 0
/*
* Calculate callout wheel size
*/
@@ -387,6 +381,7 @@ again:
}
mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
+#endif
#if defined(USERCONFIG)
userconfig();
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 7a32c98b..f6d0984 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -547,7 +547,7 @@ pmap_nw_modified(pt_entry_t ptea)
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va)
{
- if ((va < clean_sva) || (va >= clean_eva))
+ if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
return 1;
else
return 0;
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index f5c2694..95216bc 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -198,9 +198,8 @@ vm_offset_t phys_avail[10];
/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)
-static vm_offset_t buffer_sva, buffer_eva;
-vm_offset_t clean_sva, clean_eva;
-static vm_offset_t pager_sva, pager_eva;
+struct kva_md_info kmi;
+
static struct trapframe proc0_tf;
#ifndef SMP
static struct globaldata __globaldata;
@@ -213,14 +212,6 @@ static void
cpu_startup(dummy)
void *dummy;
{
- register unsigned i;
- register caddr_t v;
- vm_offset_t maxaddr;
- vm_size_t size = 0;
- int firstaddr;
- vm_offset_t minaddr;
- int physmem_est; /* in pages */
-
/*
* Good {morning,afternoon,evening,night}.
*/
@@ -250,6 +241,9 @@ cpu_startup(dummy)
}
}
+ vm_ksubmap_init(&kmi);
+
+#if 0
/*
* Calculate callout wheel size
*/
@@ -387,6 +381,7 @@ again:
}
mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
+#endif
#if defined(USERCONFIG)
userconfig();
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 7a32c98b..f6d0984 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -547,7 +547,7 @@ pmap_nw_modified(pt_entry_t ptea)
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va)
{
- if ((va < clean_sva) || (va >= clean_eva))
+ if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
return 1;
else
return 0;
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index 4f88bab..da29c55 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -62,6 +62,55 @@ struct mtx callout_lock;
static struct callout *nextsoftcheck; /* Next callout to be checked. */
/*
+ * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
+ *
+ * This code is called very early in the kernel initialization sequence,
+ * and may be called more than once.
+ */
+caddr_t
+kern_timeout_callwheel_alloc(caddr_t v)
+{
+ /*
+ * Calculate callout wheel size
+ */
+ for (callwheelsize = 1, callwheelbits = 0;
+ callwheelsize < ncallout;
+ callwheelsize <<= 1, ++callwheelbits)
+ ;
+ callwheelmask = callwheelsize - 1;
+
+ callout = (struct callout *)v;
+ v = (caddr_t)(callout + ncallout);
+ callwheel = (struct callout_tailq *)v;
+ v = (caddr_t)(callwheel + callwheelsize);
+ return(v);
+}
+
+/*
+ * kern_timeout_callwheel_init() - initialize previously reserved callwheel
+ * space.
+ *
+ * This code is called just once, after the space reserved for the
+ * callout wheel has been finalized.
+ */
+void
+kern_timeout_callwheel_init(void)
+{
+ int i;
+
+ SLIST_INIT(&callfree);
+ for (i = 0; i < ncallout; i++) {
+ callout_init(&callout[i], 0);
+ callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
+ SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
+ }
+ for (i = 0; i < callwheelsize; i++) {
+ TAILQ_INIT(&callwheel[i]);
+ }
+ mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);
+}
+
+/*
* The callout mechanism is based on the work of Adam M. Costello and
* George Varghese, published in a technical report entitled "Redesigning
* the BSD Callout and Timer Facilities" and modified slightly for inclusion
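
kern_timeout_callwheel_alloc() rounds ncallout up to the next power of
two so that callwheelmask can replace a modulo when hashing an expiry
tick to a wheel bucket (bucket = ticks & callwheelmask). A
self-contained sketch of that sizing arithmetic, using an assumed
example value for ncallout:

    #include <stdio.h>

    int
    main(void)
    {
            int ncallout = 100;     /* assumed example value */
            int size, bits;

            /* Same loop as above: smallest power of two >= ncallout. */
            for (size = 1, bits = 0; size < ncallout; size <<= 1, ++bits)
                    ;
            /* Prints: callwheelsize=128 callwheelbits=7 callwheelmask=0x7f */
            printf("callwheelsize=%d callwheelbits=%d callwheelmask=%#x\n",
                size, bits, size - 1);
            return (0);
    }
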
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 94baa5a..6333f3e 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -319,19 +319,73 @@ bd_speedup(void)
}
/*
- * Initialize buffer headers and related structures.
+ * Calculate buffer cache scaling values and reserve space for buffer
+ * headers. This is called during low level kernel initialization and
+ * may be called more than once. We CANNOT write to the memory area
+ * being reserved at this time.
*/
-
caddr_t
-bufhashinit(caddr_t vaddr)
+kern_vfs_bio_buffer_alloc(caddr_t v, int physmem_est)
{
- /* first, make a null hash table */
+ /*
+ * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
+ * For the first 64MB of ram nominally allocate sufficient buffers to
+ * cover 1/4 of our ram. Beyond the first 64MB allocate additional
+ * buffers to cover 1/20 of our ram over 64MB. When auto-sizing
+ * the buffer cache we limit the eventual kva reservation to
+ * maxbcache bytes.
+ *
+ * factor represents the 1/4 x ram conversion.
+ */
+ if (nbuf == 0) {
+ int factor = 4 * BKVASIZE / PAGE_SIZE;
+
+ nbuf = 50;
+ if (physmem_est > 1024)
+ nbuf += min((physmem_est - 1024) / factor,
+ 16384 / factor);
+ if (physmem_est > 16384)
+ nbuf += (physmem_est - 16384) * 2 / (factor * 5);
+
+ if (maxbcache && nbuf > maxbcache / BKVASIZE)
+ nbuf = maxbcache / BKVASIZE;
+ }
+
+ /*
+ * Do not allow the buffer_map to be more than 1/2 the size of the
+ * kernel_map.
+ */
+ if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
+ (BKVASIZE * 2)) {
+ nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
+ (BKVASIZE * 2);
+ printf("Warning: nbufs capped at %d\n", nbuf);
+ }
+
+ /*
+ * swbufs are used as temporary holders for I/O, such as paging I/O.
+ * We have no fewer than 16 and no more than 256.
+ */
+ nswbuf = max(min(nbuf/4, 256), 16);
+
+ /*
+ * Reserve space for the buffer cache buffers
+ */
+ swbuf = (void *)v;
+ v = (caddr_t)(swbuf + nswbuf);
+ buf = (void *)v;
+ v = (caddr_t)(buf + nbuf);
+
+ /*
+ * Calculate the hash table size and reserve space
+ */
for (bufhashmask = 8; bufhashmask < nbuf / 4; bufhashmask <<= 1)
;
- bufhashtbl = (void *)vaddr;
- vaddr = vaddr + sizeof(*bufhashtbl) * bufhashmask;
+ bufhashtbl = (void *)v;
+ v = (caddr_t)(bufhashtbl + bufhashmask);
--bufhashmask;
- return(vaddr);
+
+ return(v);
}
void
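
The nbuf autosizing policy above is easiest to follow with concrete
numbers. A standalone sketch of the arithmetic (the PAGE_SIZE and
BKVASIZE values and the 128MB machine are assumptions for illustration,
and the maxbcache and kernel_map caps are left out):

    #include <stdio.h>

    #define PAGE_SIZE       4096    /* assumed */
    #define BKVASIZE        16384   /* assumed */

    static int imin(int a, int b) { return (a < b ? a : b); }
    static int imax(int a, int b) { return (a > b ? a : b); }

    int
    main(void)
    {
            int physmem_est = 32768;                /* pages: 128MB of RAM */
            int factor = 4 * BKVASIZE / PAGE_SIZE;  /* 16: the 1/4-of-RAM scale */
            int nbuf = 50, nswbuf;

            if (physmem_est > 1024)         /* beyond the first 4MB */
                    nbuf += imin((physmem_est - 1024) / factor, 16384 / factor);
            if (physmem_est > 16384)        /* beyond the first 64MB */
                    nbuf += (physmem_est - 16384) * 2 / (factor * 5);
            nswbuf = imax(imin(nbuf / 4, 256), 16);

            /* Prints: nbuf=1483 nswbuf=256 */
            printf("nbuf=%d nswbuf=%d\n", nbuf, nswbuf);
            return (0);
    }

At BKVASIZE granularity those 1483 buffers reserve roughly 23MB of
buffer KVA, comfortably under the 1/2-of-kernel_map cap on typical
configurations.
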
diff --git a/sys/sys/buf.h b/sys/sys/buf.h
index 30fcd13..21646ad 100644
--- a/sys/sys/buf.h
+++ b/sys/sys/buf.h
@@ -513,7 +513,7 @@ extern TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];
struct uio;
-caddr_t bufhashinit __P((caddr_t));
+caddr_t kern_vfs_bio_buffer_alloc __P((caddr_t v, int physmem_est));
void bufinit __P((void));
void bwillwrite __P((void));
int buf_dirty_count_severe __P((void));
diff --git a/sys/sys/systm.h b/sys/sys/systm.h
index c00441d..469a490 100644
--- a/sys/sys/systm.h
+++ b/sys/sys/systm.h
@@ -208,6 +208,8 @@ typedef void timeout_t __P((void *)); /* timeout function type */
void callout_handle_init __P((struct callout_handle *));
struct callout_handle timeout __P((timeout_t *, void *, int));
void untimeout __P((timeout_t *, void *, struct callout_handle));
+caddr_t kern_timeout_callwheel_alloc __P((caddr_t v));
+void kern_timeout_callwheel_init __P((void));
/* Stubs for obsolete functions that used to be for interrupt management */
static __inline void spl0(void) { return; }
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index 38f04ac..710d854 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -113,4 +113,21 @@ struct vm_page;
typedef struct vm_page *vm_page_t;
#endif
+/*
+ * Information passed from the machine-independent VM initialization code
+ * for use by machine-dependent code (mainly for MMU support)
+ */
+struct kva_md_info {
+ vm_offset_t buffer_sva;
+ vm_offset_t buffer_eva;
+ vm_offset_t clean_sva;
+ vm_offset_t clean_eva;
+ vm_offset_t pager_sva;
+ vm_offset_t pager_eva;
+};
+
+extern struct kva_md_info kmi;
+extern void vm_ksubmap_init(struct kva_md_info *kmi);
+
#endif /* VM_H */
+
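
How the six fields relate follows from the kmem_suballoc() calls in
vm_init.c below: buffer_map and pager_map are carved out of clean_map,
which is itself carved out of kernel_map. A sketch of the resulting
ranges:

    /*
     * kernel_map
     *   `- clean_map:       [clean_sva, clean_eva)
     *        |- buffer_map: [buffer_sva, buffer_eva)  nbuf * BKVASIZE
     *        `- pager_map:  [pager_sva, pager_eva)    nswbuf * MAXPHYS
     *                                                   + pager_map_size
     */
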
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index cec1997..1fbcb07 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -74,8 +74,12 @@
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
+#include <sys/bio.h>
+#include <sys/buf.h>
#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
@@ -119,3 +123,88 @@ vm_mem_init(dummy)
pmap_init(avail_start, avail_end);
vm_pager_init();
}
+
+void
+vm_ksubmap_init(struct kva_md_info *kmi)
+{
+ vm_offset_t firstaddr;
+ caddr_t v;
+ vm_size_t size = 0;
+ int physmem_est;
+ vm_offset_t minaddr;
+ vm_offset_t maxaddr;
+
+ /*
+ * Allocate space for system data structures.
+ * The first available kernel virtual address is in "v".
+ * As pages of kernel virtual memory are allocated, "v" is incremented.
+ * As pages of memory are allocated and cleared,
+ * "firstaddr" is incremented.
+ * An index into the kernel page table corresponding to the
+ * virtual memory address maintained in "v" is kept in "mapaddr".
+ */
+
+ /*
+ * Make two passes. The first pass calculates how much memory is
+ * needed and allocates it. The second pass assigns virtual
+ * addresses to the various data structures.
+ */
+ firstaddr = 0;
+again:
+ v = (caddr_t)firstaddr;
+
+ v = kern_timeout_callwheel_alloc(v);
+
+ /*
+ * Discount the physical memory larger than the size of kernel_map
+ * to avoid eating up all of KVA space.
+ */
+ if (kernel_map->first_free == NULL) {
+ printf("Warning: no free entries in kernel_map.\n");
+ physmem_est = physmem;
+ } else {
+ physmem_est = min(physmem, btoc(kernel_map->max_offset -
+ kernel_map->min_offset));
+ }
+
+ v = kern_vfs_bio_buffer_alloc(v, physmem_est);
+
+ /*
+ * End of first pass, size has been calculated so allocate memory
+ */
+ if (firstaddr == 0) {
+ size = (vm_size_t)((char *)v - firstaddr);
+ firstaddr = kmem_alloc(kernel_map, round_page(size));
+ if (firstaddr == 0)
+ panic("startup: no room for tables");
+ goto again;
+ }
+
+ /*
+ * End of second pass, addresses have been assigned
+ */
+ if ((vm_size_t)((char *)v - firstaddr) != size)
+ panic("startup: table size inconsistency");
+
+ clean_map = kmem_suballoc(kernel_map, &kmi->clean_sva, &kmi->clean_eva,
+ (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
+ buffer_map = kmem_suballoc(clean_map, &kmi->buffer_sva,
+ &kmi->buffer_eva, (nbuf*BKVASIZE));
+ buffer_map->system_map = 1;
+ pager_map = kmem_suballoc(clean_map, &kmi->pager_sva, &kmi->pager_eva,
+ (nswbuf*MAXPHYS) + pager_map_size);
+ pager_map->system_map = 1;
+ exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+ (16*(ARG_MAX+(PAGE_SIZE*3))));
+
+ /*
+ * XXX: Mbuf system machine-specific initializations should
+ * go here, if anywhere.
+ */
+
+ /*
+ * Initialize the callouts we just allocated.
+ */
+ kern_timeout_callwheel_init();
+}
+
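
vm_ksubmap_init() preserves the classic BSD two-pass startup idiom: the
first pass runs the *_alloc() helpers with firstaddr == 0 purely to
measure how much contiguous KVA the tables need, kmem_alloc() reserves
that much, and the second pass re-runs the same helpers from the real
base so every pointer lands inside the reservation. A minimal userland
sketch of the idiom (illustrative names; malloc() stands in for
kmem_alloc(), and the null-based pointer arithmetic mirrors the kernel's
flat-address-space assumption):

    #include <stdio.h>
    #include <stdlib.h>

    #define TABLE_BYTES     4096    /* assumed size of one table */

    static char *table;

    /*
     * Sizing/placement helper in the style of
     * kern_timeout_callwheel_alloc(): pure pointer arithmetic, so it is
     * safe to call before any memory has actually been reserved.
     */
    static char *
    reserve_table(char *v)
    {
            table = v;
            return (v + TABLE_BYTES);
    }

    int
    main(void)
    {
            char *firstaddr = 0, *v;
            size_t size = 0;
    again:
            v = reserve_table(firstaddr);
            if (firstaddr == 0) {
                    size = (size_t)(v - firstaddr); /* pass 1: measure */
                    firstaddr = malloc(size);       /* kmem_alloc() stand-in */
                    if (firstaddr == NULL)
                            return (1);             /* "no room for tables" */
                    goto again;                     /* pass 2: assign */
            }
            printf("reserved %zu bytes; table at %p\n", size, (void *)table);
            free(firstaddr);
            return (0);
    }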