summary refs log tree commit diff stats
diff options
context:
space:
mode:
authoralc <alc@FreeBSD.org>2014-11-15 23:40:44 +0000
committeralc <alc@FreeBSD.org>2014-11-15 23:40:44 +0000
commitaeebd38e4bfe2d40d4c39fab6702b4140abbada4 (patch)
tree0f1c627f32faa09851b8a412dd9586dabb0109ad
parent07e17a18ac8e355a9f6b268acbe7b870e31f7fad (diff)
downloadFreeBSD-src-aeebd38e4bfe2d40d4c39fab6702b4140abbada4.zip
FreeBSD-src-aeebd38e4bfe2d40d4c39fab6702b4140abbada4.tar.gz
Enable the use of VM_PHYSSEG_SPARSE on amd64 and i386, making it the default
on i386 PAE. Previously, VM_PHYSSEG_SPARSE could not be used on amd64 and i386 because vm_page_startup() would not create vm_page structures for the kernel page table pages allocated during pmap_bootstrap(), but those vm_page structures are needed when the kernel attempts to promote the corresponding kernel virtual addresses to superpage mappings. To address this problem, a new public function, vm_phys_add_seg(), is introduced and vm_phys_init() is updated to reflect the creation of vm_phys_seg structures by calls to vm_phys_add_seg().

Discussed with:	Svatopluk Kraus
MFC after:	3 weeks
Sponsored by:	EMC / Isilon Storage Division
-rw-r--r--sys/amd64/amd64/pmap.c9
-rw-r--r--sys/i386/i386/pmap.c9
-rw-r--r--sys/i386/include/vmparam.h8
-rw-r--r--sys/vm/vm_page.c29
-rw-r--r--sys/vm/vm_phys.c101
-rw-r--r--sys/vm/vm_phys.h1
6 files changed, 107 insertions(+), 50 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 29e6980..6a4077c 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -833,6 +833,15 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
*/
create_pagetables(firstaddr);
+ /*
+ * Add a physical memory segment (vm_phys_seg) corresponding to the
+ * preallocated kernel page table pages so that vm_page structures
+ * representing these pages will be created. The vm_page structures
+ * are required for promotion of the corresponding kernel virtual
+ * addresses to superpage mappings.
+ */
+ vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));
+
virtual_avail = (vm_offset_t) KERNBASE + *firstaddr;
virtual_avail = pmap_kmem_choose(virtual_avail);
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 8513724..68b44e9 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -375,6 +375,15 @@ pmap_bootstrap(vm_paddr_t firstaddr)
int i;
/*
+ * Add a physical memory segment (vm_phys_seg) corresponding to the
+ * preallocated kernel page table pages so that vm_page structures
+ * representing these pages will be created. The vm_page structures
+ * are required for promotion of the corresponding kernel virtual
+ * addresses to superpage mappings.
+ */
+ vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));
+
+ /*
* Initialize the first available kernel virtual address. However,
* using "firstaddr" may waste a few pages of the kernel virtual
* address space, because locore may not have mapped every physical
diff --git a/sys/i386/include/vmparam.h b/sys/i386/include/vmparam.h
index 975b302..5dc5669 100644
--- a/sys/i386/include/vmparam.h
+++ b/sys/i386/include/vmparam.h
@@ -64,9 +64,15 @@
#endif
/*
- * The physical address space is densely populated.
+ * Choose between DENSE and SPARSE based on whether lower execution time or
+ * lower kernel address space consumption is desired. Under PAE, kernel
+ * address space is often in short supply.
*/
+#ifdef PAE
+#define VM_PHYSSEG_SPARSE
+#else
#define VM_PHYSSEG_DENSE
+#endif
/*
* The number of PHYSSEG entries must be one greater than the number
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 87b39f1..b2877c1 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -304,9 +304,23 @@ vm_page_startup(vm_offset_t vaddr)
phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
}
+#ifdef XEN
+ /*
+ * There is no obvious reason why i386 PV Xen needs vm_page structs
+ * created for these pseudo-physical addresses. XXX
+ */
+ vm_phys_add_seg(0, phys_avail[0]);
+#endif
+
low_water = phys_avail[0];
high_water = phys_avail[1];
+ for (i = 0; i < vm_phys_nsegs; i++) {
+ if (vm_phys_segs[i].start < low_water)
+ low_water = vm_phys_segs[i].start;
+ if (vm_phys_segs[i].end > high_water)
+ high_water = vm_phys_segs[i].end;
+ }
for (i = 0; phys_avail[i + 1]; i += 2) {
vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];
@@ -320,10 +334,6 @@ vm_page_startup(vm_offset_t vaddr)
high_water = phys_avail[i + 1];
}
-#ifdef XEN
- low_water = 0;
-#endif
-
end = phys_avail[biggestone+1];
/*
@@ -391,6 +401,10 @@ vm_page_startup(vm_offset_t vaddr)
first_page = low_water / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
page_range = 0;
+ for (i = 0; i < vm_phys_nsegs; i++) {
+ page_range += atop(vm_phys_segs[i].end -
+ vm_phys_segs[i].start);
+ }
for (i = 0; phys_avail[i + 1] != 0; i += 2)
page_range += atop(phys_avail[i + 1] - phys_avail[i]);
#elif defined(VM_PHYSSEG_DENSE)
@@ -433,6 +447,13 @@ vm_page_startup(vm_offset_t vaddr)
phys_avail[biggestone + 1] = new_end;
/*
+ * Add physical memory segments corresponding to the available
+ * physical pages.
+ */
+ for (i = 0; phys_avail[i + 1] != 0; i += 2)
+ vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
+
+ /*
* Clear all of the page structures
*/
bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index be3d5be..95369a8 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -301,29 +301,19 @@ static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, int domain)
{
struct vm_phys_seg *seg;
-#ifdef VM_PHYSSEG_SPARSE
- long pages;
- int segind;
- pages = 0;
- for (segind = 0; segind < vm_phys_nsegs; segind++) {
- seg = &vm_phys_segs[segind];
- pages += atop(seg->end - seg->start);
- }
-#endif
KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
KASSERT(domain < vm_ndomains,
("vm_phys_create_seg: invalid domain provided"));
seg = &vm_phys_segs[vm_phys_nsegs++];
+ while (seg > vm_phys_segs && (seg - 1)->start >= end) {
+ *seg = *(seg - 1);
+ seg--;
+ }
seg->start = start;
seg->end = end;
seg->domain = domain;
-#ifdef VM_PHYSSEG_SPARSE
- seg->first_page = &vm_page_array[pages];
-#else
- seg->first_page = PHYS_TO_VM_PAGE(start);
-#endif
seg->free_queues = &vm_phys_free_queues[domain][flind];
}
@@ -357,47 +347,68 @@ vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind)
}
/*
- * Initialize the physical memory allocator.
+ * Add a physical memory segment.
*/
void
-vm_phys_init(void)
+vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
- struct vm_freelist *fl;
- int dom, flind, i, oind, pind;
- for (i = 0; phys_avail[i + 1] != 0; i += 2) {
+ KASSERT((start & PAGE_MASK) == 0,
+ ("vm_phys_define_seg: start is not page aligned"));
+ KASSERT((end & PAGE_MASK) == 0,
+ ("vm_phys_define_seg: end is not page aligned"));
#ifdef VM_FREELIST_ISADMA
- if (phys_avail[i] < 16777216) {
- if (phys_avail[i + 1] > 16777216) {
- vm_phys_create_seg(phys_avail[i], 16777216,
- VM_FREELIST_ISADMA);
- vm_phys_create_seg(16777216, phys_avail[i + 1],
- VM_FREELIST_DEFAULT);
- } else {
- vm_phys_create_seg(phys_avail[i],
- phys_avail[i + 1], VM_FREELIST_ISADMA);
- }
- if (VM_FREELIST_ISADMA >= vm_nfreelists)
- vm_nfreelists = VM_FREELIST_ISADMA + 1;
+ if (start < 16777216) {
+ if (end > 16777216) {
+ vm_phys_create_seg(start, 16777216,
+ VM_FREELIST_ISADMA);
+ vm_phys_create_seg(16777216, end, VM_FREELIST_DEFAULT);
} else
+ vm_phys_create_seg(start, end, VM_FREELIST_ISADMA);
+ if (VM_FREELIST_ISADMA >= vm_nfreelists)
+ vm_nfreelists = VM_FREELIST_ISADMA + 1;
+ } else
#endif
#ifdef VM_FREELIST_HIGHMEM
- if (phys_avail[i + 1] > VM_HIGHMEM_ADDRESS) {
- if (phys_avail[i] < VM_HIGHMEM_ADDRESS) {
- vm_phys_create_seg(phys_avail[i],
- VM_HIGHMEM_ADDRESS, VM_FREELIST_DEFAULT);
- vm_phys_create_seg(VM_HIGHMEM_ADDRESS,
- phys_avail[i + 1], VM_FREELIST_HIGHMEM);
- } else {
- vm_phys_create_seg(phys_avail[i],
- phys_avail[i + 1], VM_FREELIST_HIGHMEM);
- }
- if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
- vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
+ if (end > VM_HIGHMEM_ADDRESS) {
+ if (start < VM_HIGHMEM_ADDRESS) {
+ vm_phys_create_seg(start, VM_HIGHMEM_ADDRESS,
+ VM_FREELIST_DEFAULT);
+ vm_phys_create_seg(VM_HIGHMEM_ADDRESS, end,
+ VM_FREELIST_HIGHMEM);
} else
+ vm_phys_create_seg(start, end, VM_FREELIST_HIGHMEM);
+ if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
+ vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
+ } else
+#endif
+ vm_phys_create_seg(start, end, VM_FREELIST_DEFAULT);
+}
+
+/*
+ * Initialize the physical memory allocator.
+ */
+void
+vm_phys_init(void)
+{
+ struct vm_freelist *fl;
+ struct vm_phys_seg *seg;
+#ifdef VM_PHYSSEG_SPARSE
+ long pages;
+#endif
+ int dom, flind, oind, pind, segind;
+
+#ifdef VM_PHYSSEG_SPARSE
+ pages = 0;
+#endif
+ for (segind = 0; segind < vm_phys_nsegs; segind++) {
+ seg = &vm_phys_segs[segind];
+#ifdef VM_PHYSSEG_SPARSE
+ seg->first_page = &vm_page_array[pages];
+ pages += atop(seg->end - seg->start);
+#else
+ seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
- vm_phys_create_seg(phys_avail[i], phys_avail[i + 1],
- VM_FREELIST_DEFAULT);
}
for (dom = 0; dom < vm_ndomains; dom++) {
for (flind = 0; flind < vm_nfreelists; flind++) {
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
index 6d94e07..7444046 100644
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -69,6 +69,7 @@ extern int vm_phys_nsegs;
* The following functions are only to be used by the virtual memory system.
*/
void vm_phys_add_page(vm_paddr_t pa);
+void vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end);
vm_page_t vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
u_long alignment, vm_paddr_t boundary);
vm_page_t vm_phys_alloc_freelist_pages(int flind, int pool, int order);
OpenPOWER on IntegriCloud