summary refs log tree commit diff stats
path: root/sys/vm
diff options
context:
space:
mode:
author: alc <alc@FreeBSD.org> 2015-01-02 17:45:52 +0000
committer: alc <alc@FreeBSD.org> 2015-01-02 17:45:52 +0000
commit: d5a13901bff0d31efdcb363925086b0e00bea32b (patch)
tree: fa41565eff257f888b11c0e324e0bc2f147f72ad /sys/vm
parent: 5309e52c3c33e83023a6edac5ef5dea2b34b5c55 (diff)
download: FreeBSD-src-d5a13901bff0d31efdcb363925086b0e00bea32b.zip
download: FreeBSD-src-d5a13901bff0d31efdcb363925086b0e00bea32b.tar.gz
MFC r273701, r274556
By the time that pmap_init() runs, vm_phys_segs[] has been initialized. Obtaining the end of memory address from vm_phys_segs[] is a little easier than obtaining it from phys_avail[]. Enable the use of VM_PHYSSEG_SPARSE on amd64 and i386, making it the default on i386 PAE. (The use of VM_PHYSSEG_SPARSE on i386 PAE saves us some precious kernel virtual address space that would have been wasted on unused vm_page structures.)
Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/vm_page.c  29
-rw-r--r--  sys/vm/vm_phys.c  101
-rw-r--r--  sys/vm/vm_phys.h  1
3 files changed, 82 insertions, 49 deletions
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 2f14d12..ab3bd5e 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -307,9 +307,23 @@ vm_page_startup(vm_offset_t vaddr)
phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
}
+#ifdef XEN
+ /*
+ * There is no obvious reason why i386 PV Xen needs vm_page structs
+ * created for these pseudo-physical addresses. XXX
+ */
+ vm_phys_add_seg(0, phys_avail[0]);
+#endif
+
low_water = phys_avail[0];
high_water = phys_avail[1];
+ for (i = 0; i < vm_phys_nsegs; i++) {
+ if (vm_phys_segs[i].start < low_water)
+ low_water = vm_phys_segs[i].start;
+ if (vm_phys_segs[i].end > high_water)
+ high_water = vm_phys_segs[i].end;
+ }
for (i = 0; phys_avail[i + 1]; i += 2) {
vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];
@@ -323,10 +337,6 @@ vm_page_startup(vm_offset_t vaddr)
high_water = phys_avail[i + 1];
}
-#ifdef XEN
- low_water = 0;
-#endif
-
end = phys_avail[biggestone+1];
/*
@@ -394,6 +404,10 @@ vm_page_startup(vm_offset_t vaddr)
first_page = low_water / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
page_range = 0;
+ for (i = 0; i < vm_phys_nsegs; i++) {
+ page_range += atop(vm_phys_segs[i].end -
+ vm_phys_segs[i].start);
+ }
for (i = 0; phys_avail[i + 1] != 0; i += 2)
page_range += atop(phys_avail[i + 1] - phys_avail[i]);
#elif defined(VM_PHYSSEG_DENSE)
@@ -436,6 +450,13 @@ vm_page_startup(vm_offset_t vaddr)
phys_avail[biggestone + 1] = new_end;
/*
+ * Add physical memory segments corresponding to the available
+ * physical pages.
+ */
+ for (i = 0; phys_avail[i + 1] != 0; i += 2)
+ vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
+
+ /*
* Clear all of the page structures
*/
bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index b354a8d..aad2e13 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -246,29 +246,19 @@ static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, int domain)
{
struct vm_phys_seg *seg;
-#ifdef VM_PHYSSEG_SPARSE
- long pages;
- int segind;
- pages = 0;
- for (segind = 0; segind < vm_phys_nsegs; segind++) {
- seg = &vm_phys_segs[segind];
- pages += atop(seg->end - seg->start);
- }
-#endif
KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
KASSERT(domain < vm_ndomains,
("vm_phys_create_seg: invalid domain provided"));
seg = &vm_phys_segs[vm_phys_nsegs++];
+ while (seg > vm_phys_segs && (seg - 1)->start >= end) {
+ *seg = *(seg - 1);
+ seg--;
+ }
seg->start = start;
seg->end = end;
seg->domain = domain;
-#ifdef VM_PHYSSEG_SPARSE
- seg->first_page = &vm_page_array[pages];
-#else
- seg->first_page = PHYS_TO_VM_PAGE(start);
-#endif
seg->free_queues = &vm_phys_free_queues[domain][flind];
}
@@ -302,47 +292,68 @@ vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind)
}
/*
- * Initialize the physical memory allocator.
+ * Add a physical memory segment.
*/
void
-vm_phys_init(void)
+vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
- struct vm_freelist *fl;
- int dom, flind, i, oind, pind;
- for (i = 0; phys_avail[i + 1] != 0; i += 2) {
+ KASSERT((start & PAGE_MASK) == 0,
+ ("vm_phys_define_seg: start is not page aligned"));
+ KASSERT((end & PAGE_MASK) == 0,
+ ("vm_phys_define_seg: end is not page aligned"));
#ifdef VM_FREELIST_ISADMA
- if (phys_avail[i] < 16777216) {
- if (phys_avail[i + 1] > 16777216) {
- vm_phys_create_seg(phys_avail[i], 16777216,
- VM_FREELIST_ISADMA);
- vm_phys_create_seg(16777216, phys_avail[i + 1],
- VM_FREELIST_DEFAULT);
- } else {
- vm_phys_create_seg(phys_avail[i],
- phys_avail[i + 1], VM_FREELIST_ISADMA);
- }
- if (VM_FREELIST_ISADMA >= vm_nfreelists)
- vm_nfreelists = VM_FREELIST_ISADMA + 1;
+ if (start < 16777216) {
+ if (end > 16777216) {
+ vm_phys_create_seg(start, 16777216,
+ VM_FREELIST_ISADMA);
+ vm_phys_create_seg(16777216, end, VM_FREELIST_DEFAULT);
} else
+ vm_phys_create_seg(start, end, VM_FREELIST_ISADMA);
+ if (VM_FREELIST_ISADMA >= vm_nfreelists)
+ vm_nfreelists = VM_FREELIST_ISADMA + 1;
+ } else
#endif
#ifdef VM_FREELIST_HIGHMEM
- if (phys_avail[i + 1] > VM_HIGHMEM_ADDRESS) {
- if (phys_avail[i] < VM_HIGHMEM_ADDRESS) {
- vm_phys_create_seg(phys_avail[i],
- VM_HIGHMEM_ADDRESS, VM_FREELIST_DEFAULT);
- vm_phys_create_seg(VM_HIGHMEM_ADDRESS,
- phys_avail[i + 1], VM_FREELIST_HIGHMEM);
- } else {
- vm_phys_create_seg(phys_avail[i],
- phys_avail[i + 1], VM_FREELIST_HIGHMEM);
- }
- if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
- vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
+ if (end > VM_HIGHMEM_ADDRESS) {
+ if (start < VM_HIGHMEM_ADDRESS) {
+ vm_phys_create_seg(start, VM_HIGHMEM_ADDRESS,
+ VM_FREELIST_DEFAULT);
+ vm_phys_create_seg(VM_HIGHMEM_ADDRESS, end,
+ VM_FREELIST_HIGHMEM);
} else
+ vm_phys_create_seg(start, end, VM_FREELIST_HIGHMEM);
+ if (VM_FREELIST_HIGHMEM >= vm_nfreelists)
+ vm_nfreelists = VM_FREELIST_HIGHMEM + 1;
+ } else
+#endif
+ vm_phys_create_seg(start, end, VM_FREELIST_DEFAULT);
+}
+
+/*
+ * Initialize the physical memory allocator.
+ */
+void
+vm_phys_init(void)
+{
+ struct vm_freelist *fl;
+ struct vm_phys_seg *seg;
+#ifdef VM_PHYSSEG_SPARSE
+ long pages;
+#endif
+ int dom, flind, oind, pind, segind;
+
+#ifdef VM_PHYSSEG_SPARSE
+ pages = 0;
+#endif
+ for (segind = 0; segind < vm_phys_nsegs; segind++) {
+ seg = &vm_phys_segs[segind];
+#ifdef VM_PHYSSEG_SPARSE
+ seg->first_page = &vm_page_array[pages];
+ pages += atop(seg->end - seg->start);
+#else
+ seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
- vm_phys_create_seg(phys_avail[i], phys_avail[i + 1],
- VM_FREELIST_DEFAULT);
}
for (dom = 0; dom < vm_ndomains; dom++) {
for (flind = 0; flind < vm_nfreelists; flind++) {
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
index f39943c..62fa0d4 100644
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -69,6 +69,7 @@ extern int vm_phys_nsegs;
* The following functions are only to be used by the virtual memory system.
*/
void vm_phys_add_page(vm_paddr_t pa);
+void vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end);
vm_page_t vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
u_long alignment, vm_paddr_t boundary);
vm_page_t vm_phys_alloc_freelist_pages(int flind, int pool, int order);
OpenPOWER on IntegriCloud