author     jhb <jhb@FreeBSD.org>   2001-03-07 01:04:17 +0000
committer  jhb <jhb@FreeBSD.org>   2001-03-07 01:04:17 +0000
commit     74a74a3282c24c442a126202ebc0c195292dc824 (patch)
tree       2e12e1be222f421cccfc281714d8cde52edae28e
parent     690911ace637b4f0b93d21cc361631a80de28377 (diff)
Back out the pmap_map() change for now, it isn't completely stable on the
i386.
-rw-r--r--   sys/alpha/alpha/pmap.c   18
-rw-r--r--   sys/amd64/amd64/pmap.c   20
-rw-r--r--   sys/i386/i386/pmap.c     20
-rw-r--r--   sys/ia64/ia64/pmap.c     58
-rw-r--r--   sys/vm/pmap.h             2
-rw-r--r--   sys/vm/vm_page.c         25
6 files changed, 70 insertions, 73 deletions
diff --git a/sys/alpha/alpha/pmap.c b/sys/alpha/alpha/pmap.c
index bdea119..4dd81ad 100644
--- a/sys/alpha/alpha/pmap.c
+++ b/sys/alpha/alpha/pmap.c
@@ -884,18 +884,18 @@ pmap_kremove(vm_offset_t va)
* Used to map a range of physical addresses into kernel
* virtual address space.
*
- * The value passed in '*virt' is a suggested virtual address for
- * the mapping. Architectures which can support a direct-mapped
- * physical to virtual region can return the appropriate address
- * within that region, leaving '*virt' unchanged. Other
- * architectures should map the pages starting at '*virt' and
- * update '*virt' with the first usable address after the mapped
- * region.
+ * For now, VM is already on, we only need to map the
+ * specified memory.
*/
vm_offset_t
-pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
+pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot)
{
- return ALPHA_PHYS_TO_K0SEG(start);
+ while (start < end) {
+ pmap_kenter(virt, start);
+ virt += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+ return (virt);
}
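
The comment block removed in this hunk (and repeated in the amd64, i386, and ia64 hunks below) described the pointer-based contract: the caller passes a suggested virtual address through '*virt', direct-mapped architectures return an address inside their direct map and leave '*virt' untouched, and other architectures advance '*virt' past the mapping. The restored contract is simpler: map starting at 'virt' and return the first virtual address after the mapped range. For reference only (not part of this commit), a minimal caller-side sketch of the two conventions; the helper names are hypothetical and the kernel environment (<vm/vm.h>, <vm/pmap.h>) is assumed:

	/* Before this back-out: '*virt' is a hint that may be advanced. */
	vm_offset_t
	map_table_old(vm_offset_t *vaddr, vm_offset_t pa_start, vm_offset_t pa_end)
	{
		vm_offset_t table;

		table = pmap_map(vaddr, pa_start, pa_end,
		    VM_PROT_READ | VM_PROT_WRITE);
		/* 'table' is where the data lives; '*vaddr' now points past
		 * the mapping, or is unchanged on a direct-mapped machine. */
		return (table);
	}

	/* After this back-out: 'virt' is the mapping's start; the return
	 * value is the first usable virtual address after it. */
	vm_offset_t
	map_table_new(vm_offset_t vaddr, vm_offset_t pa_start, vm_offset_t pa_end)
	{
		vm_offset_t next;

		next = pmap_map(vaddr, pa_start, pa_end,
		    VM_PROT_READ | VM_PROT_WRITE);
		/* The data lives at 'vaddr'; 'next' is the next free address. */
		return (next);
	}
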
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 4bf0ace..52a5e3c 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -711,30 +711,22 @@ pmap_kremove(va)
* Used to map a range of physical addresses into kernel
* virtual address space.
*
- * The value passed in '*virt' is a suggested virtual address for
- * the mapping. Architectures which can support a direct-mapped
- * physical to virtual region can return the appropriate address
- * within that region, leaving '*virt' unchanged. Other
- * architectures should map the pages starting at '*virt' and
- * update '*virt' with the first usable address after the mapped
- * region.
+ * For now, VM is already on, we only need to map the
+ * specified memory.
*/
vm_offset_t
pmap_map(virt, start, end, prot)
- vm_offset_t *virt;
+ vm_offset_t virt;
vm_offset_t start;
vm_offset_t end;
int prot;
{
- vm_offset_t sva = *virt;
- vm_offset_t va = sva;
while (start < end) {
- pmap_kenter(va, start);
- va += PAGE_SIZE;
+ pmap_kenter(virt, start);
+ virt += PAGE_SIZE;
start += PAGE_SIZE;
}
- *virt = va;
- return (sva);
+ return (virt);
}
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 4bf0ace..52a5e3c 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -711,30 +711,22 @@ pmap_kremove(va)
* Used to map a range of physical addresses into kernel
* virtual address space.
*
- * The value passed in '*virt' is a suggested virtual address for
- * the mapping. Architectures which can support a direct-mapped
- * physical to virtual region can return the appropriate address
- * within that region, leaving '*virt' unchanged. Other
- * architectures should map the pages starting at '*virt' and
- * update '*virt' with the first usable address after the mapped
- * region.
+ * For now, VM is already on, we only need to map the
+ * specified memory.
*/
vm_offset_t
pmap_map(virt, start, end, prot)
- vm_offset_t *virt;
+ vm_offset_t virt;
vm_offset_t start;
vm_offset_t end;
int prot;
{
- vm_offset_t sva = *virt;
- vm_offset_t va = sva;
while (start < end) {
- pmap_kenter(va, start);
- va += PAGE_SIZE;
+ pmap_kenter(virt, start);
+ virt += PAGE_SIZE;
start += PAGE_SIZE;
}
- *virt = va;
- return (sva);
+ return (virt);
}
diff --git a/sys/ia64/ia64/pmap.c b/sys/ia64/ia64/pmap.c
index 0c396c0..cd25efb 100644
--- a/sys/ia64/ia64/pmap.c
+++ b/sys/ia64/ia64/pmap.c
@@ -220,9 +220,12 @@ static int pmap_ridbits = 18;
static vm_zone_t pvzone;
static struct vm_zone pvzone_store;
static struct vm_object pvzone_obj;
+static vm_zone_t pvbootzone;
+static struct vm_zone pvbootzone_store;
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
static int pmap_pagedaemon_waken = 0;
static struct pv_entry *pvinit;
+static struct pv_entry *pvbootinit;
static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv));
static pv_entry_t get_pv_entry __P((void));
@@ -268,6 +271,7 @@ void
pmap_bootstrap()
{
int i;
+ int boot_pvs;
/*
* Setup RIDs. We use the bits above pmap_ridbits for a
@@ -315,6 +319,19 @@ pmap_bootstrap()
ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));
/*
+ * We need some PVs to cope with pmap_kenter() calls prior to
+ * pmap_init(). This is all a bit flaky and needs to be
+ * rethought, probably by avoiding the zone allocator
+ * entirely.
+ */
+ boot_pvs = 32768;
+ pvbootzone = &pvbootzone_store;
+ pvbootinit = (struct pv_entry *)
+ pmap_steal_memory(boot_pvs * sizeof (struct pv_entry));
+ zbootinit(pvbootzone, "PV ENTRY", sizeof (struct pv_entry),
+ pvbootinit, boot_pvs);
+
+ /*
* Set up proc0's PCB.
*/
#if 0
@@ -735,23 +752,8 @@ free_pv_entry(pv_entry_t pv)
static pv_entry_t
get_pv_entry(void)
{
- /*
- * We can get called a few times really early before
- * pmap_init() has finished allocating the pvzone (mostly as a
- * result of the call to kmem_alloc() in pmap_init(). We allow
- * a small number of entries to be allocated statically to
- * cover this.
- */
- if (!pvinit) {
-#define PV_BOOTSTRAP_NEEDED 512
- static struct pv_entry pvbootentries[PV_BOOTSTRAP_NEEDED];
- static int pvbootnext = 0;
-
- if (pvbootnext == PV_BOOTSTRAP_NEEDED)
- panic("get_pv_entry: called too many times"
- " before pmap_init is finished");
- return &pvbootentries[pvbootnext++];
- }
+ if (!pvinit)
+ return zalloc(pvbootzone);
pv_entry_count++;
if (pv_entry_high_water &&
@@ -1113,18 +1115,22 @@ pmap_kremove(vm_offset_t va)
* Used to map a range of physical addresses into kernel
* virtual address space.
*
- * The value passed in '*virt' is a suggested virtual address for
- * the mapping. Architectures which can support a direct-mapped
- * physical to virtual region can return the appropriate address
- * within that region, leaving '*virt' unchanged. Other
- * architectures should map the pages starting at '*virt' and
- * update '*virt' with the first usable address after the mapped
- * region.
+ * For now, VM is already on, we only need to map the
+ * specified memory.
*/
vm_offset_t
-pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
+pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot)
{
- return IA64_PHYS_TO_RR7(start);
+ /*
+ * XXX We should really try to use larger pagesizes here to
+ * cut down the number of PVs used.
+ */
+ while (start < end) {
+ pmap_kenter(virt, start);
+ virt += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+ return (virt);
}
/*
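
The ia64 hunks above replace the static fallback array that get_pv_entry() used before pmap_init() finished with a dedicated bootstrap zone: a block of pv_entry structures is stolen from physical memory in pmap_bootstrap() and handed to zbootinit(), and get_pv_entry() zalloc()s from it until the real pvzone exists. A minimal sketch of that bootstrap-zone pattern, with hypothetical names and sizes; it assumes the old vm_zone interface (zbootinit()/zalloc()) and pmap_steal_memory() shown in the diff:

	static vm_zone_t bootzone;		/* hypothetical example names */
	static struct vm_zone bootzone_store;
	static struct pv_entry *bootzone_items;

	static void
	pv_boot_init(int nentries)
	{
		/* Steal backing store before the VM system is up. */
		bootzone_items = (struct pv_entry *)
		    pmap_steal_memory(nentries * sizeof (struct pv_entry));
		/* Turn it into a zone usable before zinit() is available. */
		bootzone = &bootzone_store;
		zbootinit(bootzone, "PV BOOT", sizeof (struct pv_entry),
		    bootzone_items, nentries);
	}

	static pv_entry_t
	pv_boot_alloc(void)
	{
		/* Valid only until pmap_init() sets up the real pvzone. */
		return (zalloc(bootzone));
	}
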
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 4ff3321..1aca49d 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -110,7 +110,7 @@ boolean_t pmap_is_modified __P((vm_page_t m));
boolean_t pmap_ts_referenced __P((vm_page_t m));
void pmap_kenter __P((vm_offset_t va, vm_offset_t pa));
void pmap_kremove __P((vm_offset_t));
-vm_offset_t pmap_map __P((vm_offset_t *, vm_offset_t, vm_offset_t, int));
+vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
void pmap_object_init_pt __P((pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_offset_t size,
int pagelimit));
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index e332564..808f7f3 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -180,7 +180,7 @@ vm_offset_t
vm_page_startup(starta, enda, vaddr)
register vm_offset_t starta;
vm_offset_t enda;
- vm_offset_t vaddr;
+ register vm_offset_t vaddr;
{
register vm_offset_t mapped;
register struct vm_page **bucket;
@@ -242,6 +242,8 @@ vm_page_startup(starta, enda, vaddr)
*
* Note: This computation can be tweaked if desired.
*/
+ vm_page_buckets = (struct vm_page **)vaddr;
+ bucket = vm_page_buckets;
if (vm_page_bucket_count == 0) {
vm_page_bucket_count = 1;
while (vm_page_bucket_count < atop(total))
@@ -255,12 +257,12 @@ vm_page_startup(starta, enda, vaddr)
*/
new_end = end - vm_page_bucket_count * sizeof(struct vm_page *);
new_end = trunc_page(new_end);
- mapped = pmap_map(&vaddr, new_end, end,
+ mapped = round_page(vaddr);
+ vaddr = pmap_map(mapped, new_end, end,
VM_PROT_READ | VM_PROT_WRITE);
- bzero((caddr_t) mapped, end - new_end);
+ vaddr = round_page(vaddr);
+ bzero((caddr_t) mapped, vaddr - mapped);
- vm_page_buckets = (struct vm_page **)mapped;
- bucket = vm_page_buckets;
for (i = 0; i < vm_page_bucket_count; i++) {
*bucket = NULL;
bucket++;
@@ -279,15 +281,20 @@ vm_page_startup(starta, enda, vaddr)
(end - new_end)) / PAGE_SIZE;
end = new_end;
-
/*
* Initialize the mem entry structures now, and put them in the free
* queue.
*/
+ vm_page_array = (vm_page_t) vaddr;
+ mapped = vaddr;
+
+ /*
+ * Validate these addresses.
+ */
+
new_end = trunc_page(end - page_range * sizeof(struct vm_page));
- mapped = pmap_map(&vaddr, new_end, end,
+ mapped = pmap_map(mapped, new_end, end,
VM_PROT_READ | VM_PROT_WRITE);
- vm_page_array = (vm_page_t) mapped;
/*
* Clear all of the page structures
@@ -314,7 +321,7 @@ vm_page_startup(starta, enda, vaddr)
pa += PAGE_SIZE;
}
}
- return (vaddr);
+ return (mapped);
}
/*
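
With the pointer argument gone, vm_page_startup() carries the running kernel virtual address in 'vaddr' itself: it records the structure's base address first, carves the backing pages off the top of managed physical memory, maps them with pmap_map(), and treats the return value as the next free virtual address. A hedged sketch of that carve-and-map step under the restored interface; 'carve_and_map' is a hypothetical helper, not code from this commit:

	static void *
	carve_and_map(vm_offset_t *vaddr, vm_offset_t *end, vm_size_t nbytes)
	{
		vm_offset_t base, new_end, next;

		/* Take the space from the top of the managed physical range. */
		new_end = trunc_page(*end - nbytes);
		/* Map it at the running kernel VA; pmap_map() returns the
		 * first virtual address past the mapping. */
		base = round_page(*vaddr);
		next = round_page(pmap_map(base, new_end, *end,
		    VM_PROT_READ | VM_PROT_WRITE));
		bzero((caddr_t)base, next - base);
		*vaddr = next;
		*end = new_end;
		return ((void *)base);
	}

In the diff above, the hash buckets and the vm_page array are each carved this way, in that order, and the trimmed 'end' is what finally feeds the free-page queues.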