author     benno <benno@FreeBSD.org>    2002-02-14 01:39:11 +0000
committer  benno <benno@FreeBSD.org>    2002-02-14 01:39:11 +0000
commit     8c67ca76f79f837ceff85f7b9beb68cb784aaadd (patch)
tree       ea22ffca9020aeeb7d70a0afc75a11a1200af23b    /sys/powerpc/include/pmap.h
parent     c745bc3937461deb0149ef098ea50897e1e5c0a1 (diff)
Complete rework of the PowerPC pmap and a number of other bits in the early
boot sequence.

The new pmap.c is based on NetBSD's newer pmap.c (for the mpc6xx processors),
which is 70% faster than the older code that the original pmap.c was based on.
It has also been based on the framework established by jake's initial sparc64
pmap.c. There is no change to how far the kernel gets (it makes it to the
mountroot prompt in psim) but the new pmap code is a lot cleaner.

Obtained from:	NetBSD (pmap code)
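The core of the reworked header is the pvo_entry structure, which replaces the
old pv_entry: every mapping is linked onto two lists at once, the per-physical-
page list (pvo_vlink, headed by md_page's mdpg_pvoh) and the overflow list for
its page table entry group (pvo_olink). Below is a minimal, self-contained
sketch of that double linkage; the demo_* names, the pvo_demo_enter() helper
and the simplified field types are assumptions made for illustration, not code
from this commit.

/*
 * Illustration only: a stripped-down model of how one pvo_entry is
 * threaded onto both lists at once.  Field types are simplified (no
 * real PTE) and pvo_demo_enter() is a hypothetical helper.
 */
#include <sys/queue.h>
#include <stdio.h>

struct demo_pvo_entry {
	LIST_ENTRY(demo_pvo_entry) pvo_vlink;	/* link on the physical page */
	LIST_ENTRY(demo_pvo_entry) pvo_olink;	/* link on the overflow list */
	unsigned long pvo_vaddr;		/* VA of the mapping */
};
LIST_HEAD(demo_pvo_head, demo_pvo_entry);

static void
pvo_demo_enter(struct demo_pvo_head *pg_head, struct demo_pvo_head *ovf_head,
    struct demo_pvo_entry *pvo, unsigned long va)
{
	pvo->pvo_vaddr = va;
	LIST_INSERT_HEAD(pg_head, pvo, pvo_vlink);   /* reachable from the page */
	LIST_INSERT_HEAD(ovf_head, pvo, pvo_olink);  /* reachable from the PTE group */
}

int
main(void)
{
	struct demo_pvo_head page_head, ovfl_head;
	struct demo_pvo_entry e, *p;

	LIST_INIT(&page_head);
	LIST_INIT(&ovfl_head);
	pvo_demo_enter(&page_head, &ovfl_head, &e, 0xdeadb000UL);
	LIST_FOREACH(p, &page_head, pvo_vlink)
		printf("mapping at va 0x%lx\n", p->pvo_vaddr);
	return (0);
}
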
Diffstat (limited to 'sys/powerpc/include/pmap.h')
-rw-r--r--  sys/powerpc/include/pmap.h | 118
1 file changed, 34 insertions(+), 84 deletions(-)
diff --git a/sys/powerpc/include/pmap.h b/sys/powerpc/include/pmap.h
index ce744d9..080c5a3 100644
--- a/sys/powerpc/include/pmap.h
+++ b/sys/powerpc/include/pmap.h
@@ -35,102 +35,52 @@
#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_
-#include <machine/pte.h>
-
-/*
- * Segment registers
- */
-#ifndef LOCORE
-typedef u_int sr_t;
-#endif /* LOCORE */
-#define SR_TYPE 0x80000000
-#define SR_SUKEY 0x40000000
-#define SR_PRKEY 0x20000000
-#define SR_VSID 0x00ffffff
-
-#ifndef LOCORE
-
-struct pv_entry {
- struct pv_entry *pv_next; /* Linked list of mappings */
- int pv_idx; /* Index into ptable */
- vm_offset_t pv_va; /* virtual address of mapping */
+#include <machine/sr.h>
+
+struct pmap {
+ u_int pm_sr[16];
+ u_int pm_active;
+ u_int pm_context;
+ u_int pm_count;
+ struct pmap_statistics pm_stats;
};
-struct md_page {
- int pv_list_count;
- int pv_flags;
- TAILQ_HEAD(,pv_entry) pv_list;
-};
+typedef struct pmap *pmap_t;
-/*
- * Pmap stuff
- */
-struct pmap {
- sr_t pm_sr[16]; /* segments used in this pmap */
- int pm_refs; /* ref count */
- struct pmap_statistics pm_stats; /* pmap statistics */
+struct pvo_entry {
+ LIST_ENTRY(pvo_entry) pvo_vlink; /* Link to common virt page */
+ LIST_ENTRY(pvo_entry) pvo_olink; /* Link to overflow entry */
+ struct pte pvo_pte; /* PTE */
+ pmap_t pvo_pmap; /* Owning pmap */
+ vm_offset_t pvo_vaddr; /* VA of entry */
};
+LIST_HEAD(pvo_head, pvo_entry);
-typedef struct pmap *pmap_t;
-
-typedef struct pv_entry *pv_entry_t;
+struct md_page {
+ u_int mdpg_attrs;
+ struct pvo_head mdpg_pvoh;
+};
-#ifdef _KERNEL
+extern struct pmap kernel_pmap_store;
+#define kernel_pmap (&kernel_pmap_store)
-#define pmap_clear_modify(pg) (ptemodify((pg), PTE_CHG, 0))
-#define pmap_clear_reference(pg) (ptemodify((pg), PTE_REF, 0))
-#define pmap_is_modified(pg) (ptebits((pg), PTE_CHG))
-#define pmap_is_referenced(pg) (ptebits((pg), PTE_REF))
-#define pmap_unwire(pm, va)
+#define pmap_resident_count(pm) (pm->pm_stats.resident_count)
-#define pmap_phys_address(x) (x)
+#ifdef _KERNEL
-#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+void pmap_bootstrap(vm_offset_t, vm_offset_t);
+vm_offset_t pmap_kextract(vm_offset_t);
-extern pmap_t kernel_pmap;
+int pmap_pte_spill(vm_offset_t);
-extern vm_offset_t avail_end;
-extern vm_offset_t avail_start;
-extern vm_offset_t phys_avail[];
-extern vm_offset_t virtual_avail;
-extern vm_offset_t virtual_end;
+extern vm_offset_t avail_start;
+extern vm_offset_t avail_end;
+extern vm_offset_t phys_avail[];
+extern vm_offset_t virtual_avail;
+extern vm_offset_t virtual_end;
-void pmap_bootstrap __P((void));
-void *pmap_mapdev __P((vm_offset_t, vm_size_t));
-void pmap_setavailmem __P((u_int kernelstart, u_int kernelend));
-vm_offset_t pmap_steal_memory __P((vm_size_t));
-boolean_t ptemodify __P((struct vm_page *, u_int, u_int));
-int ptebits __P((struct vm_page *, int));
+extern vm_offset_t msgbuf_phys;
-#if 0
-#define PMAP_NEED_PROCWR
-void pmap_procwr __P((struct proc *, vaddr_t, size_t));
#endif
-/*
- * Alternate mapping hooks for pool pages. Avoids thrashing the TLB.
- *
- * Note: This won't work if we have more memory than can be direct-mapped
- * VA==PA all at once. But pmap_copy_page() and pmap_zero_page() will have
- * this problem, too.
- */
-#define PMAP_MAP_POOLPAGE(pa) (pa)
-#define PMAP_UNMAP_POOLPAGE(pa) (pa)
-
-#define vtophys(va) pmap_kextract(((vm_offset_t) (va)))
-
-extern pte_t PTmap[];
-
-#define vtopte(x) (PTmap + powerpc_btop(x))
-
-static __inline vm_offset_t
-pmap_kextract(vm_offset_t va)
-{
- /* XXX: coming soon... */
- return (0);
-}
-
-#endif /* _KERNEL */
-#endif /* LOCORE */
-
-#endif /* _MACHINE_PMAP_H_ */
+#endif /* !_MACHINE_PMAP_H_ */
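
For orientation, the hunks above also show the pattern the new header uses for
the kernel pmap: it is exported as a macro over a statically allocated
kernel_pmap_store, and pmap_resident_count() simply reads the statistics block.
The following is a tiny userland model of that pattern; the demo_pmap structure
is trimmed down and main() is purely illustrative, so none of this is code from
the commit.

#include <stdio.h>

/* Trimmed-down model of the header's kernel_pmap / resident-count pattern. */
struct demo_pmap_statistics {
	long resident_count;		/* pages currently mapped */
};

struct demo_pmap {
	unsigned int pm_sr[16];		/* segment registers */
	unsigned int pm_count;		/* reference count */
	struct demo_pmap_statistics pm_stats;
};

/* The kernel pmap lives in static storage; the macro hides the address-of. */
static struct demo_pmap kernel_pmap_store;
#define kernel_pmap		(&kernel_pmap_store)

#define pmap_resident_count(pm)	((pm)->pm_stats.resident_count)

int
main(void)
{
	kernel_pmap->pm_stats.resident_count = 42;
	printf("kernel pmap resident pages: %ld\n",
	    (long)pmap_resident_count(kernel_pmap));
	return (0);
}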