From 01549fd11d3bc0ad90ac3bf29fa51ab279360b55 Mon Sep 17 00:00:00 2001
From: bde
Date: Sat, 12 Oct 1996 20:36:15 +0000
Subject: Cleaned up:
- fixed a sloppy common-style declaration.
- removed an unused macro.
- moved once-used macros to the one file where they are used.
- removed unused forward struct declarations.
- removed __pure.
- declared inline functions as inline in their prototype as well as in
  their definition (gcc unfortunately allows the prototype to be
  inconsistent).
- staticized.

(A short stand-alone sketch of the common-declaration and inline-prototype
points follows the patch.)
---
 sys/amd64/amd64/pmap.c   | 18 ++++++++++++------
 sys/amd64/include/pmap.h | 18 +++---------------
 sys/i386/i386/pmap.c     | 18 ++++++++++++------
 sys/i386/include/pmap.h  | 18 +++---------------
 4 files changed, 30 insertions(+), 42 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 1e7c4bb..62d0688 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.122 1996/09/28 22:37:38 dyson Exp $
+ * $Id: pmap.c,v 1.123 1996/10/09 19:47:19 bde Exp $
  */
 
 /*
@@ -133,6 +133,9 @@ static void init_pv_entries __P((int));
 #define pte_prot(m, p) (protection_codes[p])
 static int protection_codes[8];
 
+#define pa_index(pa) atop((pa) - vm_first_phys)
+#define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
+
 static struct pmap kernel_pmap_store;
 pmap_t kernel_pmap;
 
@@ -165,6 +168,7 @@ static int npvvapg;
  */
 pt_entry_t *CMAP1;
 static pt_entry_t *CMAP2, *ptmmap;
+static pv_table_t *pv_table;
 caddr_t CADDR1, ptvmmap;
 static caddr_t CADDR2;
 static pt_entry_t *msgbufmap;
@@ -173,14 +177,14 @@ struct msgbuf *msgbufp;
 pt_entry_t *PMAP1;
 unsigned *PADDR1;
 
-static void free_pv_entry __P((pv_entry_t pv));
+static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv));
 static unsigned * get_ptbase __P((pmap_t pmap));
 static pv_entry_t get_pv_entry __P((void));
 static void i386_protection_init __P((void));
 static void pmap_alloc_pv_entry __P((void));
 static void pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));
-static int pmap_is_managed __P((vm_offset_t pa));
+static PMAP_INLINE int pmap_is_managed __P((vm_offset_t pa));
 static void pmap_remove_all __P((vm_offset_t pa));
 static vm_page_t pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
         vm_offset_t pa, vm_page_t mpte));
@@ -199,9 +203,11 @@ static int pmap_release_free_page __P((pmap_t pmap, vm_page_t p));
 static vm_page_t _pmap_allocpte __P((pmap_t pmap, unsigned ptepindex));
 static unsigned * pmap_pte_quick __P((pmap_t pmap, vm_offset_t va));
 static vm_page_t pmap_page_alloc __P((vm_object_t object, vm_pindex_t pindex));
+static vm_page_t pmap_page_lookup __P((vm_object_t object, vm_pindex_t pindex));
 static PMAP_INLINE void pmap_lock __P((pmap_t pmap));
 static PMAP_INLINE void pmap_unlock __P((pmap_t pmap));
 static void pmap_lock2 __P((pmap_t pmap1, pmap_t pmap2));
+static int pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
 
 #define PDSTACKMAX 6
 static vm_offset_t pdstack[PDSTACKMAX];
@@ -524,7 +530,7 @@ pmap_pte(pmap, va)
  * to do an entire invltlb for checking a single mapping.
  */
-unsigned *
+static unsigned *
 pmap_pte_quick(pmap, va)
         register pmap_t pmap;
         vm_offset_t va;
@@ -694,7 +700,7 @@ pmap_page_alloc(object, pindex)
         return m;
 }
 
-vm_page_t
+static vm_page_t
 pmap_page_lookup(object, pindex)
         vm_object_t object;
         vm_pindex_t pindex;
@@ -782,7 +788,7 @@ pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
  * After removing a page table entry, this routine is used to
  * conditionally free the page, and manage the hold/wire counts.
  */
-int
+static int
 pmap_unuse_pt(pmap, va, mpte)
         pmap_t pmap;
         vm_offset_t va;
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 2c4e469..03e58d5 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -42,13 +42,12 @@
  *
  * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
  * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
- * $Id: pmap.h,v 1.42 1996/07/30 03:08:57 dyson Exp $
+ * $Id: pmap.h,v 1.43 1996/09/08 16:57:45 dyson Exp $
  */
 
 #ifndef _MACHINE_PMAP_H_
 #define _MACHINE_PMAP_H_
 
-
 /*
  * Page-directory and page-table entires follow this format, with a few
  * of the fields not present here and there, depending on a lot of things.
@@ -162,8 +161,6 @@ pmap_kextract(vm_offset_t va)
 }
 #endif
 
-struct vm_page;
-
 /*
  * Pmap stuff
  */
@@ -193,7 +190,6 @@ typedef struct pmap *pmap_t;
 extern pmap_t kernel_pmap;
 #endif
 
-
 /*
  * For each vm_page_t, there is a list of all currently valid virtual
  * mappings of that page. An entry is a pv_entry_t, the list is pv_table.
@@ -218,25 +214,17 @@ extern pt_entry_t *CMAP1;
 extern vm_offset_t avail_end;
 extern vm_offset_t avail_start;
 extern vm_offset_t phys_avail[];
-pv_table_t *pv_table;
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
-#define pa_index(pa) atop(pa - vm_first_phys)
-#define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
-
-#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
-
-struct pcb;
-
 void pmap_bootstrap __P(( vm_offset_t, vm_offset_t));
 pmap_t pmap_kernel __P((void));
 void *pmap_mapdev __P((vm_offset_t, vm_size_t));
-unsigned * __pure pmap_pte __P((pmap_t, vm_offset_t)) __pure2;
-int pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
+unsigned *pmap_pte __P((pmap_t, vm_offset_t)) __pure2;
 vm_page_t pmap_use_pt __P((pmap_t, vm_offset_t));
 
 #endif /* KERNEL */
+
 #endif /* !LOCORE */
 
 #endif /* !_MACHINE_PMAP_H_ */
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 1e7c4bb..62d0688 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -39,7 +39,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
- * $Id: pmap.c,v 1.122 1996/09/28 22:37:38 dyson Exp $
+ * $Id: pmap.c,v 1.123 1996/10/09 19:47:19 bde Exp $
  */
 
 /*
@@ -133,6 +133,9 @@ static void init_pv_entries __P((int));
 #define pte_prot(m, p) (protection_codes[p])
 static int protection_codes[8];
 
+#define pa_index(pa) atop((pa) - vm_first_phys)
+#define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
+
 static struct pmap kernel_pmap_store;
 pmap_t kernel_pmap;
 
@@ -165,6 +168,7 @@ static int npvvapg;
  */
 pt_entry_t *CMAP1;
 static pt_entry_t *CMAP2, *ptmmap;
+static pv_table_t *pv_table;
 caddr_t CADDR1, ptvmmap;
 static caddr_t CADDR2;
 static pt_entry_t *msgbufmap;
@@ -173,14 +177,14 @@ struct msgbuf *msgbufp;
 pt_entry_t *PMAP1;
 unsigned *PADDR1;
 
-static void free_pv_entry __P((pv_entry_t pv));
+static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv));
 static unsigned * get_ptbase __P((pmap_t pmap));
 static pv_entry_t get_pv_entry __P((void));
 static void i386_protection_init __P((void));
 static void pmap_alloc_pv_entry __P((void));
 static void pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));
-static int pmap_is_managed __P((vm_offset_t pa));
+static PMAP_INLINE int pmap_is_managed __P((vm_offset_t pa));
 static void pmap_remove_all __P((vm_offset_t pa));
 static vm_page_t pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
         vm_offset_t pa, vm_page_t mpte));
@@ -199,9 +203,11 @@ static int pmap_release_free_page __P((pmap_t pmap, vm_page_t p));
 static vm_page_t _pmap_allocpte __P((pmap_t pmap, unsigned ptepindex));
 static unsigned * pmap_pte_quick __P((pmap_t pmap, vm_offset_t va));
 static vm_page_t pmap_page_alloc __P((vm_object_t object, vm_pindex_t pindex));
+static vm_page_t pmap_page_lookup __P((vm_object_t object, vm_pindex_t pindex));
 static PMAP_INLINE void pmap_lock __P((pmap_t pmap));
 static PMAP_INLINE void pmap_unlock __P((pmap_t pmap));
 static void pmap_lock2 __P((pmap_t pmap1, pmap_t pmap2));
+static int pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
 
 #define PDSTACKMAX 6
 static vm_offset_t pdstack[PDSTACKMAX];
@@ -524,7 +530,7 @@ pmap_pte(pmap, va)
  * to do an entire invltlb for checking a single mapping.
  */
-unsigned *
+static unsigned *
 pmap_pte_quick(pmap, va)
         register pmap_t pmap;
         vm_offset_t va;
@@ -694,7 +700,7 @@ pmap_page_alloc(object, pindex)
         return m;
 }
 
-vm_page_t
+static vm_page_t
 pmap_page_lookup(object, pindex)
         vm_object_t object;
         vm_pindex_t pindex;
@@ -782,7 +788,7 @@ pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
  * After removing a page table entry, this routine is used to
  * conditionally free the page, and manage the hold/wire counts.
  */
-int
+static int
 pmap_unuse_pt(pmap, va, mpte)
         pmap_t pmap;
         vm_offset_t va;
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
index 2c4e469..03e58d5 100644
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -42,13 +42,12 @@
  *
  * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
  * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
- * $Id: pmap.h,v 1.42 1996/07/30 03:08:57 dyson Exp $
+ * $Id: pmap.h,v 1.43 1996/09/08 16:57:45 dyson Exp $
  */
 
 #ifndef _MACHINE_PMAP_H_
 #define _MACHINE_PMAP_H_
 
-
 /*
  * Page-directory and page-table entires follow this format, with a few
  * of the fields not present here and there, depending on a lot of things.
@@ -162,8 +161,6 @@ pmap_kextract(vm_offset_t va)
 }
 #endif
 
-struct vm_page;
-
 /*
  * Pmap stuff
  */
@@ -193,7 +190,6 @@ typedef struct pmap *pmap_t;
 extern pmap_t kernel_pmap;
 #endif
 
-
 /*
  * For each vm_page_t, there is a list of all currently valid virtual
  * mappings of that page. An entry is a pv_entry_t, the list is pv_table.
@@ -218,25 +214,17 @@ extern pt_entry_t *CMAP1;
 extern vm_offset_t avail_end;
 extern vm_offset_t avail_start;
 extern vm_offset_t phys_avail[];
-pv_table_t *pv_table;
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
-#define pa_index(pa) atop(pa - vm_first_phys)
-#define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
-
-#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
-
-struct pcb;
-
 void pmap_bootstrap __P(( vm_offset_t, vm_offset_t));
 pmap_t pmap_kernel __P((void));
 void *pmap_mapdev __P((vm_offset_t, vm_size_t));
-unsigned * __pure pmap_pte __P((pmap_t, vm_offset_t)) __pure2;
-int pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
+unsigned *pmap_pte __P((pmap_t, vm_offset_t)) __pure2;
 vm_page_t pmap_use_pt __P((pmap_t, vm_offset_t));
 
 #endif /* KERNEL */
+
 #endif /* !LOCORE */
 
 #endif /* !_MACHINE_PMAP_H_ */
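For anyone skimming the log, the two subtler items in the summary are the
"sloppy common-style declaration" and the PMAP_INLINE prototypes.  The
stand-alone file below is an illustrative sketch only -- it is not code from
the FreeBSD tree, and MY_INLINE, private_table and is_managed are made-up
stand-ins for PMAP_INLINE, pv_table and pmap_is_managed:

/*
 * sketch.c -- illustrative only; compiles as a single file with gcc.
 */
#include <stdio.h>

/*
 * (1) Common-style declaration.  The old pmap.h said
 *     "pv_table_t *pv_table;" with no "extern": a tentative definition.
 *     Every .c file including the header emitted its own common symbol
 *     for pv_table, and the linker silently merged them, hiding genuine
 *     multiple-definition errors.  A header should only declare
 *     ("extern int *table;") and leave the single definition to one .c
 *     file -- or, since only pmap.c uses pv_table, make it static there:
 */
static int *private_table;              /* one file-local definition */

/*
 * (2) Inline consistency.  gcc accepts a prototype that omits "inline"
 *     even when the definition carries it, so the commit spells the
 *     qualifier (via PMAP_INLINE) in the prototype as well:
 */
#define MY_INLINE __inline              /* stand-in for PMAP_INLINE */

static MY_INLINE int is_managed(unsigned pa);   /* inline in prototype... */

static MY_INLINE int
is_managed(unsigned pa)                         /* ...and in definition */
{
        return pa != 0;                 /* toy body for illustration */
}

int
main(void)
{
        private_table = NULL;
        printf("%d\n", is_managed(0x1000));    /* prints 1 */
        return 0;
}

The same shape explains the header hunks above: pv_table's declaration left
pmap.h entirely, its definition became static in pmap.c, and the inline
helpers gained the PMAP_INLINE qualifier on their prototypes.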