author     alc <alc@FreeBSD.org>  2007-11-03 05:15:26 +0000
committer  alc <alc@FreeBSD.org>  2007-11-03 05:15:26 +0000
commit     1fd60b45c3d597ef0ed0e53e130dce9ae450f953 (patch)
tree       75eda6fe669080b9a80a8ddb4d0944f5a1f02dd0
parent     274f85e40e01e93a96695eaf47dbb4e228c7e32b (diff)
Eliminate spurious "Approaching the limit on PV entries, ..."
warnings. Specifically, whenever vm_page_alloc(9) returned NULL to
get_pv_entry(), we issued a warning regardless of the number of pv
entries in use. (Note: The older pv entry allocator in RELENG_6 does
not have this problem.)

Reported by:	Jeremy Chadwick

Eliminate the direct call to pagedaemon_wakeup() by get_pv_entry().
This was a holdover from earlier times when the page daemon was
responsible for the reclamation of pv entries.

MFC after:	5 days
-rw-r--r--  sys/amd64/amd64/pmap.c |  9
-rw-r--r--  sys/i386/i386/pmap.c   | 10
2 files changed, 8 insertions, 11 deletions
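
For context, the change reuses the existing ratecheck(9) rate limiting but ties the
warning to pv_entry_count actually exceeding pv_entry_high_water, instead of printing
whenever vm_page_alloc(9) happened to return NULL. The following standalone C program
is only a minimal userland sketch of that pattern: rc_check() is a hypothetical
stand-in that approximates ratecheck(9), and the threshold and interval values are
illustrative, not the kernel's.

/*
 * Userland sketch of the rate-limited high-water warning in get_pv_entry().
 * rc_check() mimics ratecheck(9): it returns 1 at most once per `interval'
 * seconds.  rc_check, lastprint, and the numeric values are illustrative
 * assumptions, not the kernel API or its tunables.
 */
#include <stdio.h>
#include <time.h>

static int
rc_check(time_t *last, time_t interval)
{
	time_t now = time(NULL);

	if (now - *last >= interval) {
		*last = now;
		return (1);
	}
	return (0);
}

int
main(void)
{
	time_t lastprint = 0;
	const time_t printinterval = 30;	/* seconds between warnings */
	int pv_entry_count = 0;
	const int pv_entry_high_water = 100;	/* illustrative threshold */

	/* Simulate a burst of allocations that crosses the high-water mark. */
	for (int i = 0; i < 200; i++) {
		pv_entry_count++;
		if (pv_entry_count > pv_entry_high_water)
			if (rc_check(&lastprint, printinterval))
				printf("Approaching the limit on PV entries, "
				    "consider increasing either the "
				    "vm.pmap.shpgperproc or the "
				    "vm.pmap.pv_entry_max sysctl.\n");
	}
	/* Despite 100 over-threshold allocations, the warning prints once. */
	return (0);
}
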
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index db7fc1c..bd255b9 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1732,7 +1732,10 @@ get_pv_entry(pmap_t pmap, int try)
 	PV_STAT(pv_entry_allocs++);
 	pv_entry_count++;
 	if (pv_entry_count > pv_entry_high_water)
-		pagedaemon_wakeup();
+		if (ratecheck(&lastprint, &printinterval))
+			printf("Approaching the limit on PV entries, consider "
+			    "increasing either the vm.pmap.shpgperproc or the "
+			    "vm.pmap.pv_entry_max sysctl.\n");
 	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
 	if (pc != NULL) {
 		for (field = 0; field < _NPCM; field++) {
@@ -1767,10 +1770,6 @@ get_pv_entry(pmap_t pmap, int try)
 		 * pages.  After that, if a pv chunk entry is still needed,
 		 * destroy mappings to active pages.
 		 */
-		if (ratecheck(&lastprint, &printinterval))
-			printf("Approaching the limit on PV entries, consider "
-			    "increasing sysctl vm.pmap.shpgperproc or "
-			    "vm.pmap.pv_entry_max\n");
 		PV_STAT(pmap_collect_inactive++);
 		pmap_collect(pmap, &vm_page_queues[PQ_INACTIVE]);
 		m = vm_page_alloc(NULL, colour,
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index d751861..b83fd1c 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1805,7 +1805,10 @@ get_pv_entry(pmap_t pmap, int try)
 	PV_STAT(pv_entry_allocs++);
 	pv_entry_count++;
 	if (pv_entry_count > pv_entry_high_water)
-		pagedaemon_wakeup();
+		if (ratecheck(&lastprint, &printinterval))
+			printf("Approaching the limit on PV entries, consider "
+			    "increasing either the vm.pmap.shpgperproc or the "
+			    "vm.pmap.pv_entry_max tunable.\n");
 	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
 	if (pc != NULL) {
 		for (field = 0; field < _NPCM; field++) {
@@ -1851,11 +1854,6 @@ get_pv_entry(pmap_t pmap, int try)
 		 * inactive pages.  After that, if a pv chunk entry
 		 * is still needed, destroy mappings to active pages.
 		 */
-		if (ratecheck(&lastprint, &printinterval))
-			printf("Approaching the limit on PV entries, "
-			    "consider increasing tunables "
-			    "vm.pmap.shpgperproc or "
-			    "vm.pmap.pv_entry_max\n");
 		PV_STAT(pmap_collect_inactive++);
 		pmap_collect(pmap, &vm_page_queues[PQ_INACTIVE]);
 		if (m == NULL)