Diffstat (limited to 'sys/vm')
-rw-r--r--  sys/vm/swap_pager.c    55
-rw-r--r--  sys/vm/uma.h            5
-rw-r--r--  sys/vm/uma_core.c      18
-rw-r--r--  sys/vm/uma_int.h        7
-rw-r--r--  sys/vm/vm_map.c        14
-rw-r--r--  sys/vm/vm_pageout.c     7
6 files changed, 66 insertions, 40 deletions
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index c09dbc2..4deebd9 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -2354,8 +2354,8 @@ swapoff_one(struct swdevt *sp, struct ucred *cred)
swap_pager_swapoff(sp);
sp->sw_close(curthread, sp);
- sp->sw_id = NULL;
mtx_lock(&sw_dev_mtx);
+ sp->sw_id = NULL;
TAILQ_REMOVE(&swtailq, sp, sw_list);
nswapdev--;
if (nswapdev == 0) {
@@ -2541,13 +2541,39 @@ swapgeom_close_ev(void *arg, int flags)
g_destroy_consumer(cp);
}
+/*
+ * Add a reference to the g_consumer for an inflight transaction.
+ */
+static void
+swapgeom_acquire(struct g_consumer *cp)
+{
+
+ mtx_assert(&sw_dev_mtx, MA_OWNED);
+ cp->index++;
+}
+
+/*
+ * Remove a reference from the g_consumer. Post a close event if
+ * all references go away.
+ */
+static void
+swapgeom_release(struct g_consumer *cp, struct swdevt *sp)
+{
+
+ mtx_assert(&sw_dev_mtx, MA_OWNED);
+ cp->index--;
+ if (cp->index == 0) {
+ if (g_post_event(swapgeom_close_ev, cp, M_NOWAIT, NULL) == 0)
+ sp->sw_id = NULL;
+ }
+}
+
static void
swapgeom_done(struct bio *bp2)
{
struct swdevt *sp;
struct buf *bp;
struct g_consumer *cp;
- int destroy;
bp = bp2->bio_caller2;
cp = bp2->bio_from;
@@ -2557,16 +2583,11 @@ swapgeom_done(struct bio *bp2)
bp->b_resid = bp->b_bcount - bp2->bio_completed;
bp->b_error = bp2->bio_error;
bufdone(bp);
+ sp = bp2->bio_caller1;
mtx_lock(&sw_dev_mtx);
- destroy = ((--cp->index) == 0 && cp->private);
- if (destroy) {
- sp = bp2->bio_caller1;
- sp->sw_id = NULL;
- }
+ swapgeom_release(cp, sp);
mtx_unlock(&sw_dev_mtx);
g_destroy_bio(bp2);
- if (destroy)
- g_waitfor_event(swapgeom_close_ev, cp, M_WAITOK, NULL);
}
static void
@@ -2584,13 +2605,16 @@ swapgeom_strategy(struct buf *bp, struct swdevt *sp)
bufdone(bp);
return;
}
- cp->index++;
+ swapgeom_acquire(cp);
mtx_unlock(&sw_dev_mtx);
if (bp->b_iocmd == BIO_WRITE)
bio = g_new_bio();
else
bio = g_alloc_bio();
if (bio == NULL) {
+ mtx_lock(&sw_dev_mtx);
+ swapgeom_release(cp, sp);
+ mtx_unlock(&sw_dev_mtx);
bp->b_error = ENOMEM;
bp->b_ioflags |= BIO_ERROR;
bufdone(bp);
@@ -2630,7 +2654,12 @@ swapgeom_orphan(struct g_consumer *cp)
break;
}
}
- cp->private = (void *)(uintptr_t)1;
+ /*
+ * Drop the reference we were created with. Do it directly since
+ * we're in a special context where we don't have to queue the call
+ * to swapgeom_close_ev().
+ */
+ cp->index--;
destroy = ((sp != NULL) && (cp->index == 0));
if (destroy)
sp->sw_id = NULL;
@@ -2691,8 +2720,8 @@ swapongeom_ev(void *arg, int flags)
if (gp == NULL)
gp = g_new_geomf(&g_swap_class, "swap");
cp = g_new_consumer(gp);
- cp->index = 0; /* Number of active I/Os. */
- cp->private = NULL; /* Orphanization flag */
+ cp->index = 1; /* Number of active I/Os, plus one for being active. */
+ cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
g_attach(cp, pp);
/*
* XXX: Every time you think you can improve the margin for
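
The swap_pager.c hunks above replace the ad-hoc orphanization flag (cp->private) with plain reference counting on cp->index: the consumer is created holding one reference for being attached, every in-flight I/O takes another, and whichever release drops the count to zero posts the close event. Below is a minimal userland sketch of that lifecycle; the struct and function names are hypothetical stand-ins, not the GEOM API, and the locking that sw_dev_mtx provides in the kernel is only noted in comments.

    #include <assert.h>
    #include <stdio.h>

    struct consumer {
            int index;                      /* reference count */
    };

    static void
    close_ev(struct consumer *cp)
    {
            /* Kernel analog: swapgeom_close_ev(), queued via g_post_event(). */
            printf("close event for consumer %p\n", (void *)cp);
    }

    static void
    acquire(struct consumer *cp)
    {
            cp->index++;                    /* kernel: caller holds sw_dev_mtx */
    }

    static void
    release(struct consumer *cp)
    {
            assert(cp->index > 0);
            if (--cp->index == 0)
                    close_ev(cp);           /* last reference schedules close */
    }

    int
    main(void)
    {
            struct consumer cp = { .index = 1 };    /* attached device */

            acquire(&cp);                   /* I/O submitted */
            release(&cp);                   /* I/O completed */
            release(&cp);                   /* orphaned/detached: close fires */
            return (0);
    }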
diff --git a/sys/vm/uma.h b/sys/vm/uma.h
index ed69e19..d3e0658 100644
--- a/sys/vm/uma.h
+++ b/sys/vm/uma.h
@@ -382,7 +382,8 @@ uma_zfree(uma_zone_t zone, void *item)
* A pointer to the allocated memory or NULL on failure.
*/
-typedef void *(*uma_alloc)(uma_zone_t zone, int size, uint8_t *pflag, int wait);
+typedef void *(*uma_alloc)(uma_zone_t zone, vm_size_t size, uint8_t *pflag,
+ int wait);
/*
* Backend page free routines
@@ -395,7 +396,7 @@ typedef void *(*uma_alloc)(uma_zone_t zone, int size, uint8_t *pflag, int wait);
* Returns:
* None
*/
-typedef void (*uma_free)(void *item, int size, uint8_t pflag);
+typedef void (*uma_free)(void *item, vm_size_t size, uint8_t pflag);
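
These uma.h hunks widen the size parameter of the backend allocator hooks from int to vm_size_t. On LP64 platforms vm_size_t is 64 bits while int is 32, so a sufficiently large request routed through the old int parameter would silently truncate. A standalone illustration of the truncation follows, with size_t standing in for vm_size_t (an assumption that holds on typical LP64 targets; the out-of-range conversion to int is implementation-defined and wraps on common ABIs):

    #include <stddef.h>
    #include <stdio.h>

    /* The old-style hook took the size as an int. */
    static size_t
    seen_by_allocator(int size)
    {
            return ((size_t)size);
    }

    int
    main(void)
    {
            size_t request = (size_t)5 << 30;       /* 5 GB */

            /* On LP64 the int parameter keeps only the low 32 bits. */
            printf("requested %zu bytes, allocator saw %zu\n",
                request, seen_by_allocator((int)request));
            return (0);
    }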
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index d0df901..ee0b207 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -229,10 +229,10 @@ enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
/* Prototypes.. */
-static void *noobj_alloc(uma_zone_t, int, uint8_t *, int);
-static void *page_alloc(uma_zone_t, int, uint8_t *, int);
-static void *startup_alloc(uma_zone_t, int, uint8_t *, int);
-static void page_free(void *, int, uint8_t);
+static void *noobj_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
+static void *page_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
+static void *startup_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
+static void page_free(void *, vm_size_t, uint8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
@@ -1038,7 +1038,7 @@ out:
* the VM is ready.
*/
static void *
-startup_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
+startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
{
uma_keg_t keg;
uma_slab_t tmps;
@@ -1098,7 +1098,7 @@ startup_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
* NULL if M_NOWAIT is set.
*/
static void *
-page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
+page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
{
void *p; /* Returned page */
@@ -1120,7 +1120,7 @@ page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
* NULL if M_NOWAIT is set.
*/
static void *
-noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
+noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
{
TAILQ_HEAD(, vm_page) alloctail;
u_long npages;
@@ -1183,7 +1183,7 @@ noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
* Nothing
*/
static void
-page_free(void *mem, int size, uint8_t flags)
+page_free(void *mem, vm_size_t size, uint8_t flags)
{
struct vmem *vmem;
@@ -3269,7 +3269,7 @@ uma_zone_exhausted_nolock(uma_zone_t zone)
}
void *
-uma_large_malloc(int size, int wait)
+uma_large_malloc(vm_size_t size, int wait)
{
void *mem;
uma_slab_t slab;
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
index 1ffc7d5..ad2a405 100644
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -341,7 +341,7 @@ zone_first_keg(uma_zone_t zone)
#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
-void *uma_large_malloc(int size, int wait);
+void *uma_large_malloc(vm_size_t size, int wait);
void uma_large_free(uma_slab_t slab);
/* Lock Macros */
@@ -424,8 +424,9 @@ vsetslab(vm_offset_t va, uma_slab_t slab)
* if they can provide more efficient allocation functions. This is useful
* for using direct mapped addresses.
*/
-void *uma_small_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait);
-void uma_small_free(void *mem, int size, uint8_t flags);
+void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag,
+ int wait);
+void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
#endif /* _KERNEL */
#endif /* VM_UMA_INT_H */
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 93db8d1..46e3089 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -3989,12 +3989,10 @@ RetryLookup:;
vm_map_unlock_read(map);
return (KERN_PROTECTION_FAILURE);
}
- if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
- (entry->eflags & MAP_ENTRY_COW) &&
- (fault_type & VM_PROT_WRITE)) {
- vm_map_unlock_read(map);
- return (KERN_PROTECTION_FAILURE);
- }
+ KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
+ (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
+ (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
+ ("entry %p flags %x", entry, entry->eflags));
if ((fault_typea & VM_PROT_COPY) != 0 &&
(entry->max_protection & VM_PROT_WRITE) == 0 &&
(entry->eflags & MAP_ENTRY_COW) == 0) {
@@ -4148,10 +4146,6 @@ vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */
fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
if ((fault_type & prot) != fault_type)
return (KERN_PROTECTION_FAILURE);
- if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
- (entry->eflags & MAP_ENTRY_COW) &&
- (fault_type & VM_PROT_WRITE))
- return (KERN_PROTECTION_FAILURE);
/*
* If this page is not pageable, we have to get it for all possible
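
In the vm_map.c hunks, a runtime KERN_PROTECTION_FAILURE return for a write fault on an entry that is both user-wired and still needing its copy-on-write copy becomes a KASSERT, and the duplicate check in vm_map_lookup_locked() is dropped: the combination is now treated as a kernel invariant rather than a recoverable lookup failure. The asserted predicate, restated in isolation (flag values below are illustrative placeholders, not the definitions from vm_map.h):

    #include <assert.h>
    #include <stdbool.h>

    /* Illustrative values only; the real ones live in <vm/vm_map.h>. */
    #define MAP_ENTRY_NEEDS_COPY    0x0008
    #define MAP_ENTRY_USER_WIRED    0x0020
    #define VM_PROT_WRITE           0x02

    /*
     * True unless a write fault sees an entry that is both user-wired
     * and still needing its copy-on-write copy.
     */
    static bool
    invariant_holds(int eflags, int prot)
    {
            return ((prot & VM_PROT_WRITE) == 0 ||
                (eflags & (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
                (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY));
    }

    int
    main(void)
    {
            assert(invariant_holds(MAP_ENTRY_NEEDS_COPY, VM_PROT_WRITE));
            assert(invariant_holds(MAP_ENTRY_USER_WIRED, VM_PROT_WRITE));
            assert(invariant_holds(
                MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY, 0));
            return (0);
    }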
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 998cd37..6a56fd7 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -93,6 +93,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
+#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/rwlock.h>
@@ -170,7 +171,7 @@ static int vm_pageout_update_period;
static int defer_swap_pageouts;
static int disable_swap_pageouts;
static int lowmem_period = 10;
-static int lowmem_ticks;
+static time_t lowmem_uptime;
#if defined(NO_SWAPPING)
static int vm_swap_enabled = 0;
@@ -932,7 +933,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
* some. We rate limit to avoid thrashing.
*/
if (vmd == &vm_dom[0] && pass > 0 &&
- (ticks - lowmem_ticks) / hz >= lowmem_period) {
+ (time_uptime - lowmem_uptime) >= lowmem_period) {
/*
* Decrease registered cache sizes.
*/
@@ -943,7 +944,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
* drained above.
*/
uma_reclaim();
- lowmem_ticks = ticks;
+ lowmem_uptime = time_uptime;
}
/*
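
The vm_pageout.c change swaps the hz-based ticks counter for time_uptime when rate-limiting lowmem passes. ticks is a 32-bit counter that wraps after about 2^31/hz seconds (roughly 24.8 days at hz=1000), so a stale timestamp can make the computed interval appear negative and suppress the lowmem handler; time_uptime is a time_t in seconds and does not wrap on any realistic uptime. A userland sketch of the failure mode (two's-complement reinterpretation assumed, as on the kernel's supported targets):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            const int hz = 1000;
            uint32_t lowmem_ticks = 12345;  /* last lowmem event */
            /* 40 days later: more than 2^31 ticks have elapsed. */
            uint32_t ticks = lowmem_ticks + (uint32_t)40 * 24 * 3600 * hz;
            /* Reinterpret the difference as the old signed int math did. */
            int32_t elapsed_sec = (int32_t)(ticks - lowmem_ticks) / hz;

            /* Prints a negative value, so the >= lowmem_period test fails. */
            printf("apparent elapsed: %d s (lowmem pass suppressed)\n",
                (int)elapsed_sec);
            return (0);
    }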