summaryrefslogtreecommitdiffstats
path: root/sys/vm/vm_phys.h
diff options
context:
space:
mode:
authorkib <kib@FreeBSD.org>2013-08-07 16:36:38 +0000
committerkib <kib@FreeBSD.org>2013-08-07 16:36:38 +0000
commit8de1718b6098ce10c7adafd754331072122d1b3a (patch)
tree958d5d7311cef3751ddc3af3de19306b77bec18d /sys/vm/vm_phys.h
parenta3142db9ac39863c9280f1ec60c165e521b66fd3 (diff)
downloadFreeBSD-src-8de1718b6098ce10c7adafd754331072122d1b3a.zip
FreeBSD-src-8de1718b6098ce10c7adafd754331072122d1b3a.tar.gz
Split the pagequeues per NUMA domains, and split pagedaemon process
into threads each processing queue in a single domain. The structure of the pagedaemons and queues is kept intact, most of the changes come from the need for code to find an owning page queue for given page, calculated from the segment containing the page. The tie between NUMA domain and pagedaemon thread/pagequeue split is rather arbitrary, the multithreaded daemon could be allowed for the single-domain machines, or one domain might be split into several page domains, to further increase concurrency. Right now, each pagedaemon thread tries to reach the global target, precalculated at the start of the pass. This is not optimal, since it could cause excessive page deactivation and freeing. The code should be changed to re-check the global page deficit state in the loop after some number of iterations. The pagedaemons reach the quorum before starting the OOM, since one thread's inability to meet the target is normal for split queues. Only when all pagedaemons fail to produce enough reusable pages, OOM is started by single selected thread. Launder is modified to take into account the segments layout with regard to the region for which cleaning is performed. Based on the preliminary patch by jeff, sponsored by EMC / Isilon Storage Division. Reviewed by: alc Tested by: pho Sponsored by: The FreeBSD Foundation
Diffstat (limited to 'sys/vm/vm_phys.h')
-rw-r--r--sys/vm/vm_phys.h47
1 files changed, 47 insertions, 0 deletions
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
index 9812816..f39943c 100644
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -47,8 +47,23 @@ struct mem_affinity {
int domain;
};
+struct vm_freelist {
+ struct pglist pl;
+ int lcnt;
+};
+
+struct vm_phys_seg {
+ vm_paddr_t start;
+ vm_paddr_t end;
+ vm_page_t first_page;
+ int domain;
+ struct vm_freelist (*free_queues)[VM_NFREEPOOL][VM_NFREEORDER];
+};
+
extern struct mem_affinity *mem_affinity;
extern int vm_ndomains;
+extern struct vm_phys_seg vm_phys_segs[];
+extern int vm_phys_nsegs;
/*
* The following functions are only to be used by the virtual memory system.
@@ -58,6 +73,7 @@ vm_page_t vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
u_long alignment, vm_paddr_t boundary);
vm_page_t vm_phys_alloc_freelist_pages(int flind, int pool, int order);
vm_page_t vm_phys_alloc_pages(int pool, int order);
+boolean_t vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high);
int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
vm_memattr_t memattr);
void vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end);
@@ -70,5 +86,36 @@ void vm_phys_set_pool(int pool, vm_page_t m, int order);
boolean_t vm_phys_unfree_page(vm_page_t m);
boolean_t vm_phys_zero_pages_idle(void);
+/*
+ * vm_phys_domain:
+ *
+ * Return the memory domain the page belongs to.
+ */
+static inline struct vm_domain *
+vm_phys_domain(vm_page_t m)
+{
+#if MAXMEMDOM > 1
+ int domn, segind;
+
+ /* XXXKIB try to assert that the page is managed */
+ segind = m->segind;
+ KASSERT(segind < vm_phys_nsegs, ("segind %d m %p", segind, m));
+ domn = vm_phys_segs[segind].domain;
+ KASSERT(domn < vm_ndomains, ("domain %d m %p", domn, m));
+ return (&vm_dom[domn]);
+#else
+ return (&vm_dom[0]);
+#endif
+}
+
+static inline void
+vm_phys_freecnt_adj(vm_page_t m, int adj)
+{
+
+ mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
+ cnt.v_free_count += adj;
+ vm_phys_domain(m)->vmd_free_count += adj;
+}
+
#endif /* _KERNEL */
#endif /* !_VM_PHYS_H_ */
OpenPOWER on IntegriCloud