author		mjg <mjg@FreeBSD.org>	2017-03-14 20:43:04 +0000
committer	mjg <mjg@FreeBSD.org>	2017-03-14 20:43:04 +0000
commit		3edf8a9d08485cf11c17af11e7852d41f8edea23 (patch)
tree		3a52f54c3221a41c2827b86fcf9b8cfa4136dae9
parent		57f28a25af7dd32fc8adea80c5ebb2b237cc86a6 (diff)
MFC r312724,r312901,r312902:

hwpmc: partially depessimize munmap handling if the module is not loaded

HWPMC_HOOKS is enabled in GENERIC and triggers work that is avoidable in
the common (module not loaded) case. In particular this avoids the
permission checks and the lock downgrade even when single-threaded, and
in cases where an executable mapping is found the pmc sx lock is no
longer bounced.

Note this is a band-aid.

==

hwpmc: partially depessimize mmap handling if the module is not loaded

In particular this means the pmc sx lock is no longer taken when an
executable mapping succeeds.

==

hwpmc: annotate pmc_hook and pmc_intr as __read_mostly
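For context, the fast-path pattern applied by this change can be sketched
outside the kernel as follows. This is a minimal, hypothetical example:
the names example_hook, EXAMPLE_HOOK_INSTALLED, example_handler and
example_event are not from the FreeBSD sources, and __read_mostly /
__predict_false are reduced to portable stand-ins so the sketch builds in
userland. The idea is the same as in the patch: check the hook pointer
with a predicted-not-taken branch before doing any preparatory work, so
the common module-not-loaded case pays only that single branch.

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for the kernel annotations used in the patch. */
#define __read_mostly            /* kernel: places the object in a rarely written section */
#define __predict_false(exp)     __builtin_expect((exp), 0)

/* Hook pointer; stays NULL while the "module" is not loaded. */
static int (*example_hook)(int event, void *arg) __read_mostly = NULL;

/* Quick check performed before any preparatory work. */
#define EXAMPLE_HOOK_INSTALLED() __predict_false(example_hook != NULL)

static int
example_handler(int event, void *arg)
{

	printf("hook called: event=%d arg=%p\n", event, arg);
	return (0);
}

static void
example_event(int event)
{

	if (EXAMPLE_HOOK_INSTALLED()) {
		/* Argument preparation happens only on the slow path. */
		void *arg = NULL;

		(void)(*example_hook)(event, arg);
	}
	/* Common case: a single predicted-not-taken branch, nothing else. */
}

int
main(void)
{

	example_event(1);		/* hook not installed: no work done */
	example_hook = example_handler;
	example_event(2);		/* hook installed: handler runs */
	return (0);
}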
-rw-r--r--	sys/kern/kern_pmc.c	 4
-rw-r--r--	sys/kern/vfs_vnops.c	10
-rw-r--r--	sys/sys/pmckern.h	 3
-rw-r--r--	sys/vm/vm_mmap.c	49
4 files changed, 39 insertions, 27 deletions
diff --git a/sys/kern/kern_pmc.c b/sys/kern/kern_pmc.c
index 09dc6ed..43943da 100644
--- a/sys/kern/kern_pmc.c
+++ b/sys/kern/kern_pmc.c
@@ -59,10 +59,10 @@ MALLOC_DEFINE(M_PMCHOOKS, "pmchooks", "Memory space for PMC hooks");
const int pmc_kernel_version = PMC_KERNEL_VERSION;
/* Hook variable. */
-int (*pmc_hook)(struct thread *td, int function, void *arg) = NULL;
+int __read_mostly (*pmc_hook)(struct thread *td, int function, void *arg) = NULL;
/* Interrupt handler */
-int (*pmc_intr)(int cpu, struct trapframe *tf) = NULL;
+int __read_mostly (*pmc_intr)(int cpu, struct trapframe *tf) = NULL;
/* Bitmask of CPUs requiring servicing at hardclock time */
volatile cpuset_t pmc_cpumask;
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index f9da22b..601f3f2 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -2479,10 +2479,12 @@ vn_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
}
#ifdef HWPMC_HOOKS
/* Inform hwpmc(4) if an executable is being mapped. */
- if (error == 0 && (prot & VM_PROT_EXECUTE) != 0) {
- pkm.pm_file = vp;
- pkm.pm_address = (uintptr_t) *addr;
- PMC_CALL_HOOK(td, PMC_FN_MMAP, (void *) &pkm);
+ if (PMC_HOOK_INSTALLED(PMC_FN_MMAP)) {
+ if ((prot & VM_PROT_EXECUTE) != 0 && error == 0) {
+ pkm.pm_file = vp;
+ pkm.pm_address = (uintptr_t) *addr;
+ PMC_CALL_HOOK(td, PMC_FN_MMAP, (void *) &pkm);
+ }
}
#endif
return (error);
diff --git a/sys/sys/pmckern.h b/sys/sys/pmckern.h
index 21b7ee7..dd197fd 100644
--- a/sys/sys/pmckern.h
+++ b/sys/sys/pmckern.h
@@ -174,6 +174,9 @@ extern const int pmc_kernel_version;
/* PMC soft per cpu trapframe */
extern struct trapframe pmc_tf[MAXCPU];
+/* Quick check if preparatory work is necessary */
+#define PMC_HOOK_INSTALLED(cmd) __predict_false(pmc_hook != NULL)
+
/* Hook invocation; for use within the kernel */
#define PMC_CALL_HOOK(t, cmd, arg) \
do { \
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index ada71f0..db0a3bd 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -500,6 +500,7 @@ kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
#ifdef HWPMC_HOOKS
struct pmckern_map_out pkm;
vm_map_entry_t entry;
+ bool pmc_handled;
#endif
vm_offset_t addr;
vm_size_t pageoff;
@@ -524,20 +525,24 @@ kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
return (EINVAL);
vm_map_lock(map);
#ifdef HWPMC_HOOKS
- /*
- * Inform hwpmc if the address range being unmapped contains
- * an executable region.
- */
- pkm.pm_address = (uintptr_t) NULL;
- if (vm_map_lookup_entry(map, addr, &entry)) {
- for (;
- entry != &map->header && entry->start < addr + size;
- entry = entry->next) {
- if (vm_map_check_protection(map, entry->start,
- entry->end, VM_PROT_EXECUTE) == TRUE) {
- pkm.pm_address = (uintptr_t) addr;
- pkm.pm_size = (size_t) size;
- break;
+ pmc_handled = false;
+ if (PMC_HOOK_INSTALLED(PMC_FN_MUNMAP)) {
+ pmc_handled = true;
+ /*
+ * Inform hwpmc if the address range being unmapped contains
+ * an executable region.
+ */
+ pkm.pm_address = (uintptr_t) NULL;
+ if (vm_map_lookup_entry(map, addr, &entry)) {
+ for (;
+ entry != &map->header && entry->start < addr + size;
+ entry = entry->next) {
+ if (vm_map_check_protection(map, entry->start,
+ entry->end, VM_PROT_EXECUTE) == TRUE) {
+ pkm.pm_address = (uintptr_t) addr;
+ pkm.pm_size = (size_t) size;
+ break;
+ }
}
}
}
@@ -545,14 +550,16 @@ kern_munmap(struct thread *td, uintptr_t addr0, size_t size)
vm_map_delete(map, addr, addr + size);
#ifdef HWPMC_HOOKS
- /* downgrade the lock to prevent a LOR with the pmc-sx lock */
- vm_map_lock_downgrade(map);
- if (pkm.pm_address != (uintptr_t) NULL)
- PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
- vm_map_unlock_read(map);
-#else
- vm_map_unlock(map);
+ if (__predict_false(pmc_handled)) {
+ /* downgrade the lock to prevent a LOR with the pmc-sx lock */
+ vm_map_lock_downgrade(map);
+ if (pkm.pm_address != (uintptr_t) NULL)
+ PMC_CALL_HOOK(td, PMC_FN_MUNMAP, (void *) &pkm);
+ vm_map_unlock_read(map);
+ } else
#endif
+ vm_map_unlock(map);
+
/* vm_map_delete returns nothing but KERN_SUCCESS anyway */
return (0);
}