From abef15cd813ec731ce02f90e20feb9e29ed750e9 Mon Sep 17 00:00:00 2001
From: kib
Date: Tue, 4 Aug 2015 12:33:51 +0000
Subject: Copy the fencing of the algorithm to do lock-less update and reading
 of the timehands, from the kern_tc.c implementation to vdso.

Add comments giving hints on where to look for the explanation of the
algorithm.

To compensate for the removal of rmb() in userspace binuptime(), add an
explicit lfence instruction before rdtsc.  On i386, add the usual
complications to detect SSE2 presence; assume that old CPUs which do not
implement SSE2 also execute rdtsc almost in order.

Reviewed by:	alc, bde (previous version)
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
---
 lib/libc/amd64/sys/__vdso_gettc.c  | 16 +++++++++--
 lib/libc/i386/sys/__vdso_gettc.c   | 58 ++++++++++++++++++++++++++++++++++++--
 lib/libc/sys/__vdso_gettimeofday.c | 25 ++++++++--------
 3 files changed, 83 insertions(+), 16 deletions(-)

diff --git a/lib/libc/amd64/sys/__vdso_gettc.c b/lib/libc/amd64/sys/__vdso_gettc.c
index c6f2dfb..1899b21 100644
--- a/lib/libc/amd64/sys/__vdso_gettc.c
+++ b/lib/libc/amd64/sys/__vdso_gettc.c
@@ -36,19 +36,29 @@ __FBSDID("$FreeBSD$");
 static u_int
 __vdso_gettc_low(const struct vdso_timehands *th)
 {
-	uint32_t rv;
+	u_int rv;
 
-	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
+	__asm __volatile("lfence; rdtsc; shrd %%cl, %%edx, %0"
 	    : "=a" (rv) : "c" (th->th_x86_shift) : "edx");
 	return (rv);
 }
 
+static u_int
+__vdso_rdtsc32(void)
+{
+	u_int rv;
+
+	__asm __volatile("lfence;rdtsc" : "=a" (rv) : : "edx");
+	return (rv);
+}
+
 #pragma weak __vdso_gettc
 u_int
 __vdso_gettc(const struct vdso_timehands *th)
 {
 
-	return (th->th_x86_shift > 0 ? __vdso_gettc_low(th) : rdtsc32());
+	return (th->th_x86_shift > 0 ? __vdso_gettc_low(th) :
+	    __vdso_rdtsc32());
 }
 
 #pragma weak __vdso_gettimekeep
diff --git a/lib/libc/i386/sys/__vdso_gettc.c b/lib/libc/i386/sys/__vdso_gettc.c
index c6f2dfb..1454f16 100644
--- a/lib/libc/i386/sys/__vdso_gettc.c
+++ b/lib/libc/i386/sys/__vdso_gettc.c
@@ -31,24 +31,78 @@ __FBSDID("$FreeBSD$");
 #include <sys/time.h>
 #include <sys/vdso.h>
 #include <machine/cpufunc.h>
+#include <machine/specialreg.h>
 #include "libc_private.h"
 
+static int lfence_works = -1;
+
+static int
+get_lfence_usage(void)
+{
+	u_int cpuid_supported, p[4];
+
+	if (lfence_works == -1) {
+		__asm __volatile(
+		    "	pushfl\n"
+		    "	popl	%%eax\n"
+		    "	movl	%%eax,%%ecx\n"
+		    "	xorl	$0x200000,%%eax\n"
+		    "	pushl	%%eax\n"
+		    "	popfl\n"
+		    "	pushfl\n"
+		    "	popl	%%eax\n"
+		    "	xorl	%%eax,%%ecx\n"
+		    "	je	1f\n"
+		    "	movl	$1,%0\n"
+		    "	jmp	2f\n"
+		    "1:	movl	$0,%0\n"
+		    "2:\n"
+		    : "=r" (cpuid_supported) : : "eax", "ecx");
+		if (cpuid_supported) {
+			__asm __volatile(
+			    "	pushl	%%ebx\n"
+			    "	cpuid\n"
+			    "	movl	%%ebx,%1\n"
+			    "	popl	%%ebx\n"
+			    : "=a" (p[0]), "=r" (p[1]), "=c" (p[2]), "=d" (p[3])
+			    : "0" (0x1));
+			lfence_works = (p[3] & CPUID_SSE2) != 0;
+		} else
+			lfence_works = 0;
+	}
+	return (lfence_works);
+}
+
 static u_int
 __vdso_gettc_low(const struct vdso_timehands *th)
 {
-	uint32_t rv;
+	u_int rv;
 
+	if (get_lfence_usage() == 1)
+		lfence();
 	__asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
 	    : "=a" (rv) : "c" (th->th_x86_shift) : "edx");
 	return (rv);
 }
 
+static u_int
+__vdso_rdtsc32(void)
+{
+	u_int rv;
+
+	if (get_lfence_usage() == 1)
+		lfence();
+	rv = rdtsc32();
+	return (rv);
+}
+
 #pragma weak __vdso_gettc
 u_int
 __vdso_gettc(const struct vdso_timehands *th)
 {
 
-	return (th->th_x86_shift > 0 ? __vdso_gettc_low(th) : rdtsc32());
+	return (th->th_x86_shift > 0 ? __vdso_gettc_low(th) :
+	    __vdso_rdtsc32());
 }
 
 #pragma weak __vdso_gettimekeep
diff --git a/lib/libc/sys/__vdso_gettimeofday.c b/lib/libc/sys/__vdso_gettimeofday.c
index a305173..b3527fa 100644
--- a/lib/libc/sys/__vdso_gettimeofday.c
+++ b/lib/libc/sys/__vdso_gettimeofday.c
@@ -42,6 +42,15 @@ tc_delta(const struct vdso_timehands *th)
 	    th->th_counter_mask);
 }
 
+/*
+ * Calculate the absolute or boot-relative time from the
+ * machine-specific fast timecounter and the published timehands
+ * structure read from the shared page.
+ *
+ * The lockless reading scheme is similar to the one used to read the
+ * in-kernel timehands, see sys/kern/kern_tc.c:binuptime().  This code
+ * is based on the kernel implementation.
+ */
 static int
 binuptime(struct bintime *bt, struct vdso_timekeep *tk, int abs)
 {
@@ -52,27 +61,21 @@ binuptime(struct bintime *bt, struct vdso_timekeep *tk, int abs)
 		if (!tk->tk_enabled)
 			return (ENOSYS);
 
-		/*
-		 * XXXKIB. The load of tk->tk_current should use
-		 * atomic_load_acq_32 to provide load barrier. But
-		 * since tk points to r/o mapped page, x86
-		 * implementation of atomic_load_acq faults.
-		 */
-		curr = tk->tk_current;
-		rmb();
+		curr = atomic_load_acq_32(&tk->tk_current);
 		th = &tk->tk_th[curr];
 		if (th->th_algo != VDSO_TH_ALGO_1)
 			return (ENOSYS);
-		gen = th->th_gen;
+		gen = atomic_load_acq_32(&th->th_gen);
 		*bt = th->th_offset;
 		bintime_addx(bt, th->th_scale * tc_delta(th));
 		if (abs)
 			bintime_add(bt, &th->th_boottime);
 
 		/*
-		 * Barrier for load of both tk->tk_current and th->th_gen.
+		 * Ensure that the load of th_offset is completed
+		 * before the load of th_gen.
 		 */
-		rmb();
+		atomic_thread_fence_acq();
 	} while (curr != tk->tk_current || gen == 0 || gen != th->th_gen);
 	return (0);
 }
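
The sketch below is illustrative only and not part of the commit: a
minimal, self-contained C program showing the generation-checked,
lock-less read protocol that the patched binuptime() relies on,
restated with C11 atomics in place of FreeBSD's atomic_load_acq_32()
and atomic_thread_fence_acq().  The struct layout and the names
th_sketch, snapshot and fenced_rdtsc are hypothetical stand-ins; the
real layout is struct vdso_timehands from sys/vdso.h, and the real
writer is the kernel updating the shared page.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct th_sketch {
	_Atomic uint32_t gen;		/* 0 means writer mid-update: retry */
	uint64_t	 offset;	/* payload guarded by gen */
};

/*
 * Stand-in for the r/o shared page.  In the real scheme the kernel is
 * the only writer: it stores 0 to th_gen, updates the payload, then
 * release-stores the next non-zero generation.
 */
static struct th_sketch shared = { 1, 1000 };

#if defined(__i386__) || defined(__x86_64__)
/*
 * Serialized TSC read analogous to the "lfence; rdtsc" pairs added by
 * the commit: rdtsc is not a load and is not subject to the usual x86
 * load-load ordering, so the lfence keeps it from executing ahead of
 * the preceding loads.
 */
static inline uint64_t
fenced_rdtsc(void)
{
	uint32_t lo, hi;

	__asm__ __volatile__("lfence; rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32 | lo);
}
#endif

/* Same retry-loop shape as the patched binuptime(). */
static uint64_t
snapshot(void)
{
	uint32_t gen;
	uint64_t v;

	do {
		/* Acquire: the payload load cannot move above this load. */
		gen = atomic_load_explicit(&shared.gen,
		    memory_order_acquire);
		v = shared.offset;	/* plain load, as in the real code */
		/*
		 * Acquire fence: the payload load must complete before
		 * gen is re-checked below; this is the role of the
		 * atomic_thread_fence_acq() added by the commit.
		 */
		atomic_thread_fence(memory_order_acquire);
	} while (gen == 0 || gen != atomic_load_explicit(&shared.gen,
	    memory_order_relaxed));
	return (v);
}

int
main(void)
{

	printf("offset %llu\n", (unsigned long long)snapshot());
#if defined(__i386__) || defined(__x86_64__)
	printf("tsc    %llu\n", (unsigned long long)fenced_rdtsc());
#endif
	return (0);
}

The reader retries whenever it observes gen == 0 or a generation
change, so it never uses a payload read while the writer was between
the two generation stores.  On x86 ordinary loads are already ordered
against each other, which is why the acquire operations cost nothing
there and the only extra instruction the commit needs is the lfence
before rdtsc.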