Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/subr_sbuf.c	129
-rw-r--r--	sys/kern/subr_smp.c	71

2 files changed, 128 insertions(+), 72 deletions(-)
diff --git a/sys/kern/subr_sbuf.c b/sys/kern/subr_sbuf.c
index 793e17e..e931e65 100644
--- a/sys/kern/subr_sbuf.c
+++ b/sys/kern/subr_sbuf.c
@@ -67,7 +67,7 @@ static MALLOC_DEFINE(M_SBUF, "sbuf", "string buffers");
#define SBUF_ISDYNSTRUCT(s) ((s)->s_flags & SBUF_DYNSTRUCT)
#define SBUF_ISFINISHED(s) ((s)->s_flags & SBUF_FINISHED)
#define SBUF_HASROOM(s) ((s)->s_len < (s)->s_size - 1)
-#define SBUF_FREESPACE(s) ((s)->s_size - (s)->s_len - 1)
+#define SBUF_FREESPACE(s) ((s)->s_size - ((s)->s_len + 1))
#define SBUF_CANEXTEND(s) ((s)->s_flags & SBUF_AUTOEXTEND)
/*
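Both forms of SBUF_FREESPACE compute the same value; the regrouping only makes the byte reserved for the NUL terminator explicit. A minimal userland model of that invariant, using hypothetical toy names rather than the kernel struct:

#include <assert.h>

struct toy_sbuf {
	int s_size;	/* total buffer size, including the NUL slot */
	int s_len;	/* current string length, excluding the NUL */
};

static int
toy_freespace(const struct toy_sbuf *s)
{

	/* One byte is always held back for the terminator. */
	return (s->s_size - (s->s_len + 1));
}

int
main(void)
{
	struct toy_sbuf s = { .s_size = 16, .s_len = 15 };

	assert(toy_freespace(&s) == 0);	/* full: only the NUL slot remains */
	return (0);
}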
@@ -77,8 +77,14 @@ static MALLOC_DEFINE(M_SBUF, "sbuf", "string buffers");
#define SBUF_CLEARFLAG(s, f) do { (s)->s_flags &= ~(f); } while (0)
#define SBUF_MINEXTENDSIZE 16 /* Should be power of 2. */
+
+#ifdef PAGE_SIZE
#define SBUF_MAXEXTENDSIZE PAGE_SIZE
#define SBUF_MAXEXTENDINCR PAGE_SIZE
+#else
+#define SBUF_MAXEXTENDSIZE 4096
+#define SBUF_MAXEXTENDINCR 4096
+#endif
/*
* Debugging support
@@ -138,7 +144,6 @@ sbuf_extendsize(int size)
return (newsize);
}
-
/*
* Extend an sbuf.
*/
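For context, sbuf_extendsize() (whose tail appears above) grows allocation requests geometrically up to a page and linearly beyond it, which is where the SBUF_MAXEXTENDSIZE/SBUF_MAXEXTENDINCR fallbacks from the previous hunk come in. A paraphrased sketch of that policy, not the verbatim source:

#define SBUF_MINEXTENDSIZE	16
#define SBUF_MAXEXTENDSIZE	4096	/* PAGE_SIZE in kernel builds */
#define SBUF_MAXEXTENDINCR	4096

static int
extendsize_sketch(int size)
{
	int newsize;

	/* Double small buffers; grow large ones a page at a time. */
	newsize = SBUF_MINEXTENDSIZE;
	while (newsize < size) {
		if (newsize < SBUF_MAXEXTENDSIZE)
			newsize *= 2;
		else
			newsize += SBUF_MAXEXTENDINCR;
	}
	return (newsize);
}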
@@ -154,7 +159,7 @@ sbuf_extend(struct sbuf *s, int addlen)
newbuf = SBMALLOC(newsize);
if (newbuf == NULL)
return (-1);
- bcopy(s->s_buf, newbuf, s->s_size);
+ memcpy(newbuf, s->s_buf, s->s_size);
if (SBUF_ISDYNAMIC(s))
SBFREE(s->s_buf);
else
@@ -165,6 +170,38 @@ sbuf_extend(struct sbuf *s, int addlen)
}
/*
+ * Initialize the internals of an sbuf.
+ * If buf is non-NULL, it points to a static or already-allocated string
+ * big enough to hold at least length characters.
+ */
+static struct sbuf *
+sbuf_newbuf(struct sbuf *s, char *buf, int length, int flags)
+{
+
+ memset(s, 0, sizeof(*s));
+ s->s_flags = flags;
+ s->s_size = length;
+ s->s_buf = buf;
+
+ if ((s->s_flags & SBUF_AUTOEXTEND) == 0) {
+ KASSERT(s->s_size > 1,
+ ("attempt to create a too small sbuf"));
+ }
+
+ if (s->s_buf != NULL)
+ return (s);
+
+ if ((flags & SBUF_AUTOEXTEND) != 0)
+ s->s_size = sbuf_extendsize(s->s_size);
+
+ s->s_buf = SBMALLOC(s->s_size);
+ if (s->s_buf == NULL)
+ return (NULL);
+ SBUF_SETFLAG(s, SBUF_DYNAMIC);
+ return (s);
+}
+
+/*
* Initialize an sbuf.
* If buf is non-NULL, it points to a static or already-allocated string
* big enough to hold at least length characters.
@@ -179,31 +216,17 @@ sbuf_new(struct sbuf *s, char *buf, int length, int flags)
("%s called with invalid flags", __func__));
flags &= SBUF_USRFLAGMSK;
- if (s == NULL) {
- s = SBMALLOC(sizeof(*s));
- if (s == NULL)
- return (NULL);
- bzero(s, sizeof(*s));
- s->s_flags = flags;
- SBUF_SETFLAG(s, SBUF_DYNSTRUCT);
- } else {
- bzero(s, sizeof(*s));
- s->s_flags = flags;
- }
- s->s_size = length;
- if (buf != NULL) {
- s->s_buf = buf;
- return (s);
- }
- if ((flags & SBUF_AUTOEXTEND) != 0)
- s->s_size = sbuf_extendsize(s->s_size);
- s->s_buf = SBMALLOC(s->s_size);
- if (s->s_buf == NULL) {
- if (SBUF_ISDYNSTRUCT(s))
- SBFREE(s);
+ if (s != NULL)
+ return (sbuf_newbuf(s, buf, length, flags));
+
+ s = SBMALLOC(sizeof(*s));
+ if (s == NULL)
+ return (NULL);
+ if (sbuf_newbuf(s, buf, length, flags) == NULL) {
+ SBFREE(s);
return (NULL);
}
- SBUF_SETFLAG(s, SBUF_DYNAMIC);
+ SBUF_SETFLAG(s, SBUF_DYNSTRUCT);
return (s);
}
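Both creation paths now funnel through sbuf_newbuf(); only the struct allocation and the matching SBUF_DYNSTRUCT cleanup remain in sbuf_new(). A hedged userland sketch of the two paths (link with -lsbuf; error handling abbreviated):

#include <sys/sbuf.h>
#include <stdio.h>

int
main(void)
{
	struct sbuf stat_sb;
	char stat_buf[64];
	struct sbuf *dyn;

	/* Caller-provided struct and buffer: nothing is allocated. */
	sbuf_new(&stat_sb, stat_buf, sizeof(stat_buf), SBUF_FIXEDLEN);
	sbuf_cat(&stat_sb, "hello");

	/*
	 * NULL/NULL: both the struct and the buffer are allocated,
	 * so SBUF_DYNSTRUCT and SBUF_DYNAMIC get set internally.
	 */
	dyn = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND);
	if (dyn == NULL)
		return (1);
	sbuf_printf(dyn, "pid %d", 42);

	if (sbuf_finish(&stat_sb) == 0)
		printf("%s\n", sbuf_data(&stat_sb));
	if (sbuf_finish(dyn) == 0)
		printf("%s\n", sbuf_data(dyn));

	sbuf_delete(&stat_sb);	/* frees nothing, clears the struct */
	sbuf_delete(dyn);	/* frees both buffer and struct */
	return (0);
}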
@@ -328,7 +351,7 @@ sbuf_drain(struct sbuf *s)
* buffer and marking overflow.
*/
static void
-sbuf_put_byte(int c, struct sbuf *s)
+sbuf_put_byte(struct sbuf *s, int c)
{
assert_sbuf_integrity(s);
@@ -337,7 +360,7 @@ sbuf_put_byte(int c, struct sbuf *s)
if (s->s_error != 0)
return;
if (SBUF_FREESPACE(s) <= 0) {
- /*
+ /*
* If there is a drain, use it, otherwise extend the
* buffer.
*/
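When the buffer is full, the code above prefers the drain to extending. A hedged userland sketch of a drain callback wired up via sbuf_set_drain(); per sbuf(9), a positive return value reports the number of bytes consumed:

#include <sys/sbuf.h>
#include <stdio.h>

static int
stdout_drain(void *arg, const char *data, int len)
{

	/* Consume the pending bytes; >0 means that many were drained. */
	return ((int)fwrite(data, 1, len, stdout));
}

int
main(void)
{
	struct sbuf *sb = sbuf_new(NULL, NULL, 16, SBUF_FIXEDLEN);
	int i;

	sbuf_set_drain(sb, stdout_drain, NULL);
	for (i = 0; i < 100; i++)
		sbuf_printf(sb, "line %d\n", i);	/* drains as it fills */
	sbuf_finish(sb);	/* flushes the remainder */
	sbuf_delete(sb);
	return (0);
}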
@@ -352,18 +375,6 @@ sbuf_put_byte(int c, struct sbuf *s)
}
/*
- * Append a non-NUL character to an sbuf. This prototype signature is
- * suitable for use with kvprintf(9).
- */
-static void
-sbuf_putc_func(int c, void *arg)
-{
-
- if (c != '\0')
- sbuf_put_byte(c, arg);
-}
-
-/*
* Append a byte string to an sbuf.
*/
int
@@ -378,10 +389,10 @@ sbuf_bcat(struct sbuf *s, const void *buf, size_t len)
if (s->s_error != 0)
return (-1);
for (; str < end; str++) {
- sbuf_put_byte(*str, s);
+ sbuf_put_byte(s, *str);
if (s->s_error != 0)
return (-1);
- }
+ }
return (0);
}
@@ -443,7 +454,7 @@ sbuf_cat(struct sbuf *s, const char *str)
return (-1);
while (*str != '\0') {
- sbuf_put_byte(*str++, s);
+ sbuf_put_byte(s, *str++);
if (s->s_error != 0)
return (-1);
}
@@ -507,6 +518,19 @@ sbuf_cpy(struct sbuf *s, const char *str)
* Format the given argument list and append the resulting string to an sbuf.
*/
#ifdef _KERNEL
+
+/*
+ * Append a non-NUL character to an sbuf. This prototype signature is
+ * suitable for use with kvprintf(9).
+ */
+static void
+sbuf_putc_func(int c, void *arg)
+{
+
+ if (c != '\0')
+ sbuf_put_byte(arg, c);
+}
+
int
sbuf_vprintf(struct sbuf *s, const char *fmt, va_list ap)
{
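The callback keeps the (int c, void *arg) shape because that is what kvprintf(9) expects for its per-character output function. A hedged sketch of the call that sbuf_vprintf() (opening above) builds on it; the committed version also handles draining and retries, elided here:

/* Kernel context: <sys/systm.h> for kvprintf(9), <sys/sbuf.h>. */
static int
sbuf_vprintf_sketch(struct sbuf *s, const char *fmt, va_list ap)
{

	/* Radix 10; sbuf_putc_func() appends each non-NUL byte. */
	(void)kvprintf(fmt, sbuf_putc_func, s, 10, ap);
	return (s->s_error != 0 ? -1 : 0);
}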
@@ -611,7 +635,7 @@ int
sbuf_putc(struct sbuf *s, int c)
{
- sbuf_putc_func(c, s);
+ sbuf_put_byte(s, c);
if (s->s_error != 0)
return (-1);
return (0);
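Because sbuf_putc() now calls sbuf_put_byte() directly instead of the NUL-filtering kvprintf callback, an explicit '\0' argument is stored rather than silently dropped. A hedged userland illustration:

#include <sys/sbuf.h>
#include <stdio.h>

int
main(void)
{
	struct sbuf *sb = sbuf_new(NULL, NULL, 16, SBUF_AUTOEXTEND);

	sbuf_putc(sb, 'a');
	sbuf_putc(sb, '\0');	/* embedded NUL is appended, not dropped */
	sbuf_putc(sb, 'b');
	sbuf_finish(sb);
	printf("len %ld\n", (long)sbuf_len(sb));	/* prints 3 */
	sbuf_delete(sb);
	return (0);
}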
@@ -654,24 +678,23 @@ sbuf_error(const struct sbuf *s)
int
sbuf_finish(struct sbuf *s)
{
- int error;
assert_sbuf_integrity(s);
assert_sbuf_state(s, 0);
- error = s->s_error;
if (s->s_drain_func != NULL) {
- while (s->s_len > 0 && error == 0)
- error = sbuf_drain(s);
+ while (s->s_len > 0 && s->s_error == 0)
+ s->s_error = sbuf_drain(s);
}
s->s_buf[s->s_len] = '\0';
- s->s_error = 0;
SBUF_SETFLAG(s, SBUF_FINISHED);
#ifdef _KERNEL
- return (error);
+ return (s->s_error);
#else
- errno = error;
- return (-1);
+ errno = s->s_error;
+ if (s->s_error)
+ return (-1);
+ return (0);
#endif
}
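With this change sbuf_finish() preserves s_error instead of clearing it, so overflow is actually reported: kernel callers get the error number back, while userland callers get -1 with errno set, and 0 on success (the old userland code returned -1 unconditionally). A hedged userland check:

#include <sys/sbuf.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	struct sbuf sb;
	char buf[8];

	sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
	sbuf_cat(&sb, "deliberately longer than eight bytes");

	if (sbuf_finish(&sb) != 0)
		warn("sbuf_finish");	/* overflow surfaced via errno */
	else
		printf("%s\n", sbuf_data(&sb));
	sbuf_delete(&sb);
	return (0);
}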
@@ -721,7 +744,7 @@ sbuf_delete(struct sbuf *s)
if (SBUF_ISDYNAMIC(s))
SBFREE(s->s_buf);
isdyn = SBUF_ISDYNSTRUCT(s);
- bzero(s, sizeof(*s));
+ memset(s, 0, sizeof(*s));
if (isdyn)
SBFREE(s);
}
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index ae061f3..83655e8 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -53,10 +53,10 @@ __FBSDID("$FreeBSD$");
#include "opt_sched.h"
#ifdef SMP
-volatile cpuset_t stopped_cpus;
-volatile cpuset_t started_cpus;
-cpuset_t hlt_cpus_mask;
-cpuset_t logical_cpus_mask;
+volatile cpumask_t stopped_cpus;
+volatile cpumask_t started_cpus;
+cpumask_t hlt_cpus_mask;
+cpumask_t logical_cpus_mask;
void (*cpustop_restartfunc)(void);
#endif
@@ -110,6 +110,7 @@ static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void *volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[3];
+static volatile int smp_rv_generation;
/*
* Shared mutex to restrict busywaits between smp_rendezvous() and
@@ -317,39 +318,63 @@ restart_cpus(cpuset_t map)
void
smp_rendezvous_action(void)
{
- void* local_func_arg = smp_rv_func_arg;
- void (*local_setup_func)(void*) = smp_rv_setup_func;
- void (*local_action_func)(void*) = smp_rv_action_func;
- void (*local_teardown_func)(void*) = smp_rv_teardown_func;
+ void *local_func_arg;
+ void (*local_setup_func)(void*);
+ void (*local_action_func)(void*);
+ void (*local_teardown_func)(void*);
+ int generation;
/* Ensure we have up-to-date values. */
atomic_add_acq_int(&smp_rv_waiters[0], 1);
while (smp_rv_waiters[0] < smp_rv_ncpus)
cpu_spinwait();
- /* setup function */
+ /* Fetch rendezvous parameters after acquire barrier. */
+ local_func_arg = smp_rv_func_arg;
+ local_setup_func = smp_rv_setup_func;
+ local_action_func = smp_rv_action_func;
+ local_teardown_func = smp_rv_teardown_func;
+ generation = smp_rv_generation;
+
+ /*
+ * If requested, run a setup function before the main action
+ * function. Ensure all CPUs have completed the setup
+ * function before moving on to the action function.
+ */
if (local_setup_func != smp_no_rendevous_barrier) {
if (smp_rv_setup_func != NULL)
smp_rv_setup_func(smp_rv_func_arg);
-
- /* spin on entry rendezvous */
atomic_add_int(&smp_rv_waiters[1], 1);
while (smp_rv_waiters[1] < smp_rv_ncpus)
cpu_spinwait();
}
- /* action function */
if (local_action_func != NULL)
local_action_func(local_func_arg);
- /* spin on exit rendezvous */
+ /*
+ * Signal that the main action has been completed. If a
+ * full exit rendezvous is requested, then all CPUs will
+ * wait here until all CPUs have finished the main action.
+ *
+ * Note that the write by the last CPU to finish the action
+ * may become visible to different CPUs at different times.
+ * As a result, the CPU that initiated the rendezvous may
+ * exit the rendezvous and drop the lock, allowing another
+ * rendezvous to be initiated on the same CPU or a different
+ * CPU. In that case the exit sentinel may be cleared before
+ * all CPUs have noticed, causing those CPUs to hang forever.
+ * Work around this by using a generation count to notice when
+ * this race occurs and to exit the rendezvous in that case.
+ */
+ MPASS(generation == smp_rv_generation);
atomic_add_int(&smp_rv_waiters[2], 1);
if (local_teardown_func == smp_no_rendevous_barrier)
return;
- while (smp_rv_waiters[2] < smp_rv_ncpus)
+ while (smp_rv_waiters[2] < smp_rv_ncpus &&
+ generation == smp_rv_generation)
cpu_spinwait();
- /* teardown function */
if (local_teardown_func != NULL)
local_teardown_func(local_func_arg);
}
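A minimal userland model of the exit race and its generation-count fix, with hypothetical names and C11 atomics standing in for the kernel's atomic(9) primitives:

#include <stdatomic.h>

static atomic_int rv_generation;	/* bumped once per rendezvous */
static atomic_int rv_exit_waiters;	/* smp_rv_waiters[2] analogue */
static int rv_ncpus;

static void
rendezvous_exit(void)
{
	int gen;

	/* Snapshot the generation before announcing completion. */
	gen = atomic_load(&rv_generation);
	atomic_fetch_add(&rv_exit_waiters, 1);

	/*
	 * If the initiator has already started the next rendezvous,
	 * the waiter count may have been reset under us; the
	 * generation check keeps this CPU from spinning forever.
	 */
	while (atomic_load(&rv_exit_waiters) < rv_ncpus &&
	    atomic_load(&rv_generation) == gen)
		;	/* cpu_spinwait() stand-in */
}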
@@ -380,10 +405,11 @@ smp_rendezvous_cpus(cpuset_t map,
if (ncpus == 0)
panic("ncpus is 0 with non-zero map");
- /* obtain rendezvous lock */
mtx_lock_spin(&smp_ipi_mtx);
- /* set static function pointers */
+ atomic_add_acq_int(&smp_rv_generation, 1);
+
+ /* Pass rendezvous parameters via global variables. */
smp_rv_ncpus = ncpus;
smp_rv_setup_func = setup_func;
smp_rv_action_func = action_func;
@@ -393,7 +419,10 @@ smp_rendezvous_cpus(cpuset_t map,
smp_rv_waiters[2] = 0;
atomic_store_rel_int(&smp_rv_waiters[0], 0);
- /* signal other processors, which will enter the IPI with interrupts off */
+ /*
+ * Signal other processors, which will enter the IPI with
+ * interrupts off.
+ */
curcpumap = CPU_ISSET(curcpu, &map);
CPU_CLR(curcpu, &map);
ipi_selected(map, IPI_RENDEZVOUS);
@@ -402,11 +431,15 @@ smp_rendezvous_cpus(cpuset_t map,
if (curcpumap != 0)
smp_rendezvous_action();
+ /*
+ * If the caller did not request an exit barrier to be enforced
+ * on each CPU, ensure that this CPU waits for all the other
+ * CPUs to finish the rendezvous.
+ */
if (teardown_func == smp_no_rendevous_barrier)
while (atomic_load_acq_int(&smp_rv_waiters[2]) < ncpus)
cpu_spinwait();
- /* release lock */
mtx_unlock_spin(&smp_ipi_mtx);
}
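For reference, a hedged sketch of how kernel code drives this machinery; smp_rendezvous() is the all-CPUs wrapper around smp_rendezvous_cpus(), and the names below are illustrative, not from this commit:

/* Kernel context: <sys/smp.h>. */
static void
invalidate_local(void *arg __unused)
{

	/* Per-CPU work, e.g. flushing a CPU-local cache. */
}

static void
invalidate_all_cpus(void)
{

	smp_rendezvous(
	    NULL,			/* setup: barrier, no callback */
	    invalidate_local,		/* action run on every CPU */
	    smp_no_rendevous_barrier,	/* skip the exit barrier */
	    NULL);			/* argument to the callbacks */
}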