author	ed <ed@FreeBSD.org>	2008-12-29 12:58:45 +0000
committer	ed <ed@FreeBSD.org>	2008-12-29 12:58:45 +0000
commit	f3a9a195cb5b2d1f5e0a7779c33cce89b9539695 (patch)
tree	1cc7c4d342853f5d46fa8f554e48601c75ec4157 /sys/kern/kern_sysctl.c
parent	bd5d614be80b38952e55e5516853af28f99d108d (diff)
download	FreeBSD-src-f3a9a195cb5b2d1f5e0a7779c33cce89b9539695.zip
	FreeBSD-src-f3a9a195cb5b2d1f5e0a7779c33cce89b9539695.tar.gz
Push down Giant inside sysctl. Also add some more assertions to the code.
In the existing code we didn't really enforce that callers hold Giant before calling userland_sysctl(), even though there is no guarantee it is safe. Fix this by just placing Giant locks around the call to the oid handler. This also means we only pick up Giant for a very short period of time. Maybe we should add MPSAFE flags to sysctl or phase it out altogether.

I've also added SYSCTL_LOCK_ASSERT(). We have to make sure sysctl_root() and name2oid() are called with the sysctl lock held.

Reviewed by:	Jille Timmermans <jille quis cx>
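To make the locking discipline concrete, here is a minimal userland sketch of the same pattern using POSIX threads. All names in it (subsys_lock, giant, subsys_held, SUBSYS_LOCK_ASSERT, lookup_handler, dispatch) are hypothetical stand-ins, not the kernel API: the real code uses sx(9) for sysctllock, mtx(9) for Giant, and sx_assert() inside SYSCTL_LOCK_ASSERT().

/*
 * Minimal userland sketch (POSIX threads) of the locking pattern in this
 * commit.  All identifiers here are hypothetical; the kernel uses sx(9)
 * for sysctllock, mtx(9) for Giant and sx_assert() for the assertion.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t subsys_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ sysctllock */
static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER;       /* ~ Giant */
static pthread_t subsys_owner;	/* holder of subsys_lock, for the assertion */
static int subsys_held;		/* nonzero while subsys_lock is held */

/* ~ SYSCTL_LOCK_ASSERT(): internal routines must run with the lock held. */
#define	SUBSYS_LOCK_ASSERT()						\
	assert(subsys_held && pthread_equal(subsys_owner, pthread_self()))

typedef int (*handler_t)(void *);

static int
example_handler(void *arg)
{
	/* A handler that is not known to be safe without the big lock. */
	printf("handler sees %d\n", *(int *)arg);
	return (0);
}

/* ~ name2oid()/sysctl_root(): may only be called with subsys_lock held. */
static handler_t
lookup_handler(const char *name)
{
	SUBSYS_LOCK_ASSERT();
	(void)name;			/* real lookup elided */
	return (example_handler);
}

/* ~ userland_sysctl(): callers no longer take the big lock themselves. */
static int
dispatch(const char *name, void *arg)
{
	handler_t h;
	int error;

	pthread_mutex_lock(&subsys_lock);
	subsys_owner = pthread_self();
	subsys_held = 1;
	h = lookup_handler(name);
	/*
	 * The big legacy lock is picked up only around the handler call,
	 * since individual handlers are not guaranteed to be MP-safe.
	 */
	pthread_mutex_lock(&giant);
	error = h(arg);
	pthread_mutex_unlock(&giant);
	subsys_held = 0;
	pthread_mutex_unlock(&subsys_lock);
	return (error);
}

int
main(void)
{
	int v = 42;

	return (dispatch("kern.example", &v));
}

The sketch builds with cc -pthread and only demonstrates the ordering: the subsystem lock is held across lookup and dispatch, while the legacy lock is held only across the handler call itself, mirroring how the diff below wraps oid->oid_handler() in mtx_lock(&Giant)/mtx_unlock(&Giant).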
Diffstat (limited to 'sys/kern/kern_sysctl.c')
-rw-r--r--	sys/kern/kern_sysctl.c	42
1 file changed, 23 insertions, 19 deletions
diff --git a/sys/kern/kern_sysctl.c b/sys/kern/kern_sysctl.c
index a094d42..82d3a7c 100644
--- a/sys/kern/kern_sysctl.c
+++ b/sys/kern/kern_sysctl.c
@@ -71,6 +71,7 @@ static struct sx sysctllock;
#define SYSCTL_LOCK() sx_xlock(&sysctllock)
#define SYSCTL_UNLOCK() sx_xunlock(&sysctllock)
+#define SYSCTL_LOCK_ASSERT() sx_assert(&sysctllock, SX_XLOCKED)
#define SYSCTL_INIT() sx_init(&sysctllock, "sysctl lock")
static int sysctl_root(SYSCTL_HANDLER_ARGS);
@@ -686,6 +687,8 @@ name2oid (char *name, int *oid, int *len, struct sysctl_oid **oidpp)
struct sysctl_oid_list *lsp = &sysctl__children;
char *p;
+ SYSCTL_LOCK_ASSERT();
+
if (!*name)
return (ENOENT);
@@ -742,6 +745,8 @@ sysctl_sysctl_name2oid(SYSCTL_HANDLER_ARGS)
int error, oid[CTL_MAXNAME], len;
struct sysctl_oid *op = 0;
+ SYSCTL_LOCK_ASSERT();
+
if (!req->newlen)
return (ENOENT);
if (req->newlen >= MAXPATHLEN) /* XXX arbitrary, undocumented */
@@ -1086,14 +1091,12 @@ kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
req.lock = REQ_LOCKED;
SYSCTL_LOCK();
-
error = sysctl_root(0, name, namelen, &req);
+ SYSCTL_UNLOCK();
if (req.lock == REQ_WIRED && req.validlen > 0)
vsunlock(req.oldptr, req.validlen);
- SYSCTL_UNLOCK();
-
if (error && error != ENOMEM)
return (error);
@@ -1118,6 +1121,11 @@ kernel_sysctlbyname(struct thread *td, char *name, void *old, size_t *oldlenp,
oid[1] = 3; /* name2oid */
oidlen = sizeof(oid);
+ /*
+ * XXX: Prone to a possible race condition between lookup and
+ * execution? Maybe put locking around it?
+ */
+
error = kernel_sysctl(td, oid, 2, oid, &oidlen,
(void *)name, strlen(name), &plen, flags);
if (error)
@@ -1270,6 +1278,8 @@ sysctl_root(SYSCTL_HANDLER_ARGS)
struct sysctl_oid *oid;
int error, indx, lvl;
+ SYSCTL_LOCK_ASSERT();
+
error = sysctl_find_oid(arg1, arg2, &oid, &indx, req);
if (error)
return (error);
@@ -1324,7 +1334,11 @@ sysctl_root(SYSCTL_HANDLER_ARGS)
if (error != 0)
return (error);
#endif
+
+ /* XXX: Handlers are not guaranteed to be Giant safe! */
+ mtx_lock(&Giant);
error = oid->oid_handler(oid, arg1, arg2, req);
+ mtx_unlock(&Giant);
return (error);
}
@@ -1352,20 +1366,13 @@ __sysctl(struct thread *td, struct sysctl_args *uap)
if (error)
return (error);
- mtx_lock(&Giant);
-
error = userland_sysctl(td, name, uap->namelen,
uap->old, uap->oldlenp, 0,
uap->new, uap->newlen, &j, 0);
if (error && error != ENOMEM)
- goto done2;
- if (uap->oldlenp) {
- int i = copyout(&j, uap->oldlenp, sizeof(j));
- if (i)
- error = i;
- }
-done2:
- mtx_unlock(&Giant);
+ return (error);
+ if (uap->oldlenp)
+ error = copyout(&j, uap->oldlenp, sizeof(j));
return (error);
}
@@ -1426,12 +1433,12 @@ userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
uio_yield();
}
- if (req.lock == REQ_WIRED && req.validlen > 0)
- vsunlock(req.oldptr, req.validlen);
-
CURVNET_RESTORE();
SYSCTL_UNLOCK();
+ if (req.lock == REQ_WIRED && req.validlen > 0)
+ vsunlock(req.oldptr, req.validlen);
+
if (error && error != ENOMEM)
return (error);
@@ -1519,8 +1526,6 @@ ogetkerninfo(struct thread *td, struct getkerninfo_args *uap)
size_t size;
u_int needed = 0;
- mtx_lock(&Giant);
-
switch (uap->op & 0xff00) {
case KINFO_RT:
@@ -1653,7 +1658,6 @@ ogetkerninfo(struct thread *td, struct getkerninfo_args *uap)
error = copyout(&size, uap->size, sizeof(size));
}
}
- mtx_unlock(&Giant);
return (error);
}
#endif /* COMPAT_43 */