summaryrefslogtreecommitdiffstats
path: root/sys/rpc
diff options
context:
space:
mode:
authorrmacklem <rmacklem@FreeBSD.org>2011-10-07 01:15:04 +0000
committerrmacklem <rmacklem@FreeBSD.org>2011-10-07 01:15:04 +0000
commit7adae26f0ff7104dea297cfdfb4ef600e60cc78b (patch)
tree9289dfe134ccdc9a4c169f62bc276de0f353a8d8 /sys/rpc
parentf3e29d548396db158b981d33b8e7affeac709ae3 (diff)
downloadFreeBSD-src-7adae26f0ff7104dea297cfdfb4ef600e60cc78b.zip
FreeBSD-src-7adae26f0ff7104dea297cfdfb4ef600e60cc78b.tar.gz
A crash reported on freebsd-fs@ on Sep. 23, 2011 under the subject
heading "kernel panics with RPCSEC_GSS" appears to be caused by a corrupted tailq list for the client structure. Looking at the code, calls to the function svc_rpc_gss_forget_client() were done in an SMP-unsafe manner, with the svc_rpc_gss_lock only being acquired in the function and not before it. As such, when multiple threads called svc_rpc_gss_forget_client() concurrently, it could try to remove the same client structure from the tailq lists multiple times. The patch fixes this by moving the critical code into a separate function called svc_rpc_gss_forget_client_locked(), which must be called with the lock held. For the one case where the caller would have no interest in the lock, svc_rpc_gss_forget_client() was retained, but a loop was added to check that the client structure is still in the tailq lists before removing it, to make it safe for multiple concurrent calls. Tested by: clinton.adams at gmail.com (earlier version) Reviewed by: zkirsch MFC after: 3 days
Diffstat (limited to 'sys/rpc')
-rw-r--r--sys/rpc/rpcsec_gss/svc_rpcsec_gss.c58
1 file changed, 48 insertions, 10 deletions
diff --git a/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c b/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c
index 7433b79d..25e2c46 100644
--- a/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c
+++ b/sys/rpc/rpcsec_gss/svc_rpcsec_gss.c
@@ -609,27 +609,52 @@ svc_rpc_gss_release_client(struct svc_rpc_gss_client *client)
}
/*
- * Remove a client from our global lists and free it if we can.
+ * Remove a client from our global lists.
+ * Must be called with svc_rpc_gss_lock held.
*/
static void
-svc_rpc_gss_forget_client(struct svc_rpc_gss_client *client)
+svc_rpc_gss_forget_client_locked(struct svc_rpc_gss_client *client)
{
struct svc_rpc_gss_client_list *list;
+ sx_assert(&svc_rpc_gss_lock, SX_XLOCKED);
list = &svc_rpc_gss_client_hash[client->cl_id.ci_id % CLIENT_HASH_SIZE];
- sx_xlock(&svc_rpc_gss_lock);
TAILQ_REMOVE(list, client, cl_link);
TAILQ_REMOVE(&svc_rpc_gss_clients, client, cl_alllink);
svc_rpc_gss_client_count--;
+}
+
+/*
+ * Remove a client from our global lists and free it if we can.
+ */
+static void
+svc_rpc_gss_forget_client(struct svc_rpc_gss_client *client)
+{
+ struct svc_rpc_gss_client_list *list;
+ struct svc_rpc_gss_client *tclient;
+
+ list = &svc_rpc_gss_client_hash[client->cl_id.ci_id % CLIENT_HASH_SIZE];
+ sx_xlock(&svc_rpc_gss_lock);
+ TAILQ_FOREACH(tclient, list, cl_link) {
+ /*
+ * Make sure this client has not already been removed
+ * from the lists by svc_rpc_gss_forget_client() or
+ * svc_rpc_gss_forget_client_locked().
+ */
+ if (client == tclient) {
+ svc_rpc_gss_forget_client_locked(client);
+ sx_xunlock(&svc_rpc_gss_lock);
+ svc_rpc_gss_release_client(client);
+ return;
+ }
+ }
sx_xunlock(&svc_rpc_gss_lock);
- svc_rpc_gss_release_client(client);
}
static void
svc_rpc_gss_timeout_clients(void)
{
struct svc_rpc_gss_client *client;
- struct svc_rpc_gss_client *nclient;
time_t now = time_uptime;
rpc_gss_log_debug("in svc_rpc_gss_timeout_clients()");
@@ -638,16 +663,29 @@ svc_rpc_gss_timeout_clients(void)
* First enforce the max client limit. We keep
* svc_rpc_gss_clients in LRU order.
*/
- while (svc_rpc_gss_client_count > CLIENT_MAX)
- svc_rpc_gss_forget_client(TAILQ_LAST(&svc_rpc_gss_clients,
- svc_rpc_gss_client_list));
- TAILQ_FOREACH_SAFE(client, &svc_rpc_gss_clients, cl_alllink, nclient) {
+ sx_xlock(&svc_rpc_gss_lock);
+ client = TAILQ_LAST(&svc_rpc_gss_clients, svc_rpc_gss_client_list);
+ while (svc_rpc_gss_client_count > CLIENT_MAX && client != NULL) {
+ svc_rpc_gss_forget_client_locked(client);
+ sx_xunlock(&svc_rpc_gss_lock);
+ svc_rpc_gss_release_client(client);
+ sx_xlock(&svc_rpc_gss_lock);
+ client = TAILQ_LAST(&svc_rpc_gss_clients,
+ svc_rpc_gss_client_list);
+ }
+again:
+ TAILQ_FOREACH(client, &svc_rpc_gss_clients, cl_alllink) {
if (client->cl_state == CLIENT_STALE
|| now > client->cl_expiration) {
+ svc_rpc_gss_forget_client_locked(client);
+ sx_xunlock(&svc_rpc_gss_lock);
rpc_gss_log_debug("expiring client %p", client);
- svc_rpc_gss_forget_client(client);
+ svc_rpc_gss_release_client(client);
+ sx_xlock(&svc_rpc_gss_lock);
+ goto again;
}
}
+ sx_xunlock(&svc_rpc_gss_lock);
}
#ifdef DEBUG
OpenPOWER on IntegriCloud