author    David Miller <davem@davemloft.net>  2008-11-06 00:37:40 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-11-06 13:51:50 -0800
commit    f8d570a4745835f2238a33b537218a1bb03fc671 (patch)
tree      776c2909523c684f0954949a2947ff0a792ba457 /net
parent    75fa67706cce5272bcfc51ed646f2da21f3bdb6e (diff)
net: Fix recursive descent in __scm_destroy().
__scm_destroy() walks the list of file descriptors in the scm_fp_list
pointed to by the scm_cookie argument.

Those, in turn, can close sockets and invoke __scm_destroy() again.
There is nothing which limits how deeply this can occur.

The idea for how to fix this is from Linus.  Basically, we do all of
the fput()s at the top level by collecting all of the scm_fp_list
objects hit by an fput().  Inside of the initial __scm_destroy() we
keep running the list until it is empty.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'net')
-rw-r--r--  net/core/scm.c  24
1 file changed, 21 insertions, 3 deletions
diff --git a/net/core/scm.c b/net/core/scm.c
index 10f5c65..ab242cc 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -75,6 +75,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
 		if (!fpl)
 			return -ENOMEM;
 		*fplp = fpl;
+		INIT_LIST_HEAD(&fpl->list);
 		fpl->count = 0;
 	}
 	fpp = &fpl->fp[fpl->count];
@@ -106,9 +107,25 @@ void __scm_destroy(struct scm_cookie *scm)
 
 	if (fpl) {
 		scm->fp = NULL;
-		for (i=fpl->count-1; i>=0; i--)
-			fput(fpl->fp[i]);
-		kfree(fpl);
+		if (current->scm_work_list) {
+			list_add_tail(&fpl->list, current->scm_work_list);
+		} else {
+			LIST_HEAD(work_list);
+
+			current->scm_work_list = &work_list;
+
+			list_add(&fpl->list, &work_list);
+			while (!list_empty(&work_list)) {
+				fpl = list_first_entry(&work_list, struct scm_fp_list, list);
+
+				list_del(&fpl->list);
+				for (i=fpl->count-1; i>=0; i--)
+					fput(fpl->fp[i]);
+				kfree(fpl);
+			}
+
+			current->scm_work_list = NULL;
+		}
 	}
 }
@@ -284,6 +301,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
 
 	new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL);
 	if (new_fpl) {
+		INIT_LIST_HEAD(&new_fpl->list);
 		for (i=fpl->count-1; i>=0; i--)
			get_file(fpl->fp[i]);
 		memcpy(new_fpl, fpl, sizeof(*fpl));
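
To illustrate the idea outside the kernel, here is a minimal standalone C sketch of the same pattern: a destructor that can re-enter itself defers nested work onto a list owned by the outermost call, so the stack stays bounded no matter how deeply the objects nest. The names (node, work_head, destroy_node) and the driver in main() are invented for illustration and are not the kernel API.

/*
 * Minimal userspace sketch of the "defer to a top-level work list"
 * pattern used by the patch above.  All names here are made up for
 * illustration; this is not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *child;     /* destroying a node may destroy its child */
	struct node *work_next; /* linkage on the pending-work list */
};

/* Analogue of current->scm_work_list: non-NULL while the outermost
 * destroy call is draining deferred work. */
static struct node **work_head;

static void destroy_node(struct node *n)
{
	if (work_head) {
		/* Nested call: queue the node for the outermost caller
		 * and return, so the stack never grows with depth. */
		n->work_next = *work_head;
		*work_head = n;
		return;
	}

	/* Outermost call: install a local work list and drain it. */
	struct node *pending = n;

	n->work_next = NULL;
	work_head = &pending;

	while (pending) {
		struct node *cur = pending;

		pending = cur->work_next;
		if (cur->child)
			destroy_node(cur->child); /* re-enters, only queues */
		free(cur);
	}

	work_head = NULL;
}

int main(void)
{
	/* Build a deep chain; naive recursive destruction would need
	 * O(depth) stack, the work-list version needs O(1). */
	struct node *head = NULL;
	long depth = 200000;

	for (long i = 0; i < depth; i++) {
		struct node *n = malloc(sizeof(*n));

		n->child = head;
		n->work_next = NULL;
		head = n;
	}
	destroy_node(head);
	printf("destroyed %ld nodes without deep recursion\n", depth);
	return 0;
}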