author		Miklos Szeredi <mszeredi@suse.cz>	2015-07-01 16:25:59 +0200
committer	Miklos Szeredi <mszeredi@suse.cz>	2015-07-01 16:25:59 +0200
commit		b716d425385ed392adc8e619020c1d77ae5ec1cb (patch)
tree		eb0f4bf370c0a86be85494868bf6f5d5e75fa05b /fs
parent		dc00809a53edd15369906b90407a2d5b976289f5 (diff)
fuse: fold helpers into abort
Fold end_io_requests() and end_queued_requests() into fuse_abort_conn().

Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Reviewed-by: Ashish Samant <ashish.samant@oracle.com>
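The scheme being folded in is easiest to see in isolation. Below is a minimal userspace sketch of the same two-phase abort of requests under I/O: mark everything aborted under the lock, splice the unlocked requests onto a private list, then finish that list with the lock cycled around each completion. All names in the sketch (struct req, struct conn, finish_req(), abort_conn()) are hypothetical, a pthread mutex stands in for the fc->lock spinlock, and plain bools stand in for the FR_LOCKED/FR_ABORTED bit flags; it illustrates the pattern, not the kernel code itself.

/*
 * abort_sketch.c -- hypothetical userspace analogue of the two-phase
 * abort that this patch folds into fuse_abort_conn().
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	struct req *next;
	bool locked;		/* stands in for FR_LOCKED */
	bool aborted;		/* stands in for FR_ABORTED */
	int error;
};

struct conn {
	pthread_mutex_t lock;	/* stands in for fc->lock */
	struct req *io;		/* requests currently under I/O */
};

/* Called without conn->lock held, mirroring how request_end() runs
 * after fc->lock has been dropped. */
static void finish_req(struct req *req)
{
	printf("req %p finished, error=%d\n", (void *)req, req->error);
	free(req);
}

static void abort_conn(struct conn *fc)
{
	struct req *to_end = NULL, **pp = &fc->io;

	pthread_mutex_lock(&fc->lock);
	/* Phase 1: mark every request aborted; move the unlocked ones
	 * onto a private list.  Locked ones are left for their unlocker. */
	while (*pp) {
		struct req *req = *pp;

		req->error = -103;	/* ECONNABORTED */
		req->aborted = true;
		if (!req->locked) {
			*pp = req->next;
			req->next = to_end;
			to_end = req;
		} else {
			pp = &req->next;
		}
	}
	/* Phase 2: finish the private list, cycling the lock around each
	 * completion (the kernel version also takes a reference with
	 * __fuse_get_request() before calling request_end()). */
	while (to_end) {
		struct req *req = to_end;

		to_end = req->next;
		pthread_mutex_unlock(&fc->lock);
		finish_req(req);
		pthread_mutex_lock(&fc->lock);
	}
	pthread_mutex_unlock(&fc->lock);
}

int main(void)
{
	struct conn fc = { .lock = PTHREAD_MUTEX_INITIALIZER, .io = NULL };

	for (int i = 0; i < 3; i++) {
		struct req *r = calloc(1, sizeof(*r));

		r->locked = (i == 1);	/* leave one "locked" request */
		r->next = fc.io;
		fc.io = r;
	}
	abort_conn(&fc);
	/* The locked request stays on fc.io for its unlocker (sketched
	 * after the diff below) to finish. */
	return 0;
}

Built with cc -pthread, the two unlocked requests are finished immediately while the locked one is left on the list -- the same split the kernel code makes with list_move() onto to_end.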
Diffstat (limited to 'fs')
-rw-r--r--	fs/fuse/dev.c	93
1 file changed, 38 insertions(+), 55 deletions(-)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 92c7691..fc3268b 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2075,51 +2075,6 @@ __acquires(fc->lock)
}
}
-/*
- * Abort requests under I/O
- *
- * Separate out unlocked requests, they should be finished off immediately.
- * Locked requests will be finished after unlock; see unlock_request().
- *
- * Next finish off the unlocked requests. It is possible that some request will
- * finish before we can. This is OK, the request will in that case be removed
- * from the list before we touch it.
- */
-static void end_io_requests(struct fuse_conn *fc)
-__releases(fc->lock)
-__acquires(fc->lock)
-{
- struct fuse_req *req, *next;
- LIST_HEAD(to_end);
-
- list_for_each_entry_safe(req, next, &fc->io, list) {
- req->out.h.error = -ECONNABORTED;
- spin_lock(&req->waitq.lock);
- set_bit(FR_ABORTED, &req->flags);
- if (!test_bit(FR_LOCKED, &req->flags))
- list_move(&req->list, &to_end);
- spin_unlock(&req->waitq.lock);
- }
- while (!list_empty(&to_end)) {
- req = list_first_entry(&to_end, struct fuse_req, list);
- __fuse_get_request(req);
- request_end(fc, req);
- spin_lock(&fc->lock);
- }
-}
-
-static void end_queued_requests(struct fuse_conn *fc)
-__releases(fc->lock)
-__acquires(fc->lock)
-{
- fc->max_background = UINT_MAX;
- flush_bg_queue(fc);
- end_requests(fc, &fc->pending);
- end_requests(fc, &fc->processing);
- while (forget_pending(fc))
- kfree(dequeue_forget(fc, 1, NULL));
-}
-
static void end_polls(struct fuse_conn *fc)
{
struct rb_node *p;
@@ -2138,26 +2093,54 @@ static void end_polls(struct fuse_conn *fc)
/*
* Abort all requests.
*
- * Emergency exit in case of a malicious or accidental deadlock, or
- * just a hung filesystem.
+ * Emergency exit in case of a malicious or accidental deadlock, or just a hung
+ * filesystem.
+ *
+ * The same effect is usually achievable through killing the filesystem daemon
+ * and all users of the filesystem. The exception is the combination of an
+ * asynchronous request and the tricky deadlock (see
+ * Documentation/filesystems/fuse.txt).
*
- * The same effect is usually achievable through killing the
- * filesystem daemon and all users of the filesystem. The exception
- * is the combination of an asynchronous request and the tricky
- * deadlock (see Documentation/filesystems/fuse.txt).
+ * Request progression from one list to the next is prevented by fc->connected
+ * being false.
*
- * Request progression from one list to the next is prevented by
- * fc->connected being false.
+ * Aborting requests under I/O goes as follows: 1: Separate out unlocked
+ * requests, they should be finished off immediately. Locked requests will be
+ * finished after unlock; see unlock_request(). 2: Finish off the unlocked
+ * requests. It is possible that some request will finish before we can. This
+ * is OK, the request will in that case be removed from the list before we touch
+ * it.
*/
void fuse_abort_conn(struct fuse_conn *fc)
{
spin_lock(&fc->lock);
if (fc->connected) {
+ struct fuse_req *req, *next;
+ LIST_HEAD(to_end);
+
fc->connected = 0;
fc->blocked = 0;
fuse_set_initialized(fc);
- end_io_requests(fc);
- end_queued_requests(fc);
+ list_for_each_entry_safe(req, next, &fc->io, list) {
+ req->out.h.error = -ECONNABORTED;
+ spin_lock(&req->waitq.lock);
+ set_bit(FR_ABORTED, &req->flags);
+ if (!test_bit(FR_LOCKED, &req->flags))
+ list_move(&req->list, &to_end);
+ spin_unlock(&req->waitq.lock);
+ }
+ while (!list_empty(&to_end)) {
+ req = list_first_entry(&to_end, struct fuse_req, list);
+ __fuse_get_request(req);
+ request_end(fc, req);
+ spin_lock(&fc->lock);
+ }
+ fc->max_background = UINT_MAX;
+ flush_bg_queue(fc);
+ end_requests(fc, &fc->pending);
+ end_requests(fc, &fc->processing);
+ while (forget_pending(fc))
+ kfree(dequeue_forget(fc, 1, NULL));
end_polls(fc);
wake_up_all(&fc->waitq);
wake_up_all(&fc->blocked_waitq);
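The rewritten comment block above ends with the locked-request half of the story: "Locked requests will be finished after unlock; see unlock_request()." Continuing the hypothetical userspace analogue from before the diff, the unlock side of that handoff could look roughly as follows. The real unlock_request() differs in detail (it operates on the FR_LOCKED/FR_ABORTED bits under req->waitq.lock), so this is only a sketch of the division of labor, assuming the unlocker completes any aborted request the abort pass had to leave behind:

/* Hypothetical counterpart to abort_conn() above: if the abort saw
 * this request locked, it only marked it, and the unlocker finishes
 * it once the request is no longer in use. */
static void unlock_req(struct conn *fc, struct req *req)
{
	bool end_it;

	pthread_mutex_lock(&fc->lock);
	req->locked = false;
	end_it = req->aborted;	/* abort saw us locked and left us */
	if (end_it) {
		/* Unlink req from fc->io before completing it. */
		struct req **pp = &fc->io;

		while (*pp && *pp != req)
			pp = &(*pp)->next;
		if (*pp)
			*pp = req->next;
	}
	pthread_mutex_unlock(&fc->lock);

	if (end_it)
		finish_req(req);	/* "finished after unlock" */
}

Because both sides test the flags under the same lock, each request is completed exactly once: by the abort pass if it was unlocked, by the unlocker otherwise.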