author     Maxim Patlasov <mpatlasov@parallels.com>    2013-03-21 18:02:28 +0400
committer  Miklos Szeredi <mszeredi@suse.cz>           2013-04-17 12:31:45 +0200
commit     0aada88476a33690c9569b094191ce92a38e6541 (patch)
tree       4aef3365654d4751154dc825731af721661fcef2 /fs/fuse/dev.c
parent     796523fb24028639c007f71e02ca21730f7c0af6 (diff)
fuse: skip blocking on allocations of synchronous requests
A task may have at most one synchronous request allocated. So these requests
need not be otherwise limited.

The patch re-works fuse_get_req() to follow this idea.

Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
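A minimal userspace sketch of the new admission rule (illustrative only: the name fuse_conn_sketch, the plain booleans and the printf driver are stand-ins for this page, not kernel code; in the kernel the fields live in struct fuse_conn and waiters sleep on fc->blocked_waitq, as the hunks below show. Only the fuse_block_alloc() test itself mirrors the patch):

/* Sketch of the rule this patch introduces: every allocation waits until
 * fc->initialized is set, but only background requests additionally
 * honour the fc->blocked flag. */
#include <stdbool.h>
#include <stdio.h>

struct fuse_conn_sketch {
	bool initialized;	/* connection has finished initialization */
	bool blocked;		/* background-request throttle is active */
};

static bool fuse_block_alloc(struct fuse_conn_sketch *fc, bool for_background)
{
	/* Same condition as the helper added in the first hunk below. */
	return !fc->initialized || (for_background && fc->blocked);
}

int main(void)
{
	/* Connection is initialized but currently throttling background I/O. */
	struct fuse_conn_sketch fc = { .initialized = true, .blocked = true };

	printf("sync alloc blocks:       %d\n", fuse_block_alloc(&fc, false)); /* 0 */
	printf("background alloc blocks: %d\n", fuse_block_alloc(&fc, true));  /* 1 */
	return 0;
}

Before the patch, both calls would have waited on fc->blocked; with the patch only the background allocation does.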
Diffstat (limited to 'fs/fuse/dev.c')
-rw-r--r--  fs/fuse/dev.c  30
1 file changed, 20 insertions, 10 deletions
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 39c2fe3..d692b85 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -130,21 +130,30 @@ static void fuse_req_init_context(struct fuse_req *req)
 	req->in.h.pid = current->pid;
 }
 
+static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
+{
+	return !fc->initialized || (for_background && fc->blocked);
+}
+
 static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
 				       bool for_background)
 {
 	struct fuse_req *req;
-	sigset_t oldset;
-	int intr;
 	int err;
-
 	atomic_inc(&fc->num_waiting);
-	block_sigs(&oldset);
-	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
-	restore_sigs(&oldset);
-	err = -EINTR;
-	if (intr)
-		goto out;
+
+	if (fuse_block_alloc(fc, for_background)) {
+		sigset_t oldset;
+		int intr;
+
+		block_sigs(&oldset);
+		intr = wait_event_interruptible(fc->blocked_waitq,
+				!fuse_block_alloc(fc, for_background));
+		restore_sigs(&oldset);
+		err = -EINTR;
+		if (intr)
+			goto out;
+	}
 
 	err = -ENOTCONN;
 	if (!fc->connected)
@@ -239,7 +248,7 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
 	struct fuse_req *req;
 
 	atomic_inc(&fc->num_waiting);
-	wait_event(fc->blocked_waitq, !fc->blocked);
+	wait_event(fc->blocked_waitq, fc->initialized);
 	req = fuse_request_alloc(0);
 	if (!req)
 		req = get_reserved_req(fc, file);
@@ -2106,6 +2115,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
 		spin_lock(&fc->lock);
 		fc->connected = 0;
 		fc->blocked = 0;
+		fc->initialized = 1;
 		end_queued_requests(fc);
 		end_polls(fc);
 		wake_up_all(&fc->blocked_waitq);