author | Miklos Szeredi <mszeredi@suse.cz> | 2015-07-01 16:25:58 +0200
---|---|---
committer | Miklos Szeredi <mszeredi@suse.cz> | 2015-07-01 16:25:58 +0200
commit | 825d6d3395e88a616e4c953984d77eeacbad4310 (patch) |
tree | 1a8644c118994d7d7a743665cb657fd21a450443 | /fs/fuse/dev.c
parent | 0d8e84b0432beb6d11a1c82deeb9dc1a7bee02c0 (diff) |
download | op-kernel-dev-825d6d3395e88a616e4c953984d77eeacbad4310.zip, op-kernel-dev-825d6d3395e88a616e4c953984d77eeacbad4310.tar.gz |
fuse: req use bitops
Finer-grained locking will mean there's no single lock to protect
modification of bitfields in fuse_req.
So move to using bitops. The non-atomic variants can be used for changes
that happen while the request definitely has only one reference.
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Reviewed-by: Ashish Samant <ashish.samant@oracle.com>
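The patch replaces per-request C bitfields with a single flags word driven by the kernel's bitops helpers: the atomic set_bit()/clear_bit()/test_bit() on paths where several contexts can race on the word, and the cheaper non-atomic __set_bit()/__clear_bit() where the caller holds the request's only reference. Below is a minimal userspace sketch of that pattern, illustrative only; the helper names and the reduced flag set are stand-ins, not the kernel API.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Bit numbers mirroring a few of the FR_* flags used in the diff. */
enum req_flag { FR_ISREPLY, FR_BACKGROUND, FR_WAITING, FR_ABORTED };

struct req {
	_Atomic unsigned long flags;	/* one word instead of several bitfields */
};

/* Atomic helpers: needed when other contexts may touch the same word. */
static void flag_set(struct req *r, int nr)
{
	atomic_fetch_or(&r->flags, 1UL << nr);
}

static void flag_clear(struct req *r, int nr)
{
	atomic_fetch_and(&r->flags, ~(1UL << nr));
}

static bool flag_test(struct req *r, int nr)
{
	return atomic_load(&r->flags) & (1UL << nr);
}

/* Non-atomic variant: only valid while the caller holds the sole reference,
 * e.g. right after allocation, so a plain read-modify-write cannot race. */
static void flag_set_nonatomic(struct req *r, int nr)
{
	unsigned long v = atomic_load_explicit(&r->flags, memory_order_relaxed);

	atomic_store_explicit(&r->flags, v | (1UL << nr), memory_order_relaxed);
}

int main(void)
{
	struct req r = { .flags = 0 };

	flag_set_nonatomic(&r, FR_WAITING);	/* request just allocated, sole owner */
	flag_set(&r, FR_BACKGROUND);		/* may race with the request's users */

	printf("waiting=%d background=%d aborted=%d\n",
	       flag_test(&r, FR_WAITING),
	       flag_test(&r, FR_BACKGROUND),
	       flag_test(&r, FR_ABORTED));

	flag_clear(&r, FR_BACKGROUND);		/* e.g. when the request completes */
	printf("background after clear=%d\n", flag_test(&r, FR_BACKGROUND));
	return 0;
}
```

The kernel's own helpers live in <linux/bitops.h>; the sketch only illustrates why a plain read-modify-write is acceptable while nobody else can observe the request, whereas racy paths need the atomic form.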
Diffstat (limited to 'fs/fuse/dev.c')
-rw-r--r-- | fs/fuse/dev.c | 71 |
1 file changed, 36 insertions(+), 35 deletions(-)
```diff
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 3b979ab..dcfef547 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -181,8 +181,10 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
 	}
 
 	fuse_req_init_context(req);
-	req->waiting = 1;
-	req->background = for_background;
+	__set_bit(FR_WAITING, &req->flags);
+	if (for_background)
+		__set_bit(FR_BACKGROUND, &req->flags);
+
 	return req;
 
  out:
@@ -272,15 +274,15 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
 	req = get_reserved_req(fc, file);
 
 	fuse_req_init_context(req);
-	req->waiting = 1;
-	req->background = 0;
+	__set_bit(FR_WAITING, &req->flags);
+	__clear_bit(FR_BACKGROUND, &req->flags);
 	return req;
 }
 
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (atomic_dec_and_test(&req->count)) {
-		if (unlikely(req->background)) {
+		if (test_bit(FR_BACKGROUND, &req->flags)) {
 			/*
 			 * We get here in the unlikely case that a background
 			 * request was allocated but not sent
@@ -291,9 +293,9 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 			spin_unlock(&fc->lock);
 		}
 
-		if (req->waiting) {
+		if (test_bit(FR_WAITING, &req->flags)) {
+			__clear_bit(FR_WAITING, &req->flags);
 			atomic_dec(&fc->num_waiting);
-			req->waiting = 0;
 		}
 
 		if (req->stolen_file)
@@ -385,9 +387,8 @@ __releases(fc->lock)
 	list_del_init(&req->list);
 	list_del_init(&req->intr_entry);
 	req->state = FUSE_REQ_FINISHED;
-	if (req->background) {
-		req->background = 0;
-
+	if (test_bit(FR_BACKGROUND, &req->flags)) {
+		clear_bit(FR_BACKGROUND, &req->flags);
 		if (fc->num_background == fc->max_background)
 			fc->blocked = 0;
 
@@ -442,12 +443,12 @@ __acquires(fc->lock)
 		if (req->state == FUSE_REQ_FINISHED)
 			return;
 
-		req->interrupted = 1;
+		set_bit(FR_INTERRUPTED, &req->flags);
 		if (req->state == FUSE_REQ_SENT)
 			queue_interrupt(fc, req);
 	}
 
-	if (!req->force) {
+	if (!test_bit(FR_FORCE, &req->flags)) {
 		sigset_t oldset;
 
 		/* Only fatal signals may interrupt this */
@@ -478,7 +479,7 @@ __acquires(fc->lock)
 
 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
-	BUG_ON(req->background);
+	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
 	spin_lock(&fc->lock);
 	if (!fc->connected)
 		req->out.h.error = -ENOTCONN;
@@ -496,9 +497,9 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 
 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
-	req->isreply = 1;
-	if (!req->waiting) {
-		req->waiting = 1;
+	__set_bit(FR_ISREPLY, &req->flags);
+	if (!test_bit(FR_WAITING, &req->flags)) {
+		__set_bit(FR_WAITING, &req->flags);
 		atomic_inc(&fc->num_waiting);
 	}
 	__fuse_request_send(fc, req);
@@ -578,12 +579,12 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
 void fuse_request_send_background_locked(struct fuse_conn *fc,
 					 struct fuse_req *req)
 {
-	BUG_ON(!req->background);
-	if (!req->waiting) {
-		req->waiting = 1;
+	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
+	if (!test_bit(FR_WAITING, &req->flags)) {
+		__set_bit(FR_WAITING, &req->flags);
 		atomic_inc(&fc->num_waiting);
 	}
-	req->isreply = 1;
+	__set_bit(FR_ISREPLY, &req->flags);
 	fc->num_background++;
 	if (fc->num_background == fc->max_background)
 		fc->blocked = 1;
@@ -617,7 +618,7 @@ static int fuse_request_send_notify_reply(struct fuse_conn *fc,
 {
 	int err = -ENODEV;
 
-	req->isreply = 0;
+	__clear_bit(FR_ISREPLY, &req->flags);
 	req->in.h.unique = unique;
 	spin_lock(&fc->lock);
 	if (fc->connected) {
@@ -644,7 +645,7 @@ void fuse_force_forget(struct file *file, u64 nodeid)
 	req->in.numargs = 1;
 	req->in.args[0].size = sizeof(inarg);
 	req->in.args[0].value = &inarg;
-	req->isreply = 0;
+	__clear_bit(FR_ISREPLY, &req->flags);
 	__fuse_request_send(fc, req);
 	/* ignore errors */
 	fuse_put_request(fc, req);
@@ -660,10 +661,10 @@ static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
 	int err = 0;
 	if (req) {
 		spin_lock(&fc->lock);
-		if (req->aborted)
+		if (test_bit(FR_ABORTED, &req->flags))
 			err = -ENOENT;
 		else
-			req->locked = 1;
+			set_bit(FR_LOCKED, &req->flags);
 		spin_unlock(&fc->lock);
 	}
 	return err;
@@ -678,10 +679,10 @@ static int unlock_request(struct fuse_conn *fc, struct fuse_req *req)
 	int err = 0;
 	if (req) {
 		spin_lock(&fc->lock);
-		if (req->aborted)
+		if (test_bit(FR_ABORTED, &req->flags))
 			err = -ENOENT;
 		else
-			req->locked = 0;
+			clear_bit(FR_LOCKED, &req->flags);
 		spin_unlock(&fc->lock);
 	}
 	return err;
@@ -902,7 +903,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 
 	err = 0;
 	spin_lock(&cs->fc->lock);
-	if (cs->req->aborted)
+	if (test_bit(FR_ABORTED, &cs->req->flags))
 		err = -ENOENT;
 	else
 		*pagep = newpage;
@@ -1309,7 +1310,7 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 			   (struct fuse_arg *) in->args, 0);
 	fuse_copy_finish(cs);
 	spin_lock(&fc->lock);
-	req->locked = 0;
+	clear_bit(FR_LOCKED, &req->flags);
 	if (!fc->connected) {
 		request_end(fc, req);
 		return -ENODEV;
@@ -1319,12 +1320,12 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 		request_end(fc, req);
 		return err;
 	}
-	if (!req->isreply)
+	if (!test_bit(FR_ISREPLY, &req->flags)) {
 		request_end(fc, req);
-	else {
+	} else {
 		req->state = FUSE_REQ_SENT;
 		list_move_tail(&req->list, &fc->processing);
-		if (req->interrupted)
+		if (test_bit(FR_INTERRUPTED, &req->flags))
 			queue_interrupt(fc, req);
 		spin_unlock(&fc->lock);
 	}
@@ -1921,7 +1922,7 @@ static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
 	req->state = FUSE_REQ_WRITING;
 	list_move(&req->list, &fc->io);
 	req->out.h = oh;
-	req->locked = 1;
+	set_bit(FR_LOCKED, &req->flags);
 	cs->req = req;
 	if (!req->out.page_replace)
 		cs->move_pages = 0;
@@ -1931,7 +1932,7 @@ static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
 	fuse_copy_finish(cs);
 
 	spin_lock(&fc->lock);
-	req->locked = 0;
+	clear_bit(FR_LOCKED, &req->flags);
 	if (!fc->connected)
 		err = -ENOENT;
 	else if (err)
@@ -2097,8 +2098,8 @@ __acquires(fc->lock)
 
 	list_for_each_entry_safe(req, next, &fc->io, list) {
 		req->out.h.error = -ECONNABORTED;
-		req->aborted = 1;
-		if (!req->locked)
+		set_bit(FR_ABORTED, &req->flags);
+		if (!test_bit(FR_LOCKED, &req->flags))
 			list_move(&req->list, &to_end);
 	}
 	while (!list_empty(&to_end)) {
```
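The diffstat above is filtered to fs/fuse/dev.c, so the definition of the FR_* bits does not appear in this view; it sits alongside struct fuse_req in fs/fuse/fuse_i.h. A rough sketch of that companion change, reconstructed only from the bit names used in the diff (ordering and comments are assumptions, not the verbatim patch):

```c
/* Sketch of the fs/fuse/fuse_i.h side of the change (illustrative only). */

/* Bit numbers for the request flags word; names taken from the dev.c diff. */
enum fuse_req_flag {
	FR_ISREPLY,
	FR_FORCE,
	FR_BACKGROUND,
	FR_WAITING,
	FR_ABORTED,
	FR_INTERRUPTED,
	FR_LOCKED,
};

struct fuse_req {
	/* ... other members unchanged ... */

	/* Replaces the old isreply:1, force:1, background:1, waiting:1,
	 * aborted:1, interrupted:1 and locked:1 bitfields; updated with
	 * test_bit()/set_bit()/clear_bit() and their non-atomic variants. */
	unsigned long flags;
};
```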