summaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorJ. Bruce Fields <bfields@redhat.com>2010-08-26 13:19:52 -0400
committerJ. Bruce Fields <bfields@redhat.com>2010-09-07 20:19:12 -0400
commit6610f720e9e8103c22d1f1ccf8fbb695550a571f (patch)
tree6177d8f5e15ce007f4a3b29c16f31d97f3b58c59 /net
parentf16b6e8d838b2e2bb4561201311c66ac02ad67df (diff)
downloadop-kernel-dev-6610f720e9e8103c22d1f1ccf8fbb695550a571f.zip
op-kernel-dev-6610f720e9e8103c22d1f1ccf8fbb695550a571f.tar.gz
svcrpc: minor cache cleanup
Pull out some code into helper functions, fix a typo.

Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'net')
-rw-r--r--net/sunrpc/cache.c44
-rw-r--r--net/sunrpc/svc_xprt.c2
2 files changed, 25 insertions(+), 21 deletions(-)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2c5297f..18e5e8e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -520,10 +520,26 @@ static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
complete(&dr->completion);
}
+static void __unhash_deferred_req(struct cache_deferred_req *dreq)
+{
+ list_del_init(&dreq->recent);
+ list_del_init(&dreq->hash);
+ cache_defer_cnt--;
+}
+
+static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
+{
+ int hash = DFR_HASH(item);
+
+ list_add(&dreq->recent, &cache_defer_list);
+ if (cache_defer_hash[hash].next == NULL)
+ INIT_LIST_HEAD(&cache_defer_hash[hash]);
+ list_add(&dreq->hash, &cache_defer_hash[hash]);
+}
+
static int cache_defer_req(struct cache_req *req, struct cache_head *item)
{
struct cache_deferred_req *dreq, *discard;
- int hash = DFR_HASH(item);
struct thread_deferred_req sleeper;
if (cache_defer_cnt >= DFR_MAX) {
@@ -549,20 +565,14 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
spin_lock(&cache_defer_lock);
- list_add(&dreq->recent, &cache_defer_list);
-
- if (cache_defer_hash[hash].next == NULL)
- INIT_LIST_HEAD(&cache_defer_hash[hash]);
- list_add(&dreq->hash, &cache_defer_hash[hash]);
+ __hash_deferred_req(dreq, item);
/* it is in, now maybe clean up */
discard = NULL;
if (++cache_defer_cnt > DFR_MAX) {
discard = list_entry(cache_defer_list.prev,
struct cache_deferred_req, recent);
- list_del_init(&discard->recent);
- list_del_init(&discard->hash);
- cache_defer_cnt--;
+ __unhash_deferred_req(discard);
}
spin_unlock(&cache_defer_lock);
@@ -584,9 +594,7 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
*/
spin_lock(&cache_defer_lock);
if (!list_empty(&sleeper.handle.hash)) {
- list_del_init(&sleeper.handle.recent);
- list_del_init(&sleeper.handle.hash);
- cache_defer_cnt--;
+ __unhash_deferred_req(&sleeper.handle);
spin_unlock(&cache_defer_lock);
} else {
/* cache_revisit_request already removed
@@ -632,9 +640,8 @@ static void cache_revisit_request(struct cache_head *item)
dreq = list_entry(lp, struct cache_deferred_req, hash);
lp = lp->next;
if (dreq->item == item) {
- list_del_init(&dreq->hash);
- list_move(&dreq->recent, &pending);
- cache_defer_cnt--;
+ __unhash_deferred_req(dreq);
+ list_add(&dreq->recent, &pending);
}
}
}
@@ -657,11 +664,8 @@ void cache_clean_deferred(void *owner)
spin_lock(&cache_defer_lock);
list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
- if (dreq->owner == owner) {
- list_del_init(&dreq->hash);
- list_move(&dreq->recent, &pending);
- cache_defer_cnt--;
- }
+ if (dreq->owner == owner)
+ __unhash_deferred_req(dreq);
}
spin_unlock(&cache_defer_lock);
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 8ff6840..95fc3e8 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -665,7 +665,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
/* As there is a shortage of threads and this request
- * had to be queue, don't allow the thread to wait so
+ * had to be queued, don't allow the thread to wait so
* long for cache updates.
*/
rqstp->rq_chandle.thread_wait = 1*HZ;
OpenPOWER on IntegriCloud