author		Trond Myklebust <Trond.Myklebust@netapp.com>	2007-07-14 15:40:00 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2008-01-30 02:05:30 -0500
commit		3ff7576ddac06c3d07089e241b40826d24bbf1ac (patch)
tree		7f5998434b7d0dd1f08ff7ee65c93fab4861a6d3 /include
parent		c970aa85e71bd581726c42df843f6f129db275ac (diff)
SUNRPC: Clean up the initialisation of priority queue scheduling info.
We want the default scheduling priority (priority == 0) to remain RPC_PRIORITY_NORMAL.

Also ensure that the priority wait queue scheduling is per process id instead of sometimes being per thread and sometimes being per inode.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'include')
-rw-r--r--	include/linux/sunrpc/sched.h	17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index d974421..c9444fd 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -56,8 +56,6 @@ struct rpc_task {
__u8 tk_garb_retry;
__u8 tk_cred_retry;
- unsigned long tk_cookie; /* Cookie for batching tasks */
-
/*
* timeout_fn to be executed by timer bottom half
* callback to be executed after waking up
@@ -78,7 +76,6 @@ struct rpc_task {
struct timer_list tk_timer; /* kernel timer */
unsigned long tk_timeout; /* timeout for rpc_sleep() */
unsigned short tk_flags; /* misc flags */
- unsigned char tk_priority : 2;/* Task priority */
unsigned long tk_runstate; /* Task run status */
struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
* be any workqueue
@@ -94,6 +91,9 @@ struct rpc_task {
unsigned long tk_start; /* RPC task init timestamp */
long tk_rtt; /* round-trip time (jiffies) */
+ pid_t tk_owner; /* Process id for batching tasks */
+ unsigned char tk_priority : 2;/* Task priority */
+
#ifdef RPC_DEBUG
unsigned short tk_pid; /* debugging aid */
#endif
@@ -123,6 +123,7 @@ struct rpc_task_setup {
const struct rpc_call_ops *callback_ops;
void *callback_data;
unsigned short flags;
+ signed char priority;
};
/*
@@ -187,10 +188,10 @@ struct rpc_task_setup {
* Note: if you change these, you must also change
* the task initialization definitions below.
*/
-#define RPC_PRIORITY_LOW 0
-#define RPC_PRIORITY_NORMAL 1
-#define RPC_PRIORITY_HIGH 2
-#define RPC_NR_PRIORITY (RPC_PRIORITY_HIGH+1)
+#define RPC_PRIORITY_LOW (-1)
+#define RPC_PRIORITY_NORMAL (0)
+#define RPC_PRIORITY_HIGH (1)
+#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_HIGH - RPC_PRIORITY_LOW)
/*
* RPC synchronization objects
@@ -198,7 +199,7 @@ struct rpc_task_setup {
struct rpc_wait_queue {
spinlock_t lock;
struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
- unsigned long cookie; /* cookie of last task serviced */
+ pid_t owner; /* process id of last task serviced */
unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
unsigned char priority; /* current priority */
unsigned char count; /* # task groups remaining serviced so far */
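
The macro change above makes RPC_PRIORITY_NORMAL the zero value, so a zero-filled struct rpc_task_setup (an unset 'priority' field) now defaults to normal priority while RPC_NR_PRIORITY still evaluates to 3. The userspace sketch below only illustrates that arithmetic and how a signed setup priority would fold back into an index for the tasks[] array; prio_to_index() is a hypothetical helper for illustration, and the actual conversion presumably lives in net/sunrpc/sched.c, which this 'include'-only view does not show.

/*
 * Illustrative sketch only -- not part of the patch. It copies the new
 * macro values from the hunk above to show the resulting arithmetic.
 * prio_to_index() is a hypothetical helper, not a kernel function.
 */
#include <stdio.h>

#define RPC_PRIORITY_LOW	(-1)
#define RPC_PRIORITY_NORMAL	(0)
#define RPC_PRIORITY_HIGH	(1)
#define RPC_NR_PRIORITY		(1 + RPC_PRIORITY_HIGH - RPC_PRIORITY_LOW)	/* == 3 */

/* Offset a signed priority by RPC_PRIORITY_LOW to index tasks[0..2]. */
static int prio_to_index(signed char priority)
{
	return priority - RPC_PRIORITY_LOW;
}

int main(void)
{
	signed char unset = 0;	/* a zero-filled rpc_task_setup now means NORMAL */

	printf("RPC_NR_PRIORITY = %d\n", RPC_NR_PRIORITY);			/* 3 */
	printf("unset/NORMAL -> tasks[%d]\n", prio_to_index(unset));		/* 1 */
	printf("LOW          -> tasks[%d]\n", prio_to_index(RPC_PRIORITY_LOW));	/* 0 */
	printf("HIGH         -> tasks[%d]\n", prio_to_index(RPC_PRIORITY_HIGH));	/* 2 */
	return 0;
}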