author    David Howells <dhowells@redhat.com>  2006-11-22 14:54:01 +0000
committer David Howells <dhowells@redhat.com>  2006-11-22 14:54:01 +0000
commit    52bad64d95bd89e08c49ec5a071fa6dcbe5a1a9c (patch)
tree      5849b4e3c17daa70a7e81cfdeaddac9ac8a0e953 /include/linux/workqueue.h
parent    0f9005a6f7a82f4aacbd72f7b92322a8ca1c3f97 (diff)
WorkStruct: Separate delayable and non-delayable events.
Separate delayable work items from non-delayable work items by splitting them into a separate structure (delayed_work), which incorporates a work_struct and the timer_list removed from work_struct.

The work_struct struct is huge, and this limits its usefulness. On a 64-bit architecture it's nearly 100 bytes in size. This reduces that by half for the non-delayable type of event.

Signed-off-by: David Howells <dhowells@redhat.com>
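The caller-visible effect of the split is easiest to see in a small usage sketch. The following is illustrative only: the names my_handler, my_work, my_dwork and kick_things_off() are hypothetical, while the macros and queueing functions are the ones declared in the patched workqueue.h below.

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    /* At this point in history, work functions still take an opaque data pointer. */
    static void my_handler(void *data)
    {
            /* ... process the event ... */
    }

    /* A non-delayable item only needs the slimmed-down work_struct... */
    static DECLARE_WORK(my_work, my_handler, NULL);

    /* ...whereas anything queued with a delay uses delayed_work, which carries
     * the timer_list that was removed from work_struct. */
    static DECLARE_DELAYED_WORK(my_dwork, my_handler, NULL);

    static void kick_things_off(void)   /* hypothetical caller */
    {
            schedule_work(&my_work);              /* run as soon as keventd gets to it */
            schedule_delayed_work(&my_dwork, HZ); /* run roughly one second from now */
    }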
Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--   include/linux/workqueue.h   44
1 file changed, 32 insertions, 12 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 9bca353..9faacca 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -17,6 +17,10 @@ struct work_struct {
void (*func)(void *);
void *data;
void *wq_data;
+};
+
+struct delayed_work {
+ struct work_struct work;
struct timer_list timer;
};
@@ -28,32 +32,48 @@ struct execute_work {
.entry = { &(n).entry, &(n).entry }, \
.func = (f), \
.data = (d), \
+ }
+
+#define __DELAYED_WORK_INITIALIZER(n, f, d) { \
+ .work = __WORK_INITIALIZER((n).work, (f), (d)), \
.timer = TIMER_INITIALIZER(NULL, 0, 0), \
}
#define DECLARE_WORK(n, f, d) \
struct work_struct n = __WORK_INITIALIZER(n, f, d)
+#define DECLARE_DELAYED_WORK(n, f, d) \
+ struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, d)
+
/*
- * initialize a work-struct's func and data pointers:
+ * initialize a work item's function and data pointers
*/
#define PREPARE_WORK(_work, _func, _data) \
do { \
- (_work)->func = _func; \
- (_work)->data = _data; \
+ (_work)->func = (_func); \
+ (_work)->data = (_data); \
} while (0)
+#define PREPARE_DELAYED_WORK(_work, _func, _data) \
+ PREPARE_WORK(&(_work)->work, (_func), (_data))
+
/*
- * initialize all of a work-struct:
+ * initialize all of a work item in one go
*/
#define INIT_WORK(_work, _func, _data) \
do { \
INIT_LIST_HEAD(&(_work)->entry); \
(_work)->pending = 0; \
PREPARE_WORK((_work), (_func), (_data)); \
+ } while (0)
+
+#define INIT_DELAYED_WORK(_work, _func, _data) \
+ do { \
+ INIT_WORK(&(_work)->work, (_func), (_data)); \
init_timer(&(_work)->timer); \
} while (0)
+
extern struct workqueue_struct *__create_workqueue(const char *name,
int singlethread);
#define create_workqueue(name) __create_workqueue((name), 0)
@@ -62,24 +82,24 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
extern void destroy_workqueue(struct workqueue_struct *wq);
extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay));
+extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
- struct work_struct *work, unsigned long delay);
+ struct delayed_work *work, unsigned long delay);
extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
+extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
-extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
extern void flush_scheduled_work(void);
extern int current_is_keventd(void);
extern int keventd_up(void);
extern void init_workqueues(void);
-void cancel_rearming_delayed_work(struct work_struct *work);
+void cancel_rearming_delayed_work(struct delayed_work *work);
void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
- struct work_struct *);
+ struct delayed_work *);
int execute_in_process_context(void (*fn)(void *), void *,
struct execute_work *);
@@ -88,13 +108,13 @@ int execute_in_process_context(void (*fn)(void *), void *,
* function may still be running on return from cancel_delayed_work(). Run
* flush_scheduled_work() to wait on it.
*/
-static inline int cancel_delayed_work(struct work_struct *work)
+static inline int cancel_delayed_work(struct delayed_work *work)
{
int ret;
ret = del_timer_sync(&work->timer);
if (ret)
- clear_bit(0, &work->pending);
+ clear_bit(0, &work->work.pending);
return ret;
}
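As a caller-side illustration of the signature changes above, cancelling and re-arming a pending item now goes through the delayed_work wrapper rather than the bare work_struct. This is a sketch only; my_dwork is the hypothetical item from the earlier example, and the cancellation pattern follows the comment in this header (the handler may still be running when cancel_delayed_work() returns, so flush before re-queueing):

    /* Try to kill the pending timer; if it had already fired, the handler may
     * still be running, so wait for it before queueing the item again. */
    if (!cancel_delayed_work(&my_dwork))
            flush_scheduled_work();
    schedule_delayed_work(&my_dwork, HZ);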