author    David S. Miller <davem@davemloft.net>  2010-09-09 21:59:51 -0700
committer David S. Miller <davem@davemloft.net>  2010-09-09 21:59:51 -0700
commit    053d8f6622701f849fda2ca2c9ae596c13599ba9 (patch)
tree      e5dd90cca3a69bc993b5aa860a9eeb8c9178450a
parent    c9cedbba0fc591e1c0587f838932ca3f3c6fec57 (diff)
parent    615cc2211c17ed05a2a5d94abdac6c340a8ea508 (diff)
Merge branch 'vhost-net' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
-rw-r--r--  drivers/vhost/vhost.c   | 80
-rw-r--r--  include/linux/cgroup.h  | 11
-rw-r--r--  kernel/cgroup.c         |  9
3 files changed, 73 insertions(+), 27 deletions(-)
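
In short: the vhost side decouples the vhost_work machinery from vhost_poll so a work item can be initialized, queued and flushed on its own, then uses that to attach the worker kthread to the owner's cgroups from inside the worker itself; the cgroup side generalizes cgroup_attach_task_current_cg() into cgroup_attach_task_all(from, tsk), keeping the old name as an inline wrapper.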
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 4b99117..c579dcc 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -60,22 +60,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
return 0;
}
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+ INIT_LIST_HEAD(&work->node);
+ work->fn = fn;
+ init_waitqueue_head(&work->done);
+ work->flushing = 0;
+ work->queue_seq = work->done_seq = 0;
+}
+
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
unsigned long mask, struct vhost_dev *dev)
{
- struct vhost_work *work = &poll->work;
-
init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
init_poll_funcptr(&poll->table, vhost_poll_func);
poll->mask = mask;
poll->dev = dev;
- INIT_LIST_HEAD(&work->node);
- work->fn = fn;
- init_waitqueue_head(&work->done);
- work->flushing = 0;
- work->queue_seq = work->done_seq = 0;
+ vhost_work_init(&poll->work, fn);
}
/* Start polling a file. We add ourselves to file's wait queue. The caller must
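
The vhost_work initialization moves out of vhost_poll_init() into a standalone vhost_work_init() helper, so a work item no longer has to be embedded in a vhost_poll; vhost_attach_cgroups() further down depends on exactly that.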
@@ -95,35 +98,38 @@ void vhost_poll_stop(struct vhost_poll *poll)
remove_wait_queue(poll->wqh, &poll->wait);
}
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
- struct vhost_work *work = &poll->work;
unsigned seq;
int left;
int flushing;
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
seq = work->queue_seq;
work->flushing++;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
wait_event(work->done, ({
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
left = seq - work->done_seq <= 0;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
left;
}));
- spin_lock_irq(&poll->dev->work_lock);
+ spin_lock_irq(&dev->work_lock);
flushing = --work->flushing;
- spin_unlock_irq(&poll->dev->work_lock);
+ spin_unlock_irq(&dev->work_lock);
BUG_ON(flushing < 0);
}
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+ vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+ struct vhost_work *work)
{
- struct vhost_dev *dev = poll->dev;
- struct vhost_work *work = &poll->work;
unsigned long flags;
spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +141,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
spin_unlock_irqrestore(&dev->work_lock, flags);
}
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+ vhost_work_queue(poll->dev, &poll->work);
+}
+
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
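
The same split is applied to flushing and queueing: vhost_poll_flush() and vhost_poll_queue() become thin wrappers around vhost_work_flush() and vhost_work_queue(), which operate on a bare (dev, work) pair. The flush protocol is sequence-based: the flusher snapshots queue_seq and bumps work->flushing under work_lock, then sleeps in wait_event() until done_seq has caught up to the snapshot; the worker thread (not visible in these hunks) is what advances done_seq and wakes work->done. A minimal sketch of the resulting lifecycle (hypothetical code; it would have to live in drivers/vhost/vhost.c, since vhost_work_queue() and vhost_work_flush() are static there):

    static void hello_work_fn(struct vhost_work *work)
    {
            /* runs in the vhost worker kthread, not in the caller */
            pr_info("hello from the vhost worker\n");
    }

    static void run_on_worker(struct vhost_dev *dev)
    {
            struct vhost_work work;        /* on-stack is safe: we flush before returning */

            vhost_work_init(&work, hello_work_fn);
            vhost_work_queue(dev, &work);  /* bump queue_seq and wake the worker */
            vhost_work_flush(dev, &work);  /* sleep until done_seq reaches the snapshot */
    }

This is precisely the pattern vhost_attach_cgroups() uses below.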
@@ -236,6 +247,29 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
return dev->mm == current->mm ? 0 : -EPERM;
}
+struct vhost_attach_cgroups_struct {
+ struct vhost_work work;
+ struct task_struct *owner;
+ int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+ struct vhost_attach_cgroups_struct *s;
+ s = container_of(work, struct vhost_attach_cgroups_struct, work);
+ s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+ struct vhost_attach_cgroups_struct attach;
+ attach.owner = current;
+ vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+ vhost_work_queue(dev, &attach.work);
+ vhost_work_flush(dev, &attach.work);
+ return attach.ret;
+}
+
/* Caller should have device mutex */
static long vhost_dev_set_owner(struct vhost_dev *dev)
{
@@ -255,14 +289,16 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
}
dev->worker = worker;
- err = cgroup_attach_task_current_cg(worker);
+ wake_up_process(worker); /* avoid contributing to loadavg */
+
+ err = vhost_attach_cgroups(dev);
if (err)
goto err_cgroup;
- wake_up_process(worker); /* avoid contributing to loadavg */
return 0;
err_cgroup:
kthread_stop(worker);
+ dev->worker = NULL;
err_worker:
if (dev->mm)
mmput(dev->mm);
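
Two ordering changes ride along in vhost_dev_set_owner(): wake_up_process() moves ahead of the cgroup attachment, since the worker must actually be running to process the queued attach work, and the error path now clears dev->worker after kthread_stop() so a failed setup does not leave a dangling pointer behind.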
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed3e92e..5a53d8f 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -578,7 +578,11 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);
-int cgroup_attach_task_current_cg(struct task_struct *);
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
+{
+ return cgroup_attach_task_all(current, tsk);
+}
/*
* CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
@@ -636,6 +640,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
}
/* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+ struct task_struct *t)
+{
+ return 0;
+}
static inline int cgroup_attach_task_current_cg(struct task_struct *t)
{
return 0;
}
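
On the cgroup side, cgroup_attach_task_all(from, tsk) generalizes the old single-purpose call: cgroup_attach_task_current_cg(tsk) survives as a static inline wrapper that passes current, and the !CONFIG_CGROUPS stubs simply return 0. A minimal sketch of a caller (a hypothetical helper; any kthread creator that wants its thread confined like itself could do this, here mirroring vhost's start-then-attach ordering):

    #include <linux/cgroup.h>
    #include <linux/err.h>
    #include <linux/kthread.h>

    static int start_confined_helper(int (*fn)(void *), void *data)
    {
            struct task_struct *t = kthread_create(fn, data, "confined-helper");
            int err;

            if (IS_ERR(t))
                    return PTR_ERR(t);
            wake_up_process(t);            /* start it first, as vhost now does */
            /* place the new thread in every cgroup the caller belongs to */
            err = cgroup_attach_task_all(current, t);
            if (err)
                    kthread_stop(t);
            return err;
    }

vhost itself takes the indirect route seen above: it wakes the worker and then performs the attachment from within the worker via the queued work item.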
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 192f88c..ed19afd 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1791,10 +1791,11 @@ out:
}
/**
- * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
+ * @from: attach to all cgroups of a given task
* @tsk: the task to be attached
*/
-int cgroup_attach_task_current_cg(struct task_struct *tsk)
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
struct cgroupfs_root *root;
struct cgroup *cur_cg;
@@ -1802,7 +1803,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
cgroup_lock();
for_each_active_root(root) {
- cur_cg = task_cgroup_from_root(current, root);
+ cur_cg = task_cgroup_from_root(from, root);
retval = cgroup_attach_task(cur_cg, tsk);
if (retval)
break;
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
return retval;
}
-EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
/*
* Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
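
The implementation change in kernel/cgroup.c is mechanical: under cgroup_lock(), the function walks every active hierarchy root, looks up from's cgroup in that root rather than current's, and attaches tsk there, bailing out on the first failure; the GPL export moves to the new symbol, with the old entry point now served by the inline wrapper in the header.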