author     Linus Torvalds <torvalds@linux-foundation.org>  2018-04-03 19:15:32 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-04-03 19:15:32 -0700
commit     17dec0a949153d9ac00760ba2f5b78cb583e995f (patch)
tree       aaf44fee3aca81a86ecfa46f3f409d5cf5675f1f /ipc
parent     d92cd810e64aa7cf22b05f0ea1c7d3e8dbae75fe (diff)
parent     2236d4d39035b9839944603ec4b65ce71180a9ea (diff)
Merge branch 'userns-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace
Pull namespace updates from Eric Biederman:
 "There was a lot of work this cycle fixing bugs that were discovered after
  the merge window and getting everything ready so that we can reasonably
  support fully unprivileged fuse. The bug fixes you already have, and much
  of the unprivileged fuse work is coming in via other trees.

  Still left for fully unprivileged fuse is figuring out how to cleanly
  handle .set_acl and .get_acl in the legacy case, and proper handling of
  evm xattrs on unprivileged mounts.

  Included in the tree is a cleanup from Alexey that replaces a linked list
  with a statically allocated fixed-size array for the pid caches, which
  simplifies and speeds things up.

  Then there are some cleanups and fixes for the ipc namespace. The
  motivation was that, in reviewing other code, it was discovered that
  accessing ipc objects from different pid namespaces recorded pids in such
  a way that the wrong pids were returned when queried. In the worst case
  there has been a measured 30% performance impact for sysvipc semaphores;
  other test cases showed no measurable performance impact. Manfred Spraul
  and Davidlohr Bueso, who tend to work on sysvipc performance, both gave
  the nod that this is good enough.

  Casey Schaufler and James Morris have given their approval to the LSM
  side of the changes.

  I simplified the types and the code dealing with sysvipc to pass just
  kern_ipc_perm for all three types of ipc, which reduced the header
  dependencies throughout the kernel and simplified the lsm code. That in
  turn let me work on the pid fixes without having to worry about trivial
  changes causing complete kernel recompiles."

* 'userns-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace:
  ipc/shm: Fix pid freeing.
  ipc/shm: fix up for struct file no longer being available in shm.h
  ipc/smack: Tidy up from the change in type of the ipc security hooks
  ipc: Directly call the security hook in ipc_ops.associate
  ipc/sem: Fix semctl(..., GETPID, ...) between pid namespaces
  ipc/msg: Fix msgctl(..., IPC_STAT, ...) between pid namespaces
  ipc/shm: Fix shmctl(..., IPC_STAT, ...) between pid namespaces.
  ipc/util: Helpers for making the sysvipc operations pid namespace aware
  ipc: Move IPCMNI from include/ipc.h into ipc/util.h
  msg: Move struct msg_queue into ipc/msg.c
  shm: Move struct shmid_kernel into ipc/shm.c
  sem: Move struct sem and struct sem_array into ipc/sem.c
  msg/security: Pass kern_ipc_perm not msg_queue into the msg_queue security hooks
  shm/security: Pass kern_ipc_perm not shmid_kernel into the shm security hooks
  sem/security: Pass kern_ipc_perm not sem_array into the sem security hooks
  pidns: simpler allocation of pid_* caches
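To make the user-visible effect of the GETPID fix concrete, the following is a minimal userspace sketch (illustrative only, not part of this merge; it assumes a glibc environment). With the old code, sempid was recorded with task_tgid_vnr() in the modifier's pid namespace and echoed back verbatim, so a caller in a different pid namespace could receive a pid number that meant nothing to it; after this series the value is translated into the caller's pid namespace.

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	/* Create a private semaphore set with a single semaphore. */
	int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	if (semid < 0) {
		perror("semget");
		return 1;
	}

	pid_t child = fork();
	if (child == 0) {
		/* Child performs a semop and so becomes the last modifier. */
		struct sembuf op = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };
		if (semop(semid, &op, 1) < 0)
			perror("semop");
		_exit(0);
	}
	waitpid(child, NULL, 0);

	/*
	 * GETPID reports the pid of the process that last modified the
	 * semaphore. When parent and child share a pid namespace the two
	 * numbers printed below should match; the point of the fix is that
	 * they also come out right when the reader sits in a different
	 * pid namespace than the modifier.
	 */
	int last = semctl(semid, 0, GETPID);
	printf("child pid = %d, semctl(GETPID) = %d\n", (int)child, last);

	semctl(semid, 0, IPC_RMID);
	return 0;
}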
Diffstat (limited to 'ipc')
-rw-r--r--  ipc/msg.c  | 62
-rw-r--r--  ipc/sem.c  | 81
-rw-r--r--  ipc/shm.c  | 68
-rw-r--r--  ipc/util.c |  9
-rw-r--r--  ipc/util.h | 12
5 files changed, 152 insertions(+), 80 deletions(-)
diff --git a/ipc/msg.c b/ipc/msg.c
index 9de4806..114a211 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -43,6 +43,23 @@
#include <linux/uaccess.h>
#include "util.h"
+/* one msq_queue structure for each present queue on the system */
+struct msg_queue {
+ struct kern_ipc_perm q_perm;
+ time64_t q_stime; /* last msgsnd time */
+ time64_t q_rtime; /* last msgrcv time */
+ time64_t q_ctime; /* last change time */
+ unsigned long q_cbytes; /* current number of bytes on queue */
+ unsigned long q_qnum; /* number of messages in queue */
+ unsigned long q_qbytes; /* max number of bytes on queue */
+ struct pid *q_lspid; /* pid of last msgsnd */
+ struct pid *q_lrpid; /* last receive pid */
+
+ struct list_head q_messages;
+ struct list_head q_receivers;
+ struct list_head q_senders;
+} __randomize_layout;
+
/* one msg_receiver structure for each sleeping receiver */
struct msg_receiver {
struct list_head r_list;
@@ -101,7 +118,7 @@ static void msg_rcu_free(struct rcu_head *head)
struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
struct msg_queue *msq = container_of(p, struct msg_queue, q_perm);
- security_msg_queue_free(msq);
+ security_msg_queue_free(&msq->q_perm);
kvfree(msq);
}
@@ -127,7 +144,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
msq->q_perm.key = key;
msq->q_perm.security = NULL;
- retval = security_msg_queue_alloc(msq);
+ retval = security_msg_queue_alloc(&msq->q_perm);
if (retval) {
kvfree(msq);
return retval;
@@ -137,7 +154,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
msq->q_ctime = ktime_get_real_seconds();
msq->q_cbytes = msq->q_qnum = 0;
msq->q_qbytes = ns->msg_ctlmnb;
- msq->q_lspid = msq->q_lrpid = 0;
+ msq->q_lspid = msq->q_lrpid = NULL;
INIT_LIST_HEAD(&msq->q_messages);
INIT_LIST_HEAD(&msq->q_receivers);
INIT_LIST_HEAD(&msq->q_senders);
@@ -250,25 +267,17 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
free_msg(msg);
}
atomic_sub(msq->q_cbytes, &ns->msg_bytes);
+ ipc_update_pid(&msq->q_lspid, NULL);
+ ipc_update_pid(&msq->q_lrpid, NULL);
ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
}
-/*
- * Called with msg_ids.rwsem and ipcp locked.
- */
-static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
-{
- struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
-
- return security_msg_queue_associate(msq, msgflg);
-}
-
long ksys_msgget(key_t key, int msgflg)
{
struct ipc_namespace *ns;
static const struct ipc_ops msg_ops = {
.getnew = newque,
- .associate = msg_security,
+ .associate = security_msg_queue_associate,
};
struct ipc_params msg_params;
@@ -385,7 +394,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
msq = container_of(ipcp, struct msg_queue, q_perm);
- err = security_msg_queue_msgctl(msq, cmd);
+ err = security_msg_queue_msgctl(&msq->q_perm, cmd);
if (err)
goto out_unlock1;
@@ -507,7 +516,7 @@ static int msgctl_stat(struct ipc_namespace *ns, int msqid,
if (ipcperms(ns, &msq->q_perm, S_IRUGO))
goto out_unlock;
- err = security_msg_queue_msgctl(msq, cmd);
+ err = security_msg_queue_msgctl(&msq->q_perm, cmd);
if (err)
goto out_unlock;
@@ -526,8 +535,8 @@ static int msgctl_stat(struct ipc_namespace *ns, int msqid,
p->msg_cbytes = msq->q_cbytes;
p->msg_qnum = msq->q_qnum;
p->msg_qbytes = msq->q_qbytes;
- p->msg_lspid = msq->q_lspid;
- p->msg_lrpid = msq->q_lrpid;
+ p->msg_lspid = pid_vnr(msq->q_lspid);
+ p->msg_lrpid = pid_vnr(msq->q_lrpid);
ipc_unlock_object(&msq->q_perm);
rcu_read_unlock();
@@ -733,7 +742,7 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg,
list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
- !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
+ !security_msg_queue_msgrcv(&msq->q_perm, msg, msr->r_tsk,
msr->r_msgtype, msr->r_mode)) {
list_del(&msr->r_list);
@@ -741,7 +750,7 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg,
wake_q_add(wake_q, msr->r_tsk);
WRITE_ONCE(msr->r_msg, ERR_PTR(-E2BIG));
} else {
- msq->q_lrpid = task_pid_vnr(msr->r_tsk);
+ ipc_update_pid(&msq->q_lrpid, task_pid(msr->r_tsk));
msq->q_rtime = get_seconds();
wake_q_add(wake_q, msr->r_tsk);
@@ -799,7 +808,7 @@ static long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock0;
}
- err = security_msg_queue_msgsnd(msq, msg, msgflg);
+ err = security_msg_queue_msgsnd(&msq->q_perm, msg, msgflg);
if (err)
goto out_unlock0;
@@ -842,7 +851,7 @@ static long do_msgsnd(int msqid, long mtype, void __user *mtext,
}
- msq->q_lspid = task_tgid_vnr(current);
+ ipc_update_pid(&msq->q_lspid, task_tgid(current));
msq->q_stime = get_seconds();
if (!pipelined_send(msq, msg, &wake_q)) {
@@ -987,7 +996,7 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
list_for_each_entry(msg, &msq->q_messages, m_list) {
if (testmsg(msg, *msgtyp, mode) &&
- !security_msg_queue_msgrcv(msq, msg, current,
+ !security_msg_queue_msgrcv(&msq->q_perm, msg, current,
*msgtyp, mode)) {
if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
*msgtyp = msg->m_type - 1;
@@ -1072,7 +1081,7 @@ static long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, in
list_del(&msg->m_list);
msq->q_qnum--;
msq->q_rtime = get_seconds();
- msq->q_lrpid = task_tgid_vnr(current);
+ ipc_update_pid(&msq->q_lrpid, task_tgid(current));
msq->q_cbytes -= msg->m_ts;
atomic_sub(msg->m_ts, &ns->msg_bytes);
atomic_dec(&ns->msg_hdrs);
@@ -1227,6 +1236,7 @@ void msg_exit_ns(struct ipc_namespace *ns)
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
+ struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
struct user_namespace *user_ns = seq_user_ns(s);
struct kern_ipc_perm *ipcp = it;
struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
@@ -1238,8 +1248,8 @@ static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
msq->q_perm.mode,
msq->q_cbytes,
msq->q_qnum,
- msq->q_lspid,
- msq->q_lrpid,
+ pid_nr_ns(msq->q_lspid, pid_ns),
+ pid_nr_ns(msq->q_lrpid, pid_ns),
from_kuid_munged(user_ns, msq->q_perm.uid),
from_kgid_munged(user_ns, msq->q_perm.gid),
from_kuid_munged(user_ns, msq->q_perm.cuid),
diff --git a/ipc/sem.c b/ipc/sem.c
index 1cf5627..2994da8 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -88,13 +88,47 @@
#include <linux/uaccess.h>
#include "util.h"
+/* One semaphore structure for each semaphore in the system. */
+struct sem {
+ int semval; /* current value */
+ /*
+ * PID of the process that last modified the semaphore. For
+ * Linux, specifically these are:
+ * - semop
+ * - semctl, via SETVAL and SETALL.
+ * - at task exit when performing undo adjustments (see exit_sem).
+ */
+ struct pid *sempid;
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
+ struct list_head pending_alter; /* pending single-sop operations */
+ /* that alter the semaphore */
+ struct list_head pending_const; /* pending single-sop operations */
+ /* that do not alter the semaphore*/
+ time_t sem_otime; /* candidate for sem_otime */
+} ____cacheline_aligned_in_smp;
+
+/* One sem_array data structure for each set of semaphores in the system. */
+struct sem_array {
+ struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */
+ time64_t sem_ctime; /* create/last semctl() time */
+ struct list_head pending_alter; /* pending operations */
+ /* that alter the array */
+ struct list_head pending_const; /* pending complex operations */
+ /* that do not alter semvals */
+ struct list_head list_id; /* undo requests on this array */
+ int sem_nsems; /* no. of semaphores in array */
+ int complex_count; /* pending complex operations */
+ unsigned int use_global_lock;/* >0: global lock required */
+
+ struct sem sems[];
+} __randomize_layout;
/* One queue for each sleeping process in the system. */
struct sem_queue {
struct list_head list; /* queue of pending operations */
struct task_struct *sleeper; /* this process */
struct sem_undo *undo; /* undo structure */
- int pid; /* process id of requesting process */
+ struct pid *pid; /* process id of requesting process */
int status; /* completion status of operation */
struct sembuf *sops; /* array of pending operations */
struct sembuf *blocking; /* the operation that blocked */
@@ -265,7 +299,7 @@ static void sem_rcu_free(struct rcu_head *head)
struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
- security_sem_free(sma);
+ security_sem_free(&sma->sem_perm);
kvfree(sma);
}
@@ -495,7 +529,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_perm.key = key;
sma->sem_perm.security = NULL;
- retval = security_sem_alloc(sma);
+ retval = security_sem_alloc(&sma->sem_perm);
if (retval) {
kvfree(sma);
return retval;
@@ -533,17 +567,6 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
/*
* Called with sem_ids.rwsem and ipcp locked.
*/
-static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
-{
- struct sem_array *sma;
-
- sma = container_of(ipcp, struct sem_array, sem_perm);
- return security_sem_associate(sma, semflg);
-}
-
-/*
- * Called with sem_ids.rwsem and ipcp locked.
- */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
struct ipc_params *params)
{
@@ -561,7 +584,7 @@ long ksys_semget(key_t key, int nsems, int semflg)
struct ipc_namespace *ns;
static const struct ipc_ops sem_ops = {
.getnew = newary,
- .associate = sem_security,
+ .associate = security_sem_associate,
.more_checks = sem_more_checks,
};
struct ipc_params sem_params;
@@ -602,7 +625,8 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
*/
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
- int result, sem_op, nsops, pid;
+ int result, sem_op, nsops;
+ struct pid *pid;
struct sembuf *sop;
struct sem *curr;
struct sembuf *sops;
@@ -640,7 +664,7 @@ static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
sop--;
pid = q->pid;
while (sop >= sops) {
- sma->sems[sop->sem_num].sempid = pid;
+ ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
sop--;
}
@@ -727,7 +751,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
un->semadj[sop->sem_num] = undo;
}
curr->semval += sem_op;
- curr->sempid = q->pid;
+ ipc_update_pid(&curr->sempid, q->pid);
}
return 0;
@@ -1134,6 +1158,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
unlink_queue(sma, q);
wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
}
+ ipc_update_pid(&sem->sempid, NULL);
}
/* Remove the semaphore set from the IDR */
@@ -1214,7 +1239,7 @@ static int semctl_stat(struct ipc_namespace *ns, int semid,
if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
goto out_unlock;
- err = security_sem_semctl(sma, cmd);
+ err = security_sem_semctl(&sma->sem_perm, cmd);
if (err)
goto out_unlock;
@@ -1305,7 +1330,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
return -EACCES;
}
- err = security_sem_semctl(sma, SETVAL);
+ err = security_sem_semctl(&sma->sem_perm, SETVAL);
if (err) {
rcu_read_unlock();
return -EACCES;
@@ -1326,7 +1351,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
un->semadj[semnum] = 0;
curr->semval = val;
- curr->sempid = task_tgid_vnr(current);
+ ipc_update_pid(&curr->sempid, task_tgid(current));
sma->sem_ctime = ktime_get_real_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &wake_q);
@@ -1359,7 +1384,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
goto out_rcu_wakeup;
- err = security_sem_semctl(sma, cmd);
+ err = security_sem_semctl(&sma->sem_perm, cmd);
if (err)
goto out_rcu_wakeup;
@@ -1447,7 +1472,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
for (i = 0; i < nsems; i++) {
sma->sems[i].semval = sem_io[i];
- sma->sems[i].sempid = task_tgid_vnr(current);
+ ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
}
ipc_assert_locked_object(&sma->sem_perm);
@@ -1479,7 +1504,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
err = curr->semval;
goto out_unlock;
case GETPID:
- err = curr->sempid;
+ err = pid_vnr(curr->sempid);
goto out_unlock;
case GETNCNT:
err = count_semcnt(sma, semnum, 0);
@@ -1550,7 +1575,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
sma = container_of(ipcp, struct sem_array, sem_perm);
- err = security_sem_semctl(sma, cmd);
+ err = security_sem_semctl(&sma->sem_perm, cmd);
if (err)
goto out_unlock1;
@@ -1977,7 +2002,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
goto out_free;
}
- error = security_sem_semop(sma, sops, nsops, alter);
+ error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
if (error) {
rcu_read_unlock();
goto out_free;
@@ -2008,7 +2033,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
queue.sops = sops;
queue.nsops = nsops;
queue.undo = un;
- queue.pid = task_tgid_vnr(current);
+ queue.pid = task_tgid(current);
queue.alter = alter;
queue.dupsop = dupsop;
@@ -2315,7 +2340,7 @@ void exit_sem(struct task_struct *tsk)
semaphore->semval = 0;
if (semaphore->semval > SEMVMX)
semaphore->semval = SEMVMX;
- semaphore->sempid = task_tgid_vnr(current);
+ ipc_update_pid(&semaphore->sempid, task_tgid(current));
}
}
/* maybe some queued-up processes were waiting for this */
diff --git a/ipc/shm.c b/ipc/shm.c
index c38c842..acefe44 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -48,6 +48,28 @@
#include "util.h"
+struct shmid_kernel /* private to the kernel */
+{
+ struct kern_ipc_perm shm_perm;
+ struct file *shm_file;
+ unsigned long shm_nattch;
+ unsigned long shm_segsz;
+ time64_t shm_atim;
+ time64_t shm_dtim;
+ time64_t shm_ctim;
+ struct pid *shm_cprid;
+ struct pid *shm_lprid;
+ struct user_struct *mlock_user;
+
+ /* The task created the shm object. NULL if the task is dead. */
+ struct task_struct *shm_creator;
+ struct list_head shm_clist; /* list by creator */
+} __randomize_layout;
+
+/* shm_mode upper byte flags */
+#define SHM_DEST 01000 /* segment will be destroyed on last detach */
+#define SHM_LOCKED 02000 /* segment will not be swapped */
+
struct shm_file_data {
int id;
struct ipc_namespace *ns;
@@ -181,7 +203,7 @@ static void shm_rcu_free(struct rcu_head *head)
rcu);
struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
shm_perm);
- security_shm_free(shp);
+ security_shm_free(&shp->shm_perm);
kvfree(shp);
}
@@ -204,7 +226,7 @@ static int __shm_open(struct vm_area_struct *vma)
return PTR_ERR(shp);
shp->shm_atim = ktime_get_real_seconds();
- shp->shm_lprid = task_tgid_vnr(current);
+ ipc_update_pid(&shp->shm_lprid, task_tgid(current));
shp->shm_nattch++;
shm_unlock(shp);
return 0;
@@ -245,6 +267,8 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
user_shm_unlock(i_size_read(file_inode(shm_file)),
shp->mlock_user);
fput(shm_file);
+ ipc_update_pid(&shp->shm_cprid, NULL);
+ ipc_update_pid(&shp->shm_lprid, NULL);
ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}
@@ -289,7 +313,7 @@ static void shm_close(struct vm_area_struct *vma)
if (WARN_ON_ONCE(IS_ERR(shp)))
goto done; /* no-op */
- shp->shm_lprid = task_tgid_vnr(current);
+ ipc_update_pid(&shp->shm_lprid, task_tgid(current));
shp->shm_dtim = ktime_get_real_seconds();
shp->shm_nattch--;
if (shm_may_destroy(ns, shp))
@@ -566,7 +590,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
shp->mlock_user = NULL;
shp->shm_perm.security = NULL;
- error = security_shm_alloc(shp);
+ error = security_shm_alloc(&shp->shm_perm);
if (error) {
kvfree(shp);
return error;
@@ -604,8 +628,8 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
if (IS_ERR(file))
goto no_file;
- shp->shm_cprid = task_tgid_vnr(current);
- shp->shm_lprid = 0;
+ shp->shm_cprid = get_pid(task_tgid(current));
+ shp->shm_lprid = NULL;
shp->shm_atim = shp->shm_dtim = 0;
shp->shm_ctim = ktime_get_real_seconds();
shp->shm_segsz = size;
@@ -634,6 +658,8 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
return error;
no_id:
+ ipc_update_pid(&shp->shm_cprid, NULL);
+ ipc_update_pid(&shp->shm_lprid, NULL);
if (is_file_hugepages(file) && shp->mlock_user)
user_shm_unlock(size, shp->mlock_user);
fput(file);
@@ -645,17 +671,6 @@ no_file:
/*
* Called with shm_ids.rwsem and ipcp locked.
*/
-static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
-{
- struct shmid_kernel *shp;
-
- shp = container_of(ipcp, struct shmid_kernel, shm_perm);
- return security_shm_associate(shp, shmflg);
-}
-
-/*
- * Called with shm_ids.rwsem and ipcp locked.
- */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
struct ipc_params *params)
{
@@ -673,7 +688,7 @@ long ksys_shmget(key_t key, size_t size, int shmflg)
struct ipc_namespace *ns;
static const struct ipc_ops shm_ops = {
.getnew = newseg,
- .associate = shm_security,
+ .associate = security_shm_associate,
.more_checks = shm_more_checks,
};
struct ipc_params shm_params;
@@ -852,7 +867,7 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
shp = container_of(ipcp, struct shmid_kernel, shm_perm);
- err = security_shm_shmctl(shp, cmd);
+ err = security_shm_shmctl(&shp->shm_perm, cmd);
if (err)
goto out_unlock1;
@@ -951,7 +966,7 @@ static int shmctl_stat(struct ipc_namespace *ns, int shmid,
if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
goto out_unlock;
- err = security_shm_shmctl(shp, cmd);
+ err = security_shm_shmctl(&shp->shm_perm, cmd);
if (err)
goto out_unlock;
@@ -968,8 +983,8 @@ static int shmctl_stat(struct ipc_namespace *ns, int shmid,
tbuf->shm_atime = shp->shm_atim;
tbuf->shm_dtime = shp->shm_dtim;
tbuf->shm_ctime = shp->shm_ctim;
- tbuf->shm_cpid = shp->shm_cprid;
- tbuf->shm_lpid = shp->shm_lprid;
+ tbuf->shm_cpid = pid_vnr(shp->shm_cprid);
+ tbuf->shm_lpid = pid_vnr(shp->shm_lprid);
tbuf->shm_nattch = shp->shm_nattch;
ipc_unlock_object(&shp->shm_perm);
@@ -995,7 +1010,7 @@ static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
}
audit_ipc_obj(&(shp->shm_perm));
- err = security_shm_shmctl(shp, cmd);
+ err = security_shm_shmctl(&shp->shm_perm, cmd);
if (err)
goto out_unlock1;
@@ -1375,7 +1390,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
if (ipcperms(ns, &shp->shm_perm, acc_mode))
goto out_unlock;
- err = security_shm_shmat(shp, shmaddr, shmflg);
+ err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
if (err)
goto out_unlock;
@@ -1618,6 +1633,7 @@ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
+ struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
struct user_namespace *user_ns = seq_user_ns(s);
struct kern_ipc_perm *ipcp = it;
struct shmid_kernel *shp;
@@ -1640,8 +1656,8 @@ static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
shp->shm_perm.id,
shp->shm_perm.mode,
shp->shm_segsz,
- shp->shm_cprid,
- shp->shm_lprid,
+ pid_nr_ns(shp->shm_cprid, pid_ns),
+ pid_nr_ns(shp->shm_lprid, pid_ns),
shp->shm_nattch,
from_kuid_munged(user_ns, shp->shm_perm.uid),
from_kgid_munged(user_ns, shp->shm_perm.gid),
diff --git a/ipc/util.c b/ipc/util.c
index 4ed5a17..3783b79 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -747,9 +747,16 @@ int ipc_parse_version(int *cmd)
#ifdef CONFIG_PROC_FS
struct ipc_proc_iter {
struct ipc_namespace *ns;
+ struct pid_namespace *pid_ns;
struct ipc_proc_iface *iface;
};
+struct pid_namespace *ipc_seq_pid_ns(struct seq_file *s)
+{
+ struct ipc_proc_iter *iter = s->private;
+ return iter->pid_ns;
+}
+
/*
* This routine locks the ipc structure found at least at position pos.
*/
@@ -872,6 +879,7 @@ static int sysvipc_proc_open(struct inode *inode, struct file *file)
iter->iface = PDE_DATA(inode);
iter->ns = get_ipc_ns(current->nsproxy->ipc_ns);
+ iter->pid_ns = get_pid_ns(task_active_pid_ns(current));
return 0;
}
@@ -881,6 +889,7 @@ static int sysvipc_proc_release(struct inode *inode, struct file *file)
struct seq_file *seq = file->private_data;
struct ipc_proc_iter *iter = seq->private;
put_ipc_ns(iter->ns);
+ put_pid_ns(iter->pid_ns);
return seq_release_private(inode, file);
}
diff --git a/ipc/util.h b/ipc/util.h
index 51853dc..acc5159 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/ipc_namespace.h>
+#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
#define SEQ_MULTIPLIER (IPCMNI)
int sem_init(void);
@@ -22,6 +23,7 @@ int msg_init(void);
void shm_init(void);
struct ipc_namespace;
+struct pid_namespace;
#ifdef CONFIG_POSIX_MQUEUE
extern void mq_clear_sbinfo(struct ipc_namespace *ns);
@@ -85,6 +87,7 @@ int ipc_init_ids(struct ipc_ids *);
#ifdef CONFIG_PROC_FS
void __init ipc_init_proc_interface(const char *path, const char *header,
int ids, int (*show)(struct seq_file *, void *));
+struct pid_namespace *ipc_seq_pid_ns(struct seq_file *);
#else
#define ipc_init_proc_interface(path, header, ids, show) do {} while (0)
#endif
@@ -149,6 +152,15 @@ struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
struct ipc_ids *ids, int id, int cmd,
struct ipc64_perm *perm, int extra_perm);
+static inline void ipc_update_pid(struct pid **pos, struct pid *pid)
+{
+ struct pid *old = *pos;
+ if (old != pid) {
+ *pos = get_pid(pid);
+ put_pid(old);
+ }
+}
+
#ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
/* On IA-64, we always use the "64-bit version" of the IPC structures. */
# define ipc_parse_version(cmd) IPC_64
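As a companion sketch (again illustrative, not part of the merge; it assumes Linux with /proc mounted and a glibc toolchain), the effect of the sysvipc_shm_proc_show() change can be observed from userspace: the cpid column of /proc/sysvipc/shm is now derived via pid_nr_ns() from the stored struct pid, i.e. it shows the creator's pid as seen from the pid namespace of whoever opened the proc file.

#include <stdio.h>
#include <unistd.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	if (shmid < 0) {
		perror("shmget");
		return 1;
	}
	printf("creator pid = %d, shmid = %d\n", (int)getpid(), shmid);

	/*
	 * Print the matching line of /proc/sysvipc/shm; the second field is
	 * the shmid and the fifth (cpid) should show our pid as seen from
	 * the reader's pid namespace.
	 */
	FILE *f = fopen("/proc/sysvipc/shm", "r");
	if (f) {
		char line[512];
		while (fgets(line, sizeof(line), f)) {
			int key, id;
			if (sscanf(line, "%d %d", &key, &id) == 2 && id == shmid)
				fputs(line, stdout);
		}
		fclose(f);
	}

	shmctl(shmid, IPC_RMID, NULL);
	return 0;
}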