author    James Morris <jmorris@namei.org>  2009-09-30 07:47:33 +1000
committer James Morris <jmorris@namei.org>  2009-09-30 07:47:33 +1000
commit    1669b049db50fc7f1d4e694fb115a0f408c63fce (patch)
tree      9b3b90b5cbff9b8f30ecf0b2a44896ce8bef0c20 /security
parent    7f366784f5c2b8fc0658b5b374f4c63ee42c789f (diff)
parent    17d857be649a21ca90008c6dc425d849fa83db5c (diff)
Merge branch 'master' into next
Diffstat (limited to 'security')
-rw-r--r--  security/Kconfig                 |  30
-rw-r--r--  security/device_cgroup.c         |   3
-rw-r--r--  security/integrity/ima/ima_fs.c  |   4
-rw-r--r--  security/keys/gc.c               |  78
-rw-r--r--  security/keys/key.c              |   4
-rw-r--r--  security/keys/keyctl.c           |   3
-rw-r--r--  security/keys/keyring.c          |  24
-rw-r--r--  security/lsm_audit.c             |   2
-rw-r--r--  security/min_addr.c              |   4
-rw-r--r--  security/selinux/avc.c           |  41
-rw-r--r--  security/selinux/exports.c       |   6
-rw-r--r--  security/selinux/hooks.c         |   2
-rw-r--r--  security/smack/smack_lsm.c       |   8
-rw-r--r--  security/smack/smackfs.c         |   6
14 files changed, 154 insertions(+), 61 deletions(-)
diff --git a/security/Kconfig b/security/Kconfig
index 4c86534..fb363cd 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -113,6 +113,36 @@ config SECURITY_ROOTPLUG
If you are unsure how to answer this question, answer N.
+config INTEL_TXT
+ bool "Enable Intel(R) Trusted Execution Technology (Intel(R) TXT)"
+ depends on HAVE_INTEL_TXT
+ help
+ This option enables support for booting the kernel with the
+ Trusted Boot (tboot) module. This will utilize
+ Intel(R) Trusted Execution Technology to perform a measured launch
+ of the kernel. If the system does not support Intel(R) TXT, this
+ will have no effect.
+
+ Intel TXT will provide higher assurance of system configuration and
+ initial state as well as data reset protection. This is used to
+ create a robust initial kernel measurement and verification, which
+ helps to ensure that kernel security mechanisms are functioning
+ correctly. This level of protection requires a root of trust outside
+ of the kernel itself.
+
+ Intel TXT also helps solve real end user concerns about having
+ confidence that their hardware is running the VMM or kernel that
+ it was configured with, especially since they may be responsible for
+ providing such assurances to VMs and services running on it.
+
+ See <http://www.intel.com/technology/security/> for more information
+ about Intel(R) TXT.
+ See <http://tboot.sourceforge.net> for more information about tboot.
+ See Documentation/intel_txt.txt for a description of how to enable
+ Intel TXT support in a kernel boot.
+
+ If you are unsure as to whether this is required, answer N.
+
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
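
For illustration: once the architecture provides HAVE_INTEL_TXT, enabling the new option is a one-line configuration change. A minimal, hypothetical .config fragment (assuming an x86 tree that selects HAVE_INTEL_TXT; symbol names taken from the hunk above):

# Hypothetical .config fragment -- not part of this patch.
CONFIG_HAVE_INTEL_TXT=y
CONFIG_INTEL_TXT=y

Actually performing a measured launch additionally requires tboot in the boot chain, as described in the Documentation/intel_txt.txt file referenced by the help text.
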
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index b8186ba..6cf8fd2 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -61,7 +61,8 @@ static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
struct cgroup_subsys devices_subsys;
static int devcgroup_can_attach(struct cgroup_subsys *ss,
- struct cgroup *new_cgroup, struct task_struct *task)
+ struct cgroup *new_cgroup, struct task_struct *task,
+ bool threadgroup)
{
if (current != task && !capable(CAP_SYS_ADMIN))
return -EPERM;
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 6bfc7ea..8e9777b 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -146,7 +146,7 @@ static int ima_measurements_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations ima_measurments_seqops = {
+static const struct seq_operations ima_measurments_seqops = {
.start = ima_measurements_start,
.next = ima_measurements_next,
.stop = ima_measurements_stop,
@@ -221,7 +221,7 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v)
return 0;
}
-static struct seq_operations ima_ascii_measurements_seqops = {
+static const struct seq_operations ima_ascii_measurements_seqops = {
.start = ima_measurements_start,
.next = ima_measurements_next,
.stop = ima_measurements_stop,
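
The two hunks above only add const: the seq_file core never writes to the operations table, so it can live in read-only data. A hypothetical sketch of the general pattern (the example_* names are placeholders, not the IMA code):

/* Hypothetical sketch: a const seq_operations table wired up via seq_open(). */
#include <linux/fs.h>
#include <linux/seq_file.h>

static void *example_start(struct seq_file *m, loff_t *pos)
{
	return NULL;			/* nothing to iterate in this stub */
}

static void *example_next(struct seq_file *m, void *v, loff_t *pos)
{
	return NULL;
}

static void example_stop(struct seq_file *m, void *v)
{
}

static int example_show(struct seq_file *m, void *v)
{
	return 0;
}

static const struct seq_operations example_seqops = {
	.start	= example_start,
	.next	= example_next,
	.stop	= example_stop,
	.show	= example_show,
};

static int example_open(struct inode *inode, struct file *file)
{
	/* seq_open() takes a const pointer, so the table can be const. */
	return seq_open(file, &example_seqops);
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
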
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 1e616ae..4770be3 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -26,8 +26,10 @@ static void key_garbage_collector(struct work_struct *);
static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);
static DECLARE_WORK(key_gc_work, key_garbage_collector);
static key_serial_t key_gc_cursor; /* the last key the gc considered */
+static bool key_gc_again;
static unsigned long key_gc_executing;
static time_t key_gc_next_run = LONG_MAX;
+static time_t key_gc_new_timer;
/*
* Schedule a garbage collection run
@@ -40,9 +42,7 @@ void key_schedule_gc(time_t gc_at)
kenter("%ld", gc_at - now);
- gc_at += key_gc_delay;
-
- if (now >= gc_at) {
+ if (gc_at <= now) {
schedule_work(&key_gc_work);
} else if (gc_at < key_gc_next_run) {
expires = jiffies + (gc_at - now) * HZ;
@@ -112,16 +112,18 @@ static void key_garbage_collector(struct work_struct *work)
struct rb_node *rb;
key_serial_t cursor;
struct key *key, *xkey;
- time_t new_timer = LONG_MAX, limit;
+ time_t new_timer = LONG_MAX, limit, now;
- kenter("");
+ now = current_kernel_time().tv_sec;
+ kenter("[%x,%ld]", key_gc_cursor, key_gc_new_timer - now);
if (test_and_set_bit(0, &key_gc_executing)) {
- key_schedule_gc(current_kernel_time().tv_sec);
+ key_schedule_gc(current_kernel_time().tv_sec + 1);
+ kleave(" [busy; deferring]");
return;
}
- limit = current_kernel_time().tv_sec;
+ limit = now;
if (limit > key_gc_delay)
limit -= key_gc_delay;
else
@@ -129,12 +131,19 @@ static void key_garbage_collector(struct work_struct *work)
spin_lock(&key_serial_lock);
- if (RB_EMPTY_ROOT(&key_serial_tree))
- goto reached_the_end;
+ if (unlikely(RB_EMPTY_ROOT(&key_serial_tree))) {
+ spin_unlock(&key_serial_lock);
+ clear_bit(0, &key_gc_executing);
+ return;
+ }
cursor = key_gc_cursor;
if (cursor < 0)
cursor = 0;
+ if (cursor > 0)
+ new_timer = key_gc_new_timer;
+ else
+ key_gc_again = false;
/* find the first key above the cursor */
key = NULL;
@@ -160,35 +169,50 @@ static void key_garbage_collector(struct work_struct *work)
/* trawl through the keys looking for keyrings */
for (;;) {
- if (key->expiry > 0 && key->expiry < new_timer)
+ if (key->expiry > limit && key->expiry < new_timer) {
+ kdebug("will expire %x in %ld",
+ key_serial(key), key->expiry - limit);
new_timer = key->expiry;
+ }
if (key->type == &key_type_keyring &&
- key_gc_keyring(key, limit)) {
- /* the gc ate our lock */
- schedule_work(&key_gc_work);
- goto no_unlock;
- }
+ key_gc_keyring(key, limit))
+ /* the gc had to release our lock so that the keyring
+ * could be modified, so we have to get it again */
+ goto gc_released_our_lock;
rb = rb_next(&key->serial_node);
- if (!rb) {
- key_gc_cursor = 0;
- break;
- }
+ if (!rb)
+ goto reached_the_end;
key = rb_entry(rb, struct key, serial_node);
}
-out:
- spin_unlock(&key_serial_lock);
-no_unlock:
+gc_released_our_lock:
+ kdebug("gc_released_our_lock");
+ key_gc_new_timer = new_timer;
+ key_gc_again = true;
clear_bit(0, &key_gc_executing);
- if (new_timer < LONG_MAX)
- key_schedule_gc(new_timer);
-
- kleave("");
+ schedule_work(&key_gc_work);
+ kleave(" [continue]");
return;
+ /* when we reach the end of the run, we set the timer for the next one */
reached_the_end:
+ kdebug("reached_the_end");
+ spin_unlock(&key_serial_lock);
+ key_gc_new_timer = new_timer;
key_gc_cursor = 0;
- goto out;
+ clear_bit(0, &key_gc_executing);
+
+ if (key_gc_again) {
+ /* there may have been a key that expired whilst we were
+ * scanning, so if we discarded any links we should do another
+ * scan */
+ new_timer = now + 1;
+ key_schedule_gc(new_timer);
+ } else if (new_timer < LONG_MAX) {
+ new_timer += key_gc_delay;
+ key_schedule_gc(new_timer);
+ }
+ kleave(" [end]");
}
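
The reshaped collector boils down to a familiar pattern: a work item guarded by a busy bit that asks an already-running pass to note extra work rather than racing with it, and that re-queues itself when a pass discovers more to do. A deliberately simplified sketch of that pattern follows (generic names, and without the timer re-arming and cursor handling the real collector needs):

/* Simplified sketch of the defer-and-rescan pattern; not the key GC itself. */
#include <linux/workqueue.h>
#include <linux/bitops.h>

static void example_collector(struct work_struct *work);
static DECLARE_WORK(example_work, example_collector);

static unsigned long example_running;	/* bit 0: a pass is in progress */
static bool example_again;		/* more work arrived mid-pass */

void example_poke(void)
{
	/* Callers that create new garbage just kick the work item. */
	example_again = true;
	schedule_work(&example_work);
}

static void example_collector(struct work_struct *work)
{
	if (test_and_set_bit(0, &example_running)) {
		/* Another pass is running; flag the extra work for it.  The
		 * real collector re-arms a timer here (key_schedule_gc()) so
		 * the request cannot be lost if it races with the end of
		 * that pass. */
		example_again = true;
		return;
	}

	/* ... walk the data structure and reap dead entries ... */

	clear_bit(0, &example_running);
	if (example_again) {
		example_again = false;
		schedule_work(&example_work);	/* do one more pass */
	}
}
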
diff --git a/security/keys/key.c b/security/keys/key.c
index 08531ad..e50d264 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -500,7 +500,7 @@ int key_negate_and_link(struct key *key,
set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
now = current_kernel_time();
key->expiry = now.tv_sec + timeout;
- key_schedule_gc(key->expiry);
+ key_schedule_gc(key->expiry + key_gc_delay);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
awaken = 1;
@@ -909,7 +909,7 @@ void key_revoke(struct key *key)
time = now.tv_sec;
if (key->revoked_at == 0 || key->revoked_at > time) {
key->revoked_at = time;
- key_schedule_gc(key->revoked_at);
+ key_schedule_gc(key->revoked_at + key_gc_delay);
}
up_write(&key->sem);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 74c9685..2fb28ef 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1115,7 +1115,7 @@ long keyctl_set_timeout(key_serial_t id, unsigned timeout)
}
key->expiry = expiry;
- key_schedule_gc(key->expiry);
+ key_schedule_gc(key->expiry + key_gc_delay);
up_write(&key->sem);
key_put(key);
@@ -1319,6 +1319,7 @@ long keyctl_session_to_parent(void)
already_same:
ret = 0;
not_permitted:
+ write_unlock_irq(&tasklist_lock);
put_cred(cred);
return ret;
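
The added write_unlock_irq() is a lock-balance fix: keyctl_session_to_parent() takes tasklist_lock with write_lock_irq(), and the already_same/not_permitted exits were previously returning with the lock still held. The shape of the fix, reduced to a hypothetical helper:

#include <linux/sched.h>
#include <linux/errno.h>

/* Hypothetical helper illustrating the rule restored above: every path that
 * returns after write_lock_irq() must pass through the matching unlock. */
static int example_change_parent_state(bool permitted)
{
	int ret = 0;

	write_lock_irq(&tasklist_lock);
	if (!permitted) {
		ret = -EPERM;
		goto not_permitted;
	}
	/* ... update state that tasklist_lock protects ... */
not_permitted:
	write_unlock_irq(&tasklist_lock);
	return ret;
}
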
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index ac977f6..8ec0274 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1019,18 +1019,18 @@ void keyring_gc(struct key *keyring, time_t limit)
struct key *key;
int loop, keep, max;
- kenter("%x", key_serial(keyring));
+ kenter("{%x,%s}", key_serial(keyring), keyring->description);
down_write(&keyring->sem);
klist = keyring->payload.subscriptions;
if (!klist)
- goto just_return;
+ goto no_klist;
/* work out how many subscriptions we're keeping */
keep = 0;
for (loop = klist->nkeys - 1; loop >= 0; loop--)
- if (!key_is_dead(klist->keys[loop], limit));
+ if (!key_is_dead(klist->keys[loop], limit))
keep++;
if (keep == klist->nkeys)
@@ -1041,7 +1041,7 @@ void keyring_gc(struct key *keyring, time_t limit)
new = kmalloc(sizeof(struct keyring_list) + max * sizeof(struct key *),
GFP_KERNEL);
if (!new)
- goto just_return;
+ goto nomem;
new->maxkeys = max;
new->nkeys = 0;
new->delkey = 0;
@@ -1081,7 +1081,21 @@ void keyring_gc(struct key *keyring, time_t limit)
discard_new:
new->nkeys = keep;
keyring_clear_rcu_disposal(&new->rcu);
+ up_write(&keyring->sem);
+ kleave(" [discard]");
+ return;
+
just_return:
up_write(&keyring->sem);
- kleave(" [no]");
+ kleave(" [no dead]");
+ return;
+
+no_klist:
+ up_write(&keyring->sem);
+ kleave(" [no_klist]");
+ return;
+
+nomem:
+ up_write(&keyring->sem);
+ kleave(" [oom]");
}
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 500aad0..3bb90b6 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -187,7 +187,7 @@ static inline void print_ipv6_addr(struct audit_buffer *ab,
char *name1, char *name2)
{
if (!ipv6_addr_any(addr))
- audit_log_format(ab, " %s=%pI6", name1, addr);
+ audit_log_format(ab, " %s=%pI6c", name1, addr);
if (port)
audit_log_format(ab, " %s=%d", name2, ntohs(port));
}
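
The %pI6 to %pI6c switch only changes formatting: the kernel's extended %p specifiers print an IPv6 address either fully expanded (%pI6) or in compressed form with zero runs collapsed (%pI6c). A hypothetical one-liner showing the two side by side:

/* Hypothetical example: the same in6_addr printed both ways. */
#include <linux/kernel.h>
#include <linux/in6.h>

static void example_print(const struct in6_addr *addr)
{
	pr_info("full=%pI6 compressed=%pI6c\n", addr, addr);
}
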
diff --git a/security/min_addr.c b/security/min_addr.c
index 14cc7b3..c844eed 100644
--- a/security/min_addr.c
+++ b/security/min_addr.c
@@ -28,12 +28,12 @@ static void update_mmap_min_addr(void)
* sysctl handler which just sets dac_mmap_min_addr = the new value and then
* calls update_mmap_min_addr() so non MAP_FIXED hints get rounded properly
*/
-int mmap_min_addr_handler(struct ctl_table *table, int write, struct file *filp,
+int mmap_min_addr_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
- ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+ ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
update_mmap_min_addr();
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index e3d1901..b4b5da1 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -709,18 +709,16 @@ out:
}
/**
- * avc_ss_reset - Flush the cache and revalidate migrated permissions.
- * @seqno: policy sequence number
+ * avc_flush - Flush the cache
*/
-int avc_ss_reset(u32 seqno)
+static void avc_flush(void)
{
- struct avc_callback_node *c;
- int i, rc = 0, tmprc;
- unsigned long flag;
- struct avc_node *node;
struct hlist_head *head;
struct hlist_node *next;
+ struct avc_node *node;
spinlock_t *lock;
+ unsigned long flag;
+ int i;
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
head = &avc_cache.slots[i];
@@ -737,6 +735,18 @@ int avc_ss_reset(u32 seqno)
rcu_read_unlock();
spin_unlock_irqrestore(lock, flag);
}
+}
+
+/**
+ * avc_ss_reset - Flush the cache and revalidate migrated permissions.
+ * @seqno: policy sequence number
+ */
+int avc_ss_reset(u32 seqno)
+{
+ struct avc_callback_node *c;
+ int rc = 0, tmprc;
+
+ avc_flush();
for (c = avc_callbacks; c; c = c->next) {
if (c->events & AVC_CALLBACK_RESET) {
@@ -858,6 +868,19 @@ u32 avc_policy_seqno(void)
void avc_disable(void)
{
- if (avc_node_cachep)
- kmem_cache_destroy(avc_node_cachep);
+ /*
+ * If you are looking at this because you have realized that we are
+ * not destroying the avc_node_cachep it might be easy to fix, but
+ * I don't know the memory barrier semantics well enough to know. It's
+ * possible that some other task dereferenced security_ops when
+ * it still pointed to selinux operations. If that is the case it's
+ * possible that it is about to use the avc and is about to need the
+ * avc_node_cachep. I know I could wrap the security.c security_ops call
+ * in an rcu_lock, but seriously, it's not worth it. Instead I just flush
+ * the cache and get that memory back.
+ */
+ if (avc_node_cachep) {
+ avc_flush();
+ /* kmem_cache_destroy(avc_node_cachep); */
+ }
}
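
The comment added to avc_disable() records the reasoning behind the refactor: destroying avc_node_cachep is only safe if no other CPU can still be running SELinux hooks that allocate from it, and proving that would need more synchronisation than it is worth, so the code settles for flushing the cached nodes and leaving the kmem_cache itself alive. A hypothetical sketch of that flush-instead-of-destroy choice (the real AVC uses RCU-protected hash lists rather than this plain list):

/* Hypothetical sketch: flush a cache on disable instead of destroying it,
 * because concurrent users may still allocate from it for a short while. */
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_entry {
	struct list_head link;
	/* ... cached decision ... */
};

static struct kmem_cache *example_cachep;
static LIST_HEAD(example_entries);
static DEFINE_SPINLOCK(example_lock);

static void example_flush(void)
{
	struct example_entry *e, *tmp;

	spin_lock(&example_lock);
	list_for_each_entry_safe(e, tmp, &example_entries, link) {
		list_del(&e->link);
		kmem_cache_free(example_cachep, e);
	}
	spin_unlock(&example_lock);
}

static void example_disable(void)
{
	/* Reclaim the memory, but keep the kmem_cache: a racing user may
	 * still call kmem_cache_alloc() on it, as the comment above notes
	 * for avc_node_cachep. */
	example_flush();
}
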
diff --git a/security/selinux/exports.c b/security/selinux/exports.c
index c73aeaa..c0a454a 100644
--- a/security/selinux/exports.c
+++ b/security/selinux/exports.c
@@ -63,3 +63,9 @@ void selinux_secmark_refcount_dec(void)
atomic_dec(&selinux_secmark_refcount);
}
EXPORT_SYMBOL_GPL(selinux_secmark_refcount_dec);
+
+bool selinux_is_enabled(void)
+{
+ return selinux_enabled;
+}
+EXPORT_SYMBOL_GPL(selinux_is_enabled);
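
selinux_is_enabled() simply exposes the selinux_enabled flag to code outside the SELinux directory. A hypothetical caller, assuming the declaration is picked up from the same header as the other exports in this file:

/* Hypothetical out-of-SELinux caller; assumes the declaration of
 * selinux_is_enabled() is available alongside the other exports here. */
#include <linux/selinux.h>
#include <linux/kernel.h>

static void example_report(void)
{
	if (selinux_is_enabled())
		pr_info("SELinux is enabled on this system\n");
	else
		pr_info("SELinux is not enabled\n");
}
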
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 417f7c9..bb230d5 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2411,7 +2411,7 @@ static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
/* Wake up the parent if it is waiting so that it can recheck
* wait permission to the new task SID. */
read_lock(&tasklist_lock);
- wake_up_interruptible(&current->real_parent->signal->wait_chldexit);
+ __wake_up_parent(current, current->real_parent);
read_unlock(&tasklist_lock);
}
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index acae7ef4..c33b6bb 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -30,17 +30,11 @@
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
#include <linux/audit.h>
+#include <linux/magic.h>
#include "smack.h"
#define task_security(task) (task_cred_xxx((task), security))
-/*
- * I hope these are the hokeyist lines of code in the module. Casey.
- */
-#define DEVPTS_SUPER_MAGIC 0x1cd1
-#define SOCKFS_MAGIC 0x534F434B
-#define TMPFS_MAGIC 0x01021994
-
/**
* smk_fetch - Fetch the smack label from a file.
* @ip: a pointer to the inode
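
The Smack change replaces locally duplicated filesystem magic numbers with the shared definitions in <linux/magic.h>, so DEVPTS_SUPER_MAGIC, SOCKFS_MAGIC and TMPFS_MAGIC now come from one place. A hypothetical example of the kind of check those constants are used for:

/* Hypothetical example: identify a filesystem by its superblock magic,
 * using the shared constant from <linux/magic.h>. */
#include <linux/fs.h>
#include <linux/magic.h>

static bool example_is_tmpfs(const struct super_block *sb)
{
	return sb->s_magic == TMPFS_MAGIC;
}
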
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index f83a809..aeead75 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -187,7 +187,7 @@ static void load_seq_stop(struct seq_file *s, void *v)
/* No-op */
}
-static struct seq_operations load_seq_ops = {
+static const struct seq_operations load_seq_ops = {
.start = load_seq_start,
.next = load_seq_next,
.show = load_seq_show,
@@ -503,7 +503,7 @@ static void cipso_seq_stop(struct seq_file *s, void *v)
/* No-op */
}
-static struct seq_operations cipso_seq_ops = {
+static const struct seq_operations cipso_seq_ops = {
.start = cipso_seq_start,
.stop = cipso_seq_stop,
.next = cipso_seq_next,
@@ -697,7 +697,7 @@ static void netlbladdr_seq_stop(struct seq_file *s, void *v)
/* No-op */
}
-static struct seq_operations netlbladdr_seq_ops = {
+static const struct seq_operations netlbladdr_seq_ops = {
.start = netlbladdr_seq_start,
.stop = netlbladdr_seq_stop,
.next = netlbladdr_seq_next,