Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditsc.c           |    9
-rw-r--r--  kernel/cgroup.c            |    4
-rw-r--r--  kernel/cred.c              |    4
-rw-r--r--  kernel/exit.c              |    7
-rw-r--r--  kernel/kexec.c             |    8
-rw-r--r--  kernel/kfifo.c             |  750
-rw-r--r--  kernel/panic.c             |   60
-rw-r--r--  kernel/pid.c               |   56
-rw-r--r--  kernel/ptrace.c            |   12
-rw-r--r--  kernel/time/timekeeping.c  |   11
-rw-r--r--  kernel/trace/blktrace.c    |    8
11 files changed, 563 insertions, 366 deletions
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index b87a63b..1b31c13 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1835,13 +1835,8 @@ void __audit_getname(const char *name)
context->names[context->name_count].ino = (unsigned long)-1;
context->names[context->name_count].osid = 0;
++context->name_count;
- if (!context->pwd.dentry) {
- read_lock(&current->fs->lock);
- context->pwd = current->fs->pwd;
- path_get(&current->fs->pwd);
- read_unlock(&current->fs->lock);
- }
-
+ if (!context->pwd.dentry)
+ get_fs_pwd(current->fs, &context->pwd);
}
/* audit_putname - intercept a putname request
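For reference, get_fs_pwd() is a helper introduced elsewhere in this series that folds the open-coded lock/copy/get sequence above into one place; a sketch of the 2.6.36-era version, assuming fs_struct still uses an rwlock:

static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
{
	read_lock(&fs->lock);	/* same lock the open-coded version took */
	*pwd = fs->pwd;		/* snapshot the current working directory */
	path_get(pwd);		/* grab a reference while the lock pins fs->pwd */
	read_unlock(&fs->lock);
}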
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d83cab0..192f88c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1102,7 +1102,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
if (opts->release_agent)
return -EINVAL;
opts->release_agent =
- kstrndup(token + 14, PATH_MAX, GFP_KERNEL);
+ kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
if (!opts->release_agent)
return -ENOMEM;
} else if (!strncmp(token, "name=", 5)) {
@@ -1123,7 +1123,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
if (opts->name)
return -EINVAL;
opts->name = kstrndup(name,
- MAX_CGROUP_ROOT_NAMELEN,
+ MAX_CGROUP_ROOT_NAMELEN - 1,
GFP_KERNEL);
if (!opts->name)
return -ENOMEM;
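The off-by-one matters because kstrndup() copies up to @max characters and then appends a terminating NUL, so bounding by PATH_MAX could hand back a PATH_MAX+1-byte string. The same semantics can be observed in userspace with strndup(); the sizes below are shrunk purely for illustration:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define FAKE_PATH_MAX 8	/* stand-in for PATH_MAX, small enough to print */

int main(void)
{
	char long_path[] = "AAAAAAAAAAAA";	/* longer than FAKE_PATH_MAX */

	/* strndup(s, n) copies at most n chars and always adds a NUL,
	 * so the result occupies up to n + 1 bytes - the same rule
	 * kstrndup() follows. Bounding by FAKE_PATH_MAX - 1 keeps the
	 * whole string, NUL included, within FAKE_PATH_MAX bytes. */
	char *bad  = strndup(long_path, FAKE_PATH_MAX);
	char *good = strndup(long_path, FAKE_PATH_MAX - 1);

	printf("bad:  %zu bytes used\n", strlen(bad) + 1);	/* 9 > 8 */
	printf("good: %zu bytes used\n", strlen(good) + 1);	/* 8 <= 8 */
	free(bad);
	free(good);
	return 0;
}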
diff --git a/kernel/cred.c b/kernel/cred.c
index 60bc8b1..9a3e226 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -22,10 +22,6 @@
#define kdebug(FMT, ...) \
printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__)
#else
-static inline __attribute__((format(printf, 1, 2)))
-void no_printk(const char *fmt, ...)
-{
-}
#define kdebug(FMT, ...) \
no_printk("[%-5.5s%5u] "FMT"\n", current->comm, current->pid ,##__VA_ARGS__)
#endif
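The deleted stub duplicated a generic no_printk() that is now expected to come from a shared header; the point of such a stub is that kdebug() arguments still get printf format checking even when the output is compiled away. Roughly (a sketch, not necessarily the exact shared definition):

/* compiled-out printk: gcc still validates the format string
 * against the arguments via the format attribute */
static inline __attribute__((format(printf, 1, 2)))
int no_printk(const char *fmt, ...)
{
	return 0;
}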
diff --git a/kernel/exit.c b/kernel/exit.c
index ceffc67..671ed56 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -771,9 +771,12 @@ static void forget_original_parent(struct task_struct *father)
struct task_struct *p, *n, *reaper;
LIST_HEAD(dead_children);
- exit_ptrace(father);
-
write_lock_irq(&tasklist_lock);
+ /*
+ * Note that exit_ptrace() and find_new_reaper() might
+ * drop tasklist_lock and reacquire it.
+ */
+ exit_ptrace(father);
reaper = find_new_reaper(father);
list_for_each_entry_safe(p, n, &father->children, sibling) {
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 131b170..c0613f7 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -151,8 +151,10 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
image->nr_segments = nr_segments;
segment_bytes = nr_segments * sizeof(*segments);
result = copy_from_user(image->segment, segments, segment_bytes);
- if (result)
+ if (result) {
+ result = -EFAULT;
goto out;
+ }
/*
* Verify we have good destination addresses. The caller is
@@ -827,7 +829,7 @@ static int kimage_load_normal_segment(struct kimage *image,
result = copy_from_user(ptr, buf, uchunk);
kunmap(page);
if (result) {
- result = (result < 0) ? result : -EIO;
+ result = -EFAULT;
goto out;
}
ubytes -= uchunk;
@@ -882,7 +884,7 @@ static int kimage_load_crash_segment(struct kimage *image,
kexec_flush_icache_page(page);
kunmap(page);
if (result) {
- result = (result < 0) ? result : -EIO;
+ result = -EFAULT;
goto out;
}
ubytes -= uchunk;
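Background for the error-code changes: copy_from_user() returns the number of bytes it could not copy and never a negative errno, so the old `result < 0` test was dead code and -EIO mislabeled what is really a fault. A userspace mock of that convention:

#include <stdio.h>
#include <string.h>

/* mock of the kernel convention: return how many bytes could NOT
 * be copied (0 == success), never a negative errno */
static unsigned long mock_copy_from_user(void *dst, const void *src,
					 unsigned long len,
					 unsigned long faulting_at)
{
	unsigned long ok = faulting_at < len ? faulting_at : len;

	memcpy(dst, src, ok);
	return len - ok;	/* bytes left uncopied */
}

int main(void)
{
	char src[16] = "hello, kexec", dst[16];

	/* a fault 5 bytes in leaves 7 of 12 bytes uncopied */
	unsigned long left = mock_copy_from_user(dst, src, 12, 5);
	int result = left ? -14 /* -EFAULT */ : 0;

	printf("uncopied=%lu -> result=%d\n", left, result);
	return 0;
}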
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 35edbe22..4502604 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -1,8 +1,7 @@
/*
- * A generic kernel FIFO implementation.
+ * A generic kernel FIFO implementation
*
- * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net>
- * Copyright (C) 2004 Stelian Pop <stelian@popies.net>
+ * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,7 +10,7 @@
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
@@ -24,422 +23,579 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/kfifo.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
+#include <linux/kfifo.h>
-static void _kfifo_init(struct kfifo *fifo, void *buffer,
- unsigned int size)
-{
- fifo->buffer = buffer;
- fifo->size = size;
-
- kfifo_reset(fifo);
-}
-
-/**
- * kfifo_init - initialize a FIFO using a preallocated buffer
- * @fifo: the fifo to assign the buffer
- * @buffer: the preallocated buffer to be used.
- * @size: the size of the internal buffer, this has to be a power of 2.
- *
+/*
+ * internal helper to calculate the unused elements in a fifo
*/
-void kfifo_init(struct kfifo *fifo, void *buffer, unsigned int size)
+static inline unsigned int kfifo_unused(struct __kfifo *fifo)
{
- /* size must be a power of 2 */
- BUG_ON(!is_power_of_2(size));
-
- _kfifo_init(fifo, buffer, size);
+ return (fifo->mask + 1) - (fifo->in - fifo->out);
}
-EXPORT_SYMBOL(kfifo_init);
-/**
- * kfifo_alloc - allocates a new FIFO internal buffer
- * @fifo: the fifo to assign then new buffer
- * @size: the size of the buffer to be allocated, this have to be a power of 2.
- * @gfp_mask: get_free_pages mask, passed to kmalloc()
- *
- * This function dynamically allocates a new fifo internal buffer
- *
- * The size will be rounded-up to a power of 2.
- * The buffer will be release with kfifo_free().
- * Return 0 if no error, otherwise the an error code
- */
-int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask)
+int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
+ size_t esize, gfp_t gfp_mask)
{
- unsigned char *buffer;
-
/*
- * round up to the next power of 2, since our 'let the indices
+ * round down to the next power of 2, since our 'let the indices
* wrap' technique works only in this case.
*/
- if (!is_power_of_2(size)) {
- BUG_ON(size > 0x80000000);
- size = roundup_pow_of_two(size);
+ if (!is_power_of_2(size))
+ size = rounddown_pow_of_two(size);
+
+ fifo->in = 0;
+ fifo->out = 0;
+ fifo->esize = esize;
+
+ if (size < 2) {
+ fifo->data = NULL;
+ fifo->mask = 0;
+ return -EINVAL;
}
- buffer = kmalloc(size, gfp_mask);
- if (!buffer) {
- _kfifo_init(fifo, NULL, 0);
+ fifo->data = kmalloc(size * esize, gfp_mask);
+
+ if (!fifo->data) {
+ fifo->mask = 0;
return -ENOMEM;
}
-
- _kfifo_init(fifo, buffer, size);
+ fifo->mask = size - 1;
return 0;
}
-EXPORT_SYMBOL(kfifo_alloc);
+EXPORT_SYMBOL(__kfifo_alloc);
-/**
- * kfifo_free - frees the FIFO internal buffer
- * @fifo: the fifo to be freed.
- */
-void kfifo_free(struct kfifo *fifo)
+void __kfifo_free(struct __kfifo *fifo)
{
- kfree(fifo->buffer);
- _kfifo_init(fifo, NULL, 0);
+ kfree(fifo->data);
+ fifo->in = 0;
+ fifo->out = 0;
+ fifo->esize = 0;
+ fifo->data = NULL;
+ fifo->mask = 0;
}
-EXPORT_SYMBOL(kfifo_free);
+EXPORT_SYMBOL(__kfifo_free);
-/**
- * kfifo_skip - skip output data
- * @fifo: the fifo to be used.
- * @len: number of bytes to skip
- */
-void kfifo_skip(struct kfifo *fifo, unsigned int len)
+int __kfifo_init(struct __kfifo *fifo, void *buffer,
+ unsigned int size, size_t esize)
{
- if (len < kfifo_len(fifo)) {
- __kfifo_add_out(fifo, len);
- return;
+ size /= esize;
+
+ if (!is_power_of_2(size))
+ size = rounddown_pow_of_two(size);
+
+ fifo->in = 0;
+ fifo->out = 0;
+ fifo->esize = esize;
+ fifo->data = buffer;
+
+ if (size < 2) {
+ fifo->mask = 0;
+ return -EINVAL;
}
- kfifo_reset_out(fifo);
+ fifo->mask = size - 1;
+
+ return 0;
}
-EXPORT_SYMBOL(kfifo_skip);
+EXPORT_SYMBOL(__kfifo_init);
-static inline void __kfifo_in_data(struct kfifo *fifo,
- const void *from, unsigned int len, unsigned int off)
+static void kfifo_copy_in(struct __kfifo *fifo, const void *src,
+ unsigned int len, unsigned int off)
{
+ unsigned int size = fifo->mask + 1;
+ unsigned int esize = fifo->esize;
unsigned int l;
+ off &= fifo->mask;
+ if (esize != 1) {
+ off *= esize;
+ size *= esize;
+ len *= esize;
+ }
+ l = min(len, size - off);
+
+ memcpy(fifo->data + off, src, l);
+ memcpy(fifo->data, src + l, len - l);
/*
- * Ensure that we sample the fifo->out index -before- we
- * start putting bytes into the kfifo.
+ * make sure that the data in the fifo is up to date before
+ * incrementing the fifo->in index counter
*/
+ smp_wmb();
+}
- smp_mb();
-
- off = __kfifo_off(fifo, fifo->in + off);
+unsigned int __kfifo_in(struct __kfifo *fifo,
+ const void *buf, unsigned int len)
+{
+ unsigned int l;
- /* first put the data starting from fifo->in to buffer end */
- l = min(len, fifo->size - off);
- memcpy(fifo->buffer + off, from, l);
+ l = kfifo_unused(fifo);
+ if (len > l)
+ len = l;
- /* then put the rest (if any) at the beginning of the buffer */
- memcpy(fifo->buffer, from + l, len - l);
+ kfifo_copy_in(fifo, buf, len, fifo->in);
+ fifo->in += len;
+ return len;
}
+EXPORT_SYMBOL(__kfifo_in);
-static inline void __kfifo_out_data(struct kfifo *fifo,
- void *to, unsigned int len, unsigned int off)
+static void kfifo_copy_out(struct __kfifo *fifo, void *dst,
+ unsigned int len, unsigned int off)
{
+ unsigned int size = fifo->mask + 1;
+ unsigned int esize = fifo->esize;
unsigned int l;
+ off &= fifo->mask;
+ if (esize != 1) {
+ off *= esize;
+ size *= esize;
+ len *= esize;
+ }
+ l = min(len, size - off);
+
+ memcpy(dst, fifo->data + off, l);
+ memcpy(dst + l, fifo->data, len - l);
/*
- * Ensure that we sample the fifo->in index -before- we
- * start removing bytes from the kfifo.
+ * make sure that the data is copied before
+ * incrementing the fifo->out index counter
*/
+ smp_wmb();
+}
- smp_rmb();
+unsigned int __kfifo_out_peek(struct __kfifo *fifo,
+ void *buf, unsigned int len)
+{
+ unsigned int l;
- off = __kfifo_off(fifo, fifo->out + off);
+ l = fifo->in - fifo->out;
+ if (len > l)
+ len = l;
- /* first get the data from fifo->out until the end of the buffer */
- l = min(len, fifo->size - off);
- memcpy(to, fifo->buffer + off, l);
+ kfifo_copy_out(fifo, buf, len, fifo->out);
+ return len;
+}
+EXPORT_SYMBOL(__kfifo_out_peek);
- /* then get the rest (if any) from the beginning of the buffer */
- memcpy(to + l, fifo->buffer, len - l);
+unsigned int __kfifo_out(struct __kfifo *fifo,
+ void *buf, unsigned int len)
+{
+ len = __kfifo_out_peek(fifo, buf, len);
+ fifo->out += len;
+ return len;
}
+EXPORT_SYMBOL(__kfifo_out);
-static inline int __kfifo_from_user_data(struct kfifo *fifo,
- const void __user *from, unsigned int len, unsigned int off,
- unsigned *lenout)
+static unsigned long kfifo_copy_from_user(struct __kfifo *fifo,
+ const void __user *from, unsigned int len, unsigned int off,
+ unsigned int *copied)
{
+ unsigned int size = fifo->mask + 1;
+ unsigned int esize = fifo->esize;
unsigned int l;
- int ret;
+ unsigned long ret;
+ off &= fifo->mask;
+ if (esize != 1) {
+ off *= esize;
+ size *= esize;
+ len *= esize;
+ }
+ l = min(len, size - off);
+
+ ret = copy_from_user(fifo->data + off, from, l);
+ if (unlikely(ret))
+ ret = DIV_ROUND_UP(ret + len - l, esize);
+ else {
+ ret = copy_from_user(fifo->data, from + l, len - l);
+ if (unlikely(ret))
+ ret = DIV_ROUND_UP(ret, esize);
+ }
/*
- * Ensure that we sample the fifo->out index -before- we
- * start putting bytes into the kfifo.
+ * make sure that the data in the fifo is up to date before
+ * incrementing the fifo->in index counter
*/
+ smp_wmb();
+ *copied = len - ret;
+ /* return the number of elements which are not copied */
+ return ret;
+}
- smp_mb();
+int __kfifo_from_user(struct __kfifo *fifo, const void __user *from,
+ unsigned long len, unsigned int *copied)
+{
+ unsigned int l;
+ unsigned long ret;
+ unsigned int esize = fifo->esize;
+ int err;
- off = __kfifo_off(fifo, fifo->in + off);
+ if (esize != 1)
+ len /= esize;
- /* first put the data starting from fifo->in to buffer end */
- l = min(len, fifo->size - off);
- ret = copy_from_user(fifo->buffer + off, from, l);
- if (unlikely(ret)) {
- *lenout = ret;
- return -EFAULT;
- }
- *lenout = l;
+ l = kfifo_unused(fifo);
+ if (len > l)
+ len = l;
- /* then put the rest (if any) at the beginning of the buffer */
- ret = copy_from_user(fifo->buffer, from + l, len - l);
- *lenout += ret ? ret : len - l;
- return ret ? -EFAULT : 0;
+ ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied);
+ if (unlikely(ret)) {
+ len -= ret;
+ err = -EFAULT;
+ } else
+ err = 0;
+ fifo->in += len;
+ return err;
}
+EXPORT_SYMBOL(__kfifo_from_user);
-static inline int __kfifo_to_user_data(struct kfifo *fifo,
- void __user *to, unsigned int len, unsigned int off, unsigned *lenout)
+static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to,
+ unsigned int len, unsigned int off, unsigned int *copied)
{
unsigned int l;
- int ret;
-
+ unsigned long ret;
+ unsigned int size = fifo->mask + 1;
+ unsigned int esize = fifo->esize;
+
+ off &= fifo->mask;
+ if (esize != 1) {
+ off *= esize;
+ size *= esize;
+ len *= esize;
+ }
+ l = min(len, size - off);
+
+ ret = copy_to_user(to, fifo->data + off, l);
+ if (unlikely(ret))
+ ret = DIV_ROUND_UP(ret + len - l, esize);
+ else {
+ ret = copy_to_user(to + l, fifo->data, len - l);
+ if (unlikely(ret))
+ ret = DIV_ROUND_UP(ret, esize);
+ }
/*
- * Ensure that we sample the fifo->in index -before- we
- * start removing bytes from the kfifo.
+ * make sure that the data is copied before
+ * incrementing the fifo->out index counter
*/
+ smp_wmb();
+ *copied = len - ret;
+ /* return the number of elements which are not copied */
+ return ret;
+}
- smp_rmb();
+int __kfifo_to_user(struct __kfifo *fifo, void __user *to,
+ unsigned long len, unsigned int *copied)
+{
+ unsigned int l;
+ unsigned long ret;
+ unsigned int esize = fifo->esize;
+ int err;
- off = __kfifo_off(fifo, fifo->out + off);
+ if (esize != 1)
+ len /= esize;
- /* first get the data from fifo->out until the end of the buffer */
- l = min(len, fifo->size - off);
- ret = copy_to_user(to, fifo->buffer + off, l);
- *lenout = l;
+ l = fifo->in - fifo->out;
+ if (len > l)
+ len = l;
+ ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied);
if (unlikely(ret)) {
- *lenout -= ret;
- return -EFAULT;
- }
+ len -= ret;
+ err = -EFAULT;
+ } else
+ err = 0;
+ fifo->out += len;
+ return err;
+}
+EXPORT_SYMBOL(__kfifo_to_user);
- /* then get the rest (if any) from the beginning of the buffer */
- len -= l;
- ret = copy_to_user(to + l, fifo->buffer, len);
- if (unlikely(ret)) {
- *lenout += len - ret;
- return -EFAULT;
+static int setup_sgl_buf(struct scatterlist *sgl, void *buf,
+ int nents, unsigned int len)
+{
+ int n;
+ unsigned int l;
+ unsigned int off;
+ struct page *page;
+
+ if (!nents)
+ return 0;
+
+ if (!len)
+ return 0;
+
+ n = 0;
+ page = virt_to_page(buf);
+ off = offset_in_page(buf);
+ l = 0;
+
+ while (len >= l + PAGE_SIZE - off) {
+ struct page *npage;
+
+ l += PAGE_SIZE;
+ buf += PAGE_SIZE;
+ npage = virt_to_page(buf);
+ if (page_to_phys(page) != page_to_phys(npage) - l) {
+ sg_set_page(sgl, page, l - off, off);
+ sgl = sg_next(sgl);
+ if (++n == nents || sgl == NULL)
+ return n;
+ page = npage;
+ len -= l - off;
+ l = off = 0;
+ }
}
- *lenout += len;
- return 0;
+ sg_set_page(sgl, page, len, off);
+ return n + 1;
}
-unsigned int __kfifo_in_n(struct kfifo *fifo,
- const void *from, unsigned int len, unsigned int recsize)
+static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
+ int nents, unsigned int len, unsigned int off)
{
- if (kfifo_avail(fifo) < len + recsize)
- return len + 1;
+ unsigned int size = fifo->mask + 1;
+ unsigned int esize = fifo->esize;
+ unsigned int l;
+ unsigned int n;
- __kfifo_in_data(fifo, from, len, recsize);
- return 0;
+ off &= fifo->mask;
+ if (esize != 1) {
+ off *= esize;
+ size *= esize;
+ len *= esize;
+ }
+ l = min(len, size - off);
+
+ n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
+ n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
+
+ if (n)
+ sg_mark_end(sgl + n - 1);
+ return n;
}
-EXPORT_SYMBOL(__kfifo_in_n);
-/**
- * kfifo_in - puts some data into the FIFO
- * @fifo: the fifo to be used.
- * @from: the data to be added.
- * @len: the length of the data to be added.
- *
- * This function copies at most @len bytes from the @from buffer into
- * the FIFO depending on the free space, and returns the number of
- * bytes copied.
- *
- * Note that with only one concurrent reader and one concurrent
- * writer, you don't need extra locking to use these functions.
- */
-unsigned int kfifo_in(struct kfifo *fifo, const void *from,
- unsigned int len)
+unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len)
{
- len = min(kfifo_avail(fifo), len);
+ unsigned int l;
- __kfifo_in_data(fifo, from, len, 0);
- __kfifo_add_in(fifo, len);
- return len;
+ l = kfifo_unused(fifo);
+ if (len > l)
+ len = l;
+
+ return setup_sgl(fifo, sgl, nents, len, fifo->in);
}
-EXPORT_SYMBOL(kfifo_in);
+EXPORT_SYMBOL(__kfifo_dma_in_prepare);
-unsigned int __kfifo_in_generic(struct kfifo *fifo,
- const void *from, unsigned int len, unsigned int recsize)
+unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len)
{
- return __kfifo_in_rec(fifo, from, len, recsize);
+ unsigned int l;
+
+ l = fifo->in - fifo->out;
+ if (len > l)
+ len = l;
+
+ return setup_sgl(fifo, sgl, nents, len, fifo->out);
}
-EXPORT_SYMBOL(__kfifo_in_generic);
+EXPORT_SYMBOL(__kfifo_dma_out_prepare);
-unsigned int __kfifo_out_n(struct kfifo *fifo,
- void *to, unsigned int len, unsigned int recsize)
+unsigned int __kfifo_max_r(unsigned int len, size_t recsize)
{
- if (kfifo_len(fifo) < len + recsize)
- return len;
+ unsigned int max = (1 << (recsize << 3)) - 1;
- __kfifo_out_data(fifo, to, len, recsize);
- __kfifo_add_out(fifo, len + recsize);
- return 0;
+ if (len > max)
+ return max;
+ return len;
}
-EXPORT_SYMBOL(__kfifo_out_n);
-/**
- * kfifo_out - gets some data from the FIFO
- * @fifo: the fifo to be used.
- * @to: where the data must be copied.
- * @len: the size of the destination buffer.
- *
- * This function copies at most @len bytes from the FIFO into the
- * @to buffer and returns the number of copied bytes.
- *
- * Note that with only one concurrent reader and one concurrent
- * writer, you don't need extra locking to use these functions.
+#define __KFIFO_PEEK(data, out, mask) \
+ ((data)[(out) & (mask)])
+/*
+ * __kfifo_peek_n internal helper function for determining the length of
+ * the next record in the fifo
*/
-unsigned int kfifo_out(struct kfifo *fifo, void *to, unsigned int len)
+static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize)
{
- len = min(kfifo_len(fifo), len);
+ unsigned int l;
+ unsigned int mask = fifo->mask;
+ unsigned char *data = fifo->data;
- __kfifo_out_data(fifo, to, len, 0);
- __kfifo_add_out(fifo, len);
+ l = __KFIFO_PEEK(data, fifo->out, mask);
- return len;
+ if (--recsize)
+ l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8;
+
+ return l;
}
-EXPORT_SYMBOL(kfifo_out);
-
-/**
- * kfifo_out_peek - copy some data from the FIFO, but do not remove it
- * @fifo: the fifo to be used.
- * @to: where the data must be copied.
- * @len: the size of the destination buffer.
- * @offset: offset into the fifo
- *
- * This function copies at most @len bytes at @offset from the FIFO
- * into the @to buffer and returns the number of copied bytes.
- * The data is not removed from the FIFO.
+
+#define __KFIFO_POKE(data, in, mask, val) \
+ ( \
+ (data)[(in) & (mask)] = (unsigned char)(val) \
+ )
+
+/*
+ * __kfifo_poke_n internal helper function for storing the length of
+ * the record into the fifo
*/
-unsigned int kfifo_out_peek(struct kfifo *fifo, void *to, unsigned int len,
- unsigned offset)
+static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize)
{
- len = min(kfifo_len(fifo), len + offset);
+ unsigned int mask = fifo->mask;
+ unsigned char *data = fifo->data;
- __kfifo_out_data(fifo, to, len, offset);
- return len;
+ __KFIFO_POKE(data, fifo->in, mask, n);
+
+ if (recsize > 1)
+ __KFIFO_POKE(data, fifo->in + 1, mask, n >> 8);
}
-EXPORT_SYMBOL(kfifo_out_peek);
-unsigned int __kfifo_out_generic(struct kfifo *fifo,
- void *to, unsigned int len, unsigned int recsize,
- unsigned int *total)
+unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize)
{
- return __kfifo_out_rec(fifo, to, len, recsize, total);
+ return __kfifo_peek_n(fifo, recsize);
}
-EXPORT_SYMBOL(__kfifo_out_generic);
+EXPORT_SYMBOL(__kfifo_len_r);
-unsigned int __kfifo_from_user_n(struct kfifo *fifo,
- const void __user *from, unsigned int len, unsigned int recsize)
+unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf,
+ unsigned int len, size_t recsize)
{
- unsigned total;
+ if (len + recsize > kfifo_unused(fifo))
+ return 0;
- if (kfifo_avail(fifo) < len + recsize)
- return len + 1;
+ __kfifo_poke_n(fifo, len, recsize);
- __kfifo_from_user_data(fifo, from, len, recsize, &total);
- return total;
+ kfifo_copy_in(fifo, buf, len, fifo->in + recsize);
+ fifo->in += len + recsize;
+ return len;
}
-EXPORT_SYMBOL(__kfifo_from_user_n);
-
-/**
- * kfifo_from_user - puts some data from user space into the FIFO
- * @fifo: the fifo to be used.
- * @from: pointer to the data to be added.
- * @len: the length of the data to be added.
- * @total: the actual returned data length.
- *
- * This function copies at most @len bytes from the @from into the
- * FIFO depending and returns -EFAULT/0.
- *
- * Note that with only one concurrent reader and one concurrent
- * writer, you don't need extra locking to use these functions.
- */
-int kfifo_from_user(struct kfifo *fifo,
- const void __user *from, unsigned int len, unsigned *total)
+EXPORT_SYMBOL(__kfifo_in_r);
+
+static unsigned int kfifo_out_copy_r(struct __kfifo *fifo,
+ void *buf, unsigned int len, size_t recsize, unsigned int *n)
{
- int ret;
- len = min(kfifo_avail(fifo), len);
- ret = __kfifo_from_user_data(fifo, from, len, 0, total);
- if (ret)
- return ret;
- __kfifo_add_in(fifo, len);
- return 0;
+ *n = __kfifo_peek_n(fifo, recsize);
+
+ if (len > *n)
+ len = *n;
+
+ kfifo_copy_out(fifo, buf, len, fifo->out + recsize);
+ return len;
+}
+
+unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf,
+ unsigned int len, size_t recsize)
+{
+ unsigned int n;
+
+ if (fifo->in == fifo->out)
+ return 0;
+
+ return kfifo_out_copy_r(fifo, buf, len, recsize, &n);
}
-EXPORT_SYMBOL(kfifo_from_user);
+EXPORT_SYMBOL(__kfifo_out_peek_r);
-unsigned int __kfifo_from_user_generic(struct kfifo *fifo,
- const void __user *from, unsigned int len, unsigned int recsize)
+unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
+ unsigned int len, size_t recsize)
{
- return __kfifo_from_user_rec(fifo, from, len, recsize);
+ unsigned int n;
+
+ if (fifo->in == fifo->out)
+ return 0;
+
+ len = kfifo_out_copy_r(fifo, buf, len, recsize, &n);
+ fifo->out += n + recsize;
+ return len;
}
-EXPORT_SYMBOL(__kfifo_from_user_generic);
+EXPORT_SYMBOL(__kfifo_out_r);
-unsigned int __kfifo_to_user_n(struct kfifo *fifo,
- void __user *to, unsigned int len, unsigned int reclen,
- unsigned int recsize)
+int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
+ unsigned long len, unsigned int *copied, size_t recsize)
{
- unsigned int ret, total;
+ unsigned long ret;
- if (kfifo_len(fifo) < reclen + recsize)
- return len;
+ len = __kfifo_max_r(len, recsize);
- ret = __kfifo_to_user_data(fifo, to, reclen, recsize, &total);
+ if (len + recsize > kfifo_unused(fifo)) {
+ *copied = 0;
+ return 0;
+ }
- if (likely(ret == 0))
- __kfifo_add_out(fifo, reclen + recsize);
+ __kfifo_poke_n(fifo, len, recsize);
- return total;
+ ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied);
+ if (unlikely(ret)) {
+ *copied = 0;
+ return -EFAULT;
+ }
+ fifo->in += len + recsize;
+ return 0;
}
-EXPORT_SYMBOL(__kfifo_to_user_n);
-
-/**
- * kfifo_to_user - gets data from the FIFO and write it to user space
- * @fifo: the fifo to be used.
- * @to: where the data must be copied.
- * @len: the size of the destination buffer.
- * @lenout: pointer to output variable with copied data
- *
- * This function copies at most @len bytes from the FIFO into the
- * @to buffer and 0 or -EFAULT.
- *
- * Note that with only one concurrent reader and one concurrent
- * writer, you don't need extra locking to use these functions.
- */
-int kfifo_to_user(struct kfifo *fifo,
- void __user *to, unsigned int len, unsigned *lenout)
+EXPORT_SYMBOL(__kfifo_from_user_r);
+
+int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
+ unsigned long len, unsigned int *copied, size_t recsize)
{
- int ret;
- len = min(kfifo_len(fifo), len);
- ret = __kfifo_to_user_data(fifo, to, len, 0, lenout);
- __kfifo_add_out(fifo, *lenout);
- return ret;
+ unsigned long ret;
+ unsigned int n;
+
+ if (fifo->in == fifo->out) {
+ *copied = 0;
+ return 0;
+ }
+
+ n = __kfifo_peek_n(fifo, recsize);
+ if (len > n)
+ len = n;
+
+ ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied);
+ if (unlikely(ret)) {
+ *copied = 0;
+ return -EFAULT;
+ }
+ fifo->out += n + recsize;
+ return 0;
}
-EXPORT_SYMBOL(kfifo_to_user);
+EXPORT_SYMBOL(__kfifo_to_user_r);
-unsigned int __kfifo_to_user_generic(struct kfifo *fifo,
- void __user *to, unsigned int len, unsigned int recsize,
- unsigned int *total)
+unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
{
- return __kfifo_to_user_rec(fifo, to, len, recsize, total);
+ if (!nents)
+ BUG();
+
+ len = __kfifo_max_r(len, recsize);
+
+ if (len + recsize > kfifo_unused(fifo))
+ return 0;
+
+ return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize);
}
-EXPORT_SYMBOL(__kfifo_to_user_generic);
+EXPORT_SYMBOL(__kfifo_dma_in_prepare_r);
-unsigned int __kfifo_peek_generic(struct kfifo *fifo, unsigned int recsize)
+void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
+ unsigned int len, size_t recsize)
{
- if (recsize == 0)
- return kfifo_avail(fifo);
-
- return __kfifo_peek_n(fifo, recsize);
+ len = __kfifo_max_r(len, recsize);
+ __kfifo_poke_n(fifo, len, recsize);
+ fifo->in += len + recsize;
}
-EXPORT_SYMBOL(__kfifo_peek_generic);
+EXPORT_SYMBOL(__kfifo_dma_in_finish_r);
-void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize)
+unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
{
- __kfifo_skip_rec(fifo, recsize);
+ if (!nents)
+ BUG();
+
+ len = __kfifo_max_r(len, recsize);
+
+ if (len + recsize > fifo->in - fifo->out)
+ return 0;
+
+ return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize);
}
-EXPORT_SYMBOL(__kfifo_skip_generic);
+EXPORT_SYMBOL(__kfifo_dma_out_prepare_r);
+
+void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize)
+{
+ unsigned int len;
+ len = __kfifo_peek_n(fifo, recsize);
+ fifo->out += len + recsize;
+}
+EXPORT_SYMBOL(__kfifo_dma_out_finish_r);
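The rewrite keeps the classic kfifo invariant: `in` and `out` are free-running unsigned counters over a power-of-two buffer, so `index & mask` addresses storage and `in - out` is the fill level even across integer wraparound. A stripped-down, single-threaded userspace model of the byte case (esize == 1, memory barriers omitted), with all names invented for the demo:

#include <stdio.h>
#include <string.h>

#define FIFO_SIZE 8	/* must be a power of two */

struct mini_fifo {
	unsigned char data[FIFO_SIZE];
	unsigned int in;	/* free-running write counter */
	unsigned int out;	/* free-running read counter */
	unsigned int mask;	/* FIFO_SIZE - 1 */
};

static unsigned int mini_unused(const struct mini_fifo *f)
{
	return (f->mask + 1) - (f->in - f->out);
}

static unsigned int mini_in(struct mini_fifo *f, const void *src,
			    unsigned int len)
{
	unsigned int off, l;

	if (len > mini_unused(f))
		len = mini_unused(f);
	off = f->in & f->mask;
	l = len < f->mask + 1 - off ? len : f->mask + 1 - off;
	memcpy(f->data + off, src, l);			 /* up to the end... */
	memcpy(f->data, (const char *)src + l, len - l); /* ...then wrap */
	f->in += len;	/* unsigned overflow is well defined and harmless */
	return len;
}

static unsigned int mini_out(struct mini_fifo *f, void *dst, unsigned int len)
{
	unsigned int off, l, avail = f->in - f->out;

	if (len > avail)
		len = avail;
	off = f->out & f->mask;
	l = len < f->mask + 1 - off ? len : f->mask + 1 - off;
	memcpy(dst, f->data + off, l);
	memcpy((char *)dst + l, f->data, len - l);
	f->out += len;
	return len;
}

int main(void)
{
	struct mini_fifo f = { .mask = FIFO_SIZE - 1 };
	char buf[16] = { 0 };

	mini_in(&f, "abcdef", 6);
	mini_out(&f, buf, 4);		/* "abcd" leaves, out advances */
	mini_in(&f, "123456", 6);	/* wraps around the end of data[] */
	printf("%u unused\n", mini_unused(&f));	/* 0: e,f,1..6 fill it */
	mini_out(&f, buf, 8);
	buf[8] = '\0';
	printf("%s\n", buf);		/* "ef123456" */
	return 0;
}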
diff --git a/kernel/panic.c b/kernel/panic.c
index 3b16cd9..4c13b1a 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -24,6 +24,9 @@
#include <linux/nmi.h>
#include <linux/dmi.h>
+#define PANIC_TIMER_STEP 100
+#define PANIC_BLINK_SPD 18
+
int panic_on_oops;
static unsigned long tainted_mask;
static int pause_on_oops;
@@ -36,36 +39,15 @@ ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
EXPORT_SYMBOL(panic_notifier_list);
-/* Returns how long it waited in ms */
-long (*panic_blink)(long time);
-EXPORT_SYMBOL(panic_blink);
-
-static void panic_blink_one_second(void)
+static long no_blink(int state)
{
- static long i = 0, end;
-
- if (panic_blink) {
- end = i + MSEC_PER_SEC;
-
- while (i < end) {
- i += panic_blink(i);
- mdelay(1);
- i++;
- }
- } else {
- /*
- * When running under a hypervisor a small mdelay may get
- * rounded up to the hypervisor timeslice. For example, with
- * a 1ms in 10ms hypervisor timeslice we might inflate a
- * mdelay(1) loop by 10x.
- *
- * If we have nothing to blink, spin on 1 second calls to
- * mdelay to avoid this.
- */
- mdelay(MSEC_PER_SEC);
- }
+ return 0;
}
+/* Returns how long it waited in ms */
+long (*panic_blink)(int state);
+EXPORT_SYMBOL(panic_blink);
+
/**
* panic - halt the system
* @fmt: The text string to print
@@ -78,7 +60,8 @@ NORET_TYPE void panic(const char * fmt, ...)
{
static char buf[1024];
va_list args;
- long i;
+ long i, i_next = 0;
+ int state = 0;
/*
* It's possible to come here directly from a panic-assertion and
@@ -117,6 +100,9 @@ NORET_TYPE void panic(const char * fmt, ...)
bust_spinlocks(0);
+ if (!panic_blink)
+ panic_blink = no_blink;
+
if (panic_timeout > 0) {
/*
* Delay timeout seconds before rebooting the machine.
@@ -124,9 +110,13 @@ NORET_TYPE void panic(const char * fmt, ...)
*/
printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);
- for (i = 0; i < panic_timeout; i++) {
+ for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
touch_nmi_watchdog();
- panic_blink_one_second();
+ if (i >= i_next) {
+ i += panic_blink(state ^= 1);
+ i_next = i + 3600 / PANIC_BLINK_SPD;
+ }
+ mdelay(PANIC_TIMER_STEP);
}
/*
* This will not be a clean reboot, with everything
@@ -152,9 +142,13 @@ NORET_TYPE void panic(const char * fmt, ...)
}
#endif
local_irq_enable();
- while (1) {
+ for (i = 0; ; i += PANIC_TIMER_STEP) {
touch_softlockup_watchdog();
- panic_blink_one_second();
+ if (i >= i_next) {
+ i += panic_blink(state ^= 1);
+ i_next = i + 3600 / PANIC_BLINK_SPD;
+ }
+ mdelay(PANIC_TIMER_STEP);
}
}
@@ -344,7 +338,7 @@ static int init_oops_id(void)
}
late_initcall(init_oops_id);
-static void print_oops_end_marker(void)
+void print_oops_end_marker(void)
{
init_oops_id();
printk(KERN_WARNING "---[ end trace %016llx ]---\n",
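The reworked panic loop counts milliseconds in PANIC_TIMER_STEP increments and toggles the blink callback every 3600/PANIC_BLINK_SPD = 200 ms, crediting whatever delay the callback reports. A userspace mock of that cadence, with the mdelay() elided and the constants copied from the patch:

#include <stdio.h>

#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

/* stand-in for a panic_blink() callback; returns ms spent blinking */
static long mock_blink(int state)
{
	printf("LED %s\n", state ? "on" : "off");
	return 0;
}

int main(void)
{
	int panic_timeout = 1;	/* seconds, as in the kernel variable */
	long i, i_next = 0;
	int state = 0;

	for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
		if (i >= i_next) {
			i += mock_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;	/* 200 ms */
		}
		/* mdelay(PANIC_TIMER_STEP) in the kernel; elided here */
	}
	return 0;	/* five toggles fall inside the 1 s window */
}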
diff --git a/kernel/pid.c b/kernel/pid.c
index e9fd8c1..d55c6fb 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -122,6 +122,43 @@ static void free_pidmap(struct upid *upid)
atomic_inc(&map->nr_free);
}
+/*
+ * If we started walking pids at 'base', is 'a' seen before 'b'?
+ */
+static int pid_before(int base, int a, int b)
+{
+ /*
+ * This is the same as saying
+ *
+ * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
+ * and that mapping orders 'a' and 'b' with respect to 'base'.
+ */
+ return (unsigned)(a - base) < (unsigned)(b - base);
+}
+
+/*
+ * We might be racing with someone else trying to set pid_ns->last_pid.
+ * We want the winner to have the "later" value, because if the
+ * "earlier" value prevails, then a pid may get reused immediately.
+ *
+ * Since pids rollover, it is not sufficient to just pick the bigger
+ * value. We have to consider where we started counting from.
+ *
+ * 'base' is the value of pid_ns->last_pid that we observed when
+ * we started looking for a pid.
+ *
+ * 'pid' is the pid that we eventually found.
+ */
+static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
+{
+ int prev;
+ int last_write = base;
+ do {
+ prev = last_write;
+ last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
+ } while ((prev != last_write) && (pid_before(base, last_write, pid)));
+}
+
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
int i, offset, max_scan, pid, last = pid_ns->last_pid;
@@ -132,7 +169,12 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
pid = RESERVED_PIDS;
offset = pid & BITS_PER_PAGE_MASK;
map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
- max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
+ /*
+ * If last_pid points into the middle of the map->page we
+ * want to scan this bitmap block twice, the second time
+ * we start with offset == 0 (or RESERVED_PIDS).
+ */
+ max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
for (i = 0; i <= max_scan; ++i) {
if (unlikely(!map->page)) {
void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
@@ -154,20 +196,12 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
do {
if (!test_and_set_bit(offset, map->page)) {
atomic_dec(&map->nr_free);
- pid_ns->last_pid = pid;
+ set_last_pid(pid_ns, last, pid);
return pid;
}
offset = find_next_offset(map, offset);
pid = mk_pid(pid_ns, map, offset);
- /*
- * find_next_offset() found a bit, the pid from it
- * is in-bounds, and if we fell back to the last
- * bitmap block and the final block was the same
- * as the starting point, pid is before last_pid.
- */
- } while (offset < BITS_PER_PAGE && pid < pid_max &&
- (i != max_scan || pid < last ||
- !((last+1) & BITS_PER_PAGE_MASK)));
+ } while (offset < BITS_PER_PAGE && pid < pid_max);
}
if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
++map;
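pid_before() relies on unsigned subtraction: distances are measured modulo 2^32 from `base`, so "later in the scan" stays well defined even after pid values wrap, and the cmpxchg loop lets the racing writer with the later pid win. A userspace rendering using GCC's __sync_val_compare_and_swap builtin; everything below is illustrative:

#include <stdio.h>

static int last_pid;	/* stands in for pid_ns->last_pid */

/* walking pids from 'base', is 'a' seen before 'b'? (mod 2^32) */
static int pid_before(int base, int a, int b)
{
	return (unsigned)(a - base) < (unsigned)(b - base);
}

static void set_last_pid(int base, int pid)
{
	int prev, last_write = base;

	do {
		prev = last_write;
		last_write = __sync_val_compare_and_swap(&last_pid,
							 prev, pid);
		/* retry only while someone else won the race with a pid
		 * that is still "earlier" than ours relative to base */
	} while (prev != last_write && pid_before(base, last_write, pid));
}

int main(void)
{
	last_pid = 300;
	/* two racers that both started scanning at base == 300 */
	set_last_pid(300, 305);	/* racer A found 305 */
	set_last_pid(300, 302);	/* racer B found 302: loses, 305 stays */
	printf("last_pid = %d\n", last_pid);	/* 305 */
	return 0;
}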
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 74a3d69..f34d798 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -324,26 +324,32 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
}
/*
- * Detach all tasks we were using ptrace on.
+ * Detach all tasks we were using ptrace on. Called with tasklist held
+ * for writing, and returns with it held too. But note it can release
+ * and reacquire the lock.
*/
void exit_ptrace(struct task_struct *tracer)
{
struct task_struct *p, *n;
LIST_HEAD(ptrace_dead);
- write_lock_irq(&tasklist_lock);
+ if (likely(list_empty(&tracer->ptraced)))
+ return;
+
list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
if (__ptrace_detach(tracer, p))
list_add(&p->ptrace_entry, &ptrace_dead);
}
- write_unlock_irq(&tasklist_lock);
+ write_unlock_irq(&tasklist_lock);
BUG_ON(!list_empty(&tracer->ptraced));
list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
list_del_init(&p->ptrace_entry);
release_task(p);
}
+
+ write_lock_irq(&tasklist_lock);
}
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
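The new contract is that exit_ptrace() is entered and exited with tasklist_lock write-held but may drop and retake it internally (release_task() must run unlocked), so callers cannot cache anything derived from the task lists across the call. A minimal pthread illustration of why such a contract forces revalidation:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared;

/* like exit_ptrace(): called and returns with 'lock' held, but
 * releases it in the middle, so other threads may run and change
 * 'shared' before the caller resumes */
static void drops_lock_inside(void)
{
	pthread_mutex_unlock(&lock);
	/* ...the world may change here... */
	pthread_mutex_lock(&lock);
}

int main(void)
{
	pthread_mutex_lock(&lock);
	int cached = shared;	/* snapshot taken under the lock */

	drops_lock_inside();
	/* 'cached' may now be stale: anything read before the call must
	 * be re-read, just as forget_original_parent() must not reuse
	 * state computed before exit_ptrace() */
	if (cached != shared)
		printf("state changed across the call\n");
	pthread_mutex_unlock(&lock);
	return 0;
}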
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e14c839..e960d82 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -690,6 +690,7 @@ static void timekeeping_adjust(s64 offset)
static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
{
u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
+ u64 raw_nsecs;
/* If the offset is smaller then a shifted interval, do nothing */
if (offset < timekeeper.cycle_interval<<shift)
@@ -706,12 +707,14 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
second_overflow();
}
- /* Accumulate into raw time */
- raw_time.tv_nsec += timekeeper.raw_interval << shift;;
- while (raw_time.tv_nsec >= NSEC_PER_SEC) {
- raw_time.tv_nsec -= NSEC_PER_SEC;
+ /* Accumulate raw time */
+ raw_nsecs = timekeeper.raw_interval << shift;
+ raw_nsecs += raw_time.tv_nsec;
+ while (raw_nsecs >= NSEC_PER_SEC) {
+ raw_nsecs -= NSEC_PER_SEC;
raw_time.tv_sec++;
}
+ raw_time.tv_nsec = raw_nsecs;
/* Accumulate error between NTP and clock interval */
timekeeper.ntp_error += tick_length << shift;
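The fix stages the addition in a u64: `timekeeper.raw_interval << shift` plus the old tv_nsec can exceed what tv_nsec (a long, 32 bits on many architectures) may even briefly hold, so the carries are worked off in the wide type first. The normalization step in isolation, as a userspace check:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

struct ts { long tv_sec; long tv_nsec; };

/* accumulate 'delta' ns into t, staging in a u64 so the sum can
 * exceed the range of tv_nsec while carries move into tv_sec */
static void accumulate_raw(struct ts *t, uint64_t delta)
{
	uint64_t raw_nsecs = delta + (uint64_t)t->tv_nsec;

	while (raw_nsecs >= NSEC_PER_SEC) {
		raw_nsecs -= NSEC_PER_SEC;
		t->tv_sec++;
	}
	t->tv_nsec = (long)raw_nsecs;
}

int main(void)
{
	struct ts t = { 0, 900000000L };

	/* a large shifted interval, too big for a 32-bit tv_nsec
	 * to hold even transiently */
	accumulate_raw(&t, 5000000000ULL);
	printf("%ld.%09ld\n", t.tv_sec, t.tv_nsec);	/* 5.900000000 */
	return 0;
}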
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 82499a5..959f8d6 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -710,6 +710,9 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
if (rq->cmd_flags & REQ_DISCARD)
rw |= REQ_DISCARD;
+ if (rq->cmd_flags & REQ_SECURE)
+ rw |= REQ_SECURE;
+
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
what |= BLK_TC_ACT(BLK_TC_PC);
__blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
@@ -1816,6 +1819,8 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
rwbs[i++] = 'S';
if (rw & REQ_META)
rwbs[i++] = 'M';
+ if (rw & REQ_SECURE)
+ rwbs[i++] = 'E';
rwbs[i] = '\0';
}
@@ -1828,6 +1833,9 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
if (rq->cmd_flags & REQ_DISCARD)
rw |= REQ_DISCARD;
+ if (rq->cmd_flags & REQ_SECURE)
+ rw |= REQ_SECURE;
+
bytes = blk_rq_bytes(rq);
blk_fill_rwbs(rwbs, rw, bytes);
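With REQ_SECURE plumbed through, blk_fill_rwbs() gains an 'E' letter for secure-discard requests alongside the existing ones. A toy encoder in the same shape; the flag values here are invented for the demo (the real ones live in include/linux/blk_types.h):

#include <stdio.h>

#define REQ_WRITE	(1 << 0)
#define REQ_DISCARD	(1 << 1)
#define REQ_SYNC	(1 << 2)
#define REQ_META	(1 << 3)
#define REQ_SECURE	(1 << 4)

static void fill_rwbs(char *rwbs, unsigned int rw, int bytes)
{
	int i = 0;

	if (rw & REQ_DISCARD)
		rwbs[i++] = 'D';
	else if (rw & REQ_WRITE)
		rwbs[i++] = 'W';
	else if (bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';
	if (rw & REQ_SYNC)
		rwbs[i++] = 'S';
	if (rw & REQ_META)
		rwbs[i++] = 'M';
	if (rw & REQ_SECURE)	/* the new letter from this patch */
		rwbs[i++] = 'E';
	rwbs[i] = '\0';
}

int main(void)
{
	char rwbs[8];

	fill_rwbs(rwbs, REQ_WRITE | REQ_DISCARD | REQ_SECURE, 4096);
	printf("%s\n", rwbs);	/* "DE": discard, secure */
	return 0;
}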