-rw-r--r--  CREDITS                                        |   2
-rw-r--r--  MAINTAINERS                                    |   2
-rw-r--r--  arch/i386/Kconfig                              |   1
-rw-r--r--  arch/i386/kernel/cpu/amd.c                     |   8
-rw-r--r--  arch/i386/kernel/cpu/centaur.c                 |   8
-rw-r--r--  arch/i386/kernel/cpu/common.c                  |  11
-rw-r--r--  arch/i386/kernel/cpu/cyrix.c                   |  18
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c         |   1
-rw-r--r--  arch/i386/kernel/cpu/nexgen.c                  |   8
-rw-r--r--  arch/i386/kernel/cpu/rise.c                    |   8
-rw-r--r--  arch/i386/kernel/cpu/transmeta.c               |  10
-rw-r--r--  arch/i386/kernel/cpu/umc.c                     |   8
-rw-r--r--  arch/i386/kernel/nmi.c                         |   2
-rw-r--r--  arch/i386/kernel/process.c                     |   6
-rw-r--r--  arch/i386/kernel/traps.c                       |   9
-rw-r--r--  arch/x86_64/kernel/smpboot.c                   |   3
-rw-r--r--  block/elevator.c                               |   3
-rw-r--r--  block/ll_rw_blk.c                              |  40
-rw-r--r--  drivers/block/Kconfig                          |   8
-rw-r--r--  drivers/block/pktcdvd.c                        |  58
-rw-r--r--  drivers/message/i2o/i2o_scsi.c                 |   2
-rw-r--r--  drivers/scsi/scsi.c                            |   2
-rw-r--r--  fs/file.c                                      |   3
-rw-r--r--  fs/fuse/dev.c                                  |  40
-rw-r--r--  fs/jbd/transaction.c                           |  10
-rw-r--r--  fs/namei.c                                     |  37
-rw-r--r--  include/asm-i386/system.h                      |   2
-rw-r--r--  include/asm-x86_64/numa.h                      |   7
-rw-r--r--  include/asm-x86_64/system.h                    |   2
-rw-r--r--  include/linux/jbd.h                            |   4
-rw-r--r--  include/linux/netfilter_ipv4/ipt_connbytes.h   |   4
-rw-r--r--  include/linux/netfilter_ipv4/ipt_policy.h      |  22
-rw-r--r--  include/linux/netfilter_ipv6/ip6t_policy.h     |  22
-rw-r--r--  include/linux/pktcdvd.h                        |   8
-rw-r--r--  include/linux/reiserfs_acl.h                   |   6
-rw-r--r--  include/net/netfilter/nf_conntrack_l3proto.h   |  15
-rw-r--r--  kernel/intermodule.c                           |   3
-rw-r--r--  kernel/sched.c                                 |  16
-rw-r--r--  mm/hugetlb.c                                   |   9
-rw-r--r--  mm/page_alloc.c                                |  10
-rw-r--r--  mm/slab.c                                      | 169
-rw-r--r--  net/bridge/netfilter/ebt_ulog.c                |  10
-rw-r--r--  net/bridge/netfilter/ebtables.c                |   7
-rw-r--r--  net/core/dev.c                                 |   2
-rw-r--r--  net/core/utils.c                               |   4
-rw-r--r--  net/ipv4/icmp.c                                |   2
-rw-r--r--  net/ipv4/netfilter/arp_tables.c                |   7
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_netlink.c      |   3
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_tftp.c         |   1
-rw-r--r--  net/ipv4/netfilter/ip_nat_standalone.c         |   4
-rw-r--r--  net/ipv4/netfilter/ip_tables.c                 |   7
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c                  |  26
-rw-r--r--  net/ipv4/netfilter/ipt_policy.c                |  11
-rw-r--r--  net/ipv4/proc.c                                |   2
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c                |   7
-rw-r--r--  net/ipv6/netfilter/ip6t_policy.c               |   7
-rw-r--r--  net/ipv6/proc.c                                |   2
-rw-r--r--  net/netfilter/nf_conntrack_core.c              |   2
-rw-r--r--  net/netfilter/nf_conntrack_ftp.c               |   2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c           |   3
-rw-r--r--  net/netfilter/nfnetlink_log.c                  |  20
-rw-r--r--  net/netfilter/nfnetlink_queue.c                |   3
-rw-r--r--  net/socket.c                                   |   2
-rw-r--r--  scripts/kconfig/Makefile                       |  12
-rw-r--r--  security/selinux/Kconfig                       |   2
-rw-r--r--  security/selinux/Makefile                      |   4
-rw-r--r--  security/selinux/hooks.c                       |  21
67 files changed, 499 insertions(+), 281 deletions(-)
diff --git a/CREDITS b/CREDITS
index 8e577ce..6957ef4 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3101,7 +3101,7 @@ S: Minto, NSW, 2566
S: Australia
N: Stephen Smalley
-E: sds@epoch.ncsc.mil
+E: sds@tycho.nsa.gov
D: portions of the Linux Security Module (LSM) framework and security modules
N: Chris Smith
diff --git a/MAINTAINERS b/MAINTAINERS
index b6cbac5..11d44da 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2298,7 +2298,7 @@ S: Supported
SELINUX SECURITY MODULE
P: Stephen Smalley
-M: sds@epoch.ncsc.mil
+M: sds@tycho.nsa.gov
P: James Morris
M: jmorris@namei.org
L: linux-kernel@vger.kernel.org (kernel issues)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index d86c865..0afec85 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -442,6 +442,7 @@ config HIGHMEM4G
config HIGHMEM64G
bool "64GB"
+ depends on X86_CMPXCHG64
help
Select this if you have a 32-bit processor and more than 4
gigabytes of physical RAM.
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 333578a..0810f81f 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -282,3 +282,11 @@ int __init amd_init_cpu(void)
}
//early_arch_initcall(amd_init_cpu);
+
+static int __init amd_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_AMD] = NULL;
+ return 0;
+}
+
+late_initcall(amd_exit_cpu);
diff --git a/arch/i386/kernel/cpu/centaur.c b/arch/i386/kernel/cpu/centaur.c
index 0dd92a2..f52669e 100644
--- a/arch/i386/kernel/cpu/centaur.c
+++ b/arch/i386/kernel/cpu/centaur.c
@@ -470,3 +470,11 @@ int __init centaur_init_cpu(void)
}
//early_arch_initcall(centaur_init_cpu);
+
+static int __init centaur_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_CENTAUR] = NULL;
+ return 0;
+}
+
+late_initcall(centaur_exit_cpu);
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 15aee26..7eb9213 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -44,6 +44,7 @@ static void default_init(struct cpuinfo_x86 * c)
static struct cpu_dev default_cpu = {
.c_init = default_init,
+ .c_vendor = "Unknown",
};
static struct cpu_dev * this_cpu = &default_cpu;
@@ -150,6 +151,7 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
char *v = c->x86_vendor_id;
int i;
+ static int printed;
for (i = 0; i < X86_VENDOR_NUM; i++) {
if (cpu_devs[i]) {
@@ -159,10 +161,17 @@ static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
c->x86_vendor = i;
if (!early)
this_cpu = cpu_devs[i];
- break;
+ return;
}
}
}
+ if (!printed) {
+ printed++;
+ printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
+ printk(KERN_ERR "CPU: Your system may be unstable.\n");
+ }
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+ this_cpu = &default_cpu;
}
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index 7501597..00f2e05 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -345,7 +345,7 @@ static void __init init_cyrix(struct cpuinfo_x86 *c)
/*
* Handle National Semiconductor branded processors
*/
-static void __devinit init_nsc(struct cpuinfo_x86 *c)
+static void __init init_nsc(struct cpuinfo_x86 *c)
{
/* There may be GX1 processors in the wild that are branded
* NSC and not Cyrix.
@@ -444,6 +444,14 @@ int __init cyrix_init_cpu(void)
//early_arch_initcall(cyrix_init_cpu);
+static int __init cyrix_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_CYRIX] = NULL;
+ return 0;
+}
+
+late_initcall(cyrix_exit_cpu);
+
static struct cpu_dev nsc_cpu_dev __initdata = {
.c_vendor = "NSC",
.c_ident = { "Geode by NSC" },
@@ -458,3 +466,11 @@ int __init nsc_init_cpu(void)
}
//early_arch_initcall(nsc_init_cpu);
+
+static int __init nsc_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_NSC] = NULL;
+ return 0;
+}
+
+late_initcall(nsc_exit_cpu);
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index af591c7..ffe58ce 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -152,6 +152,7 @@ static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_le
return 0;
}
+/* will only be called once; __init is safe here */
static int __init find_num_cache_leaves(void)
{
unsigned int eax, ebx, ecx, edx;
diff --git a/arch/i386/kernel/cpu/nexgen.c b/arch/i386/kernel/cpu/nexgen.c
index 30898a2..ad87fa5 100644
--- a/arch/i386/kernel/cpu/nexgen.c
+++ b/arch/i386/kernel/cpu/nexgen.c
@@ -61,3 +61,11 @@ int __init nexgen_init_cpu(void)
}
//early_arch_initcall(nexgen_init_cpu);
+
+static int __init nexgen_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_NEXGEN] = NULL;
+ return 0;
+}
+
+late_initcall(nexgen_exit_cpu);
diff --git a/arch/i386/kernel/cpu/rise.c b/arch/i386/kernel/cpu/rise.c
index 8602425..d08d5a2 100644
--- a/arch/i386/kernel/cpu/rise.c
+++ b/arch/i386/kernel/cpu/rise.c
@@ -51,3 +51,11 @@ int __init rise_init_cpu(void)
}
//early_arch_initcall(rise_init_cpu);
+
+static int __init rise_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_RISE] = NULL;
+ return 0;
+}
+
+late_initcall(rise_exit_cpu);
diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c
index fc42638..bdbeb77 100644
--- a/arch/i386/kernel/cpu/transmeta.c
+++ b/arch/i386/kernel/cpu/transmeta.c
@@ -84,7 +84,7 @@ static void __init init_transmeta(struct cpuinfo_x86 *c)
#endif
}
-static void transmeta_identify(struct cpuinfo_x86 * c)
+static void __init transmeta_identify(struct cpuinfo_x86 * c)
{
u32 xlvl;
generic_identify(c);
@@ -111,3 +111,11 @@ int __init transmeta_init_cpu(void)
}
//early_arch_initcall(transmeta_init_cpu);
+
+static int __init transmeta_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_TRANSMETA] = NULL;
+ return 0;
+}
+
+late_initcall(transmeta_exit_cpu);
diff --git a/arch/i386/kernel/cpu/umc.c b/arch/i386/kernel/cpu/umc.c
index 264fcad..2cd988f 100644
--- a/arch/i386/kernel/cpu/umc.c
+++ b/arch/i386/kernel/cpu/umc.c
@@ -31,3 +31,11 @@ int __init umc_init_cpu(void)
}
//early_arch_initcall(umc_init_cpu);
+
+static int __init umc_exit_cpu(void)
+{
+ cpu_devs[X86_VENDOR_UMC] = NULL;
+ return 0;
+}
+
+late_initcall(umc_exit_cpu);
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index d661703..63f39a7 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -138,7 +138,7 @@ static int __init check_nmi_watchdog(void)
if (nmi_watchdog == NMI_LOCAL_APIC)
smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
- for (cpu = 0; cpu < NR_CPUS; cpu++)
+ for_each_cpu(cpu)
prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
local_irq_enable();
mdelay((10*1000)/nmi_hz); // wait 10 ticks
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 2185377..0480454 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -297,8 +297,10 @@ void show_regs(struct pt_regs * regs)
if (user_mode(regs))
printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
- printk(" EFLAGS: %08lx %s (%s)\n",
- regs->eflags, print_tainted(), system_utsname.release);
+ printk(" EFLAGS: %08lx %s (%s %.*s)\n",
+ regs->eflags, print_tainted(), system_utsname.release,
+ (int)strcspn(system_utsname.version, " "),
+ system_utsname.version);
printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
regs->eax,regs->ebx,regs->ecx,regs->edx);
printk("ESI: %08lx EDI: %08lx EBP: %08lx",
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 0aaebf3..b814dbd 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -166,7 +166,8 @@ static void show_trace_log_lvl(struct task_struct *task,
stack = (unsigned long*)context->previous_esp;
if (!stack)
break;
- printk(KERN_EMERG " =======================\n");
+ printk(log_lvl);
+ printk(" =======================\n");
}
}
@@ -239,9 +240,11 @@ void show_registers(struct pt_regs *regs)
}
print_modules();
printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n"
- "EFLAGS: %08lx (%s) \n",
+ "EFLAGS: %08lx (%s %.*s) \n",
smp_processor_id(), 0xffff & regs->xcs, regs->eip,
- print_tainted(), regs->eflags, system_utsname.release);
+ print_tainted(), regs->eflags, system_utsname.release,
+ (int)strcspn(system_utsname.version, " "),
+ system_utsname.version);
print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
regs->eax, regs->ebx, regs->ecx, regs->edx);
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 67e4e28..a28756e 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -59,7 +59,6 @@
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
-#include <asm/numa.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
@@ -891,7 +890,6 @@ do_rest:
if (boot_error) {
cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
- clear_node_cpumask(cpu); /* was set by numa_add_cpu */
cpu_clear(cpu, cpu_present_map);
cpu_clear(cpu, cpu_possible_map);
x86_cpu_to_apicid[cpu] = BAD_APICID;
@@ -1189,7 +1187,6 @@ void remove_cpu_from_maps(void)
cpu_clear(cpu, cpu_callout_map);
cpu_clear(cpu, cpu_callin_map);
clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
- clear_node_cpumask(cpu);
}
int __cpu_disable(void)
diff --git a/block/elevator.c b/block/elevator.c
index 96a61e0..2fc269f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -323,7 +323,8 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
/*
* toggle ordered color
*/
- q->ordcolor ^= 1;
+ if (blk_barrier_rq(rq))
+ q->ordcolor ^= 1;
/*
* barriers implicitly indicate back insertion
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index f9fc07e..ee5ed98 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -508,7 +508,7 @@ static inline struct request *start_ordered(request_queue_t *q,
int blk_do_ordered(request_queue_t *q, struct request **rqp)
{
- struct request *rq = *rqp, *allowed_rq;
+ struct request *rq = *rqp;
int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
if (!q->ordseq) {
@@ -532,32 +532,26 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
}
}
+ /*
+ * Ordered sequence in progress
+ */
+
+ /* Special requests are not subject to ordering rules. */
+ if (!blk_fs_request(rq) &&
+ rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+ return 1;
+
if (q->ordered & QUEUE_ORDERED_TAG) {
+ /* Ordered by tag. Blocking the next barrier is enough. */
if (is_barrier && rq != &q->bar_rq)
*rqp = NULL;
- return 1;
- }
-
- switch (blk_ordered_cur_seq(q)) {
- case QUEUE_ORDSEQ_PREFLUSH:
- allowed_rq = &q->pre_flush_rq;
- break;
- case QUEUE_ORDSEQ_BAR:
- allowed_rq = &q->bar_rq;
- break;
- case QUEUE_ORDSEQ_POSTFLUSH:
- allowed_rq = &q->post_flush_rq;
- break;
- default:
- allowed_rq = NULL;
- break;
+ } else {
+ /* Ordered by draining. Wait for turn. */
+ WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+ if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+ *rqp = NULL;
}
- if (rq != allowed_rq &&
- (blk_fs_request(rq) || rq == &q->pre_flush_rq ||
- rq == &q->post_flush_rq))
- *rqp = NULL;
-
return 1;
}
@@ -3453,7 +3447,7 @@ int __init blk_dev_init(void)
iocontext_cachep = kmem_cache_create("blkdev_ioc",
sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
- for (i = 0; i < NR_CPUS; i++)
+ for_each_cpu(i)
INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 139cbba..8b13316 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -433,12 +433,12 @@ config CDROM_PKTCDVD_BUFFERS
This controls the maximum number of active concurrent packets. More
concurrent packets can increase write performance, but also require
more memory. Each concurrent packet will require approximately 64Kb
- of non-swappable kernel memory, memory which will be allocated at
- pktsetup time.
+ of non-swappable kernel memory, memory which will be allocated when
+ a disc is opened for writing.
config CDROM_PKTCDVD_WCACHE
- bool "Enable write caching"
- depends on CDROM_PKTCDVD
+ bool "Enable write caching (EXPERIMENTAL)"
+ depends on CDROM_PKTCDVD && EXPERIMENTAL
help
If enabled, write caching will be set for the CD-R/W device. For now
this option is dangerous unless the CD-RW media is known good, as we
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 93affee..4e7dbcc 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -43,8 +43,6 @@
*
*************************************************************************/
-#define VERSION_CODE "v0.2.0a 2004-07-14 Jens Axboe (axboe@suse.de) and petero2@telia.com"
-
#include <linux/pktcdvd.h>
#include <linux/config.h>
#include <linux/module.h>
@@ -131,7 +129,7 @@ static struct bio *pkt_bio_alloc(int nr_iovecs)
/*
* Allocate a packet_data struct
*/
-static struct packet_data *pkt_alloc_packet_data(void)
+static struct packet_data *pkt_alloc_packet_data(int frames)
{
int i;
struct packet_data *pkt;
@@ -140,11 +138,12 @@ static struct packet_data *pkt_alloc_packet_data(void)
if (!pkt)
goto no_pkt;
- pkt->w_bio = pkt_bio_alloc(PACKET_MAX_SIZE);
+ pkt->frames = frames;
+ pkt->w_bio = pkt_bio_alloc(frames);
if (!pkt->w_bio)
goto no_bio;
- for (i = 0; i < PAGES_PER_PACKET; i++) {
+ for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (!pkt->pages[i])
goto no_page;
@@ -152,7 +151,7 @@ static struct packet_data *pkt_alloc_packet_data(void)
spin_lock_init(&pkt->lock);
- for (i = 0; i < PACKET_MAX_SIZE; i++) {
+ for (i = 0; i < frames; i++) {
struct bio *bio = pkt_bio_alloc(1);
if (!bio)
goto no_rd_bio;
@@ -162,14 +161,14 @@ static struct packet_data *pkt_alloc_packet_data(void)
return pkt;
no_rd_bio:
- for (i = 0; i < PACKET_MAX_SIZE; i++) {
+ for (i = 0; i < frames; i++) {
struct bio *bio = pkt->r_bios[i];
if (bio)
bio_put(bio);
}
no_page:
- for (i = 0; i < PAGES_PER_PACKET; i++)
+ for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
if (pkt->pages[i])
__free_page(pkt->pages[i]);
bio_put(pkt->w_bio);
@@ -186,12 +185,12 @@ static void pkt_free_packet_data(struct packet_data *pkt)
{
int i;
- for (i = 0; i < PACKET_MAX_SIZE; i++) {
+ for (i = 0; i < pkt->frames; i++) {
struct bio *bio = pkt->r_bios[i];
if (bio)
bio_put(bio);
}
- for (i = 0; i < PAGES_PER_PACKET; i++)
+ for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
__free_page(pkt->pages[i]);
bio_put(pkt->w_bio);
kfree(pkt);
@@ -206,17 +205,17 @@ static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
pkt_free_packet_data(pkt);
}
+ INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}
static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
struct packet_data *pkt;
- INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
- INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
- spin_lock_init(&pd->cdrw.active_list_lock);
+ BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
+
while (nr_packets > 0) {
- pkt = pkt_alloc_packet_data();
+ pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
if (!pkt) {
pkt_shrink_pktlist(pd);
return 0;
@@ -951,7 +950,7 @@ try_next_bio:
pd->current_sector = zone + pd->settings.size;
pkt->sector = zone;
- pkt->frames = pd->settings.size >> 2;
+ BUG_ON(pkt->frames != pd->settings.size >> 2);
pkt->write_size = 0;
/*
@@ -1639,7 +1638,7 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
if (pd->settings.size == 0) {
printk("pktcdvd: detected zero packet size!\n");
- pd->settings.size = 128;
+ return -ENXIO;
}
if (pd->settings.size > PACKET_MAX_SECTORS) {
printk("pktcdvd: packet size is too big\n");
@@ -1987,8 +1986,14 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
if ((ret = pkt_set_segment_merging(pd, q)))
goto out_unclaim;
- if (write)
+ if (write) {
+ if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
+ printk("pktcdvd: not enough memory for buffers\n");
+ ret = -ENOMEM;
+ goto out_unclaim;
+ }
printk("pktcdvd: %lukB available on disc\n", lba << 1);
+ }
return 0;
@@ -2014,6 +2019,8 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
bd_release(pd->bdev);
blkdev_put(pd->bdev);
+
+ pkt_shrink_pktlist(pd);
}
static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
@@ -2379,12 +2386,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
- if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
- printk("pktcdvd: not enough memory for buffers\n");
- ret = -ENOMEM;
- goto out_mem;
- }
-
pd->bdev = bdev;
set_blocksize(bdev, CD_FRAMESIZE);
@@ -2395,7 +2396,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
if (IS_ERR(pd->cdrw.thread)) {
printk("pktcdvd: can't start kernel thread\n");
ret = -ENOMEM;
- goto out_thread;
+ goto out_mem;
}
proc = create_proc_entry(pd->name, 0, pkt_proc);
@@ -2406,8 +2407,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
DPRINTK("pktcdvd: writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
return 0;
-out_thread:
- pkt_shrink_pktlist(pd);
out_mem:
blkdev_put(bdev);
/* This is safe: open() is still holding a reference. */
@@ -2503,6 +2502,10 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
goto out_mem;
pd->disk = disk;
+ INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
+ INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
+ spin_lock_init(&pd->cdrw.active_list_lock);
+
spin_lock_init(&pd->lock);
spin_lock_init(&pd->iosched.lock);
sprintf(pd->name, "pktcdvd%d", idx);
@@ -2567,8 +2570,6 @@ static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd)
blkdev_put(pd->bdev);
- pkt_shrink_pktlist(pd);
-
remove_proc_entry(pd->name, pkt_proc);
DPRINTK("pktcdvd: writer %s unmapped\n", pd->name);
@@ -2678,7 +2679,6 @@ static int __init pkt_init(void)
pkt_proc = proc_mkdir("pktcdvd", proc_root_driver);
- DPRINTK("pktcdvd: %s\n", VERSION_CODE);
return 0;
out:
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index f9e5a23..c08ddac 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -732,7 +732,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid);
msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt));
- if (i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
+ if (!i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
status = SUCCESS;
return status;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 245ca99..c551bb8 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1245,7 +1245,7 @@ static int __init init_scsi(void)
if (error)
goto cleanup_sysctl;
- for (i = 0; i < NR_CPUS; i++)
+ for_each_cpu(i)
INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
devfs_mk_dir("scsi");
diff --git a/fs/file.c b/fs/file.c
index fd066b2..cea7cbe 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -379,7 +379,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
void __init files_defer_init(void)
{
int i;
- /* Really early - can't use for_each_cpu */
- for (i = 0; i < NR_CPUS; i++)
+ for_each_cpu(i)
fdtable_defer_list_init(i);
}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 4526da8..f556a0d 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -120,9 +120,9 @@ struct fuse_req *fuse_get_request(struct fuse_conn *fc)
return do_get_request(fc);
}
+/* Must be called with fuse_lock held */
static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
{
- spin_lock(&fuse_lock);
if (req->preallocated) {
atomic_dec(&fc->num_waiting);
list_add(&req->list, &fc->unused_list);
@@ -134,11 +134,19 @@ static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
fc->outstanding_debt--;
else
up(&fc->outstanding_sem);
- spin_unlock(&fuse_lock);
}
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
+ if (atomic_dec_and_test(&req->count)) {
+ spin_lock(&fuse_lock);
+ fuse_putback_request(fc, req);
+ spin_unlock(&fuse_lock);
+ }
+}
+
+static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req)
+{
if (atomic_dec_and_test(&req->count))
fuse_putback_request(fc, req);
}
@@ -163,26 +171,36 @@ void fuse_release_background(struct fuse_req *req)
* still waiting), the 'end' callback is called if given, else the
* reference to the request is released
*
+ * Releasing extra reference for foreground requests must be done
+ * within the same locked region as setting state to finished. This
+ * is because fuse_reset_request() may be called after request is
+ * finished and it must be the sole possessor. If request is
+ * interrupted and put in the background, it will return with an error
+ * and hence never be reset and reused.
+ *
* Called with fuse_lock, unlocks it
*/
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
- void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
- req->end = NULL;
list_del(&req->list);
req->state = FUSE_REQ_FINISHED;
- spin_unlock(&fuse_lock);
- if (req->background) {
+ if (!req->background) {
+ wake_up(&req->waitq);
+ fuse_put_request_locked(fc, req);
+ spin_unlock(&fuse_lock);
+ } else {
+ void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
+ req->end = NULL;
+ spin_unlock(&fuse_lock);
down_read(&fc->sbput_sem);
if (fc->mounted)
fuse_release_background(req);
up_read(&fc->sbput_sem);
+ if (end)
+ end(fc, req);
+ else
+ fuse_put_request(fc, req);
}
- wake_up(&req->waitq);
- if (end)
- end(fc, req);
- else
- fuse_put_request(fc, req);
}
/*
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 429f4b2..ca91797 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1308,6 +1308,7 @@ int journal_stop(handle_t *handle)
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
int old_handle_count, err;
+ pid_t pid;
J_ASSERT(transaction->t_updates > 0);
J_ASSERT(journal_current_handle() == handle);
@@ -1333,8 +1334,15 @@ int journal_stop(handle_t *handle)
* It doesn't cost much - we're about to run a commit and sleep
* on IO anyway. Speeds up many-threaded, many-dir operations
* by 30x or more...
+ *
+ * But don't do this if this process was the most recent one to
+ * perform a synchronous write. We do this to detect the case where a
+ * single process is doing a stream of sync writes. No point in waiting
+ * for joiners in that case.
*/
- if (handle->h_sync) {
+ pid = current->pid;
+ if (handle->h_sync && journal->j_last_sync_writer != pid) {
+ journal->j_last_sync_writer = pid;
do {
old_handle_count = transaction->t_handle_count;
schedule_timeout_uninterruptible(1);
diff --git a/fs/namei.c b/fs/namei.c
index 7ac9fb4..faf61c35 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -790,7 +790,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
inode = nd->dentry->d_inode;
if (nd->depth)
- lookup_flags = LOOKUP_FOLLOW;
+ lookup_flags = LOOKUP_FOLLOW | (nd->flags & LOOKUP_CONTINUE);
/* At this point we know we have a real path component. */
for(;;) {
@@ -885,7 +885,8 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
last_with_slashes:
lookup_flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
last_component:
- nd->flags &= ~LOOKUP_CONTINUE;
+ /* Clear LOOKUP_CONTINUE iff it was previously unset */
+ nd->flags &= lookup_flags | ~LOOKUP_CONTINUE;
if (lookup_flags & LOOKUP_PARENT)
goto lookup_parent;
if (this.name[0] == '.') switch (this.len) {
@@ -1069,6 +1070,8 @@ static int fastcall do_path_lookup(int dfd, const char *name,
unsigned int flags, struct nameidata *nd)
{
int retval = 0;
+ int fput_needed;
+ struct file *file;
nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd->flags = flags;
@@ -1090,29 +1093,22 @@ static int fastcall do_path_lookup(int dfd, const char *name,
nd->mnt = mntget(current->fs->pwdmnt);
nd->dentry = dget(current->fs->pwd);
} else {
- struct file *file;
- int fput_needed;
struct dentry *dentry;
file = fget_light(dfd, &fput_needed);
- if (!file) {
- retval = -EBADF;
- goto out_fail;
- }
+ retval = -EBADF;
+ if (!file)
+ goto unlock_fail;
dentry = file->f_dentry;
- if (!S_ISDIR(dentry->d_inode->i_mode)) {
- retval = -ENOTDIR;
- fput_light(file, fput_needed);
- goto out_fail;
- }
+ retval = -ENOTDIR;
+ if (!S_ISDIR(dentry->d_inode->i_mode))
+ goto fput_unlock_fail;
retval = file_permission(file, MAY_EXEC);
- if (retval) {
- fput_light(file, fput_needed);
- goto out_fail;
- }
+ if (retval)
+ goto fput_unlock_fail;
nd->mnt = mntget(file->f_vfsmnt);
nd->dentry = dget(dentry);
@@ -1126,7 +1122,12 @@ out:
if (unlikely(current->audit_context
&& nd && nd->dentry && nd->dentry->d_inode))
audit_inode(name, nd->dentry->d_inode, flags);
-out_fail:
+ return retval;
+
+fput_unlock_fail:
+ fput_light(file, fput_needed);
+unlock_fail:
+ read_unlock(&current->fs->lock);
return retval;
}
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 36a92ed..399145a 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -507,7 +507,7 @@ struct alt_instr {
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index dffe276..34e434c 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -22,15 +22,8 @@ extern void numa_set_node(int cpu, int node);
extern unsigned char apicid_to_node[256];
#ifdef CONFIG_NUMA
extern void __init init_cpu_to_node(void);
-
-static inline void clear_node_cpumask(int cpu)
-{
- clear_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
-}
-
#else
#define init_cpu_to_node() do {} while (0)
-#define clear_node_cpumask(cpu) do {} while (0)
#endif
#define NUMA_NO_NODE 0xff
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index a73f0c7..b7f6603 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -327,7 +327,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#define wmb() asm volatile("" ::: "memory")
#endif
#define read_barrier_depends() do {} while(0)
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 558cb4c..751bb38 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -23,6 +23,7 @@
#define jfs_debug jbd_debug
#else
+#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/journal-head.h>
#include <linux/stddef.h>
@@ -618,6 +619,7 @@ struct transaction_s
* @j_wbuf: array of buffer_heads for journal_commit_transaction
* @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
* number that will fit in j_blocksize
+ * @j_last_sync_writer: most recent pid which did a synchronous write
* @j_private: An opaque pointer to fs-private information.
*/
@@ -807,6 +809,8 @@ struct journal_s
struct buffer_head **j_wbuf;
int j_wbufsize;
+ pid_t j_last_sync_writer;
+
/*
* An opaque pointer to fs-private information. ext3 puts its
* superblock pointer here
diff --git a/include/linux/netfilter_ipv4/ipt_connbytes.h b/include/linux/netfilter_ipv4/ipt_connbytes.h
index b04dfa3..f63e6ee 100644
--- a/include/linux/netfilter_ipv4/ipt_connbytes.h
+++ b/include/linux/netfilter_ipv4/ipt_connbytes.h
@@ -1,10 +1,10 @@
#ifndef _IPT_CONNBYTES_H
#define _IPT_CONNBYTES_H
-#include <net/netfilter/xt_connbytes.h>
+#include <linux/netfilter/xt_connbytes.h>
#define ipt_connbytes_what xt_connbytes_what
-#define IPT_CONNBYTES_PKTS XT_CONNBYTES_PACKETS
+#define IPT_CONNBYTES_PKTS XT_CONNBYTES_PKTS
#define IPT_CONNBYTES_BYTES XT_CONNBYTES_BYTES
#define IPT_CONNBYTES_AVGPKT XT_CONNBYTES_AVGPKT
diff --git a/include/linux/netfilter_ipv4/ipt_policy.h b/include/linux/netfilter_ipv4/ipt_policy.h
index 7fd1bec..a3f6eff 100644
--- a/include/linux/netfilter_ipv4/ipt_policy.h
+++ b/include/linux/netfilter_ipv4/ipt_policy.h
@@ -27,16 +27,22 @@ struct ipt_policy_spec
reqid:1;
};
+union ipt_policy_addr
+{
+ struct in_addr a4;
+ struct in6_addr a6;
+};
+
struct ipt_policy_elem
{
- u_int32_t saddr;
- u_int32_t smask;
- u_int32_t daddr;
- u_int32_t dmask;
- u_int32_t spi;
- u_int32_t reqid;
- u_int8_t proto;
- u_int8_t mode;
+ union ipt_policy_addr saddr;
+ union ipt_policy_addr smask;
+ union ipt_policy_addr daddr;
+ union ipt_policy_addr dmask;
+ u_int32_t spi;
+ u_int32_t reqid;
+ u_int8_t proto;
+ u_int8_t mode;
struct ipt_policy_spec match;
struct ipt_policy_spec invert;
diff --git a/include/linux/netfilter_ipv6/ip6t_policy.h b/include/linux/netfilter_ipv6/ip6t_policy.h
index 5a93afc..671bd81 100644
--- a/include/linux/netfilter_ipv6/ip6t_policy.h
+++ b/include/linux/netfilter_ipv6/ip6t_policy.h
@@ -27,16 +27,22 @@ struct ip6t_policy_spec
reqid:1;
};
+union ip6t_policy_addr
+{
+ struct in_addr a4;
+ struct in6_addr a6;
+};
+
struct ip6t_policy_elem
{
- struct in6_addr saddr;
- struct in6_addr smask;
- struct in6_addr daddr;
- struct in6_addr dmask;
- u_int32_t spi;
- u_int32_t reqid;
- u_int8_t proto;
- u_int8_t mode;
+ union ip6t_policy_addr saddr;
+ union ip6t_policy_addr smask;
+ union ip6t_policy_addr daddr;
+ union ip6t_policy_addr dmask;
+ u_int32_t spi;
+ u_int32_t reqid;
+ u_int8_t proto;
+ u_int8_t mode;
struct ip6t_policy_spec match;
struct ip6t_policy_spec invert;
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
index 2c177e4..8a94c71 100644
--- a/include/linux/pktcdvd.h
+++ b/include/linux/pktcdvd.h
@@ -114,7 +114,7 @@ struct pkt_ctrl_command {
struct packet_settings
{
- __u8 size; /* packet size in (512 byte) sectors */
+ __u32 size; /* packet size in (512 byte) sectors */
__u8 fp; /* fixed packets */
__u8 link_loss; /* the rest is specified
* as per Mt Fuji */
@@ -169,8 +169,8 @@ struct packet_iosched
#if (PAGE_SIZE % CD_FRAMESIZE) != 0
#error "PAGE_SIZE must be a multiple of CD_FRAMESIZE"
#endif
-#define PACKET_MAX_SIZE 32
-#define PAGES_PER_PACKET (PACKET_MAX_SIZE * CD_FRAMESIZE / PAGE_SIZE)
+#define PACKET_MAX_SIZE 128
+#define FRAMES_PER_PAGE (PAGE_SIZE / CD_FRAMESIZE)
#define PACKET_MAX_SECTORS (PACKET_MAX_SIZE * CD_FRAMESIZE >> 9)
enum packet_data_state {
@@ -219,7 +219,7 @@ struct packet_data
atomic_t io_errors; /* Number of read/write errors during IO */
struct bio *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */
- struct page *pages[PAGES_PER_PACKET];
+ struct page *pages[PACKET_MAX_SIZE / FRAMES_PER_PAGE];
int cache_valid; /* If non-zero, the data for the zone defined */
/* by the sector variable is completely cached */
diff --git a/include/linux/reiserfs_acl.h b/include/linux/reiserfs_acl.h
index 0a36050..806ec5b 100644
--- a/include/linux/reiserfs_acl.h
+++ b/include/linux/reiserfs_acl.h
@@ -58,9 +58,13 @@ extern struct reiserfs_xattr_handler posix_acl_default_handler;
extern struct reiserfs_xattr_handler posix_acl_access_handler;
#else
-#define reiserfs_get_acl NULL
#define reiserfs_cache_default_acl(inode) 0
+static inline struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
+{
+ return NULL;
+}
+
static inline int reiserfs_xattr_posix_acl_init(void)
{
return 0;
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 67856eb..dac43b1 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -88,12 +88,6 @@ extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto);
-static inline struct nf_conntrack_l3proto *
-__nf_ct_l3proto_find(u_int16_t l3proto)
-{
- return nf_ct_l3protos[l3proto];
-}
-
extern struct nf_conntrack_l3proto *
nf_ct_l3proto_find_get(u_int16_t l3proto);
@@ -103,4 +97,13 @@ extern void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
extern struct nf_conntrack_l3proto nf_conntrack_generic_l3proto;
+
+static inline struct nf_conntrack_l3proto *
+__nf_ct_l3proto_find(u_int16_t l3proto)
+{
+ if (unlikely(l3proto >= AF_MAX))
+ return &nf_conntrack_generic_l3proto;
+ return nf_ct_l3protos[l3proto];
+}
+
#endif /*_NF_CONNTRACK_L3PROTO_H*/
diff --git a/kernel/intermodule.c b/kernel/intermodule.c
index 0cbe633..55b1e5b 100644
--- a/kernel/intermodule.c
+++ b/kernel/intermodule.c
@@ -179,3 +179,6 @@ EXPORT_SYMBOL(inter_module_register);
EXPORT_SYMBOL(inter_module_unregister);
EXPORT_SYMBOL(inter_module_get_request);
EXPORT_SYMBOL(inter_module_put);
+
+MODULE_LICENSE("GPL");
+
diff --git a/kernel/sched.c b/kernel/sched.c
index f77f23f..bc38804 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5551,13 +5551,15 @@ static void calibrate_migration_costs(const cpumask_t *cpu_map)
-1
#endif
);
- printk("migration_cost=");
- for (distance = 0; distance <= max_distance; distance++) {
- if (distance)
- printk(",");
- printk("%ld", (long)migration_cost[distance] / 1000);
+ if (system_state == SYSTEM_BOOTING) {
+ printk("migration_cost=");
+ for (distance = 0; distance <= max_distance; distance++) {
+ if (distance)
+ printk(",");
+ printk("%ld", (long)migration_cost[distance] / 1000);
+ }
+ printk("\n");
}
- printk("\n");
j1 = jiffies;
if (migration_debug)
printk("migration: %ld seconds\n", (j1-j0)/HZ);
@@ -6109,7 +6111,7 @@ void __init sched_init(void)
runqueue_t *rq;
int i, j, k;
- for (i = 0; i < NR_CPUS; i++) {
+ for_each_cpu(i) {
prio_array_t *array;
rq = cpu_rq(i);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b21d78c..ceb3ebb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -444,6 +444,15 @@ retry:
page = alloc_huge_page(vma, address);
if (!page) {
hugetlb_put_quota(mapping);
+ /*
+ * No huge pages available. So this is an OOM
+ * condition but we do not want to trigger the OOM
+ * killer, so we return VM_FAULT_SIGBUS.
+ *
+ * A program using hugepages may fault with Bus Error
+ * because no huge pages are available in the cpuset, per
+ * memory policy or because all are in use!
+ */
goto out;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 44b4eb4..dde04ff 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1213,18 +1213,21 @@ static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
{
int cpu = 0;
- memset(ret, 0, sizeof(*ret));
+ memset(ret, 0, nr * sizeof(unsigned long));
cpus_and(*cpumask, *cpumask, cpu_online_map);
cpu = first_cpu(*cpumask);
while (cpu < NR_CPUS) {
unsigned long *in, *out, off;
+ if (!cpu_isset(cpu, *cpumask))
+ continue;
+
in = (unsigned long *)&per_cpu(page_states, cpu);
cpu = next_cpu(cpu, *cpumask);
- if (cpu < NR_CPUS)
+ if (likely(cpu < NR_CPUS))
prefetch(&per_cpu(page_states, cpu));
out = (unsigned long *)ret;
@@ -1886,8 +1889,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
* not check if the processor is online before following the pageset pointer.
* Other parts of the kernel may not check if the zone is available.
*/
-static struct per_cpu_pageset
- boot_pageset[NR_CPUS];
+static struct per_cpu_pageset boot_pageset[NR_CPUS];
/*
* Dynamically allocate memory for the
diff --git a/mm/slab.c b/mm/slab.c
index 7137025..9cc049a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -294,6 +294,7 @@ struct kmem_list3 {
unsigned long next_reap;
int free_touched;
unsigned int free_limit;
+ unsigned int colour_next; /* Per-node cache coloring */
spinlock_t list_lock;
struct array_cache *shared; /* shared per node */
struct array_cache **alien; /* on other nodes */
@@ -344,6 +345,7 @@ static void kmem_list3_init(struct kmem_list3 *parent)
INIT_LIST_HEAD(&parent->slabs_free);
parent->shared = NULL;
parent->alien = NULL;
+ parent->colour_next = 0;
spin_lock_init(&parent->list_lock);
parent->free_objects = 0;
parent->free_touched = 0;
@@ -390,7 +392,6 @@ struct kmem_cache {
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
- unsigned int colour_next; /* cache colouring */
struct kmem_cache *slabp_cache;
unsigned int slab_size;
unsigned int dflags; /* dynamic flags */
@@ -883,14 +884,14 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
}
}
-static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3)
+static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien)
{
int i = 0;
struct array_cache *ac;
unsigned long flags;
for_each_online_node(i) {
- ac = l3->alien[i];
+ ac = alien[i];
if (ac) {
spin_lock_irqsave(&ac->lock, flags);
__drain_alien_cache(cachep, ac, i);
@@ -900,8 +901,11 @@ static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3)
}
#else
#define alloc_alien_cache(node, limit) do { } while (0)
-#define free_alien_cache(ac_ptr) do { } while (0)
-#define drain_alien_cache(cachep, l3) do { } while (0)
+#define drain_alien_cache(cachep, alien) do { } while (0)
+
+static inline void free_alien_cache(struct array_cache **ac_ptr)
+{
+}
#endif
static int __devinit cpuup_callback(struct notifier_block *nfb,
@@ -935,6 +939,11 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+ /*
+ * The l3s don't come and go as CPUs come and
+ * go. cache_chain_mutex is sufficient
+ * protection here.
+ */
cachep->nodelists[node] = l3;
}
@@ -949,26 +958,47 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
& array cache's */
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
+ struct array_cache *shared;
+ struct array_cache **alien;
nc = alloc_arraycache(node, cachep->limit,
- cachep->batchcount);
+ cachep->batchcount);
if (!nc)
goto bad;
+ shared = alloc_arraycache(node,
+ cachep->shared * cachep->batchcount,
+ 0xbaadf00d);
+ if (!shared)
+ goto bad;
+#ifdef CONFIG_NUMA
+ alien = alloc_alien_cache(node, cachep->limit);
+ if (!alien)
+ goto bad;
+#endif
cachep->array[cpu] = nc;
l3 = cachep->nodelists[node];
BUG_ON(!l3);
- if (!l3->shared) {
- if (!(nc = alloc_arraycache(node,
- cachep->shared *
- cachep->batchcount,
- 0xbaadf00d)))
- goto bad;
- /* we are serialised from CPU_DEAD or
- CPU_UP_CANCELLED by the cpucontrol lock */
- l3->shared = nc;
+ spin_lock_irq(&l3->list_lock);
+ if (!l3->shared) {
+ /*
+ * We are serialised from CPU_DEAD or
+ * CPU_UP_CANCELLED by the cpucontrol lock
+ */
+ l3->shared = shared;
+ shared = NULL;
+ }
+#ifdef CONFIG_NUMA
+ if (!l3->alien) {
+ l3->alien = alien;
+ alien = NULL;
}
+#endif
+ spin_unlock_irq(&l3->list_lock);
+
+ kfree(shared);
+ free_alien_cache(alien);
}
mutex_unlock(&cache_chain_mutex);
break;
@@ -977,25 +1007,34 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
+ /*
+ * Even if all the cpus of a node are down, we don't free the
+ * kmem_list3 of any cache. This to avoid a race between
+ * cpu_down, and a kmalloc allocation from another cpu for
+ * memory from the node of the cpu going down. The list3
+ * structure is usually allocated from kmem_cache_create() and
+ * gets destroyed at kmem_cache_destroy().
+ */
/* fall thru */
case CPU_UP_CANCELED:
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
+ struct array_cache *shared;
+ struct array_cache **alien;
cpumask_t mask;
mask = node_to_cpumask(node);
- spin_lock_irq(&cachep->spinlock);
/* cpu is dead; no one can alloc from it. */
nc = cachep->array[cpu];
cachep->array[cpu] = NULL;
l3 = cachep->nodelists[node];
if (!l3)
- goto unlock_cache;
+ goto free_array_cache;
- spin_lock(&l3->list_lock);
+ spin_lock_irq(&l3->list_lock);
/* Free limit for this kmem_list3 */
l3->free_limit -= cachep->batchcount;
@@ -1003,34 +1042,44 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
free_block(cachep, nc->entry, nc->avail, node);
if (!cpus_empty(mask)) {
- spin_unlock(&l3->list_lock);
- goto unlock_cache;
+ spin_unlock_irq(&l3->list_lock);
+ goto free_array_cache;
}
- if (l3->shared) {
+ shared = l3->shared;
+ if (shared) {
free_block(cachep, l3->shared->entry,
l3->shared->avail, node);
- kfree(l3->shared);
l3->shared = NULL;
}
- if (l3->alien) {
- drain_alien_cache(cachep, l3);
- free_alien_cache(l3->alien);
- l3->alien = NULL;
- }
- /* free slabs belonging to this node */
- if (__node_shrink(cachep, node)) {
- cachep->nodelists[node] = NULL;
- spin_unlock(&l3->list_lock);
- kfree(l3);
- } else {
- spin_unlock(&l3->list_lock);
+ alien = l3->alien;
+ l3->alien = NULL;
+
+ spin_unlock_irq(&l3->list_lock);
+
+ kfree(shared);
+ if (alien) {
+ drain_alien_cache(cachep, alien);
+ free_alien_cache(alien);
}
- unlock_cache:
- spin_unlock_irq(&cachep->spinlock);
+free_array_cache:
kfree(nc);
}
+ /*
+ * In the previous loop, all the objects were freed to
+ * the respective cache's slabs, now we can go ahead and
+ * shrink each nodelist to its limit.
+ */
+ list_for_each_entry(cachep, &cache_chain, next) {
+ l3 = cachep->nodelists[node];
+ if (!l3)
+ continue;
+ spin_lock_irq(&l3->list_lock);
+ /* free slabs belonging to this node */
+ __node_shrink(cachep, node);
+ spin_unlock_irq(&l3->list_lock);
+ }
mutex_unlock(&cache_chain_mutex);
break;
#endif
@@ -1119,7 +1168,6 @@ void __init kmem_cache_init(void)
BUG();
cache_cache.colour = left_over / cache_cache.colour_off;
- cache_cache.colour_next = 0;
cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
sizeof(struct slab), cache_line_size());
@@ -2011,18 +2059,16 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
smp_call_function_all_cpus(do_drain, cachep);
check_irq_on();
- spin_lock_irq(&cachep->spinlock);
for_each_online_node(node) {
l3 = cachep->nodelists[node];
if (l3) {
- spin_lock(&l3->list_lock);
+ spin_lock_irq(&l3->list_lock);
drain_array_locked(cachep, l3->shared, 1, node);
- spin_unlock(&l3->list_lock);
+ spin_unlock_irq(&l3->list_lock);
if (l3->alien)
- drain_alien_cache(cachep, l3);
+ drain_alien_cache(cachep, l3->alien);
}
}
- spin_unlock_irq(&cachep->spinlock);
}
static int __node_shrink(struct kmem_cache *cachep, int node)
@@ -2324,20 +2370,20 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
*/
ctor_flags |= SLAB_CTOR_ATOMIC;
- /* About to mess with non-constant members - lock. */
+ /* Take the l3 list lock to change the colour_next on this node */
check_irq_off();
- spin_lock(&cachep->spinlock);
+ l3 = cachep->nodelists[nodeid];
+ spin_lock(&l3->list_lock);
/* Get colour for the slab, and calc the next value. */
- offset = cachep->colour_next;
- cachep->colour_next++;
- if (cachep->colour_next >= cachep->colour)
- cachep->colour_next = 0;
- offset *= cachep->colour_off;
+ offset = l3->colour_next;
+ l3->colour_next++;
+ if (l3->colour_next >= cachep->colour)
+ l3->colour_next = 0;
+ spin_unlock(&l3->list_lock);
- spin_unlock(&cachep->spinlock);
+ offset *= cachep->colour_off;
- check_irq_off();
if (local_flags & __GFP_WAIT)
local_irq_enable();
@@ -2367,7 +2413,6 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
if (local_flags & __GFP_WAIT)
local_irq_disable();
check_irq_off();
- l3 = cachep->nodelists[nodeid];
spin_lock(&l3->list_lock);
/* Make slab active. */
@@ -2725,6 +2770,7 @@ static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int node
BUG_ON(!l3);
retry:
+ check_irq_off();
spin_lock(&l3->list_lock);
entry = l3->slabs_partial.next;
if (entry == &l3->slabs_partial) {
@@ -3304,11 +3350,11 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount
smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
check_irq_on();
- spin_lock_irq(&cachep->spinlock);
+ spin_lock(&cachep->spinlock);
cachep->batchcount = batchcount;
cachep->limit = limit;
cachep->shared = shared;
- spin_unlock_irq(&cachep->spinlock);
+ spin_unlock(&cachep->spinlock);
for_each_online_cpu(i) {
struct array_cache *ccold = new.new[i];
@@ -3440,7 +3486,7 @@ static void cache_reap(void *unused)
l3 = searchp->nodelists[numa_node_id()];
if (l3->alien)
- drain_alien_cache(searchp, l3);
+ drain_alien_cache(searchp, l3->alien);
spin_lock_irq(&l3->list_lock);
drain_array_locked(searchp, cpu_cache_get(searchp), 0,
@@ -3564,8 +3610,7 @@ static int s_show(struct seq_file *m, void *p)
int node;
struct kmem_list3 *l3;
- check_irq_on();
- spin_lock_irq(&cachep->spinlock);
+ spin_lock(&cachep->spinlock);
active_objs = 0;
num_slabs = 0;
for_each_online_node(node) {
@@ -3573,7 +3618,8 @@ static int s_show(struct seq_file *m, void *p)
if (!l3)
continue;
- spin_lock(&l3->list_lock);
+ check_irq_on();
+ spin_lock_irq(&l3->list_lock);
list_for_each(q, &l3->slabs_full) {
slabp = list_entry(q, struct slab, list);
@@ -3598,9 +3644,10 @@ static int s_show(struct seq_file *m, void *p)
num_slabs++;
}
free_objects += l3->free_objects;
- shared_avail += l3->shared->avail;
+ if (l3->shared)
+ shared_avail += l3->shared->avail;
- spin_unlock(&l3->list_lock);
+ spin_unlock_irq(&l3->list_lock);
}
num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
@@ -3644,7 +3691,7 @@ static int s_show(struct seq_file *m, void *p)
}
#endif
seq_putc(m, '\n');
- spin_unlock_irq(&cachep->spinlock);
+ spin_unlock(&cachep->spinlock);
return 0;
}
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index ce617b3..802baf7 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -46,7 +46,7 @@
#define PRINTR(format, args...) do { if (net_ratelimit()) \
printk(format , ## args); } while (0)
-static unsigned int nlbufsiz = 4096;
+static unsigned int nlbufsiz = NLMSG_GOODSIZE;
module_param(nlbufsiz, uint, 0600);
MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
"(defaults to 4096)");
@@ -98,12 +98,14 @@ static void ulog_timer(unsigned long data)
static struct sk_buff *ulog_alloc_skb(unsigned int size)
{
struct sk_buff *skb;
+ unsigned int n;
- skb = alloc_skb(nlbufsiz, GFP_ATOMIC);
+ n = max(size, nlbufsiz);
+ skb = alloc_skb(n, GFP_ATOMIC);
if (!skb) {
PRINTR(KERN_ERR "ebt_ulog: can't alloc whole buffer "
- "of size %ub!\n", nlbufsiz);
- if (size < nlbufsiz) {
+ "of size %ub!\n", n);
+ if (n > size) {
/* try to allocate only as much as we need for
* current packet */
skb = alloc_skb(size, GFP_ATOMIC);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 00729b3..cbd4020 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -934,6 +934,13 @@ static int do_replace(void __user *user, unsigned int len)
BUGPRINT("Entries_size never zero\n");
return -EINVAL;
}
+ /* overflow check */
+ if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS -
+ SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
+ return -ENOMEM;
+ if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
+ return -ENOMEM;
+
countersize = COUNTER_OFFSET(tmp.nentries) *
(highest_possible_processor_id()+1);
newinfo = (struct ebt_table_info *)
diff --git a/net/core/dev.c b/net/core/dev.c
index ffb8207..2afb0de 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3237,7 +3237,7 @@ static int __init net_dev_init(void)
* Initialise the packet receive queues.
*/
- for (i = 0; i < NR_CPUS; i++) {
+ for_each_cpu(i) {
struct softnet_data *queue;
queue = &per_cpu(softnet_data, i);
diff --git a/net/core/utils.c b/net/core/utils.c
index ac1d1fc..fdc4f38 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -121,7 +121,7 @@ void __init net_random_init(void)
{
int i;
- for (i = 0; i < NR_CPUS; i++) {
+ for_each_cpu(i) {
struct nrnd_state *state = &per_cpu(net_rand_state,i);
__net_srandom(state, i+jiffies);
}
@@ -133,7 +133,7 @@ static int net_random_reseed(void)
unsigned long seed[NR_CPUS];
get_random_bytes(seed, sizeof(seed));
- for (i = 0; i < NR_CPUS; i++) {
+ for_each_cpu(i) {
struct nrnd_state *state = &per_cpu(net_rand_state,i);
__net_srandom(state, seed[i]);
}
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 6bc0887..4d1c409 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -524,7 +524,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, u32 info)
iph->tos;
if (ip_options_echo(&icmp_param.replyopts, skb_in))
- goto ende;
+ goto out_unlock;
/*
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index afe3d8f..dd1048b 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -807,6 +807,13 @@ static int do_replace(void __user *user, unsigned int len)
if (len != sizeof(tmp) + tmp.size)
return -ENOPROTOOPT;
+ /* overflow check */
+ if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
+ SMP_CACHE_BYTES)
+ return -ENOMEM;
+ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+ return -ENOMEM;
+
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
return -ENOMEM;
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index c9ebbe0..e0b5926 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -1216,7 +1216,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
b = skb->tail;
- type |= NFNL_SUBSYS_CTNETLINK << 8;
+ type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
nfmsg = NLMSG_DATA(nlh);
@@ -1567,6 +1567,7 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = {
};
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
static int __init ctnetlink_init(void)
{
diff --git a/net/ipv4/netfilter/ip_conntrack_tftp.c b/net/ipv4/netfilter/ip_conntrack_tftp.c
index d3c5a37..4ba4463 100644
--- a/net/ipv4/netfilter/ip_conntrack_tftp.c
+++ b/net/ipv4/netfilter/ip_conntrack_tftp.c
@@ -71,6 +71,7 @@ static int tftp_help(struct sk_buff **pskb,
exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
exp->mask.src.ip = 0xffffffff;
+ exp->mask.src.u.udp.port = 0;
exp->mask.dst.ip = 0xffffffff;
exp->mask.dst.u.udp.port = 0xffff;
exp->mask.dst.protonum = 0xff;
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index ad438fb..92c5499 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -209,8 +209,8 @@ ip_nat_in(unsigned int hooknum,
&& (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) {
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
- if (ct->tuplehash[dir].tuple.src.ip !=
- ct->tuplehash[!dir].tuple.dst.ip) {
+ if (ct->tuplehash[dir].tuple.dst.ip !=
+ ct->tuplehash[!dir].tuple.src.ip) {
dst_release((*pskb)->dst);
(*pskb)->dst = NULL;
}
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 2371b20..16f47c6 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -921,6 +921,13 @@ do_replace(void __user *user, unsigned int len)
if (len != sizeof(tmp) + tmp.size)
return -ENOPROTOOPT;
+ /* overflow check */
+ if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
+ SMP_CACHE_BYTES)
+ return -ENOMEM;
+ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+ return -ENOMEM;
+
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
return -ENOMEM;
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 641dbc4..180a9ea 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -35,6 +35,10 @@
* each nlgroup you are using, so the total kernel memory usage increases
* by that factor.
*
+ * Actually you should use nlbufsiz a bit smaller than PAGE_SIZE, since
+ * nlbufsiz is used with alloc_skb, which adds another
+ * sizeof(struct skb_shared_info). Use NLMSG_GOODSIZE instead.
+ *
* flushtimeout:
* Specify, after how many hundredths of a second the queue should be
* flushed even if it is not full yet.
@@ -76,7 +80,7 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
#define PRINTR(format, args...) do { if (net_ratelimit()) printk(format , ## args); } while (0)
-static unsigned int nlbufsiz = 4096;
+static unsigned int nlbufsiz = NLMSG_GOODSIZE;
module_param(nlbufsiz, uint, 0400);
MODULE_PARM_DESC(nlbufsiz, "netlink buffer size");
@@ -143,22 +147,26 @@ static void ulog_timer(unsigned long data)
static struct sk_buff *ulog_alloc_skb(unsigned int size)
{
struct sk_buff *skb;
+ unsigned int n;
/* alloc skb which should be big enough for a whole
* multipart message. WARNING: has to be <= 131000
* due to slab allocator restrictions */
- skb = alloc_skb(nlbufsiz, GFP_ATOMIC);
+ n = max(size, nlbufsiz);
+ skb = alloc_skb(n, GFP_ATOMIC);
if (!skb) {
- PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n",
- nlbufsiz);
+ PRINTR("ipt_ULOG: can't alloc whole buffer %ub!\n", n);
- /* try to allocate only as much as we need for
- * current packet */
+ if (n > size) {
+ /* try to allocate only as much as we need for
+ * current packet */
- skb = alloc_skb(size, GFP_ATOMIC);
- if (!skb)
- PRINTR("ipt_ULOG: can't even allocate %ub\n", size);
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (!skb)
+ PRINTR("ipt_ULOG: can't even allocate %ub\n",
+ size);
+ }
}
return skb;
diff --git a/net/ipv4/netfilter/ipt_policy.c b/net/ipv4/netfilter/ipt_policy.c
index 18ca825..5a7a265 100644
--- a/net/ipv4/netfilter/ipt_policy.c
+++ b/net/ipv4/netfilter/ipt_policy.c
@@ -26,10 +26,13 @@ MODULE_LICENSE("GPL");
static inline int
match_xfrm_state(struct xfrm_state *x, const struct ipt_policy_elem *e)
{
-#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x))
+#define MATCH_ADDR(x,y,z) (!e->match.x || \
+ ((e->x.a4.s_addr == (e->y.a4.s_addr & (z))) \
+ ^ e->invert.x))
+#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x))
- return MATCH(saddr, x->props.saddr.a4 & e->smask) &&
- MATCH(daddr, x->id.daddr.a4 & e->dmask) &&
+ return MATCH_ADDR(saddr, smask, x->props.saddr.a4) &&
+ MATCH_ADDR(daddr, dmask, x->id.daddr.a4) &&
MATCH(proto, x->id.proto) &&
MATCH(mode, x->props.mode) &&
MATCH(spi, x->id.spi) &&
@@ -89,7 +92,7 @@ match_policy_out(const struct sk_buff *skb, const struct ipt_policy_info *info)
return 0;
}
- return strict ? 1 : 0;
+ return strict ? i == info->len : 0;
}
static int match(const struct sk_buff *skb,
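Two fixes in this file. The new MATCH_ADDR masks the address taken from the xfrm state with the configured netmask and compares it against the rule address (which is stored pre-masked), then applies the per-field invert flag; expanded for one IPv4 field, the logic is (illustrative helper, not part of the patch):

    static inline int addr_matches(u32 rule_addr, u32 mask, u32 state_addr,
                                   int invert)
    {
            /* rule_addr is stored pre-masked */
            return (rule_addr == (state_addr & mask)) ^ invert;
    }

And in match_policy_out(), strict mode used to return 1 whenever no element mismatched; requiring i == info->len additionally rejects policies that carry fewer transforms than the rule lists.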
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 39d49dc..1b167c4b 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -49,7 +49,7 @@ static int fold_prot_inuse(struct proto *proto)
int res = 0;
int cpu;
- for (cpu = 0; cpu < NR_CPUS; cpu++)
+ for_each_cpu(cpu)
res += proto->stats[cpu].inuse;
return res;
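Looping up to NR_CPUS reads stat slots for CPUs that can never come online; for_each_cpu() walks only the possible-CPU map (that was its name in this kernel generation; later kernels spell it for_each_possible_cpu()). The same conversion recurs in net/ipv6/proc.c and net/socket.c below; as a standalone sketch of the idiom:

    DEFINE_PER_CPU(int, sockets_in_use);

    static int count_sockets(void)
    {
            int cpu, counter = 0;

            /* visit only CPUs that can exist on this system */
            for_each_cpu(cpu)
                    counter += per_cpu(sockets_in_use, cpu);
            return counter;
    }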
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 847068f..74ff56c 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -978,6 +978,13 @@ do_replace(void __user *user, unsigned int len)
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
return -EFAULT;
+ /* overflow check */
+ if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
+ SMP_CACHE_BYTES)
+ return -ENOMEM;
+ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
+ return -ENOMEM;
+
newinfo = xt_alloc_table_info(tmp.size);
if (!newinfo)
return -ENOMEM;
diff --git a/net/ipv6/netfilter/ip6t_policy.c b/net/ipv6/netfilter/ip6t_policy.c
index afe1cc4..3d39ec9 100644
--- a/net/ipv6/netfilter/ip6t_policy.c
+++ b/net/ipv6/netfilter/ip6t_policy.c
@@ -26,8 +26,9 @@ MODULE_LICENSE("GPL");
static inline int
match_xfrm_state(struct xfrm_state *x, const struct ip6t_policy_elem *e)
{
-#define MATCH_ADDR(x,y,z) (!e->match.x || \
- ((ip6_masked_addrcmp((z), &e->x, &e->y)) == 0) ^ e->invert.x)
+#define MATCH_ADDR(x,y,z) (!e->match.x || \
+ ((!ip6_masked_addrcmp(&e->x.a6, &e->y.a6, z)) \
+ ^ e->invert.x))
#define MATCH(x,y) (!e->match.x || ((e->x == (y)) ^ e->invert.x))
return MATCH_ADDR(saddr, smask, (struct in6_addr *)&x->props.saddr.a6) &&
@@ -91,7 +92,7 @@ match_policy_out(const struct sk_buff *skb, const struct ip6t_policy_info *info)
return 0;
}
- return strict ? 1 : 0;
+ return strict ? i == info->len : 0;
}
static int match(const struct sk_buff *skb,
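The IPv6 variant delegates the masked comparison to ip6_masked_addrcmp(), which returns 0 on a match, hence the added negation so that MATCH_ADDR is true before the invert bit is applied; match_policy_out() gets the same strict-mode fix as the IPv4 version. In spirit the comparison does this (an illustrative re-implementation, not the kernel helper):

    static int masked_cmp6(const struct in6_addr *rule,
                           const struct in6_addr *mask,
                           const struct in6_addr *addr)
    {
            int i;

            for (i = 0; i < 4; i++)
                    if ((rule->s6_addr32[i] ^ addr->s6_addr32[i]) &
                        mask->s6_addr32[i])
                            return 1;   /* differs under the mask */
            return 0;                   /* match */
    }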
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 50a13e7..4238b1e 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -38,7 +38,7 @@ static int fold_prot_inuse(struct proto *proto)
int res = 0;
int cpu;
- for (cpu=0; cpu<NR_CPUS; cpu++)
+ for_each_cpu(cpu)
res += proto->stats[cpu].inuse;
return res;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 62bb509..0ce337a 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -188,7 +188,7 @@ extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
struct nf_conntrack_protocol *
__nf_ct_proto_find(u_int16_t l3proto, u_int8_t protocol)
{
- if (unlikely(nf_ct_protos[l3proto] == NULL))
+ if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
return &nf_conntrack_generic_protocol;
return nf_ct_protos[l3proto][protocol];
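nf_ct_protos[] has AF_MAX entries, but ctnetlink allows userspace to hand in an arbitrary l3proto, so the index must be validated before the dereference. The general shape of the fix (handler names illustrative, not from the patch):

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct handler { int id; };
    static struct handler *handlers[64];    /* AF_MAX-sized in the kernel */
    static struct handler default_handler;

    static struct handler *lookup(unsigned int idx)
    {
            /* idx may come straight from a netlink message: validate
             * it before using it as a subscript */
            if (idx >= ARRAY_SIZE(handlers) || handlers[idx] == NULL)
                    return &default_handler;
            return handlers[idx];
    }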
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index ab0c920..6f210f3 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -657,8 +657,6 @@ static int __init init(void)
/* FIXME should be configurable whether IPv4 and IPv6 FTP connections
are tracked or not - YK */
for (i = 0; i < ports_c; i++) {
- memset(&ftp[i], 0, sizeof(struct nf_conntrack_helper));
-
ftp[i][0].tuple.src.l3num = PF_INET;
ftp[i][1].tuple.src.l3num = PF_INET6;
for (j = 0; j < 2; j++) {
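The deleted memset cleared only sizeof(struct nf_conntrack_helper) bytes at &ftp[i], yet ftp[i] is a row of two helpers (one for PF_INET, one for PF_INET6), so it zeroed just the first entry; and since the array is static, it is already zero-initialized, making the call redundant as well as wrongly sized. Had clearing been needed, the size would have to span the row:

    /* hypothetical correct form, were it needed at all:
     * sizeof(ftp[i]) covers both helpers in the row */
    memset(&ftp[i], 0, sizeof(ftp[i]));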
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 73ab16b..9ff34630 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1232,7 +1232,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
b = skb->tail;
- type |= NFNL_SUBSYS_CTNETLINK << 8;
+ type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(struct nfgenmsg));
nfmsg = NLMSG_DATA(nlh);
@@ -1589,6 +1589,7 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = {
};
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
static int __init ctnetlink_init(void)
{
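nfnetlink packs the subsystem ID into the high byte of the nlmsg type and the per-subsystem message type into the low byte (see NFNL_SUBSYS_ID/NFNL_MSG_TYPE), so an expectation event tagged NFNL_SUBSYS_CTNETLINK would be decoded as a conntrack event; the added MODULE_ALIAS also lets requests for the expectation subsystem autoload this module. The packing, as a one-liner:

    /* high byte: subsystem; low byte: message type */
    static inline u16 nfnl_mk_type(u8 subsys, u8 msg)
    {
            return ((u16)subsys << 8) | msg;
    }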
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e10512e..3b3c781 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -37,7 +37,7 @@
#include "../bridge/br_private.h"
#endif
-#define NFULNL_NLBUFSIZ_DEFAULT 4096
+#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE
#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */
#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */
@@ -314,24 +314,28 @@ static struct sk_buff *nfulnl_alloc_skb(unsigned int inst_size,
unsigned int pkt_size)
{
struct sk_buff *skb;
+ unsigned int n;
UDEBUG("entered (%u, %u)\n", inst_size, pkt_size);
/* alloc skb which should be big enough for a whole multipart
* message. WARNING: has to be <= 128k due to slab restrictions */
- skb = alloc_skb(inst_size, GFP_ATOMIC);
+ n = max(inst_size, pkt_size);
+ skb = alloc_skb(n, GFP_ATOMIC);
if (!skb) {
PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n",
inst_size);
- /* try to allocate only as much as we need for current
- * packet */
+ if (n > pkt_size) {
+ /* try to allocate only as much as we need for current
+ * packet */
- skb = alloc_skb(pkt_size, GFP_ATOMIC);
- if (!skb)
- PRINTR("nfnetlink_log: can't even alloc %u bytes\n",
- pkt_size);
+ skb = alloc_skb(pkt_size, GFP_ATOMIC);
+ if (!skb)
+ PRINTR("nfnetlink_log: can't even alloc %u "
+ "bytes\n", pkt_size);
+ }
}
return skb;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 18ed9c5..cac38b2e 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -825,7 +825,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
}
if (nfqa[NFQA_MARK-1])
- skb->nfmark = ntohl(*(u_int32_t *)NFA_DATA(nfqa[NFQA_MARK-1]));
+ entry->skb->nfmark = ntohl(*(u_int32_t *)
+ NFA_DATA(nfqa[NFQA_MARK-1]));
issue_verdict(entry, verdict);
instance_put(queue);
diff --git a/net/socket.c b/net/socket.c
index b38a263..a00851f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2078,7 +2078,7 @@ void socket_seq_show(struct seq_file *seq)
int cpu;
int counter = 0;
- for (cpu = 0; cpu < NR_CPUS; cpu++)
+ for_each_cpu(cpu)
counter += per_cpu(sockets_in_use, cpu);
/* It can be negative, by the way. 8) */
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 5760e05..d64aae8 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -123,7 +123,17 @@ KBUILD_HAVE_NLS := $(shell \
then echo yes ; \
else echo no ; fi)
ifeq ($(KBUILD_HAVE_NLS),no)
-HOSTCFLAGS += -DKBUILD_NO_NLS
+ HOSTCFLAGS += -DKBUILD_NO_NLS
+else
+ KBUILD_NEED_LINTL := $(shell \
+ if echo -e "\#include <libintl.h>\nint main(int a, char** b) { gettext(\"\"); return 0; }\n" | \
+ $(HOSTCC) $(HOSTCFLAGS) -x c - -o /dev/null> /dev/null 2>&1 ; \
+ then echo no ; \
+ else echo yes ; fi)
+ ifeq ($(KBUILD_NEED_LINTL),yes)
+ HOSTLOADLIBES_conf += -lintl
+ HOSTLOADLIBES_mconf += -lintl
+ endif
endif
# generated files seem to need this to find local include files
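The new probe covers hosts where <libintl.h> is present but gettext() lives outside libc, in which case conf and mconf must link against -lintl explicitly. Unescaped, the program the shell fragment pipes into $(HOSTCC) is:

    /* reconstructed from the echo -e string above: if this links
     * without -lintl, libc provides gettext and KBUILD_NEED_LINTL
     * stays "no"; otherwise -lintl is appended to the host link flags */
    #include <libintl.h>

    int main(int a, char **b)
    {
            gettext("");
            return 0;
    }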
diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
index b59582b..502f78f 100644
--- a/security/selinux/Kconfig
+++ b/security/selinux/Kconfig
@@ -1,6 +1,6 @@
config SECURITY_SELINUX
bool "NSA SELinux Support"
- depends on SECURITY && NET && INET
+ depends on SECURITY_NETWORK && NET && INET
default n
help
This selects NSA Security-Enhanced Linux (SELinux).
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index 06d54d9..688c0a2 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -4,9 +4,7 @@
obj-$(CONFIG_SECURITY_SELINUX) := selinux.o ss/
-selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o
-
-selinux-$(CONFIG_SECURITY_NETWORK) += netif.o
+selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o
selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 4ae834d..b7773bf 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -232,7 +232,6 @@ static void superblock_free_security(struct super_block *sb)
kfree(sbsec);
}
-#ifdef CONFIG_SECURITY_NETWORK
static int sk_alloc_security(struct sock *sk, int family, gfp_t priority)
{
struct sk_security_struct *ssec;
@@ -261,7 +260,6 @@ static void sk_free_security(struct sock *sk)
sk->sk_security = NULL;
kfree(ssec);
}
-#endif /* CONFIG_SECURITY_NETWORK */
/* The security server must be initialized before
any labeling or access decisions can be provided. */
@@ -2736,8 +2734,6 @@ static void selinux_task_to_inode(struct task_struct *p,
return;
}
-#ifdef CONFIG_SECURITY_NETWORK
-
/* Returns error only if unable to parse addresses */
static int selinux_parse_skb_ipv4(struct sk_buff *skb, struct avc_audit_data *ad)
{
@@ -3556,15 +3552,6 @@ static unsigned int selinux_ipv6_postroute_last(unsigned int hooknum,
#endif /* CONFIG_NETFILTER */
-#else
-
-static inline int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
-{
- return 0;
-}
-
-#endif /* CONFIG_SECURITY_NETWORK */
-
static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
{
struct task_security_struct *tsec;
@@ -4340,7 +4327,6 @@ static struct security_operations selinux_ops = {
.getprocattr = selinux_getprocattr,
.setprocattr = selinux_setprocattr,
-#ifdef CONFIG_SECURITY_NETWORK
.unix_stream_connect = selinux_socket_unix_stream_connect,
.unix_may_send = selinux_socket_unix_may_send,
@@ -4362,7 +4348,6 @@ static struct security_operations selinux_ops = {
.sk_alloc_security = selinux_sk_alloc_security,
.sk_free_security = selinux_sk_free_security,
.sk_getsid = selinux_sk_getsid_security,
-#endif
#ifdef CONFIG_SECURITY_NETWORK_XFRM
.xfrm_policy_alloc_security = selinux_xfrm_policy_alloc,
@@ -4440,7 +4425,7 @@ next_sb:
all processes and objects when they are created. */
security_initcall(selinux_init);
-#if defined(CONFIG_SECURITY_NETWORK) && defined(CONFIG_NETFILTER)
+#if defined(CONFIG_NETFILTER)
static struct nf_hook_ops selinux_ipv4_op = {
.hook = selinux_ipv4_postroute_last,
@@ -4501,13 +4486,13 @@ static void selinux_nf_ip_exit(void)
}
#endif
-#else /* CONFIG_SECURITY_NETWORK && CONFIG_NETFILTER */
+#else /* CONFIG_NETFILTER */
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
#define selinux_nf_ip_exit()
#endif
-#endif /* CONFIG_SECURITY_NETWORK && CONFIG_NETFILTER */
+#endif /* CONFIG_NETFILTER */
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
int selinux_disable(void)