author	Christoph Lameter <christoph@lameter.com>	2005-07-07 17:56:59 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-07-07 18:23:46 -0700
commit	6c036527a630720063b67d9a65455e8caca2c8fa (patch)
tree	316e947f5f4efcda0205e48044ed1d12665eaed1
parent	0db925af1db5f3dfe1691c35b39496e2baaff9c9 (diff)
[PATCH] mostly_read data section
Add a new section called ".data.read_mostly" for data items that are read frequently and rarely written to, such as cpumaps. If these items are placed in the generic .data section, they may end up in cachelines together with data that is frequently updated. In that case all processors in an SMP system must needlessly reload those cachelines again and again to access the frequently read variables. Separating out the read-mostly data lets each cpu in an SMP system keep a local, clean copy of those shared cachelines, thereby optimizing performance.

Signed-off-by: Alok N Kataria <alokk@calsoftinc.com>
Signed-off-by: Shobhit Dayal <shobhit@calsoftinc.com>
Signed-off-by: Christoph Lameter <christoph@scalex86.org>
Signed-off-by: Shai Fultheim <shai@scalex86.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
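For illustration, a minimal sketch of how code would use the new annotation once this patch is applied (the variable names below are hypothetical and not part of the patch):

#include <linux/cache.h>	/* defines __read_mostly after this patch */

/*
 * Set once during initialization, then only read on hot paths.
 * Tagging it __read_mostly places it in .data.read_mostly, away from
 * frequently written data, so its cacheline stays clean and can be
 * kept in every CPU's cache.
 */
static int example_max_channels __read_mostly = 8;

/* Frequently updated data is left in the normal .data section. */
static unsigned long example_event_count;

On architectures that do not define the new section (anything other than x86 in this patch), __read_mostly expands to nothing and the variable stays in .data.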
-rw-r--r--arch/i386/kernel/cpu/intel.c2
-rw-r--r--arch/i386/kernel/smpboot.c18
-rw-r--r--arch/i386/kernel/time.c2
-rw-r--r--arch/i386/kernel/timers/timer_hpet.c4
-rw-r--r--arch/i386/kernel/vmlinux.lds.S3
-rw-r--r--arch/x86_64/kernel/vmlinux.lds.S4
-rw-r--r--drivers/char/random.c2
-rw-r--r--fs/bio.c2
-rw-r--r--include/linux/cache.h6
-rw-r--r--kernel/profile.c4
-rw-r--r--lib/radix-tree.c2
11 files changed, 31 insertions, 18 deletions
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 96a75d0..a2c33c1 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -25,7 +25,7 @@ extern int trap_init_f00f_bug(void);
/*
* Alignment at which movsl is preferred for bulk memory copies.
*/
-struct movsl_mask movsl_mask;
+struct movsl_mask movsl_mask __read_mostly;
#endif
void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index d66bf48..8ac8e9f 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -68,21 +68,21 @@ EXPORT_SYMBOL(smp_num_siblings);
#endif
/* Package ID of each logical CPU */
-int phys_proc_id[NR_CPUS] = {[0 ... NR_CPUS-1] = BAD_APICID};
+int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
EXPORT_SYMBOL(phys_proc_id);
/* Core ID of each logical CPU */
-int cpu_core_id[NR_CPUS] = {[0 ... NR_CPUS-1] = BAD_APICID};
+int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
EXPORT_SYMBOL(cpu_core_id);
-cpumask_t cpu_sibling_map[NR_CPUS];
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);
-cpumask_t cpu_core_map[NR_CPUS];
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
/* bitmap of online cpus */
-cpumask_t cpu_online_map;
+cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);
cpumask_t cpu_callin_map;
@@ -100,7 +100,7 @@ static int __devinitdata tsc_sync_disabled;
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);
-u8 x86_cpu_to_apicid[NR_CPUS] =
+u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly =
{ [0 ... NR_CPUS-1] = 0xff };
EXPORT_SYMBOL(x86_cpu_to_apicid);
@@ -550,10 +550,10 @@ extern struct {
#ifdef CONFIG_NUMA
/* which logical CPUs are on which nodes */
-cpumask_t node_2_cpu_mask[MAX_NUMNODES] =
+cpumask_t node_2_cpu_mask[MAX_NUMNODES] __read_mostly =
{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
/* which node each logical CPU is on */
-int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
+int cpu_2_node[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_2_node);
/* set up a mapping between cpu and node. */
@@ -581,7 +581,7 @@ static inline void unmap_cpu_to_node(int cpu)
#endif /* CONFIG_NUMA */
-u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
static void map_cpu_to_logical_apicid(void)
{
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 2854c35..0ee9dee 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
-struct timer_opts *cur_timer = &timer_none;
+struct timer_opts *cur_timer __read_mostly = &timer_none;
/*
* This is a special lock that is owned by the CPU and holds the index
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c
index d766e09..ef8dac5 100644
--- a/arch/i386/kernel/timers/timer_hpet.c
+++ b/arch/i386/kernel/timers/timer_hpet.c
@@ -18,7 +18,7 @@
#include "mach_timer.h"
#include <asm/hpet.h>
-static unsigned long hpet_usec_quotient; /* convert hpet clks to usec */
+static unsigned long __read_mostly hpet_usec_quotient; /* convert hpet clks to usec */
static unsigned long tsc_hpet_quotient; /* convert tsc to hpet clks */
static unsigned long hpet_last; /* hpet counter value at last tick*/
static unsigned long last_tsc_low; /* lsb 32 bits of Time Stamp Counter */
@@ -180,7 +180,7 @@ static int __init init_hpet(char* override)
/************************************************************/
/* tsc timer_opts struct */
-static struct timer_opts timer_hpet = {
+static struct timer_opts timer_hpet __read_mostly = {
.name = "hpet",
.mark_offset = mark_offset_hpet,
.get_offset = get_offset_hpet,
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 7e01a52..761972f 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -57,6 +57,9 @@ SECTIONS
*(.data.cacheline_aligned)
}
+ /* rarely changed data like cpu maps */
+ . = ALIGN(32);
+ .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) { *(.data.read_mostly) }
_edata = .; /* End of data section */
. = ALIGN(THREAD_SIZE); /* init_task */
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 73389f5..61c1275 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -56,6 +56,10 @@ SECTIONS
.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
*(.data.cacheline_aligned)
}
+ . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
+ .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
+ *(.data.read_mostly)
+ }
#define VSYSCALL_ADDR (-10*1024*1024)
#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.cacheline_aligned) + SIZEOF(.data.cacheline_aligned) + 4095) & ~(4095))
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 460b5d4..6b11d6b 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -271,7 +271,7 @@ static int random_write_wakeup_thresh = 128;
* samples to avoid wasting CPU time and reduce lock contention.
*/
-static int trickle_thresh = INPUT_POOL_WORDS * 28;
+static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
static DEFINE_PER_CPU(int, trickle_count) = 0;
diff --git a/fs/bio.c b/fs/bio.c
index 3a1472a..ca8f7a8 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -52,7 +52,7 @@ struct biovec_slab {
*/
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
-static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] = {
+static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 4d767b9..2b66a36 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -13,6 +13,12 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
+#ifdef CONFIG_X86
+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#else
+#define __read_mostly
+#endif
+
#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif
diff --git a/kernel/profile.c b/kernel/profile.c
index ad8cbb7..f89248e 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -35,11 +35,11 @@ struct profile_hit {
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
/* Oprofile timer tick hook */
-int (*timer_hook)(struct pt_regs *);
+int (*timer_hook)(struct pt_regs *) __read_mostly;
static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;
-static int prof_on;
+static int prof_on __read_mostly;
static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 04d6643..10bed1c 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -58,7 +58,7 @@ struct radix_tree_path {
#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (RADIX_TREE_INDEX_BITS/RADIX_TREE_MAP_SHIFT + 2)
-static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH];
+static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH] __read_mostly;
/*
* Radix tree node cache.