author    Takashi Iwai <tiwai@suse.de>  2010-06-02 14:18:06 +0200
committer Takashi Iwai <tiwai@suse.de>  2010-06-02 14:18:06 +0200
commit    c7a441bba9de3b4e166b6a4449208bc906d70558 (patch)
tree      346fdf11e464c8201a9aaa8abdd1c1b6dc4f86e0 /include
parent    ead54d878465291746c91c95749990d62742a6cf (diff)
parent    e4caa8bab3862a7694ab7c6dfede223227ad7fc5 (diff)
Merge branch 'fix/hda' into for-linus
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/percpu.h        | 10
-rw-r--r--  include/asm-generic/vmlinux.lds.h   | 38
-rw-r--r--  include/drm/i915_drm.h              |  5
-rw-r--r--  include/linux/cache.h               |  2
-rw-r--r--  include/linux/init.h                |  2
-rw-r--r--  include/linux/init_task.h           |  2
-rw-r--r--  include/linux/linkage.h             |  8
-rw-r--r--  include/linux/netfilter/x_tables.h  |  2
-rw-r--r--  include/linux/percpu-defs.h         |  4
-rw-r--r--  include/linux/skbuff.h              |  2
-rw-r--r--  include/linux/spinlock.h            |  2
-rw-r--r--  include/net/sock.h                  | 15
12 files changed, 41 insertions(+), 51 deletions(-)
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 04f91c2..b5043a9 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -80,7 +80,7 @@ extern void setup_per_cpu_areas(void);
#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
-#define PER_CPU_BASE_SECTION ".data.percpu"
+#define PER_CPU_BASE_SECTION ".data..percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
@@ -92,15 +92,15 @@ extern void setup_per_cpu_areas(void);
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
-#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
-#define PER_CPU_FIRST_SECTION ".first"
+#define PER_CPU_FIRST_SECTION "..first"
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION ".shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_FIRST_SECTION ""
#endif
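
The hunks above swap the single dot in the per-cpu section suffixes for a double dot (".data..percpu", "..shared_aligned", "..first"). As far as I can tell, the point of the rename is to keep kernel-specific section names from colliding with the ".data.<symbol>" sections gcc emits under -ffunction-sections/-fdata-sections. A minimal sketch of how these sections are reached from ordinary C code, assuming only the standard percpu API (the variable and function names here are made up):

	#include <linux/percpu.h>

	/* DEFINE_PER_CPU places the variable in PER_CPU_BASE_SECTION,
	 * i.e. ".data..percpu" on SMP builds after this rename. */
	static DEFINE_PER_CPU(unsigned long, pkt_count);

	static void account_packet(void)
	{
		this_cpu_inc(pkt_count);	/* lock-free per-CPU increment */
	}
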
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index ef779c6..48c5299 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -175,25 +175,25 @@
#define NOSAVE_DATA \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__nosave_begin) = .; \
- *(.data.nosave) \
+ *(.data..nosave) \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__nosave_end) = .;
#define PAGE_ALIGNED_DATA(page_align) \
. = ALIGN(page_align); \
- *(.data.page_aligned)
+ *(.data..page_aligned)
#define READ_MOSTLY_DATA(align) \
. = ALIGN(align); \
- *(.data.read_mostly)
+ *(.data..read_mostly)
#define CACHELINE_ALIGNED_DATA(align) \
. = ALIGN(align); \
- *(.data.cacheline_aligned)
+ *(.data..cacheline_aligned)
#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
- *(.data.init_task)
+ *(.data..init_task)
/*
* Read only Data
@@ -435,7 +435,7 @@
*/
#define INIT_TASK_DATA_SECTION(align) \
. = ALIGN(align); \
- .data.init_task : { \
+ .data..init_task : { \
INIT_TASK_DATA(align) \
}
@@ -499,7 +499,7 @@
#define BSS(bss_align) \
. = ALIGN(bss_align); \
.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
- *(.bss.page_aligned) \
+ *(.bss..page_aligned) \
*(.dynbss) \
*(.bss) \
*(COMMON) \
@@ -666,16 +666,16 @@
*/
#define PERCPU_VADDR(vaddr, phdr) \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
- LOAD_OFFSET) { \
VMLINUX_SYMBOL(__per_cpu_start) = .; \
- *(.data.percpu.first) \
- *(.data.percpu.page_aligned) \
- *(.data.percpu) \
- *(.data.percpu.shared_aligned) \
+ *(.data..percpu..first) \
+ *(.data..percpu..page_aligned) \
+ *(.data..percpu) \
+ *(.data..percpu..shared_aligned) \
VMLINUX_SYMBOL(__per_cpu_end) = .; \
} phdr \
- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
+ . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
/**
* PERCPU - define output section for percpu area, simple version
@@ -687,18 +687,18 @@
*
* This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
* that __per_cpu_load is defined as a relative symbol against
- * .data.percpu which is required for relocatable x86_32
+ * .data..percpu which is required for relocatable x86_32
* configuration.
*/
#define PERCPU(align) \
. = ALIGN(align); \
- .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+ .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
VMLINUX_SYMBOL(__per_cpu_start) = .; \
- *(.data.percpu.first) \
- *(.data.percpu.page_aligned) \
- *(.data.percpu) \
- *(.data.percpu.shared_aligned) \
+ *(.data..percpu..first) \
+ *(.data..percpu..page_aligned) \
+ *(.data..percpu) \
+ *(.data..percpu..shared_aligned) \
VMLINUX_SYMBOL(__per_cpu_end) = .; \
}
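
The block comment in this last hunk describes how PERCPU() is meant to be pulled into an architecture's linker script; a rough sketch under that assumption (hypothetical arch directory, real macro):

	/* arch/<arch>/kernel/vmlinux.lds.S (sketch) */
	#include <asm-generic/vmlinux.lds.h>

	SECTIONS
	{
		/* ... text, rodata, init sections ... */
		PERCPU(PAGE_SIZE)	/* emits the .data..percpu output section */
		/* ... bss and the rest ... */
	}
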
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index b64a8d7..7f0028e 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -275,6 +275,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_OVERLAY 7
#define I915_PARAM_HAS_PAGEFLIPPING 8
#define I915_PARAM_HAS_EXECBUF2 9
+#define I915_PARAM_HAS_BSD 10
typedef struct drm_i915_getparam {
int param;
@@ -616,7 +617,9 @@ struct drm_i915_gem_execbuffer2 {
__u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
__u64 cliprects_ptr;
- __u64 flags; /* currently unused */
+#define I915_EXEC_RENDER (1<<0)
+#define I915_EXEC_BSD (1<<1)
+ __u64 flags;
__u64 rsvd1;
__u64 rsvd2;
};
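
The new execbuffer2 flag bits let userspace pick a ring for a batch, and I915_PARAM_HAS_BSD advertises whether the BSD (video) ring is available. A hedged userspace sketch, assuming the standard DRM ioctls and an execbuf structure that has been filled in elsewhere:

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	int has_bsd = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_BSD,
		.value = &has_bsd,
	};
	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);

	/* execbuf is a struct drm_i915_gem_execbuffer2 prepared elsewhere */
	execbuf.flags = has_bsd ? I915_EXEC_BSD : I915_EXEC_RENDER;
	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
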
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 97e2488..4c570653 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -31,7 +31,7 @@
#ifndef __cacheline_aligned
#define __cacheline_aligned \
__attribute__((__aligned__(SMP_CACHE_BYTES), \
- __section__(".data.cacheline_aligned")))
+ __section__(".data..cacheline_aligned")))
#endif /* __cacheline_aligned */
#ifndef __cacheline_aligned_in_smp
diff --git a/include/linux/init.h b/include/linux/init.h
index ab1d31f..de99430 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -301,7 +301,7 @@ void __init parse_early_options(char *cmdline);
#endif
/* Data marked not to be saved by software suspend */
-#define __nosavedata __section(.data.nosave)
+#define __nosavedata __section(.data..nosave)
/* This means "can be init if no module support, otherwise module load
may call it." */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2beaa134..1f43fa5 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -183,7 +183,7 @@ extern struct cred init_cred;
}
/* Attach to the init_task data structure for proper alignment */
-#define __init_task_data __attribute__((__section__(".data.init_task")))
+#define __init_task_data __attribute__((__section__(".data..init_task")))
#endif
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 5126cce..7135ebc 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -18,8 +18,8 @@
# define asmregparm
#endif
-#define __page_aligned_data __section(.data.page_aligned) __aligned(PAGE_SIZE)
-#define __page_aligned_bss __section(.bss.page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
/*
* For assembly routines.
@@ -27,8 +27,8 @@
* Note when using these that you must specify the appropriate
* alignment directives yourself
*/
-#define __PAGE_ALIGNED_DATA .section ".data.page_aligned", "aw"
-#define __PAGE_ALIGNED_BSS .section ".bss.page_aligned", "aw"
+#define __PAGE_ALIGNED_DATA .section ".data..page_aligned", "aw"
+#define __PAGE_ALIGNED_BSS .section ".bss..page_aligned", "aw"
/*
* This is used by architectures to keep arguments on the stack
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index c00cc0c..24e5d01 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -397,7 +397,7 @@ struct xt_table_info {
* @stacksize jumps (number of user chains) can possibly be made.
*/
unsigned int stacksize;
- unsigned int *stackptr;
+ unsigned int __percpu *stackptr;
void ***jumpstack;
/* ipt_entry tables: one per CPU */
/* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
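
The __percpu annotation on stackptr is a sparse-only marker: it says the pointer lives in the per-CPU address space and should only be dereferenced through the per-CPU accessors. A small sketch of the access pattern this implies, assuming the standard percpu allocator API:

	unsigned int __percpu *stackptr = alloc_percpu(unsigned int);
	int cpu;

	if (stackptr)
		for_each_possible_cpu(cpu)
			*per_cpu_ptr(stackptr, cpu) = 0;	/* reset each CPU's slot */
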
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 68567c0..ce2dc65 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -131,11 +131,11 @@
* Declaration/definition used for per-CPU variables that must be page aligned.
*/
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \
- DECLARE_PER_CPU_SECTION(type, name, ".page_aligned") \
+ DECLARE_PER_CPU_SECTION(type, name, "..page_aligned") \
__aligned(PAGE_SIZE)
#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
- DEFINE_PER_CPU_SECTION(type, name, ".page_aligned") \
+ DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \
__aligned(PAGE_SIZE)
/*
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 7cdfb4d..bf243fc 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -501,7 +501,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
return __alloc_skb(size, priority, 1, -1);
}
-extern int skb_recycle_check(struct sk_buff *skb, int skb_size);
+extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
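
skb_recycle_check() keeps its semantics and simply returns bool, so callers read as a predicate. A hedged sketch of the usual driver-side pattern (reuse_rx_skb() and rx_buf_size are made-up names for this illustration):

	if (skb_recycle_check(skb, rx_buf_size))
		reuse_rx_skb(priv, skb);	/* skb was reset in place, reuse it */
	else
		dev_kfree_skb_any(skb);
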
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 89fac6a..f885465 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -60,7 +60,7 @@
/*
* Must define these before including other files, inline functions need them
*/
-#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
+#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
#define LOCK_SECTION_START(extra) \
".subsection 1\n\t" \
diff --git a/include/net/sock.h b/include/net/sock.h
index ca241ea..731150d5 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1524,20 +1524,7 @@ extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
-{
- /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
- number of warnings when compiling with -W --ANK
- */
- if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
- (unsigned)sk->sk_rcvbuf)
- return -ENOMEM;
- skb_set_owner_r(skb, sk);
- skb_queue_tail(&sk->sk_error_queue, skb);
- if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, skb->len);
- return 0;
-}
+extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
/*
* Recover an error report and clear atomically