Diffstat (limited to 'include')
-rw-r--r--  include/asm-frv/mem-layout.h | 2
-rw-r--r--  include/asm-frv/pgtable.h | 19
-rw-r--r--  include/asm-frv/spr-regs.h | 14
-rw-r--r--  include/asm-frv/system.h | 21
-rw-r--r--  include/asm-mips/cacheflush.h | 18
-rw-r--r--  include/asm-mips/mach-pb1x00/pb1200.h | 2
-rw-r--r--  include/asm-x86/irqflags.h | 29
-rw-r--r--  include/asm-x86/linkage.h | 35
-rw-r--r--  include/asm-x86/nops.h | 20
-rw-r--r--  include/linux/Kbuild | 2
-rw-r--r--  include/linux/cgroup.h | 1
-rw-r--r--  include/linux/hpet.h | 2
-rw-r--r--  include/linux/if_tun.h | 39
-rw-r--r--  include/linux/ipv6.h | 2
-rw-r--r--  include/linux/libata.h | 25
-rw-r--r--  include/linux/linkage.h | 20
-rw-r--r--  include/linux/pnp.h | 2
-rw-r--r--  include/linux/spinlock.h | 3
-rw-r--r--  include/linux/virtio.h | 5
-rw-r--r--  include/net/inet_ecn.h | 2
-rw-r--r--  include/net/sctp/command.h | 1
-rw-r--r--  include/net/sctp/ulpevent.h | 2
-rw-r--r--  include/net/tcp.h | 2
23 files changed, 172 insertions, 96 deletions
diff --git a/include/asm-frv/mem-layout.h b/include/asm-frv/mem-layout.h
index 8353225..734a1d0 100644
--- a/include/asm-frv/mem-layout.h
+++ b/include/asm-frv/mem-layout.h
@@ -60,7 +60,7 @@
*/
#define BRK_BASE __UL(2 * 1024 * 1024 + PAGE_SIZE)
#define STACK_TOP __UL(2 * 1024 * 1024)
-#define STACK_TOP_MAX STACK_TOP
+#define STACK_TOP_MAX __UL(0xc0000000)
/* userspace process size */
#ifdef CONFIG_MMU
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index 6c0682e..4e21904 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -507,13 +507,22 @@ static inline int pte_file(pte_t pte)
*/
static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
+ struct mm_struct *mm;
unsigned long ampr;
- pgd_t *pge = pgd_offset(current->mm, address);
- pud_t *pue = pud_offset(pge, address);
- pmd_t *pme = pmd_offset(pue, address);
- ampr = pme->ste[0] & 0xffffff00;
- ampr |= xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C | xAMPRx_V;
+ mm = current->mm;
+ if (mm) {
+ pgd_t *pge = pgd_offset(mm, address);
+ pud_t *pue = pud_offset(pge, address);
+ pmd_t *pme = pmd_offset(pue, address);
+
+ ampr = pme->ste[0] & 0xffffff00;
+ ampr |= xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C |
+ xAMPRx_V;
+ } else {
+ address = ULONG_MAX;
+ ampr = 0;
+ }
asm volatile("movgs %0,scr0\n"
"movgs %0,scr1\n"
diff --git a/include/asm-frv/spr-regs.h b/include/asm-frv/spr-regs.h
index c2a541e..01e6af5 100644
--- a/include/asm-frv/spr-regs.h
+++ b/include/asm-frv/spr-regs.h
@@ -99,9 +99,23 @@
#define TBR_TT_TRAP1 (0x81 << 4)
#define TBR_TT_TRAP2 (0x82 << 4)
#define TBR_TT_TRAP3 (0x83 << 4)
+#define TBR_TT_TRAP120 (0xf8 << 4)
+#define TBR_TT_TRAP121 (0xf9 << 4)
+#define TBR_TT_TRAP122 (0xfa << 4)
+#define TBR_TT_TRAP123 (0xfb << 4)
+#define TBR_TT_TRAP124 (0xfc << 4)
+#define TBR_TT_TRAP125 (0xfd << 4)
#define TBR_TT_TRAP126 (0xfe << 4)
#define TBR_TT_BREAK (0xff << 4)
+#define TBR_TT_ATOMIC_CMPXCHG32 TBR_TT_TRAP120
+#define TBR_TT_ATOMIC_XCHG32 TBR_TT_TRAP121
+#define TBR_TT_ATOMIC_XOR TBR_TT_TRAP122
+#define TBR_TT_ATOMIC_OR TBR_TT_TRAP123
+#define TBR_TT_ATOMIC_AND TBR_TT_TRAP124
+#define TBR_TT_ATOMIC_SUB TBR_TT_TRAP125
+#define TBR_TT_ATOMIC_ADD TBR_TT_TRAP126
+
#define __get_TBR() ({ unsigned long x; asm volatile("movsg tbr,%0" : "=r"(x)); x; })
/*
diff --git a/include/asm-frv/system.h b/include/asm-frv/system.h
index 2c57f47..30a67a9 100644
--- a/include/asm-frv/system.h
+++ b/include/asm-frv/system.h
@@ -179,14 +179,23 @@ do { \
#define mb() asm volatile ("membar" : : :"memory")
#define rmb() asm volatile ("membar" : : :"memory")
#define wmb() asm volatile ("membar" : : :"memory")
-#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define read_barrier_depends() barrier()
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-
-#define read_barrier_depends() do {} while(0)
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
+#define set_mb(var, value) \
+ do { xchg(&var, (value)); } while (0)
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() do {} while(0)
+#define set_mb(var, value) \
+ do { var = (value); barrier(); } while (0)
+#endif
#define HARD_RESET_NOW() \
do { \
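
A minimal producer/consumer sketch (illustrative names, not from this patch) of how the smp_wmb()/smp_rmb() pair defined above is meant to be paired; on !CONFIG_SMP builds both collapse to the compiler-only barrier():

static int payload;
static int ready;

static void producer(void)
{
	payload = 42;
	smp_wmb();		/* order the payload store before the flag store */
	ready = 1;
}

static void consumer(void)
{
	while (!ready)
		cpu_relax();	/* cpu_relax() also acts as a compiler barrier */
	smp_rmb();		/* order the flag load before the payload load */
	BUG_ON(payload != 42);
}
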
diff --git a/include/asm-mips/cacheflush.h b/include/asm-mips/cacheflush.h
index 01e7ead..d5c0f2f 100644
--- a/include/asm-mips/cacheflush.h
+++ b/include/asm-mips/cacheflush.h
@@ -63,8 +63,22 @@ static inline void flush_icache_page(struct vm_area_struct *vma,
}
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
-#define flush_cache_vmap(start, end) flush_cache_all()
-#define flush_cache_vunmap(start, end) flush_cache_all()
+
+extern void (*__flush_cache_vmap)(void);
+
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ if (cpu_has_dc_aliases)
+ __flush_cache_vmap();
+}
+
+extern void (*__flush_cache_vunmap)(void);
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+ if (cpu_has_dc_aliases)
+ __flush_cache_vunmap();
+}
extern void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
diff --git a/include/asm-mips/mach-pb1x00/pb1200.h b/include/asm-mips/mach-pb1x00/pb1200.h
index ed5fd73..72213e3 100644
--- a/include/asm-mips/mach-pb1x00/pb1200.h
+++ b/include/asm-mips/mach-pb1x00/pb1200.h
@@ -245,7 +245,7 @@ enum external_pb1200_ints {
PB1200_SD1_INSERT_INT,
PB1200_SD1_EJECT_INT,
- PB1200_INT_END (PB1200_INT_BEGIN + 15)
+ PB1200_INT_END = PB1200_INT_BEGIN + 15
};
/* For drivers/pcmcia/au1000_db1x00.c */
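
For reference, the reason the added '=' is needed: an enumerator can only be given an explicit value with '=', so the old spelling was a syntax error. A stand-alone illustration with made-up names:

enum example_ints {
	EX_INT_BEGIN = 0x40,
	EX_INT_FIRST,			/* implicitly 0x41 */
	EX_INT_END = EX_INT_BEGIN + 15	/* '=' is required here */
	/* "EX_INT_END (EX_INT_BEGIN + 15)" would not compile */
};
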
diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h
index 92021c1..0e22924 100644
--- a/include/asm-x86/irqflags.h
+++ b/include/asm-x86/irqflags.h
@@ -70,6 +70,26 @@ static inline void raw_local_irq_restore(unsigned long flags)
native_restore_fl(flags);
}
+#ifdef CONFIG_X86_VSMP
+
+/*
+ * Interrupt control for the VSMP architecture:
+ */
+
+static inline void raw_local_irq_disable(void)
+{
+ unsigned long flags = __raw_local_save_flags();
+ raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
+}
+
+static inline void raw_local_irq_enable(void)
+{
+ unsigned long flags = __raw_local_save_flags();
+ raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
+}
+
+#else
+
static inline void raw_local_irq_disable(void)
{
native_irq_disable();
@@ -80,6 +100,8 @@ static inline void raw_local_irq_enable(void)
native_irq_enable();
}
+#endif
+
/*
* Used in the idle loop; sti takes one instruction cycle
* to complete:
@@ -137,10 +159,17 @@ static inline unsigned long __raw_local_irq_save(void)
#define raw_local_irq_save(flags) \
do { (flags) = __raw_local_irq_save(); } while (0)
+#ifdef CONFIG_X86_VSMP
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
+}
+#else
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
return !(flags & X86_EFLAGS_IF);
}
+#endif
static inline int raw_irqs_disabled(void)
{
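
A hedged sketch (the caller below is assumed, not part of the patch) of how the flags predicate is used; under CONFIG_X86_VSMP it reports "disabled" when either IF is clear or AC is set, rather than looking at IF alone:

static int example_irqs_off(void)
{
	unsigned long flags = __raw_local_save_flags();

	/* native: IF clear; VSMP: IF clear or AC set */
	return raw_irqs_disabled_flags(flags);
}
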
diff --git a/include/asm-x86/linkage.h b/include/asm-x86/linkage.h
index 31739c7..c048353 100644
--- a/include/asm-x86/linkage.h
+++ b/include/asm-x86/linkage.h
@@ -8,12 +8,45 @@
#ifdef CONFIG_X86_32
#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
-#define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret))
/*
* For 32-bit UML - mark functions implemented in assembly that use
* regparm input parameters:
*/
#define asmregparm __attribute__((regparm(3)))
+
+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ *
+ * NOTE! On x86-64, all the arguments are in registers, so this
+ * only matters on a 32-bit kernel.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+ __asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+ __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+ __asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+ __asmlinkage_protect_n(ret, "g" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+ __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+ __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+ __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
+ "g" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+ __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
+ "g" (arg4), "g" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+ __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
+ "g" (arg4), "g" (arg5), "g" (arg6))
+
#endif
#ifdef CONFIG_X86_ALIGNMENT_16
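
Concretely, the two-argument form above expands to an empty asm that ties the return value and both arguments together:

/* asmlinkage_protect(2, ret, fd, arg) becomes, after expansion: */
__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), "g" (fd), "g" (arg));
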
diff --git a/include/asm-x86/nops.h b/include/asm-x86/nops.h
index e3b2bce..b3930ae 100644
--- a/include/asm-x86/nops.h
+++ b/include/asm-x86/nops.h
@@ -73,16 +73,7 @@
#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
-#if defined(CONFIG_MK8)
-#define ASM_NOP1 K8_NOP1
-#define ASM_NOP2 K8_NOP2
-#define ASM_NOP3 K8_NOP3
-#define ASM_NOP4 K8_NOP4
-#define ASM_NOP5 K8_NOP5
-#define ASM_NOP6 K8_NOP6
-#define ASM_NOP7 K8_NOP7
-#define ASM_NOP8 K8_NOP8
-#elif defined(CONFIG_MK7)
+#if defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
@@ -100,6 +91,15 @@
#define ASM_NOP6 P6_NOP6
#define ASM_NOP7 P6_NOP7
#define ASM_NOP8 P6_NOP8
+#elif defined(CONFIG_X86_64)
+#define ASM_NOP1 K8_NOP1
+#define ASM_NOP2 K8_NOP2
+#define ASM_NOP3 K8_NOP3
+#define ASM_NOP4 K8_NOP4
+#define ASM_NOP5 K8_NOP5
+#define ASM_NOP6 K8_NOP6
+#define ASM_NOP7 K8_NOP7
+#define ASM_NOP8 K8_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 9cdd12a..cedbbd8 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -86,6 +86,7 @@ header-y += if_plip.h
header-y += if_ppp.h
header-y += if_slip.h
header-y += if_strip.h
+header-y += if_tun.h
header-y += if_tunnel.h
header-y += in6.h
header-y += in_route.h
@@ -229,7 +230,6 @@ unifdef-y += if_link.h
unifdef-y += if_pppol2tp.h
unifdef-y += if_pppox.h
unifdef-y += if_tr.h
-unifdef-y += if_tun.h
unifdef-y += if_vlan.h
unifdef-y += if_wanpipe.h
unifdef-y += igmp.h
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 028ba3b..a6a6035 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -256,6 +256,7 @@ struct cgroup_subsys {
void (*bind)(struct cgroup_subsys *ss, struct cgroup *root);
int subsys_id;
int active;
+ int disabled;
int early_init;
#define MAX_CGROUP_TYPE_NAMELEN 32
const char *name;
diff --git a/include/linux/hpet.h b/include/linux/hpet.h
index 9cd94bf..2dc29ce 100644
--- a/include/linux/hpet.h
+++ b/include/linux/hpet.h
@@ -64,7 +64,7 @@ struct hpet {
*/
#define Tn_INT_ROUTE_CAP_MASK (0xffffffff00000000ULL)
-#define Tn_INI_ROUTE_CAP_SHIFT (32UL)
+#define Tn_INT_ROUTE_CAP_SHIFT (32UL)
#define Tn_FSB_INT_DELCAP_MASK (0x8000UL)
#define Tn_FSB_INT_DELCAP_SHIFT (15)
#define Tn_FSB_EN_CNF_MASK (0x4000UL)
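
A hedged sketch of how the mask/shift pair is meant to be used; the 64-bit value "cfg" is assumed to have been read from a timer's configuration/capability register, which this patch does not show:

u64 cfg = /* readq() of the Tn configuration register */ 0;
u32 route_caps = (cfg & Tn_INT_ROUTE_CAP_MASK) >> Tn_INT_ROUTE_CAP_SHIFT;
/* bit n of route_caps set => the timer's interrupt can be routed to input n */
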
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 72f1c5f..8c71fe2 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -18,47 +18,8 @@
#ifndef __IF_TUN_H
#define __IF_TUN_H
-/* Uncomment to enable debugging */
-/* #define TUN_DEBUG 1 */
-
#include <linux/types.h>
-#ifdef __KERNEL__
-
-#ifdef TUN_DEBUG
-#define DBG if(tun->debug)printk
-#define DBG1 if(debug==2)printk
-#else
-#define DBG( a... )
-#define DBG1( a... )
-#endif
-
-struct tun_struct {
- struct list_head list;
- unsigned long flags;
- int attached;
- uid_t owner;
- gid_t group;
-
- wait_queue_head_t read_wait;
- struct sk_buff_head readq;
-
- struct net_device *dev;
-
- struct fasync_struct *fasync;
-
- unsigned long if_flags;
- u8 dev_addr[ETH_ALEN];
- u32 chr_filter[2];
- u32 net_filter[2];
-
-#ifdef TUN_DEBUG
- int debug;
-#endif
-};
-
-#endif /* __KERNEL__ */
-
/* Read queue size */
#define TUN_READQ_SIZE 500
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 4aaefc3..134c8e5 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -53,7 +53,7 @@ struct ipv6_opt_hdr {
/*
* TLV encoded option data follows.
*/
-};
+} __attribute__ ((packed)); /* required for some archs */
#define ipv6_destopt_hdr ipv6_opt_hdr
#define ipv6_hopopt_hdr ipv6_opt_hdr
diff --git a/include/linux/libata.h b/include/linux/libata.h
index b064bfe..37ee881 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -350,7 +350,8 @@ enum {
ATAPI_READ = 0, /* READs */
ATAPI_WRITE = 1, /* WRITEs */
ATAPI_READ_CD = 2, /* READ CD [MSF] */
- ATAPI_MISC = 3, /* the rest */
+ ATAPI_PASS_THRU = 3, /* SAT pass-thru */
+ ATAPI_MISC = 4, /* the rest */
};
enum ata_xfer_mask {
@@ -849,6 +850,7 @@ extern unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
*/
extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
+extern int atapi_cmd_type(u8 opcode);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
u8 pmp, int is_cmd, u8 *fis);
extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
@@ -1379,27 +1381,6 @@ static inline int ata_try_flush_cache(const struct ata_device *dev)
ata_id_has_flush_ext(dev->id);
}
-static inline int atapi_cmd_type(u8 opcode)
-{
- switch (opcode) {
- case GPCMD_READ_10:
- case GPCMD_READ_12:
- return ATAPI_READ;
-
- case GPCMD_WRITE_10:
- case GPCMD_WRITE_12:
- case GPCMD_WRITE_AND_VERIFY_10:
- return ATAPI_WRITE;
-
- case GPCMD_READ_CD:
- case GPCMD_READ_CD_MSF:
- return ATAPI_READ_CD;
-
- default:
- return ATAPI_MISC;
- }
-}
-
static inline unsigned int ac_err_mask(u8 status)
{
if (status & (ATA_BUSY | ATA_DRQ))
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 0592936..2119610 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -17,8 +17,24 @@
# define asmregparm
#endif
-#ifndef prevent_tail_call
-# define prevent_tail_call(ret) do { } while (0)
+/*
+ * This is used by architectures to keep arguments on the stack
+ * untouched by the compiler by keeping them live until the end.
+ * The argument stack may be owned by the assembly-language
+ * caller, not the callee, and gcc doesn't always understand
+ * that.
+ *
+ * We have the return value, and a maximum of six arguments.
+ *
+ * This should always be followed by a "return ret" for the
+ * protection to work (ie no more work that the compiler might
+ * end up needing stack temporaries for).
+ */
+/* Assembly files may be compiled with -traditional .. */
+#ifndef __ASSEMBLY__
+#ifndef asmlinkage_protect
+# define asmlinkage_protect(n, ret, args...) do { } while (0)
+#endif
#endif
#ifndef __ALIGN
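
A usage sketch consistent with the rule above (sys_example() and do_example() are invented names): compute the result, protect the incoming arguments, and return immediately:

asmlinkage long sys_example(unsigned int fd, unsigned long arg)
{
	long ret = do_example(fd, arg);

	/* keep fd/arg "live" so the caller-owned stack slots stay intact */
	asmlinkage_protect(2, ret, fd, arg);
	return ret;
}
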
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 29dd558..b2f05c2 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -175,7 +175,7 @@ static inline void pnp_set_card_drvdata(struct pnp_card_link *pcard, void *data)
struct pnp_dev {
struct device dev; /* Driver Model device interface */
u64 dma_mask;
- unsigned char number; /* used as an index, must be unique */
+ unsigned int number; /* used as an index, must be unique */
int status;
struct list_head global_list; /* node in global list of devices */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 576a5f7..1129ee0 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -341,6 +341,9 @@ static inline void double_spin_unlock(spinlock_t *l1, spinlock_t *l2,
* atomic_dec_and_lock - lock on reaching reference count zero
* @atomic: the atomic counter
* @lock: the spinlock in question
+ *
+ * Decrements @atomic by 1. If the result is 0, returns true and locks
+ * @lock. Returns false for all other cases.
*/
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
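
The documented behaviour corresponds to the usual refcount-release pattern, roughly as follows (obj, obj_list_lock and the list handling are illustrative):

if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
	/* the count hit zero and the lock is now held */
	list_del(&obj->node);
	spin_unlock(&obj_list_lock);
	kfree(obj);
}
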
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 12c18ac..e7d1084 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -41,6 +41,8 @@ struct virtqueue
* Returns NULL or the "data" token handed to add_buf.
* @disable_cb: disable callbacks
* vq: the struct virtqueue we're talking about.
+ * Note that this is not necessarily synchronous, hence unreliable and only
+ * useful as an optimization.
* @enable_cb: restart callbacks after disable_cb.
* vq: the struct virtqueue we're talking about.
* This re-enables callbacks; it returns "false" if there are pending
@@ -48,7 +50,8 @@ struct virtqueue
* checking for more work, and enabling callbacks.
*
* Locking rules are straightforward: the driver is responsible for
- * locking. No two operations may be invoked simultaneously.
+ * locking. No two operations may be invoked simultaneously, with the exception
+ * of @disable_cb.
*
* All operations can be called in any context.
*/
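
A driver-side sketch of why @disable_cb being only a hint matters (names are illustrative, and the vq_ops indirection of this era is assumed): the callback defers work, and the worker must re-check after enable_cb() to close the race with late buffers:

static void my_recv_cb(struct virtqueue *vq)
{
	vq->vq_ops->disable_cb(vq);	/* hint only: callbacks may still arrive */
	schedule_work(&my_rx_work);
}

static void my_rx_work_fn(struct work_struct *work)
{
	unsigned int len;
	void *buf;

	while ((buf = my_vq->vq_ops->get_buf(my_vq, &len)) != NULL)
		consume(buf, len);

	/* enable_cb() returns false if buffers arrived while re-enabling */
	if (!my_vq->vq_ops->enable_cb(my_vq))
		schedule_work(&my_rx_work);
}
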
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index ba33db0..7040a78 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -47,7 +47,7 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
} while (0)
#define IP6_ECN_flow_xmit(sk, label) do { \
- if (INET_ECN_is_capable(inet_sk(sk)->tos)) \
+ if (INET_ECN_is_capable(inet6_sk(sk)->tclass)) \
(label) |= htonl(INET_ECN_ECT_0 << 20); \
} while (0)
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 10ae2da..35b1e83 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -104,6 +104,7 @@ typedef enum {
SCTP_CMD_ADAPTATION_IND, /* generate and send adaptation event */
SCTP_CMD_ASSOC_SHKEY, /* generate the association shared keys */
SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
+ SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
SCTP_CMD_LAST
} sctp_verb_t;
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 9bcfc12..7ea12e8 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -67,7 +67,7 @@ struct sctp_ulpevent {
};
/* Retrieve the skb this event sits inside of. */
-static inline struct sk_buff *sctp_event2skb(struct sctp_ulpevent *ev)
+static inline struct sk_buff *sctp_event2skb(const struct sctp_ulpevent *ev)
{
return container_of((void *)ev, struct sk_buff, cb);
}
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7de4ea3..4fd3eb2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -752,6 +752,8 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
+extern int tcp_limit_reno_sacked(struct tcp_sock *tp);
+
/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
* The exception is rate halving phase, when cwnd is decreasing towards
* ssthresh.
OpenPOWER on IntegriCloud