 arch/ia64/ia32/sys_ia32.c | 28
 arch/ia64/kernel/perfmon.c | 11
 arch/ia64/kernel/uncached.c | 1
 arch/ia64/sn/include/xtalk/hubdev.h | 9
 arch/ia64/sn/kernel/io_init.c | 54
 arch/ia64/sn/kernel/mca.c | 7
 arch/ia64/sn/kernel/xp_main.c | 17
 arch/ia64/sn/kernel/xpc_channel.c | 34
 arch/ia64/sn/kernel/xpc_main.c | 17
 arch/ia64/sn/pci/pcibr/pcibr_provider.c | 12
 arch/powerpc/platforms/powermac/feature.c | 6
 arch/ppc/syslib/mv64x60.c | 4
 arch/sparc/kernel/entry.S | 58
 arch/sparc/kernel/rtrap.S | 9
 arch/sparc/kernel/signal.c | 117
 arch/sparc/kernel/sparc_ksyms.c | 2
 arch/sparc/kernel/systbls.S | 10
 arch/sparc/math-emu/math.c | 10
 arch/sparc64/kernel/entry.S | 25
 arch/sparc64/kernel/rtrap.S | 33
 arch/sparc64/kernel/signal.c | 151
 arch/sparc64/kernel/signal32.c | 122
 arch/sparc64/kernel/sparc64_ksyms.c | 4
 arch/sparc64/kernel/sys32.S | 1
 arch/sparc64/kernel/systbls.S | 23
 arch/sparc64/solaris/entry64.S | 2
 block/elevator.c | 45
 block/ll_rw_blk.c | 7
 drivers/net/Kconfig | 28
 drivers/net/acenic.c | 4
 drivers/net/b44.c | 5
 drivers/net/bonding/bond_main.c | 2
 drivers/net/mv643xx_eth.c | 108
 drivers/net/s2io.c | 2
 drivers/net/sungem.c | 55
 drivers/net/wireless/hostap/Kconfig | 2
 drivers/net/wireless/ipw2100.c | 49
 drivers/net/wireless/ipw2200.c | 7
 drivers/net/wireless/orinoco_cs.c | 4
 drivers/pci/msi.c | 6
 drivers/scsi/libata-scsi.c | 27
 drivers/serial/sn_console.c | 129
 fs/bio.c | 1
 fs/compat.c | 4
 include/asm-ia64/semaphore.h | 8
 include/asm-ia64/sn/xp.h | 3
 include/asm-ia64/sn/xpc.h | 9
 include/asm-ia64/topology.h | 4
 include/asm-sparc/oplib.h | 2
 include/asm-sparc/thread_info.h | 5
 include/asm-sparc/unistd.h | 22
 include/asm-sparc64/spinlock.h | 24
 include/asm-sparc64/thread_info.h | 6
 include/asm-sparc64/unistd.h | 23
 include/linux/blkdev.h | 6
 include/linux/netfilter/x_tables.h | 16
 include/net/ieee80211.h | 4
 include/net/sctp/sctp.h | 2
 include/net/sctp/structs.h | 89
 include/scsi/scsi_host.h | 1
 kernel/rcutorture.c | 10
 kernel/sched.c | 6
 kernel/time.c | 2
 kernel/user.c | 25
 net/core/dev.c | 5
 net/core/filter.c | 6
 net/core/skbuff.c | 8
 net/ieee80211/ieee80211_rx.c | 26
 net/ieee80211/ieee80211_wx.c | 12
 net/ipv4/igmp.c | 1
 net/ipv4/tcp_htcp.c | 1
 net/ipv6/mcast.c | 56
 net/key/af_key.c | 2
 net/packet/af_packet.c | 16
 net/sctp/input.c | 75
 net/sctp/inqueue.c | 4
 net/sctp/proc.c | 32
 net/sctp/sm_make_chunk.c | 16
 net/sctp/sm_sideeffect.c | 4
 net/sctp/sm_statefuns.c | 10
 net/sctp/socket.c | 8
 net/sctp/sysctl.c | 7
 net/sctp/transport.c | 2
 security/seclvl.c | 4
 84 files changed, 917 insertions(+), 897 deletions(-)
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 3945d37..70dba1f 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -52,9 +52,9 @@
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/mman.h>
+#include <linux/mutex.h>
#include <asm/intrinsics.h>
-#include <asm/semaphore.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -86,7 +86,7 @@
* while doing so.
*/
/* XXX make per-mm: */
-static DECLARE_MUTEX(ia32_mmap_sem);
+static DEFINE_MUTEX(ia32_mmap_mutex);
asmlinkage long
sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
@@ -895,11 +895,11 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot
prot = get_prot32(prot);
#if PAGE_SHIFT > IA32_PAGE_SHIFT
- down(&ia32_mmap_sem);
+ mutex_lock(&ia32_mmap_mutex);
{
addr = emulate_mmap(file, addr, len, prot, flags, offset);
}
- up(&ia32_mmap_sem);
+ mutex_unlock(&ia32_mmap_mutex);
#else
down_write(&current->mm->mmap_sem);
{
@@ -1000,11 +1000,9 @@ sys32_munmap (unsigned int start, unsigned int len)
if (start >= end)
return 0;
- down(&ia32_mmap_sem);
- {
- ret = sys_munmap(start, end - start);
- }
- up(&ia32_mmap_sem);
+ mutex_lock(&ia32_mmap_mutex);
+ ret = sys_munmap(start, end - start);
+ mutex_unlock(&ia32_mmap_mutex);
#endif
return ret;
}
@@ -1056,7 +1054,7 @@ sys32_mprotect (unsigned int start, unsigned int len, int prot)
if (retval < 0)
return retval;
- down(&ia32_mmap_sem);
+ mutex_lock(&ia32_mmap_mutex);
{
if (offset_in_page(start)) {
/* start address is 4KB aligned but not page aligned. */
@@ -1080,7 +1078,7 @@ sys32_mprotect (unsigned int start, unsigned int len, int prot)
retval = sys_mprotect(start, end - start, prot);
}
out:
- up(&ia32_mmap_sem);
+ mutex_unlock(&ia32_mmap_mutex);
return retval;
#endif
}
@@ -1124,11 +1122,9 @@ sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
old_len = PAGE_ALIGN(old_end) - addr;
new_len = PAGE_ALIGN(new_end) - addr;
- down(&ia32_mmap_sem);
- {
- ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
- }
- up(&ia32_mmap_sem);
+ mutex_lock(&ia32_mmap_mutex);
+ ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
+ mutex_unlock(&ia32_mmap_mutex);
if ((ret >= 0) && (old_len < new_len)) {
/* mremap expanded successfully */
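The sys_ia32.c hunks above show the semaphore-to-mutex conversion pattern used repeatedly in this merge: a DECLARE_MUTEX() semaphore that only ever provided mutual exclusion becomes a DEFINE_MUTEX(), and its down()/up() pairs become mutex_lock()/mutex_unlock(). A minimal sketch of the resulting idiom (the function and lock names here are illustrative, not taken from the patch):

#include <linux/mutex.h>

/* Illustrative sketch only -- mirrors the ia32_mmap_mutex usage above. */
static DEFINE_MUTEX(example_mutex);

static long guarded_operation(void)
{
	long ret;

	mutex_lock(&example_mutex);	/* was: down(&example_sem) */
	ret = 0;			/* ... critical section ... */
	mutex_unlock(&example_mutex);	/* was: up(&example_sem) */

	return ret;
}

Unlike the counting semaphore it replaces, a struct mutex must be released by the task that acquired it, which is exactly how ia32_mmap_sem was already being used.
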
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 2ea4b39..9c5194b 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -40,6 +40,7 @@
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
+#include <linux/completion.h>
#include <asm/errno.h>
#include <asm/intrinsics.h>
@@ -286,7 +287,7 @@ typedef struct pfm_context {
unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */
- struct semaphore ctx_restart_sem; /* use for blocking notification mode */
+ struct completion ctx_restart_done; /* use for blocking notification mode */
unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */
@@ -1991,7 +1992,7 @@ pfm_close(struct inode *inode, struct file *filp)
/*
* force task to wake up from MASKED state
*/
- up(&ctx->ctx_restart_sem);
+ complete(&ctx->ctx_restart_done);
DPRINT(("waking up ctx_state=%d\n", state));
@@ -2706,7 +2707,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
/*
* init restart semaphore to locked
*/
- sema_init(&ctx->ctx_restart_sem, 0);
+ init_completion(&ctx->ctx_restart_done);
/*
* activation is used in SMP only
@@ -3687,7 +3688,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
*/
if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
DPRINT(("unblocking [%d] \n", task->pid));
- up(&ctx->ctx_restart_sem);
+ complete(&ctx->ctx_restart_done);
} else {
DPRINT(("[%d] armed exit trap\n", task->pid));
@@ -5089,7 +5090,7 @@ pfm_handle_work(void)
* may go through without blocking on SMP systems
* if restart has been received already by the time we call down()
*/
- ret = down_interruptible(&ctx->ctx_restart_sem);
+ ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
DPRINT(("after block sleeping ret=%d\n", ret));
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index b631cf8..fcd2bad 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -210,6 +210,7 @@ uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);
+ touch_softlockup_watchdog();
memset((char *)start, 0, length);
node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET);
diff --git a/arch/ia64/sn/include/xtalk/hubdev.h b/arch/ia64/sn/include/xtalk/hubdev.h
index 7c88e9a..8182583 100644
--- a/arch/ia64/sn/include/xtalk/hubdev.h
+++ b/arch/ia64/sn/include/xtalk/hubdev.h
@@ -51,6 +51,15 @@ struct sn_flush_device_kernel {
struct sn_flush_device_common *common;
};
+/* 01/16/06 This struct is the old PROM/kernel struct and needs to be included
+ * for older official PROMs to function on the new kernel base. This struct
+ * will be removed when the next official PROM release occurs. */
+
+struct sn_flush_device_war {
+ struct sn_flush_device_common common;
+ u32 filler; /* older PROMs expect the default size of a spinlock_t */
+};
+
/*
* **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel.
*/
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 233d551..00700f7 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -165,8 +165,45 @@ sn_pcidev_info_get(struct pci_dev *dev)
return NULL;
}
+/* Older PROM flush WAR
+ *
+ * 01/16/06 -- This war will be in place until a new official PROM is released.
+ * Additionally note that the struct sn_flush_device_war also has to be
+ * removed from arch/ia64/sn/include/xtalk/hubdev.h
+ */
+static u8 war_implemented = 0;
+
+static void sn_device_fixup_war(u64 nasid, u64 widget, int device,
+ struct sn_flush_device_common *common)
+{
+ struct sn_flush_device_war *war_list;
+ struct sn_flush_device_war *dev_entry;
+ struct ia64_sal_retval isrv = {0,0,0,0};
+
+ if (!war_implemented) {
+ printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
+ "PROM flush WAR\n");
+ war_implemented = 1;
+ }
+
+ war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
+ if (!war_list)
+ BUG();
+
+ SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
+ nasid, widget, __pa(war_list), 0, 0, 0 ,0);
+ if (isrv.status)
+ panic("sn_device_fixup_war failed: %s\n",
+ ia64_sal_strerror(isrv.status));
+
+ dev_entry = war_list + device;
+ memcpy(common,dev_entry, sizeof(*common));
+
+ kfree(war_list);
+}
+
/*
- * sn_fixup_ionodes() - This routine initializes the HUB data structure for
+ * sn_fixup_ionodes() - This routine initializes the HUB data structure for
* each node in the system.
*/
static void sn_fixup_ionodes(void)
@@ -246,8 +283,19 @@ static void sn_fixup_ionodes(void)
widget,
device,
(u64)(dev_entry->common));
- if (status)
- BUG();
+ if (status) {
+ if (sn_sal_rev() < 0x0450) {
+ /* shortlived WAR for older
+ * PROM images
+ */
+ sn_device_fixup_war(nasid,
+ widget,
+ device,
+ dev_entry->common);
+ }
+ else
+ BUG();
+ }
spin_lock_init(&dev_entry->sfdl_flush_lock);
}
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c
index 6546db6..9ab684d 100644
--- a/arch/ia64/sn/kernel/mca.c
+++ b/arch/ia64/sn/kernel/mca.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
+#include <linux/mutex.h>
#include <asm/mca.h>
#include <asm/sal.h>
#include <asm/sn/sn_sal.h>
@@ -27,7 +28,7 @@ void sn_init_cpei_timer(void);
/* Printing oemdata from mca uses data that is not passed through SAL, it is
* global. Only one user at a time.
*/
-static DECLARE_MUTEX(sn_oemdata_mutex);
+static DEFINE_MUTEX(sn_oemdata_mutex);
static u8 **sn_oemdata;
static u64 *sn_oemdata_size, sn_oemdata_bufsize;
@@ -89,7 +90,7 @@ static int
sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
u64 * oemdata_size)
{
- down(&sn_oemdata_mutex);
+ mutex_lock(&sn_oemdata_mutex);
sn_oemdata = oemdata;
sn_oemdata_size = oemdata_size;
sn_oemdata_bufsize = 0;
@@ -107,7 +108,7 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
*sn_oemdata_size = 0;
ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
}
- up(&sn_oemdata_mutex);
+ mutex_unlock(&sn_oemdata_mutex);
return 0;
}
diff --git a/arch/ia64/sn/kernel/xp_main.c b/arch/ia64/sn/kernel/xp_main.c
index 3be52a3..b7ea466 100644
--- a/arch/ia64/sn/kernel/xp_main.c
+++ b/arch/ia64/sn/kernel/xp_main.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/xp.h>
@@ -136,13 +137,13 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
registration = &xpc_registrations[ch_number];
- if (down_interruptible(&registration->sema) != 0) {
+ if (mutex_lock_interruptible(&registration->mutex) != 0) {
return xpcInterrupted;
}
/* if XPC_CHANNEL_REGISTERED(ch_number) */
if (registration->func != NULL) {
- up(&registration->sema);
+ mutex_unlock(&registration->mutex);
return xpcAlreadyRegistered;
}
@@ -154,7 +155,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
registration->key = key;
registration->func = func;
- up(&registration->sema);
+ mutex_unlock(&registration->mutex);
xpc_interface.connect(ch_number);
@@ -190,11 +191,11 @@ xpc_disconnect(int ch_number)
* figured XPC's users will just turn around and call xpc_disconnect()
* again anyways, so we might as well wait, if need be.
*/
- down(&registration->sema);
+ mutex_lock(&registration->mutex);
/* if !XPC_CHANNEL_REGISTERED(ch_number) */
if (registration->func == NULL) {
- up(&registration->sema);
+ mutex_unlock(&registration->mutex);
return;
}
@@ -208,7 +209,7 @@ xpc_disconnect(int ch_number)
xpc_interface.disconnect(ch_number);
- up(&registration->sema);
+ mutex_unlock(&registration->mutex);
return;
}
@@ -250,9 +251,9 @@ xp_init(void)
xp_nofault_PIOR_target = SH1_IPI_ACCESS;
}
- /* initialize the connection registration semaphores */
+ /* initialize the connection registration mutex */
for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
- sema_init(&xpc_registrations[ch_number].sema, 1); /* mutex */
+ mutex_init(&xpc_registrations[ch_number].mutex);
}
return 0;
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 0c0a689..8d950c7 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -22,6 +22,8 @@
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
#include <asm/sn/bte.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/xpc.h>
@@ -56,8 +58,8 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
atomic_set(&ch->n_to_notify, 0);
spin_lock_init(&ch->lock);
- sema_init(&ch->msg_to_pull_sema, 1); /* mutex */
- sema_init(&ch->wdisconnect_sema, 0); /* event wait */
+ mutex_init(&ch->msg_to_pull_mutex);
+ init_completion(&ch->wdisconnect_wait);
atomic_set(&ch->n_on_msg_allocate_wq, 0);
init_waitqueue_head(&ch->msg_allocate_wq);
@@ -534,7 +536,6 @@ static enum xpc_retval
xpc_allocate_msgqueues(struct xpc_channel *ch)
{
unsigned long irq_flags;
- int i;
enum xpc_retval ret;
@@ -552,11 +553,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
return ret;
}
- for (i = 0; i < ch->local_nentries; i++) {
- /* use a semaphore as an event wait queue */
- sema_init(&ch->notify_queue[i].sema, 0);
- }
-
spin_lock_irqsave(&ch->lock, irq_flags);
ch->flags |= XPC_C_SETUP;
spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -799,10 +795,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
}
if (ch->flags & XPC_C_WDISCONNECT) {
- spin_unlock_irqrestore(&ch->lock, *irq_flags);
- up(&ch->wdisconnect_sema);
- spin_lock_irqsave(&ch->lock, *irq_flags);
-
+ /* we won't lose the CPU since we're holding ch->lock */
+ complete(&ch->wdisconnect_wait);
} else if (ch->delayed_IPI_flags) {
if (part->act_state != XPC_P_DEACTIVATING) {
/* time to take action on any delayed IPI flags */
@@ -1092,12 +1086,12 @@ xpc_connect_channel(struct xpc_channel *ch)
struct xpc_registration *registration = &xpc_registrations[ch->number];
- if (down_trylock(&registration->sema) != 0) {
+ if (mutex_trylock(&registration->mutex) == 0) {
return xpcRetry;
}
if (!XPC_CHANNEL_REGISTERED(ch->number)) {
- up(&registration->sema);
+ mutex_unlock(&registration->mutex);
return xpcUnregistered;
}
@@ -1108,7 +1102,7 @@ xpc_connect_channel(struct xpc_channel *ch)
if (ch->flags & XPC_C_DISCONNECTING) {
spin_unlock_irqrestore(&ch->lock, irq_flags);
- up(&registration->sema);
+ mutex_unlock(&registration->mutex);
return ch->reason;
}
@@ -1140,7 +1134,7 @@ xpc_connect_channel(struct xpc_channel *ch)
* channel lock be locked and will unlock and relock
* the channel lock as needed.
*/
- up(&registration->sema);
+ mutex_unlock(&registration->mutex);
XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
&irq_flags);
spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1155,7 +1149,7 @@ xpc_connect_channel(struct xpc_channel *ch)
atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
}
- up(&registration->sema);
+ mutex_unlock(&registration->mutex);
/* initiate the connection */
@@ -2089,7 +2083,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
enum xpc_retval ret;
- if (down_interruptible(&ch->msg_to_pull_sema) != 0) {
+ if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
/* we were interrupted by a signal */
return NULL;
}
@@ -2125,7 +2119,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
XPC_DEACTIVATE_PARTITION(part, ret);
- up(&ch->msg_to_pull_sema);
+ mutex_unlock(&ch->msg_to_pull_mutex);
return NULL;
}
@@ -2134,7 +2128,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
ch->next_msg_to_pull += nmsgs;
}
- up(&ch->msg_to_pull_sema);
+ mutex_unlock(&ch->msg_to_pull_mutex);
/* return the message we were looking for */
msg_offset = (get % ch->remote_nentries) * ch->msg_size;
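One detail worth noting in the xpc_channel.c conversion: the trylock test is inverted. down_trylock() returns 0 when it acquires the semaphore and non-zero when it does not, while mutex_trylock() returns 1 on success and 0 if the mutex is already held, so `down_trylock(&sema) != 0` becomes `mutex_trylock(&mutex) == 0` in xpc_connect_channel(). A minimal sketch with illustrative names:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);

static int try_work(void)
{
	if (!mutex_trylock(&example_mutex))	/* 0 here means "already locked" */
		return -EAGAIN;			/* was: if (down_trylock(&sema) != 0) */

	/* ... critical section ... */

	mutex_unlock(&example_mutex);
	return 0;
}
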
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 8930586e..c75f8ae 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -55,6 +55,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/reboot.h>
+#include <linux/completion.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include <asm/kdebug.h>
@@ -177,10 +178,10 @@ static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
static unsigned long xpc_hb_check_timeout;
/* notification that the xpc_hb_checker thread has exited */
-static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited);
+static DECLARE_COMPLETION(xpc_hb_checker_exited);
/* notification that the xpc_discovery thread has exited */
-static DECLARE_MUTEX_LOCKED(xpc_discovery_exited);
+static DECLARE_COMPLETION(xpc_discovery_exited);
static struct timer_list xpc_hb_timer;
@@ -321,7 +322,7 @@ xpc_hb_checker(void *ignore)
/* mark this thread as having exited */
- up(&xpc_hb_checker_exited);
+ complete(&xpc_hb_checker_exited);
return 0;
}
@@ -341,7 +342,7 @@ xpc_initiate_discovery(void *ignore)
dev_dbg(xpc_part, "discovery thread is exiting\n");
/* mark this thread as having exited */
- up(&xpc_discovery_exited);
+ complete(&xpc_discovery_exited);
return 0;
}
@@ -893,7 +894,7 @@ xpc_disconnect_wait(int ch_number)
continue;
}
- (void) down(&ch->wdisconnect_sema);
+ wait_for_completion(&ch->wdisconnect_wait);
spin_lock_irqsave(&ch->lock, irq_flags);
DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
@@ -946,10 +947,10 @@ xpc_do_exit(enum xpc_retval reason)
free_irq(SGI_XPC_ACTIVATE, NULL);
/* wait for the discovery thread to exit */
- down(&xpc_discovery_exited);
+ wait_for_completion(&xpc_discovery_exited);
/* wait for the heartbeat checker thread to exit */
- down(&xpc_hb_checker_exited);
+ wait_for_completion(&xpc_hb_checker_exited);
/* sleep for a 1/3 of a second or so */
@@ -1367,7 +1368,7 @@ xpc_init(void)
dev_err(xpc_part, "failed while forking discovery thread\n");
/* mark this new thread as a non-starter */
- up(&xpc_discovery_exited);
+ complete(&xpc_discovery_exited);
xpc_do_exit(xpcUnloading);
return -EBUSY;
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 77a1262..2fac270 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -24,13 +24,15 @@ sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
{
struct ia64_sal_retval ret_stuff;
u64 busnum;
+ u64 segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
+ segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
- SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, (u64) busnum,
- (u64) device, (u64) resp, 0, 0, 0, 0);
+ SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
+ busnum, (u64) device, (u64) resp, 0, 0, 0);
return (int)ret_stuff.v0;
}
@@ -41,14 +43,16 @@ sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
{
struct ia64_sal_retval ret_stuff;
u64 busnum;
+ u64 segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
+ segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE,
- (u64) busnum, (u64) device, (u64) action,
- (u64) resp, 0, 0, 0);
+ segment, busnum, (u64) device, (u64) action,
+ (u64) resp, 0, 0);
return (int)ret_stuff.v0;
}
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 558dd06..2296f3d 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -910,16 +910,18 @@ core99_gmac_phy_reset(struct device_node *node, long param, long value)
macio->type != macio_intrepid)
return -ENODEV;
+ printk(KERN_DEBUG "Hard reset of PHY chip ...\n");
+
LOCK(flags);
MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, KEYLARGO_GPIO_OUTPUT_ENABLE);
(void)MACIO_IN8(KL_GPIO_ETH_PHY_RESET);
UNLOCK(flags);
- mdelay(10);
+ msleep(10);
LOCK(flags);
MACIO_OUT8(KL_GPIO_ETH_PHY_RESET, /*KEYLARGO_GPIO_OUTPUT_ENABLE | */
KEYLARGO_GPIO_OUTOUT_DATA);
UNLOCK(flags);
- mdelay(10);
+ msleep(10);
return 0;
}
diff --git a/arch/ppc/syslib/mv64x60.c b/arch/ppc/syslib/mv64x60.c
index 94ea346..1f01b7e 100644
--- a/arch/ppc/syslib/mv64x60.c
+++ b/arch/ppc/syslib/mv64x60.c
@@ -313,7 +313,7 @@ static struct platform_device mpsc1_device = {
};
#endif
-#ifdef CONFIG_MV643XX_ETH
+#if defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE)
static struct resource mv64x60_eth_shared_resources[] = {
[0] = {
.name = "ethernet shared base",
@@ -456,7 +456,7 @@ static struct platform_device *mv64x60_pd_devs[] __initdata = {
&mpsc0_device,
&mpsc1_device,
#endif
-#ifdef CONFIG_MV643XX_ETH
+#if defined(CONFIG_MV643XX_ETH) || defined(CONFIG_MV643XX_ETH_MODULE)
&mv64x60_eth_shared_device,
#endif
#ifdef CONFIG_MV643XX_ETH_0
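The mv64x60.c change widens the preprocessor guard so the shared Ethernet platform device is also registered when the mv643xx_eth driver is built as a module: a tristate Kconfig symbol set to `m` defines CONFIG_<SYMBOL>_MODULE instead of CONFIG_<SYMBOL>, so a bare #ifdef CONFIG_MV643XX_ETH misses the modular build. A generic sketch of the pattern (CONFIG_FOO is a placeholder symbol):

/* =y defines CONFIG_FOO; =m defines CONFIG_FOO_MODULE; testing both covers either build. */
#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
	/* code needed whether the consumer is built in or loaded as a module */
#endif
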
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 03ecb4e..267ec8f 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -38,7 +38,7 @@
#define curptr g6
-#define NR_SYSCALLS 284 /* Each OS is different... */
+#define NR_SYSCALLS 299 /* Each OS is different... */
/* These are just handy. */
#define _SV save %sp, -STACKFRAME_SZ, %sp
@@ -1277,62 +1277,6 @@ sys_sigstack:
mov %l5, %o7
.align 4
- .globl sys_sigpause
-sys_sigpause:
- /* Note: %o0 already has correct value... */
- call do_sigpause
- add %sp, STACKFRAME_SZ, %o1
-
- ld [%curptr + TI_FLAGS], %l5
- andcc %l5, _TIF_SYSCALL_TRACE, %g0
- be 1f
- nop
-
- call syscall_trace
- nop
-
-1:
- /* We are returning to a signal handler. */
- RESTORE_ALL
-
- .align 4
- .globl sys_sigsuspend
-sys_sigsuspend:
- call do_sigsuspend
- add %sp, STACKFRAME_SZ, %o0
-
- ld [%curptr + TI_FLAGS], %l5
- andcc %l5, _TIF_SYSCALL_TRACE, %g0
- be 1f
- nop
-
- call syscall_trace
- nop
-
-1:
- /* We are returning to a signal handler. */
- RESTORE_ALL
-
- .align 4
- .globl sys_rt_sigsuspend
-sys_rt_sigsuspend:
- /* Note: %o0, %o1 already have correct value... */
- call do_rt_sigsuspend
- add %sp, STACKFRAME_SZ, %o2
-
- ld [%curptr + TI_FLAGS], %l5
- andcc %l5, _TIF_SYSCALL_TRACE, %g0
- be 1f
- nop
-
- call syscall_trace
- nop
-
-1:
- /* We are returning to a signal handler. */
- RESTORE_ALL
-
- .align 4
.globl sys_sigreturn
sys_sigreturn:
call do_sigreturn
diff --git a/arch/sparc/kernel/rtrap.S b/arch/sparc/kernel/rtrap.S
index f7460d8..77ca6fd 100644
--- a/arch/sparc/kernel/rtrap.S
+++ b/arch/sparc/kernel/rtrap.S
@@ -68,15 +68,14 @@ ret_trap_lockless_ipi:
ld [%curptr + TI_FLAGS], %g2
signal_p:
- andcc %g2, (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING), %g0
+ andcc %g2, (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %g0
bz,a ret_trap_continue
ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
- clr %o0
- mov %l5, %o2
- mov %l6, %o3
+ mov %l5, %o1
+ mov %l6, %o2
call do_signal
- add %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
+ add %sp, STACKFRAME_SZ, %o0 ! pt_regs ptr
/* Fall through. */
ld [%sp + STACKFRAME_SZ + PT_PSR], %t_psr
diff --git a/arch/sparc/kernel/signal.c b/arch/sparc/kernel/signal.c
index 5f34d7d..0748d81 100644
--- a/arch/sparc/kernel/signal.c
+++ b/arch/sparc/kernel/signal.c
@@ -35,9 +35,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
void *fpqueue, unsigned long *fpqdepth);
extern void fpload(unsigned long *fpregs, unsigned long *fsr);
-asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
- unsigned long orig_o0, int restart_syscall);
-
/* Signal frames: the original one (compatible with SunOS):
*
* Set up a signal frame... Make the stack look the way SunOS
@@ -95,98 +92,30 @@ struct rt_signal_frame {
#define NF_ALIGNEDSZ (((sizeof(struct new_signal_frame) + 7) & (~7)))
#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
-/*
- * atomically swap in the new signal mask, and wait for a signal.
- * This is really tricky on the Sparc, watch out...
- */
-asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs)
+static int _sigpause_common(old_sigset_t set)
{
- sigset_t saveset;
-
set &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
+ current->saved_sigmask = current->blocked;
siginitset(&current->blocked, set);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- regs->pc = regs->npc;
- regs->npc += 4;
-
- /* Condition codes and return value where set here for sigpause,
- * and so got used by setup_frame, which again causes sigreturn()
- * to return -EINTR.
- */
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- /*
- * Return -EINTR and set condition code here,
- * so the interrupted system call actually returns
- * these.
- */
- regs->psr |= PSR_C;
- regs->u_regs[UREG_I0] = EINTR;
- if (do_signal(&saveset, regs, 0, 0))
- return;
- }
-}
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ set_thread_flag(TIF_RESTORE_SIGMASK);
-asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs)
-{
- _sigpause_common(set, regs);
+ return -ERESTARTNOHAND;
}
-asmlinkage void do_sigsuspend (struct pt_regs *regs)
+asmlinkage int sys_sigpause(unsigned int set)
{
- _sigpause_common(regs->u_regs[UREG_I0], regs);
+ return _sigpause_common(set);
}
-asmlinkage void do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize,
- struct pt_regs *regs)
+asmlinkage int sys_sigsuspend(old_sigset_t set)
{
- sigset_t oldset, set;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t)) {
- regs->psr |= PSR_C;
- regs->u_regs[UREG_I0] = EINVAL;
- return;
- }
-
- if (copy_from_user(&set, uset, sizeof(set))) {
- regs->psr |= PSR_C;
- regs->u_regs[UREG_I0] = EFAULT;
- return;
- }
-
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- oldset = current->blocked;
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- regs->pc = regs->npc;
- regs->npc += 4;
-
- /* Condition codes and return value where set here for sigpause,
- * and so got used by setup_frame, which again causes sigreturn()
- * to return -EINTR.
- */
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- /*
- * Return -EINTR and set condition code here,
- * so the interrupted system call actually returns
- * these.
- */
- regs->psr |= PSR_C;
- regs->u_regs[UREG_I0] = EINTR;
- if (do_signal(&oldset, regs, 0, 0))
- return;
- }
+ return _sigpause_common(set);
}
static inline int
@@ -1067,13 +996,13 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
- unsigned long orig_i0, int restart_syscall)
+asmlinkage void do_signal(struct pt_regs * regs, unsigned long orig_i0, int restart_syscall)
{
siginfo_t info;
struct sparc_deliver_cookie cookie;
struct k_sigaction ka;
int signr;
+ sigset_t *oldset;
/*
* XXX Disable svr4 signal handling until solaris emulation works.
@@ -1089,7 +1018,9 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
cookie.restart_syscall = restart_syscall;
cookie.orig_i0 = orig_i0;
- if (!oldset)
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+ else
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, &cookie);
@@ -1098,7 +1029,14 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
syscall_restart(cookie.orig_i0, regs, &ka.sa);
handle_signal(signr, &ka, &info, oldset,
regs, svr4_signal);
- return 1;
+ /* a signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag.
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ return;
}
if (cookie.restart_syscall &&
(regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
@@ -1115,7 +1053,14 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
regs->pc -= 4;
regs->npc -= 4;
}
- return 0;
+
+ /* if there's no signal to deliver, we just put the saved sigmask
+ * back
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
}
asmlinkage int
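The signal.c rewrite above replaces the hand-rolled sigsuspend loop (sleep in TASK_INTERRUPTIBLE, force an -EINTR return, and call do_signal() with the saved mask) with the generic TIF_RESTORE_SIGMASK scheme: the old mask is parked in current->saved_sigmask, the syscall returns -ERESTARTNOHAND, and do_signal() either lets the signal frame carry the saved mask (and just clears the flag) or restores it with sigprocmask() when no signal is delivered. Condensed from _sigpause_common() above:

/* Condensed restatement of _sigpause_common() above; not new code. */
static int sigsuspend_sketch(old_sigset_t set)
{
	set &= _BLOCKABLE;

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;	/* remember the caller's mask */
	siginitset(&current->blocked, set);		/* install the temporary mask */
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();					/* sleep until a signal arrives */

	set_thread_flag(TIF_RESTORE_SIGMASK);		/* tell do_signal() to restore it */
	return -ERESTARTNOHAND;				/* only a handled signal ends the call */
}
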
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 0b0d492..19b2539 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -66,7 +66,6 @@ struct poll {
extern int svr4_getcontext (svr4_ucontext_t *, struct pt_regs *);
extern int svr4_setcontext (svr4_ucontext_t *, struct pt_regs *);
-void _sigpause_common (unsigned int set, struct pt_regs *);
extern void (*__copy_1page)(void *, const void *);
extern void __memmove(void *, const void *, __kernel_size_t);
extern void (*bzero_1page)(void *);
@@ -227,7 +226,6 @@ EXPORT_SYMBOL(kunmap_atomic);
/* Solaris/SunOS binary compatibility */
EXPORT_SYMBOL(svr4_setcontext);
EXPORT_SYMBOL(svr4_getcontext);
-EXPORT_SYMBOL(_sigpause_common);
EXPORT_SYMBOL(dump_thread);
diff --git a/arch/sparc/kernel/systbls.S b/arch/sparc/kernel/systbls.S
index e457a40..6877ae4 100644
--- a/arch/sparc/kernel/systbls.S
+++ b/arch/sparc/kernel/systbls.S
@@ -75,7 +75,10 @@ sys_call_table:
/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
/*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
/*275*/ .long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
-/*280*/ .long sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl
+/*280*/ .long sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl, sys_openat
+/*285*/ .long sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_newfstatat
+/*290*/ .long sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
+/*295*/ .long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll
#ifdef CONFIG_SUNOS_EMUL
/* Now the SunOS syscall table. */
@@ -181,6 +184,11 @@ sunos_sys_table:
.long sunos_nosys, sunos_nosys, sunos_nosys
.long sunos_nosys
/*280*/ .long sunos_nosys, sunos_nosys, sunos_nosys
+ .long sunos_nosys, sunos_nosys, sunos_nosys
+ .long sunos_nosys, sunos_nosys, sunos_nosys
.long sunos_nosys
+/*290*/ .long sunos_nosys, sunos_nosys, sunos_nosys
+ .long sunos_nosys, sunos_nosys, sunos_nosys
+ .long sunos_nosys, sunos_nosys, sunos_nosys
#endif
diff --git a/arch/sparc/math-emu/math.c b/arch/sparc/math-emu/math.c
index be2c809..8613b3e 100644
--- a/arch/sparc/math-emu/math.c
+++ b/arch/sparc/math-emu/math.c
@@ -323,11 +323,6 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
case FMOVS:
case FABSS:
case FNEGS: TYPE(2,1,0,1,0,0,0); break;
- default:
-#ifdef DEBUG_MATHEMU
- printk("unknown FPop1: %03lx\n",(insn>>5)&0x1ff);
-#endif
- break;
}
} else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ {
switch ((insn >> 5) & 0x1ff) {
@@ -337,11 +332,6 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
case FCMPED: TYPE(3,0,0,2,1,2,1); break;
case FCMPQ: TYPE(3,0,0,3,1,3,1); break;
case FCMPEQ: TYPE(3,0,0,3,1,3,1); break;
- default:
-#ifdef DEBUG_MATHEMU
- printk("unknown FPop2: %03lx\n",(insn>>5)&0x1ff);
-#endif
- break;
}
}
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 7100029..12911e7 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -25,7 +25,7 @@
#define curptr g6
-#define NR_SYSCALLS 284 /* Each OS is different... */
+#define NR_SYSCALLS 299 /* Each OS is different... */
.text
.align 32
@@ -1416,7 +1416,6 @@ execve_merge:
add %sp, PTREGS_OFF, %o0
.globl sys_pipe, sys_sigpause, sys_nis_syscall
- .globl sys_sigsuspend, sys_rt_sigsuspend
.globl sys_rt_sigreturn
.globl sys_ptrace
.globl sys_sigaltstack
@@ -1440,28 +1439,6 @@ sys32_sigaltstack:
mov %i6, %o2
#endif
.align 32
-sys_sigsuspend: add %sp, PTREGS_OFF, %o0
- call do_sigsuspend
- add %o7, 1f-.-4, %o7
- nop
-sys_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
- add %sp, PTREGS_OFF, %o2
- call do_rt_sigsuspend
- add %o7, 1f-.-4, %o7
- nop
-#ifdef CONFIG_COMPAT
- .globl sys32_rt_sigsuspend
-sys32_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */
- srl %o0, 0, %o0
- add %sp, PTREGS_OFF, %o2
- call do_rt_sigsuspend32
- add %o7, 1f-.-4, %o7
-#endif
- /* NOTE: %o0 has a correct value already */
-sys_sigpause: add %sp, PTREGS_OFF, %o1
- call do_sigpause
- add %o7, 1f-.-4, %o7
- nop
#ifdef CONFIG_COMPAT
.globl sys32_sigreturn
sys32_sigreturn:
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 090dcca..b80eba0 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -53,14 +53,13 @@ __handle_user_windows:
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
ldx [%g6 + TI_FLAGS], %l0
-1: andcc %l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
+1: andcc %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
be,pt %xcc, __handle_user_windows_continue
nop
- clr %o0
- mov %l5, %o2
- mov %l6, %o3
- add %sp, PTREGS_OFF, %o1
- mov %l0, %o4
+ mov %l5, %o1
+ mov %l6, %o2
+ add %sp, PTREGS_OFF, %o0
+ mov %l0, %o3
call do_notify_resume
wrpr %g0, RTRAP_PSTATE, %pstate
@@ -96,15 +95,14 @@ __handle_perfctrs:
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
ldx [%g6 + TI_FLAGS], %l0
-1: andcc %l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
+1: andcc %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
be,pt %xcc, __handle_perfctrs_continue
sethi %hi(TSTATE_PEF), %o0
- clr %o0
- mov %l5, %o2
- mov %l6, %o3
- add %sp, PTREGS_OFF, %o1
- mov %l0, %o4
+ mov %l5, %o1
+ mov %l6, %o2
+ add %sp, PTREGS_OFF, %o0
+ mov %l0, %o3
call do_notify_resume
wrpr %g0, RTRAP_PSTATE, %pstate
@@ -129,11 +127,10 @@ __handle_userfpu:
ba,a,pt %xcc, __handle_userfpu_continue
__handle_signal:
- clr %o0
- mov %l5, %o2
- mov %l6, %o3
- add %sp, PTREGS_OFF, %o1
- mov %l0, %o4
+ mov %l5, %o1
+ mov %l6, %o2
+ add %sp, PTREGS_OFF, %o0
+ mov %l0, %o3
call do_notify_resume
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
@@ -200,7 +197,7 @@ __handle_preemption_continue:
andcc %l1, %o0, %g0
andcc %l0, _TIF_NEED_RESCHED, %g0
bne,pn %xcc, __handle_preemption
- andcc %l0, (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING), %g0
+ andcc %l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
bne,pn %xcc, __handle_signal
__handle_signal_continue:
ldub [%g6 + TI_WSAVED], %o2
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index 60f5dfa..ca11a4c 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -36,9 +36,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-static int do_signal(sigset_t *oldset, struct pt_regs * regs,
- unsigned long orig_o0, int ret_from_syscall);
-
/* {set, get}context() needed for 64-bit SparcLinux userland. */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
@@ -242,114 +239,29 @@ struct rt_signal_frame {
/* Align macros */
#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
-/*
- * atomically swap in the new signal mask, and wait for a signal.
- * This is really tricky on the Sparc, watch out...
- */
-asmlinkage void _sigpause_common(old_sigset_t set, struct pt_regs *regs)
+static long _sigpause_common(old_sigset_t set)
{
- sigset_t saveset;
-
-#ifdef CONFIG_SPARC32_COMPAT
- if (test_thread_flag(TIF_32BIT)) {
- extern asmlinkage void _sigpause32_common(compat_old_sigset_t,
- struct pt_regs *);
- _sigpause32_common(set, regs);
- return;
- }
-#endif
set &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
+ current->saved_sigmask = current->blocked;
siginitset(&current->blocked, set);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
-
- if (test_thread_flag(TIF_32BIT)) {
- regs->tpc = (regs->tnpc & 0xffffffff);
- regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
- } else {
- regs->tpc = regs->tnpc;
- regs->tnpc += 4;
- }
- /* Condition codes and return value where set here for sigpause,
- * and so got used by setup_frame, which again causes sigreturn()
- * to return -EINTR.
- */
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- /*
- * Return -EINTR and set condition code here,
- * so the interrupted system call actually returns
- * these.
- */
- regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
- regs->u_regs[UREG_I0] = EINTR;
- if (do_signal(&saveset, regs, 0, 0))
- return;
- }
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ set_thread_flag(TIF_RESTORE_SIGMASK);
+ return -ERESTARTNOHAND;
}
-asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs)
+asmlinkage long sys_sigpause(unsigned int set)
{
- _sigpause_common(set, regs);
+ return _sigpause_common(set);
}
-asmlinkage void do_sigsuspend(struct pt_regs *regs)
+asmlinkage long sys_sigsuspend(old_sigset_t set)
{
- _sigpause_common(regs->u_regs[UREG_I0], regs);
-}
-
-asmlinkage void do_rt_sigsuspend(sigset_t __user *uset, size_t sigsetsize, struct pt_regs *regs)
-{
- sigset_t oldset, set;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t)) {
- regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
- regs->u_regs[UREG_I0] = EINVAL;
- return;
- }
- if (copy_from_user(&set, uset, sizeof(set))) {
- regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
- regs->u_regs[UREG_I0] = EFAULT;
- return;
- }
-
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- oldset = current->blocked;
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- if (test_thread_flag(TIF_32BIT)) {
- regs->tpc = (regs->tnpc & 0xffffffff);
- regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
- } else {
- regs->tpc = regs->tnpc;
- regs->tnpc += 4;
- }
-
- /* Condition codes and return value where set here for sigpause,
- * and so got used by setup_frame, which again causes sigreturn()
- * to return -EINTR.
- */
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- /*
- * Return -EINTR and set condition code here,
- * so the interrupted system call actually returns
- * these.
- */
- regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
- regs->u_regs[UREG_I0] = EINTR;
- if (do_signal(&oldset, regs, 0, 0))
- return;
- }
+ return _sigpause_common(set);
}
static inline int
@@ -607,26 +519,29 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-static int do_signal(sigset_t *oldset, struct pt_regs * regs,
- unsigned long orig_i0, int restart_syscall)
+static void do_signal(struct pt_regs *regs, unsigned long orig_i0, int restart_syscall)
{
siginfo_t info;
struct signal_deliver_cookie cookie;
struct k_sigaction ka;
int signr;
+ sigset_t *oldset;
cookie.restart_syscall = restart_syscall;
cookie.orig_i0 = orig_i0;
- if (!oldset)
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+ else
oldset = &current->blocked;
#ifdef CONFIG_SPARC32_COMPAT
if (test_thread_flag(TIF_32BIT)) {
- extern int do_signal32(sigset_t *, struct pt_regs *,
- unsigned long, int);
- return do_signal32(oldset, regs, orig_i0,
- cookie.restart_syscall);
+ extern void do_signal32(sigset_t *, struct pt_regs *,
+ unsigned long, int);
+ do_signal32(oldset, regs, orig_i0,
+ cookie.restart_syscall);
+ return;
}
#endif
@@ -635,7 +550,15 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs,
if (cookie.restart_syscall)
syscall_restart(orig_i0, regs, &ka.sa);
handle_signal(signr, &ka, &info, oldset, regs);
- return 1;
+
+ /* a signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag.
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ return;
}
if (cookie.restart_syscall &&
(regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
@@ -652,15 +575,21 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs,
regs->tpc -= 4;
regs->tnpc -= 4;
}
- return 0;
+
+ /* if there's no signal to deliver, we just put the saved sigmask
+ * back
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
}
-void do_notify_resume(sigset_t *oldset, struct pt_regs *regs,
- unsigned long orig_i0, int restart_syscall,
+void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, int restart_syscall,
unsigned long thread_info_flags)
{
- if (thread_info_flags & _TIF_SIGPENDING)
- do_signal(oldset, regs, orig_i0, restart_syscall);
+ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+ do_signal(regs, orig_i0, restart_syscall);
}
void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index 009a86e..708ba9b 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -32,9 +32,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-int do_signal32(sigset_t *oldset, struct pt_regs *regs,
- unsigned long orig_o0, int ret_from_syscall);
-
/* Signal frames: the original one (compatible with SunOS):
*
* Set up a signal frame... Make the stack look the way SunOS
@@ -226,102 +223,6 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
return 0;
}
-/*
- * atomically swap in the new signal mask, and wait for a signal.
- * This is really tricky on the Sparc, watch out...
- */
-asmlinkage void _sigpause32_common(compat_old_sigset_t set, struct pt_regs *regs)
-{
- sigset_t saveset;
-
- set &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
- siginitset(&current->blocked, set);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- regs->tpc = regs->tnpc;
- regs->tnpc += 4;
- if (test_thread_flag(TIF_32BIT)) {
- regs->tpc &= 0xffffffff;
- regs->tnpc &= 0xffffffff;
- }
-
- /* Condition codes and return value where set here for sigpause,
- * and so got used by setup_frame, which again causes sigreturn()
- * to return -EINTR.
- */
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- /*
- * Return -EINTR and set condition code here,
- * so the interrupted system call actually returns
- * these.
- */
- regs->tstate |= TSTATE_ICARRY;
- regs->u_regs[UREG_I0] = EINTR;
- if (do_signal32(&saveset, regs, 0, 0))
- return;
- }
-}
-
-asmlinkage void do_rt_sigsuspend32(u32 uset, size_t sigsetsize, struct pt_regs *regs)
-{
- sigset_t oldset, set;
- compat_sigset_t set32;
-
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (((compat_size_t)sigsetsize) != sizeof(sigset_t)) {
- regs->tstate |= TSTATE_ICARRY;
- regs->u_regs[UREG_I0] = EINVAL;
- return;
- }
- if (copy_from_user(&set32, compat_ptr(uset), sizeof(set32))) {
- regs->tstate |= TSTATE_ICARRY;
- regs->u_regs[UREG_I0] = EFAULT;
- return;
- }
- switch (_NSIG_WORDS) {
- case 4: set.sig[3] = set32.sig[6] + (((long)set32.sig[7]) << 32);
- case 3: set.sig[2] = set32.sig[4] + (((long)set32.sig[5]) << 32);
- case 2: set.sig[1] = set32.sig[2] + (((long)set32.sig[3]) << 32);
- case 1: set.sig[0] = set32.sig[0] + (((long)set32.sig[1]) << 32);
- }
- sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- oldset = current->blocked;
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
-
- regs->tpc = regs->tnpc;
- regs->tnpc += 4;
- if (test_thread_flag(TIF_32BIT)) {
- regs->tpc &= 0xffffffff;
- regs->tnpc &= 0xffffffff;
- }
-
- /* Condition codes and return value where set here for sigpause,
- * and so got used by setup_frame, which again causes sigreturn()
- * to return -EINTR.
- */
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- /*
- * Return -EINTR and set condition code here,
- * so the interrupted system call actually returns
- * these.
- */
- regs->tstate |= TSTATE_ICARRY;
- regs->u_regs[UREG_I0] = EINTR;
- if (do_signal32(&oldset, regs, 0, 0))
- return;
- }
-}
-
static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
unsigned long *fpregs = current_thread_info()->fpregs;
@@ -1362,8 +1263,8 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-int do_signal32(sigset_t *oldset, struct pt_regs * regs,
- unsigned long orig_i0, int restart_syscall)
+void do_signal32(sigset_t *oldset, struct pt_regs * regs,
+ unsigned long orig_i0, int restart_syscall)
{
siginfo_t info;
struct signal_deliver_cookie cookie;
@@ -1380,7 +1281,15 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs,
syscall_restart32(orig_i0, regs, &ka.sa);
handle_signal32(signr, &ka, &info, oldset,
regs, svr4_signal);
- return 1;
+
+ /* a signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag.
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ return;
}
if (cookie.restart_syscall &&
(regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
@@ -1397,7 +1306,14 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs,
regs->tpc -= 4;
regs->tnpc -= 4;
}
- return 0;
+
+ /* if there's no signal to deliver, we just put the saved sigmask
+ * back
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
}
struct sigstack32 {
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index d177d7e..3c06bfb 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -69,7 +69,6 @@ struct poll {
extern void die_if_kernel(char *str, struct pt_regs *regs);
extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-void _sigpause_common (unsigned int set, struct pt_regs *);
extern void *__bzero(void *, size_t);
extern void *__memscan_zero(void *, size_t);
extern void *__memscan_generic(void *, int, size_t);
@@ -236,9 +235,10 @@ EXPORT_SYMBOL(pci_dma_supported);
/* I/O device mmaping on Sparc64. */
EXPORT_SYMBOL(io_remap_pfn_range);
+#ifdef CONFIG_COMPAT
/* Solaris/SunOS binary compatibility */
-EXPORT_SYMBOL(_sigpause_common);
EXPORT_SYMBOL(verify_compat_iovec);
+#endif
EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(pte_alloc_one_kernel);
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S
index 9cd272a..60b5937 100644
--- a/arch/sparc64/kernel/sys32.S
+++ b/arch/sparc64/kernel/sys32.S
@@ -84,7 +84,6 @@ SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5)
SIGN2(sys32_bdflush, sys_bdflush, %o0, %o1)
SIGN1(sys32_mlockall, sys_mlockall, %o0)
SIGN1(sys32_nfsservctl, compat_sys_nfsservctl, %o0)
-SIGN1(sys32_clock_settime, compat_sys_clock_settime, %o1)
SIGN1(sys32_clock_nanosleep, compat_sys_clock_nanosleep, %o1)
SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index 98d24bc..2881faf 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -41,7 +41,7 @@ sys_call_table32:
/*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, sys32_select, sys_setfsgid
.word sys_fsync, sys32_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
/*100*/ .word sys32_getpriority, sys32_rt_sigreturn, sys32_rt_sigaction, sys32_rt_sigprocmask, sys32_rt_sigpending
- .word compat_sys_rt_sigtimedwait, sys32_rt_sigqueueinfo, sys32_rt_sigsuspend, sys_setresuid, sys_getresuid
+ .word compat_sys_rt_sigtimedwait, sys32_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid
/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
.word sys32_getgroups, sys32_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd
/*120*/ .word compat_sys_readv, compat_sys_writev, sys32_settimeofday, sys32_fchown16, sys_fchmod
@@ -71,12 +71,15 @@ sys_call_table32:
/*240*/ .word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler
.word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
/*250*/ .word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
- .word sys_ni_syscall, sys32_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
+ .word sys_ni_syscall, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
.word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
/*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
.word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
-/*280*/ .word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl
+/*280*/ .word sys_ni_syscall, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat
+ .word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_newfstatat
+/*285*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
+ .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll
#endif /* CONFIG_COMPAT */
@@ -142,7 +145,10 @@ sys_call_table:
.word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
/*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
.word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
-/*280*/ .word sys_nis_syscall, sys_add_key, sys_request_key, sys_keyctl
+/*280*/ .word sys_nis_syscall, sys_add_key, sys_request_key, sys_keyctl, sys_openat
+ .word sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, compat_sys_newfstatat
+/*285*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
+ .word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll
#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
defined(CONFIG_SOLARIS_EMUL_MODULE)
@@ -239,13 +245,20 @@ sunos_sys_table:
/*250*/ .word sunos_nosys, sunos_nosys, sunos_nosys
.word sunos_nosys, sunos_nosys, sunos_nosys
.word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys
+/*260*/ .word sunos_nosys, sunos_nosys, sunos_nosys
.word sunos_nosys, sunos_nosys, sunos_nosys
.word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys
+/*270*/ .word sunos_nosys, sunos_nosys, sunos_nosys
.word sunos_nosys, sunos_nosys, sunos_nosys
.word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys
+/*280*/ .word sunos_nosys, sunos_nosys, sunos_nosys
.word sunos_nosys, sunos_nosys, sunos_nosys
.word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys
+/*290*/ .word sunos_nosys, sunos_nosys, sunos_nosys
.word sunos_nosys, sunos_nosys, sunos_nosys
.word sunos_nosys, sunos_nosys, sunos_nosys
- .word sunos_nosys
#endif
diff --git a/arch/sparc64/solaris/entry64.S b/arch/sparc64/solaris/entry64.S
index 4b6ae58..eb314ed 100644
--- a/arch/sparc64/solaris/entry64.S
+++ b/arch/sparc64/solaris/entry64.S
@@ -180,6 +180,8 @@ solaris_sigsuspend:
nop
call sys_sigsuspend
stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+ b,pt %xcc, ret_from_solaris
+ nop
.globl solaris_getpid
solaris_getpid:
diff --git a/block/elevator.c b/block/elevator.c
index c9f424d..96a61e0 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -139,35 +139,16 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e,
static char chosen_elevator[16];
-static void elevator_setup_default(void)
+static int __init elevator_setup(char *str)
{
- struct elevator_type *e;
-
- /*
- * If default has not been set, use the compiled-in selection.
- */
- if (!chosen_elevator[0])
- strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
-
/*
* Be backwards-compatible with previous kernels, so users
* won't get the wrong elevator.
*/
- if (!strcmp(chosen_elevator, "as"))
+ if (!strcmp(str, "as"))
strcpy(chosen_elevator, "anticipatory");
-
- /*
- * If the given scheduler is not available, fall back to the default
- */
- if ((e = elevator_find(chosen_elevator)))
- elevator_put(e);
else
- strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
-}
-
-static int __init elevator_setup(char *str)
-{
- strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
+ strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
return 0;
}
@@ -184,14 +165,16 @@ int elevator_init(request_queue_t *q, char *name)
q->end_sector = 0;
q->boundary_rq = NULL;
- elevator_setup_default();
+ if (name && !(e = elevator_get(name)))
+ return -EINVAL;
- if (!name)
- name = chosen_elevator;
+ if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
+ printk("I/O scheduler %s not found\n", chosen_elevator);
- e = elevator_get(name);
- if (!e)
- return -EINVAL;
+ if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
+ printk("Default I/O scheduler not found, using no-op\n");
+ e = elevator_get("noop");
+ }
eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
if (!eq) {
@@ -669,8 +652,10 @@ int elv_register(struct elevator_type *e)
spin_unlock_irq(&elv_list_lock);
printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
- if (!strcmp(e->elevator_name, chosen_elevator))
- printk(" (default)");
+ if (!strcmp(e->elevator_name, chosen_elevator) ||
+ (!*chosen_elevator &&
+ !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
+ printk(" (default)");
printk("\n");
return 0;
}
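With this rework, elevator_init() resolves the scheduler in a fixed order: the name passed by the caller, then the boot-time `elevator=` string, then the compiled-in CONFIG_DEFAULT_IOSCHED, and finally noop as a last resort; elevator_setup() still maps the legacy name "as" to "anticipatory". A short sketch of how such a boot parameter is typically wired up (the __setup() registration is assumed to live elsewhere in elevator.c and is not part of the hunks shown):

#include <linux/init.h>
#include <linux/string.h>

static char chosen_elevator[16];

/* Parser from the hunk above; __setup() binds it to the "elevator=" parameter. */
static int __init elevator_setup(char *str)
{
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 0;
}

__setup("elevator=", elevator_setup);

Booting with elevator=cfq, for example, makes chosen_elevator win over CONFIG_DEFAULT_IOSCHED unless a driver passes an explicit name to elevator_init().
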
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 8e27d0a..d38b4afa 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -304,6 +304,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
* blk_queue_ordered - does this queue support ordered writes
* @q: the request queue
* @ordered: one of QUEUE_ORDERED_*
+ * @prepare_flush_fn: rq setup helper for cache flush ordered writes
*
* Description:
* For journalled file systems, doing ordered writes on a commit
@@ -332,6 +333,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
return -EINVAL;
}
+ q->ordered = ordered;
q->next_ordered = ordered;
q->prepare_flush_fn = prepare_flush_fn;
@@ -662,7 +664,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
* Enables a low level driver to set an upper limit on the size of
* received requests.
**/
-void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
+void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
{
if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -2632,6 +2634,7 @@ EXPORT_SYMBOL(blk_put_request);
/**
* blk_end_sync_rq - executes a completion event on a request
* @rq: request to complete
+ * @error: end io status of the request
*/
void blk_end_sync_rq(struct request *rq, int error)
{
@@ -3153,7 +3156,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
if (blk_fs_request(req) && req->rq_disk) {
const int rw = rq_data_dir(req);
- __disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
+ disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
}
total_bytes = bio_nbytes = 0;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 626508a..6a6a084 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2034,13 +2034,28 @@ config SKGE
It does not support the link failover and network management
features that "portable" vendor supplied sk98lin driver does.
+ This driver supports adapters based on the original Yukon chipset:
+ Marvell 88E8001, Belkin F5D5005, CNet GigaCard, DLink DGE-530T,
+ Linksys EG1032/EG1064, 3Com 3C940/3C940B, SysKonnect SK-9871/9872.
+
+ It does not support the newer Yukon2 chipset: a separate driver,
+ sky2, is provided for Yukon2-based adapters.
+
+ To compile this driver as a module, choose M here: the module
+ will be called skge. This is recommended.
config SKY2
tristate "SysKonnect Yukon2 support (EXPERIMENTAL)"
depends on PCI && EXPERIMENTAL
select CRC32
---help---
- This driver support the Marvell Yukon 2 Gigabit Ethernet adapter.
+ This driver supports Gigabit Ethernet adapters based on the
+ Marvell Yukon 2 chipset:
+ Marvell 88E8021/88E8022/88E8035/88E8036/88E8038/88E8050/88E8052/
+ 88E8053/88E8055/88E8061/88E8062, SysKonnect SK-9E21D/SK-9S21
+
+ This driver does not support the original Yukon chipset: a separate
+ driver, skge, is provided for Yukon-based adapters.
To compile this driver as a module, choose M here: the module
will be called sky2. This is recommended.
@@ -2050,8 +2065,15 @@ config SK98LIN
depends on PCI
---help---
Say Y here if you have a Marvell Yukon or SysKonnect SK-98xx/SK-95xx
- compliant Gigabit Ethernet Adapter. The following adapters are supported
- by this driver:
+ compliant Gigabit Ethernet Adapter.
+
+ This driver supports the original Yukon chipset. A cleaner driver is
+ also available (skge) which seems to work better than this one.
+
+ This driver does not support the newer Yukon2 chipset. A separate
+ driver, sky2, is provided to support Yukon2-based adapters.
+
+ The following adapters are supported by this driver:
- 3Com 3C940 Gigabit LOM Ethernet Adapter
- 3Com 3C941 Gigabit LOM Ethernet Adapter
- Allied Telesyn AT-2970LX Gigabit Ethernet Adapter
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index b8953de..b508812 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -1002,6 +1002,8 @@ static int __devinit ace_init(struct net_device *dev)
mac1 = 0;
for(i = 0; i < 4; i++) {
+ int tmp;
+
mac1 = mac1 << 8;
tmp = read_eeprom_byte(dev, 0x8c+i);
if (tmp < 0) {
@@ -1012,6 +1014,8 @@ static int __devinit ace_init(struct net_device *dev)
}
mac2 = 0;
for(i = 4; i < 8; i++) {
+ int tmp;
+
mac2 = mac2 << 8;
tmp = read_eeprom_byte(dev, 0x8c+i);
if (tmp < 0) {
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index df9d6e8..c3267e4 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1399,7 +1399,6 @@ static int b44_open(struct net_device *dev)
b44_init_rings(bp);
b44_init_hw(bp);
- netif_carrier_off(dev);
b44_check_phy(bp);
err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
@@ -1464,7 +1463,7 @@ static int b44_close(struct net_device *dev)
#endif
b44_halt(bp);
b44_free_rings(bp);
- netif_carrier_off(bp->dev);
+ netif_carrier_off(dev);
spin_unlock_irq(&bp->lock);
@@ -2000,6 +1999,8 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
dev->irq = pdev->irq;
SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
+ netif_carrier_off(dev);
+
err = b44_get_invariants(bp);
if (err) {
printk(KERN_ERR PFX "Problem fetching invariants of chip, "
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2582d98..4ff006c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -576,7 +576,7 @@ static int bond_update_speed_duplex(struct slave *slave)
slave->duplex = DUPLEX_FULL;
if (slave_dev->ethtool_ops) {
- u32 res;
+ int res;
if (!slave_dev->ethtool_ops->get_settings) {
return -1;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 40ae36b..7ef4b04 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -444,6 +444,7 @@ static int mv643xx_eth_receive_queue(struct net_device *dev)
netif_rx(skb);
#endif
}
+ dev->last_rx = jiffies;
}
return received_packets;
@@ -461,7 +462,7 @@ static int mv643xx_eth_receive_queue(struct net_device *dev)
*/
static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
- struct pt_regs *regs)
+ struct pt_regs *regs)
{
struct net_device *dev = (struct net_device *)dev_id;
struct mv643xx_private *mp = netdev_priv(dev);
@@ -1047,16 +1048,15 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
- unsigned int frag;
- skb_frag_t *fragp;
+ unsigned int frag;
+ skb_frag_t *fragp;
- for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
- fragp = &skb_shinfo(skb)->frags[frag];
- if (fragp->size <= 8 && fragp->page_offset & 0x7)
- return 1;
-
- }
- return 0;
+ for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
+ fragp = &skb_shinfo(skb)->frags[frag];
+ if (fragp->size <= 8 && fragp->page_offset & 0x7)
+ return 1;
+ }
+ return 0;
}
@@ -2137,26 +2137,26 @@ static void eth_port_set_multicast_list(struct net_device *dev)
*/
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
for (table_index = 0; table_index <= 0xFC; table_index += 4) {
- /* Set all entries in DA filter special multicast
- * table (Ex_dFSMT)
- * Set for ETH_Q0 for now
- * Bits
- * 0 Accept=1, Drop=0
- * 3-1 Queue ETH_Q0=0
- * 7-4 Reserved = 0;
- */
- mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
-
- /* Set all entries in DA filter other multicast
- * table (Ex_dFOMT)
- * Set for ETH_Q0 for now
- * Bits
- * 0 Accept=1, Drop=0
- * 3-1 Queue ETH_Q0=0
- * 7-4 Reserved = 0;
- */
- mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
- }
+ /* Set all entries in DA filter special multicast
+ * table (Ex_dFSMT)
+ * Set for ETH_Q0 for now
+ * Bits
+ * 0 Accept=1, Drop=0
+ * 3-1 Queue ETH_Q0=0
+ * 7-4 Reserved = 0;
+ */
+ mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+
+ /* Set all entries in DA filter other multicast
+ * table (Ex_dFOMT)
+ * Set for ETH_Q0 for now
+ * Bits
+ * 0 Accept=1, Drop=0
+ * 3-1 Queue ETH_Q0=0
+ * 7-4 Reserved = 0;
+ */
+ mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+ }
return;
}
@@ -2617,7 +2617,6 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
struct eth_tx_desc *current_descriptor;
struct eth_tx_desc *first_descriptor;
u32 command;
- unsigned long flags;
/* Do not process Tx ring in case of Tx ring resource error */
if (mp->tx_resource_err)
@@ -2634,8 +2633,6 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
return ETH_ERROR;
}
- spin_lock_irqsave(&mp->lock, flags);
-
mp->tx_ring_skbs++;
BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
@@ -2685,15 +2682,11 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
mp->tx_resource_err = 1;
mp->tx_curr_desc_q = tx_first_desc;
- spin_unlock_irqrestore(&mp->lock, flags);
-
return ETH_QUEUE_LAST_RESOURCE;
}
mp->tx_curr_desc_q = tx_next_desc;
- spin_unlock_irqrestore(&mp->lock, flags);
-
return ETH_OK;
}
#else
@@ -2704,14 +2697,11 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
int tx_desc_used;
struct eth_tx_desc *current_descriptor;
unsigned int command_status;
- unsigned long flags;
/* Do not process Tx ring in case of Tx ring resource error */
if (mp->tx_resource_err)
return ETH_QUEUE_FULL;
- spin_lock_irqsave(&mp->lock, flags);
-
mp->tx_ring_skbs++;
BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
@@ -2742,12 +2732,9 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
/* Check for ring index overlap in the Tx desc ring */
if (tx_desc_curr == tx_desc_used) {
mp->tx_resource_err = 1;
-
- spin_unlock_irqrestore(&mp->lock, flags);
return ETH_QUEUE_LAST_RESOURCE;
}
- spin_unlock_irqrestore(&mp->lock, flags);
return ETH_OK;
}
#endif
@@ -2898,8 +2885,10 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
p_pkt_info->return_info = mp->rx_skb[rx_curr_desc];
p_pkt_info->l4i_chk = p_rx_desc->buf_size;
- /* Clean the return info field to indicate that the packet has been */
- /* moved to the upper layers */
+ /*
+ * Clean the return info field to indicate that the
+ * packet has been moved to the upper layers
+ */
mp->rx_skb[rx_curr_desc] = NULL;
/* Update current index in data structure */
@@ -2980,7 +2969,7 @@ struct mv643xx_stats {
};
#define MV643XX_STAT(m) sizeof(((struct mv643xx_private *)0)->m), \
- offsetof(struct mv643xx_private, m)
+ offsetof(struct mv643xx_private, m)
static const struct mv643xx_stats mv643xx_gstrings_stats[] = {
{ "rx_packets", MV643XX_STAT(stats.rx_packets) },
@@ -3131,9 +3120,8 @@ mv643xx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return 0;
}
-static void
-mv643xx_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+static void mv643xx_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
{
strncpy(drvinfo->driver, mv643xx_driver_name, 32);
strncpy(drvinfo->version, mv643xx_driver_version, 32);
@@ -3142,39 +3130,37 @@ mv643xx_get_drvinfo(struct net_device *netdev,
drvinfo->n_stats = MV643XX_STATS_LEN;
}
-static int
-mv643xx_get_stats_count(struct net_device *netdev)
+static int mv643xx_get_stats_count(struct net_device *netdev)
{
return MV643XX_STATS_LEN;
}
-static void
-mv643xx_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, uint64_t *data)
+static void mv643xx_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
{
struct mv643xx_private *mp = netdev->priv;
int i;
eth_update_mib_counters(mp);
- for(i = 0; i < MV643XX_STATS_LEN; i++) {
+ for (i = 0; i < MV643XX_STATS_LEN; i++) {
char *p = (char *)mp+mv643xx_gstrings_stats[i].stat_offset;
- data[i] = (mv643xx_gstrings_stats[i].sizeof_stat ==
+ data[i] = (mv643xx_gstrings_stats[i].sizeof_stat ==
sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
}
}
-static void
-mv643xx_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
+static void mv643xx_get_strings(struct net_device *netdev, uint32_t stringset,
+ uint8_t *data)
{
int i;
switch(stringset) {
case ETH_SS_STATS:
for (i=0; i < MV643XX_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- mv643xx_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ memcpy(data + i * ETH_GSTRING_LEN,
+ mv643xx_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
}
break;
}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 89c4678..49b597c 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3586,7 +3586,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
txdp->Buffer_Pointer = (u64) pci_map_page
(sp->pdev, frag->page, frag->page_offset,
frag->size, PCI_DMA_TODEVICE);
- txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
+ txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
if (skb_shinfo(skb)->ufo_size)
txdp->Control_1 |= TXD_UFO_EN;
}
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 28ce47a..55f3b85 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1653,36 +1653,40 @@ static void gem_init_rings(struct gem *gp)
/* Init PHY interface and start link poll state machine */
static void gem_init_phy(struct gem *gp)
{
- u32 mifcfg;
+ u32 mif_cfg;
/* Revert MIF CFG setting done on stop_phy */
- mifcfg = readl(gp->regs + MIF_CFG);
- mifcfg &= ~MIF_CFG_BBMODE;
- writel(mifcfg, gp->regs + MIF_CFG);
+ mif_cfg = readl(gp->regs + MIF_CFG);
+ mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
+ mif_cfg |= MIF_CFG_MDI0;
+ writel(mif_cfg, gp->regs + MIF_CFG);
+ writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
+ writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);
if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
int i;
+ u16 ctrl;
- /* Those delay sucks, the HW seem to love them though, I'll
- * serisouly consider breaking some locks here to be able
- * to schedule instead
- */
- for (i = 0; i < 3; i++) {
#ifdef CONFIG_PPC_PMAC
- pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
- msleep(20);
+ pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
#endif
- /* Some PHYs used by apple have problem getting back to us,
- * we do an additional reset here
- */
- phy_write(gp, MII_BMCR, BMCR_RESET);
- msleep(20);
- if (phy_read(gp, MII_BMCR) != 0xffff)
+
+ /* Some PHYs used by apple have problem getting back
+ * to us, we do an additional reset here
+ */
+ phy_write(gp, MII_BMCR, BMCR_RESET);
+ for (i = 0; i < 50; i++) {
+ if ((phy_read(gp, MII_BMCR) & BMCR_RESET) == 0)
break;
- if (i == 2)
- printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
- gp->dev->name);
+ msleep(10);
}
+ if (i == 50)
+ printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
+ gp->dev->name);
+ /* Make sure isolate is off */
+ ctrl = phy_read(gp, MII_BMCR);
+ if (ctrl & BMCR_ISOLATE)
+ phy_write(gp, MII_BMCR, ctrl & ~BMCR_ISOLATE);
}
if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
@@ -2119,7 +2123,7 @@ static void gem_reinit_chip(struct gem *gp)
/* Must be invoked with no lock held. */
static void gem_stop_phy(struct gem *gp, int wol)
{
- u32 mifcfg;
+ u32 mif_cfg;
unsigned long flags;
/* Let the chip settle down a bit, it seems that helps
@@ -2130,9 +2134,9 @@ static void gem_stop_phy(struct gem *gp, int wol)
/* Make sure we aren't polling PHY status change. We
* don't currently use that feature though
*/
- mifcfg = readl(gp->regs + MIF_CFG);
- mifcfg &= ~MIF_CFG_POLL;
- writel(mifcfg, gp->regs + MIF_CFG);
+ mif_cfg = readl(gp->regs + MIF_CFG);
+ mif_cfg &= ~MIF_CFG_POLL;
+ writel(mif_cfg, gp->regs + MIF_CFG);
if (wol && gp->has_wol) {
unsigned char *e = &gp->dev->dev_addr[0];
@@ -2182,7 +2186,8 @@ static void gem_stop_phy(struct gem *gp, int wol)
/* According to Apple, we must set the MDIO pins to this begnign
* state or we may 1) eat more current, 2) damage some PHYs
*/
- writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
+ mif_cfg = 0;
+ writel(mif_cfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
writel(0, gp->regs + MIF_BBCLK);
writel(0, gp->regs + MIF_BBDATA);
writel(0, gp->regs + MIF_BBOENAB);
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig
index c8f6286..308f773 100644
--- a/drivers/net/wireless/hostap/Kconfig
+++ b/drivers/net/wireless/hostap/Kconfig
@@ -75,7 +75,7 @@ config HOSTAP_PCI
config HOSTAP_CS
tristate "Host AP driver for Prism2/2.5/3 PC Cards"
- depends on PCMCIA!=n && HOSTAP
+ depends on PCMCIA && HOSTAP
---help---
Host AP driver's version for Prism2/2.5/3 PC Cards.
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 8bf0276..6290c9f 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -2201,6 +2201,17 @@ static int ipw2100_alloc_skb(struct ipw2100_priv *priv,
#define SEARCH_SNAPSHOT 1
#define SNAPSHOT_ADDR(ofs) (priv->snapshot[((ofs) >> 12) & 0xff] + ((ofs) & 0xfff))
+static void ipw2100_snapshot_free(struct ipw2100_priv *priv)
+{
+ int i;
+ if (!priv->snapshot[0])
+ return;
+ for (i = 0; i < 0x30; i++)
+ kfree(priv->snapshot[i]);
+ priv->snapshot[0] = NULL;
+}
+
+#ifdef CONFIG_IPW2100_DEBUG_C3
static int ipw2100_snapshot_alloc(struct ipw2100_priv *priv)
{
int i;
@@ -2221,16 +2232,6 @@ static int ipw2100_snapshot_alloc(struct ipw2100_priv *priv)
return 1;
}
-static void ipw2100_snapshot_free(struct ipw2100_priv *priv)
-{
- int i;
- if (!priv->snapshot[0])
- return;
- for (i = 0; i < 0x30; i++)
- kfree(priv->snapshot[i]);
- priv->snapshot[0] = NULL;
-}
-
static u32 ipw2100_match_buf(struct ipw2100_priv *priv, u8 * in_buf,
size_t len, int mode)
{
@@ -2269,6 +2270,7 @@ static u32 ipw2100_match_buf(struct ipw2100_priv *priv, u8 * in_buf,
return ret;
}
+#endif
/*
*
@@ -7112,11 +7114,17 @@ static int ipw2100_wx_set_txpow(struct net_device *dev,
{
struct ipw2100_priv *priv = ieee80211_priv(dev);
int err = 0, value;
+
+ if (ipw_radio_kill_sw(priv, wrqu->txpower.disabled))
+ return -EINPROGRESS;
if (priv->ieee->iw_mode != IW_MODE_ADHOC)
+ return 0;
+
+ if ((wrqu->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
return -EINVAL;
- if (wrqu->txpower.disabled == 1 || wrqu->txpower.fixed == 0)
+ if (wrqu->txpower.fixed == 0)
value = IPW_TX_POWER_DEFAULT;
else {
if (wrqu->txpower.value < IPW_TX_POWER_MIN_DBM ||
@@ -7151,24 +7159,19 @@ static int ipw2100_wx_get_txpow(struct net_device *dev,
struct ipw2100_priv *priv = ieee80211_priv(dev);
- if (priv->ieee->iw_mode != IW_MODE_ADHOC) {
- wrqu->power.disabled = 1;
- return 0;
- }
+ wrqu->txpower.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
if (priv->tx_power == IPW_TX_POWER_DEFAULT) {
- wrqu->power.fixed = 0;
- wrqu->power.value = IPW_TX_POWER_MAX_DBM;
- wrqu->power.disabled = 1;
+ wrqu->txpower.fixed = 0;
+ wrqu->txpower.value = IPW_TX_POWER_MAX_DBM;
} else {
- wrqu->power.disabled = 0;
- wrqu->power.fixed = 1;
- wrqu->power.value = priv->tx_power;
+ wrqu->txpower.fixed = 1;
+ wrqu->txpower.value = priv->tx_power;
}
- wrqu->power.flags = IW_TXPOW_DBM;
+ wrqu->txpower.flags = IW_TXPOW_DBM;
- IPW_DEBUG_WX("GET TX Power -> %d \n", wrqu->power.value);
+ IPW_DEBUG_WX("GET TX Power -> %d \n", wrqu->txpower.value);
return 0;
}
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 4c28e33..916b24c 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -8012,6 +8012,10 @@ static int ipw_sw_reset(struct ipw_priv *priv, int init)
else
IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
+ priv->config &= ~CFG_STATIC_ESSID;
+ priv->essid_len = 0;
+ memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
+
if (disable) {
priv->status |= STATUS_RF_KILL_SW;
IPW_DEBUG_INFO("Radio disabled.\n");
@@ -11035,7 +11039,6 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
net_dev->set_multicast_list = ipw_net_set_multicast_list;
net_dev->set_mac_address = ipw_net_set_mac_address;
priv->wireless_data.spy_data = &priv->ieee->spy_data;
- priv->wireless_data.ieee80211 = priv->ieee;
net_dev->wireless_data = &priv->wireless_data;
net_dev->wireless_handlers = &ipw_wx_handler_def;
net_dev->ethtool_ops = &ipw_ethtool_ops;
@@ -11121,8 +11124,8 @@ static void ipw_pci_remove(struct pci_dev *pdev)
/* Free MAC hash list for ADHOC */
for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
- kfree(list_entry(p, struct ipw_ibss_seq, list));
list_del(p);
+ kfree(list_entry(p, struct ipw_ibss_seq, list));
}
}
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index b664708..3c128b6 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -261,13 +261,13 @@ orinoco_cs_config(dev_link_t *link)
/* Note that the CIS values need to be rescaled */
if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
if (conf.Vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) {
- DEBUG(2, "orinoco_cs_config: Vcc mismatch (conf.Vcc = %d, CIS = %d)\n", conf.Vcc, cfg->vcc.param[CISTPL_POWER_VNOM] / 10000);
+ DEBUG(2, "orinoco_cs_config: Vcc mismatch (conf.Vcc = %d, cfg CIS = %d)\n", conf.Vcc, cfg->vcc.param[CISTPL_POWER_VNOM] / 10000);
if (!ignore_cis_vcc)
goto next_entry;
}
} else if (dflt.vcc.present & (1 << CISTPL_POWER_VNOM)) {
if (conf.Vcc != dflt.vcc.param[CISTPL_POWER_VNOM] / 10000) {
- DEBUG(2, "orinoco_cs_config: Vcc mismatch (conf.Vcc = %d, CIS = %d)\n", conf.Vcc, dflt.vcc.param[CISTPL_POWER_VNOM] / 10000);
+ DEBUG(2, "orinoco_cs_config: Vcc mismatch (conf.Vcc = %d, dflt CIS = %d)\n", conf.Vcc, dflt.vcc.param[CISTPL_POWER_VNOM] / 10000);
if(!ignore_cis_vcc)
goto next_entry;
}
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 202b750..8e1ba0b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -416,7 +416,9 @@ static void attach_msi_entry(struct msi_desc *entry, int vector)
static void irq_handler_init(int cap_id, int pos, int mask)
{
- spin_lock(&irq_desc[pos].lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_desc[pos].lock, flags);
if (cap_id == PCI_CAP_ID_MSIX)
irq_desc[pos].handler = &msix_irq_type;
else {
@@ -425,7 +427,7 @@ static void irq_handler_init(int cap_id, int pos, int mask)
else
irq_desc[pos].handler = &msi_irq_w_maskbit_type;
}
- spin_unlock(&irq_desc[pos].lock);
+ spin_unlock_irqrestore(&irq_desc[pos].lock, flags);
}
static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index cfbceb5..07b1e7c 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -1700,6 +1700,31 @@ static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
return sizeof(def_rw_recovery_mpage);
}
+/*
+ * We can turn this into a real blacklist if it's needed, for now just
+ * blacklist any Maxtor BANC1G10 revision firmware
+ */
+static int ata_dev_supports_fua(u16 *id)
+{
+ unsigned char model[41], fw[9];
+
+ if (!ata_id_has_fua(id))
+ return 0;
+
+ model[40] = '\0';
+ fw[8] = '\0';
+
+ ata_dev_id_string(id, model, ATA_ID_PROD_OFS, sizeof(model) - 1);
+ ata_dev_id_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw) - 1);
+
+ if (strncmp(model, "Maxtor", 6))
+ return 1;
+ if (strncmp(fw, "BANC1G10", 8))
+ return 1;
+
+ return 0; /* blacklisted */
+}
+
/**
* ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
* @args: device IDENTIFY data / SCSI command of interest.
@@ -1797,7 +1822,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
return 0;
dpofua = 0;
- if (ata_id_has_fua(args->id) && dev->flags & ATA_DFLAG_LBA48 &&
+ if (ata_dev_supports_fua(args->id) && dev->flags & ATA_DFLAG_LBA48 &&
(!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
dpofua = 1 << 4;
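
The comment above ata_dev_supports_fua() explains the intent: advertise FUA support unless the drive is a Maxtor running BANC1G10 firmware. The snippet below is a standalone restatement of just that decision, with plain strings in place of the ATA IDENTIFY words; the sample model/firmware values in main() are made up for illustration.

#include <stdio.h>
#include <string.h>

/* FUA allowed unless the device is a Maxtor with BANC1G10 firmware. */
static int dev_supports_fua(int id_has_fua, const char *model, const char *fw)
{
	if (!id_has_fua)
		return 0;
	if (strncmp(model, "Maxtor", 6))
		return 1;			/* not a Maxtor */
	if (strncmp(fw, "BANC1G10", 8))
		return 1;			/* Maxtor, but other firmware */
	return 0;				/* blacklisted combination */
}

int main(void)
{
	printf("%d\n", dev_supports_fua(1, "Maxtor SAMPLE", "BANC1G10")); /* 0 */
	printf("%d\n", dev_supports_fua(1, "Maxtor SAMPLE", "OTHERFW1")); /* 1 */
	printf("%d\n", dev_supports_fua(0, "Other Drive", "ANYTHING"));   /* 0 */
	return 0;
}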
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c
index 5468e5a..43e67d6 100644
--- a/drivers/serial/sn_console.c
+++ b/drivers/serial/sn_console.c
@@ -6,7 +6,7 @@
* driver for that.
*
*
- * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
@@ -829,8 +829,8 @@ static int __init sn_sal_module_init(void)
misc.name = DEVICE_NAME_DYNAMIC;
retval = misc_register(&misc);
if (retval != 0) {
- printk
- ("Failed to register console device using misc_register.\n");
+ printk(KERN_WARNING "Failed to register console "
+ "device using misc_register.\n");
return -ENODEV;
}
sal_console_uart.major = MISC_MAJOR;
@@ -942,88 +942,75 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count)
{
unsigned long flags = 0;
struct sn_cons_port *port = &sal_console_port;
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
static int stole_lock = 0;
-#endif
BUG_ON(!port->sc_is_asynch);
/* We can't look at the xmit buffer if we're not registered with serial core
* yet. So only do the fancy recovery after registering
*/
- if (port->sc_port.info) {
-
- /* somebody really wants this output, might be an
- * oops, kdb, panic, etc. make sure they get it. */
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
- if (spin_is_locked(&port->sc_port.lock)) {
- int lhead = port->sc_port.info->xmit.head;
- int ltail = port->sc_port.info->xmit.tail;
- int counter, got_lock = 0;
+ if (!port->sc_port.info) {
+ /* Not yet registered with serial core - simple case */
+ puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
+ return;
+ }
- /*
- * We attempt to determine if someone has died with the
- * lock. We wait ~20 secs after the head and tail ptrs
- * stop moving and assume the lock holder is not functional
- * and plow ahead. If the lock is freed within the time out
- * period we re-get the lock and go ahead normally. We also
- * remember if we have plowed ahead so that we don't have
- * to wait out the time out period again - the asumption
- * is that we will time out again.
- */
+ /* somebody really wants this output, might be an
+ * oops, kdb, panic, etc. make sure they get it. */
+ if (spin_is_locked(&port->sc_port.lock)) {
+ int lhead = port->sc_port.info->xmit.head;
+ int ltail = port->sc_port.info->xmit.tail;
+ int counter, got_lock = 0;
+
+ /*
+ * We attempt to determine if someone has died with the
+ * lock. We wait ~20 secs after the head and tail ptrs
+ * stop moving and assume the lock holder is not functional
+ * and plow ahead. If the lock is freed within the time out
+ * period we re-get the lock and go ahead normally. We also
+ * remember if we have plowed ahead so that we don't have
+ * to wait out the time out period again - the asumption
+ * is that we will time out again.
+ */
- for (counter = 0; counter < 150; mdelay(125), counter++) {
- if (!spin_is_locked(&port->sc_port.lock)
- || stole_lock) {
- if (!stole_lock) {
- spin_lock_irqsave(&port->
- sc_port.lock,
- flags);
- got_lock = 1;
- }
- break;
- } else {
- /* still locked */
- if ((lhead !=
- port->sc_port.info->xmit.head)
- || (ltail !=
- port->sc_port.info->xmit.
- tail)) {
- lhead =
- port->sc_port.info->xmit.
- head;
- ltail =
- port->sc_port.info->xmit.
- tail;
- counter = 0;
- }
+ for (counter = 0; counter < 150; mdelay(125), counter++) {
+ if (!spin_is_locked(&port->sc_port.lock)
+ || stole_lock) {
+ if (!stole_lock) {
+ spin_lock_irqsave(&port->sc_port.lock,
+ flags);
+ got_lock = 1;
}
- }
- /* flush anything in the serial core xmit buffer, raw */
- sn_transmit_chars(port, 1);
- if (got_lock) {
- spin_unlock_irqrestore(&port->sc_port.lock,
- flags);
- stole_lock = 0;
+ break;
} else {
- /* fell thru */
- stole_lock = 1;
+ /* still locked */
+ if ((lhead != port->sc_port.info->xmit.head)
+ || (ltail !=
+ port->sc_port.info->xmit.tail)) {
+ lhead =
+ port->sc_port.info->xmit.head;
+ ltail =
+ port->sc_port.info->xmit.tail;
+ counter = 0;
+ }
}
- puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
- } else {
- stole_lock = 0;
-#endif
- spin_lock_irqsave(&port->sc_port.lock, flags);
- sn_transmit_chars(port, 1);
+ }
+ /* flush anything in the serial core xmit buffer, raw */
+ sn_transmit_chars(port, 1);
+ if (got_lock) {
spin_unlock_irqrestore(&port->sc_port.lock, flags);
-
- puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+ stole_lock = 0;
+ } else {
+ /* fell thru */
+ stole_lock = 1;
}
-#endif
- }
- else {
- /* Not yet registered with serial core - simple case */
+ puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
+ } else {
+ stole_lock = 0;
+ spin_lock_irqsave(&port->sc_port.lock, flags);
+ sn_transmit_chars(port, 1);
+ spin_unlock_irqrestore(&port->sc_port.lock, flags);
+
puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
}
}
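
Stripped of the #ifdefs, the comment block above still describes a heuristic worth spelling out: poll the port lock for roughly 20 seconds (150 iterations of 125 ms), restart the wait whenever the xmit ring head/tail indices move (the holder is alive), and if nothing moves, steal the output path and remember that via stole_lock so later calls do not wait again. The following is a rough userspace model of just that control flow, with fake types and a no-op delay standing in for the kernel objects; it is not the driver code.

/* Rough model of the console lock-steal heuristic: wait while the lock is
 * held, restart the wait whenever the xmit ring indices move, and give up
 * (steal) after ~150 * 125 ms with no progress. Not kernel code. */
#include <stdbool.h>

struct fake_port {
	bool locked;
	int head, tail;
	bool stole_lock;	/* remembered across calls, like the static */
};

/* Pretend to sleep; in the driver this is mdelay(125). */
static void wait_125ms(void) { }

static bool try_output(struct fake_port *p)
{
	int lhead = p->head, ltail = p->tail;
	int counter, got_lock = 0;

	for (counter = 0; counter < 150; wait_125ms(), counter++) {
		if (!p->locked || p->stole_lock) {
			if (!p->stole_lock) {
				p->locked = true;	/* spin_lock_irqsave */
				got_lock = 1;
			}
			break;
		}
		/* Holder still making progress? Restart the ~20 s window. */
		if (lhead != p->head || ltail != p->tail) {
			lhead = p->head;
			ltail = p->tail;
			counter = 0;
		}
	}

	/* raw output would be flushed here, locked or not */
	if (got_lock) {
		p->locked = false;		/* spin_unlock_irqrestore */
		p->stole_lock = false;
	} else {
		p->stole_lock = true;		/* fell through: remember it */
	}
	return got_lock;
}

int main(void)
{
	struct fake_port p = { .locked = false, .head = 0, .tail = 0,
			       .stole_lock = false };

	return !try_output(&p);		/* uncontended: lock taken */
}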
diff --git a/fs/bio.c b/fs/bio.c
index bbc442b..1f3bb50 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -411,6 +411,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
/**
* bio_add_pc_page - attempt to add page to bio
+ * @q: the target queue
* @bio: destination bio
* @page: page to add
* @len: vec entry length
diff --git a/fs/compat.c b/fs/compat.c
index 18b21b4..ff0bafc 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1743,7 +1743,7 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
if ((u64)tv.tv_sec >= (u64)MAX_INT64_SECONDS)
timeout = -1; /* infinite */
else {
- timeout = ROUND_UP(tv.tv_sec, 1000000/HZ);
+ timeout = ROUND_UP(tv.tv_usec, 1000000/HZ);
timeout += tv.tv_sec * HZ;
}
}
@@ -1884,7 +1884,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
/* We assume that ts.tv_sec is always lower than
the number of seconds that can be expressed in
an s64. Otherwise the compiler bitches at us */
- timeout = ROUND_UP(ts.tv_sec, 1000000000/HZ);
+ timeout = ROUND_UP(ts.tv_nsec, 1000000000/HZ);
timeout += ts.tv_sec * HZ;
}
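
The two ROUND_UP() one-liners are real fixes: it is the sub-second part (tv_usec or ts.tv_nsec) that has to be rounded up to whole jiffies before tv_sec * HZ is added, not the seconds. A quick standalone check of the corrected arithmetic, assuming HZ is 250 and the usual ROUND_UP definition of (x + y - 1) / y:

/* Corrected timeval -> jiffies conversion used by compat select/poll:
 * round the microseconds up to whole jiffies, then add the seconds. */
#include <stdio.h>

#define HZ 250				/* assumed tick rate for the example */
#define ROUND_UP(x, y) (((x) + (y) - 1) / (y))

static long tv_to_jiffies(long tv_sec, long tv_usec)
{
	long timeout = ROUND_UP(tv_usec, 1000000 / HZ);	/* usec, not sec */

	timeout += tv_sec * HZ;
	return timeout;
}

int main(void)
{
	/* 1.5 s -> 375 jiffies; the old code rounded tv_sec instead and
	 * got the sub-second contribution wrong. */
	printf("%ld\n", tv_to_jiffies(1, 500000));
	return 0;
}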
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
index bb89062..f483eeb 100644
--- a/include/asm-ia64/semaphore.h
+++ b/include/asm-ia64/semaphore.h
@@ -61,7 +61,7 @@ static inline void
down (struct semaphore *sem)
{
might_sleep();
- if (atomic_dec_return(&sem->count) < 0)
+ if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
__down(sem);
}
@@ -75,7 +75,7 @@ down_interruptible (struct semaphore * sem)
int ret = 0;
might_sleep();
- if (atomic_dec_return(&sem->count) < 0)
+ if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
ret = __down_interruptible(sem);
return ret;
}
@@ -85,7 +85,7 @@ down_trylock (struct semaphore *sem)
{
int ret = 0;
- if (atomic_dec_return(&sem->count) < 0)
+ if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
ret = __down_trylock(sem);
return ret;
}
@@ -93,7 +93,7 @@ down_trylock (struct semaphore *sem)
static inline void
up (struct semaphore * sem)
{
- if (atomic_inc_return(&sem->count) <= 0)
+ if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1)
__up(sem);
}
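
The rewritten primitives test the value returned by ia64_fetchadd(), which is the counter before the add, whereas atomic_dec_return()/atomic_inc_return() handed back the value afterwards; "old < 1" on down() and "old <= -1" on up() are therefore exactly the previous "< 0" and "<= 0" tests. A tiny standalone check of that equivalence (fetchadd() here is a plain C model, not the IA-64 intrinsic):

/* fetchadd-style semaphore tests: the primitive returns the value *before*
 * the add, so sleep/wake conditions are expressed against the old count. */
#include <assert.h>

static int fetchadd(int *v, int delta)
{
	int old = *v;

	*v += delta;
	return old;
}

int main(void)
{
	int start;

	for (start = -3; start <= 3; start++) {
		int a = start, b = start;

		/* down(): old < 1  <=>  (count after decrement) < 0 */
		assert((fetchadd(&a, -1) < 1) == (--b < 0));

		a = b = start;
		/* up(): old <= -1  <=>  (count after increment) <= 0 */
		assert((fetchadd(&a, 1) <= -1) == (++b <= 0));
	}
	return 0;
}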
diff --git a/include/asm-ia64/sn/xp.h b/include/asm-ia64/sn/xp.h
index 203945a..9bd2f9b 100644
--- a/include/asm-ia64/sn/xp.h
+++ b/include/asm-ia64/sn/xp.h
@@ -18,6 +18,7 @@
#include <linux/cache.h>
#include <linux/hardirq.h>
+#include <linux/mutex.h>
#include <asm/sn/types.h>
#include <asm/sn/bte.h>
@@ -359,7 +360,7 @@ typedef void (*xpc_notify_func)(enum xpc_retval reason, partid_t partid,
* the channel.
*/
struct xpc_registration {
- struct semaphore sema;
+ struct mutex mutex;
xpc_channel_func func; /* function to call */
void *key; /* pointer to user's key */
u16 nentries; /* #of msg entries in local msg queue */
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
index 87e9cd58..0c36928 100644
--- a/include/asm-ia64/sn/xpc.h
+++ b/include/asm-ia64/sn/xpc.h
@@ -19,6 +19,8 @@
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sn/bte.h>
@@ -335,8 +337,7 @@ struct xpc_openclose_args {
* and consumed by the intended recipient.
*/
struct xpc_notify {
- struct semaphore sema; /* notify semaphore */
- volatile u8 type; /* type of notification */
+ volatile u8 type; /* type of notification */
/* the following two fields are only used if type == XPC_N_CALL */
xpc_notify_func func; /* user's notify function */
@@ -465,8 +466,8 @@ struct xpc_channel {
xpc_channel_func func; /* user's channel function */
void *key; /* pointer to user's key */
- struct semaphore msg_to_pull_sema; /* next msg to pull serialization */
- struct semaphore wdisconnect_sema; /* wait for channel disconnect */
+ struct mutex msg_to_pull_mutex; /* next msg to pull serialization */
+ struct completion wdisconnect_wait; /* wait for channel disconnect */
struct xpc_openclose_args *local_openclose_args; /* args passed on */
/* opening or closing of channel */
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index d8aae4d..412ef8e 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -18,6 +18,10 @@
#include <asm/smp.h>
#ifdef CONFIG_NUMA
+
+/* Nodes w/o CPUs are preferred for memory allocations, see build_zonelists */
+#define PENALTY_FOR_NODE_WITH_CPUS 255
+
/*
* Returns the number of the node containing CPU 'cpu'
*/
diff --git a/include/asm-sparc/oplib.h b/include/asm-sparc/oplib.h
index 9594455..d0d76b3 100644
--- a/include/asm-sparc/oplib.h
+++ b/include/asm-sparc/oplib.h
@@ -164,6 +164,7 @@ enum prom_input_device {
PROMDEV_IKBD, /* input from keyboard */
PROMDEV_ITTYA, /* input from ttya */
PROMDEV_ITTYB, /* input from ttyb */
+ PROMDEV_IRSC, /* input from rsc */
PROMDEV_I_UNK,
};
@@ -175,6 +176,7 @@ enum prom_output_device {
PROMDEV_OSCREEN, /* to screen */
PROMDEV_OTTYA, /* to ttya */
PROMDEV_OTTYB, /* to ttyb */
+ PROMDEV_ORSC, /* to rsc */
PROMDEV_O_UNK,
};
diff --git a/include/asm-sparc/thread_info.h b/include/asm-sparc/thread_info.h
index 65f060b..91b9f58 100644
--- a/include/asm-sparc/thread_info.h
+++ b/include/asm-sparc/thread_info.h
@@ -128,9 +128,10 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
* thread information flag bit numbers
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
-#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+/* flag bit 1 is available */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
#define TIF_USEDFPU 8 /* FPU was used by this task
* this quantum (SMP) */
#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
@@ -139,9 +140,9 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h
index 58dba51..2ac64e6 100644
--- a/include/asm-sparc/unistd.h
+++ b/include/asm-sparc/unistd.h
@@ -300,11 +300,26 @@
#define __NR_add_key 281
#define __NR_request_key 282
#define __NR_keyctl 283
+#define __NR_openat 284
+#define __NR_mkdirat 285
+#define __NR_mknodat 286
+#define __NR_fchownat 287
+#define __NR_futimesat 288
+#define __NR_newfstatat 289
+#define __NR_unlinkat 290
+#define __NR_renameat 291
+#define __NR_linkat 292
+#define __NR_symlinkat 293
+#define __NR_readlinkat 294
+#define __NR_fchmodat 295
+#define __NR_faccessat 296
+#define __NR_pselect6 297
+#define __NR_ppoll 298
-/* WARNING: You MAY NOT add syscall numbers larger than 283, since
+/* WARNING: You MAY NOT add syscall numbers larger than 298, since
* all of the syscall tables in the Sparc kernel are
- * sized to have 283 entries (starting at zero). Therefore
- * find a free slot in the 0-282 range.
+ * sized to have 298 entries (starting at zero). Therefore
+ * find a free slot in the 0-298 range.
*/
#define _syscall0(type,name) \
@@ -458,6 +473,7 @@ return -1; \
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#endif
#ifdef __KERNEL_SYSCALLS__
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
index ec85d12..508c416 100644
--- a/include/asm-sparc64/spinlock.h
+++ b/include/asm-sparc64/spinlock.h
@@ -131,6 +131,28 @@ static void inline __read_lock(raw_rwlock_t *lock)
: "memory");
}
+static int inline __read_trylock(raw_rwlock_t *lock)
+{
+ int tmp1, tmp2;
+
+ __asm__ __volatile__ (
+"1: ldsw [%2], %0\n"
+" brlz,a,pn %0, 2f\n"
+" mov 0, %0\n"
+" add %0, 1, %1\n"
+" cas [%2], %0, %1\n"
+" cmp %0, %1\n"
+" membar #StoreLoad | #StoreStore\n"
+" bne,pn %%icc, 1b\n"
+" mov 1, %0\n"
+"2:"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (lock)
+ : "memory");
+
+ return tmp1;
+}
+
static void inline __read_unlock(raw_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
@@ -211,12 +233,12 @@ static int inline __write_trylock(raw_rwlock_t *lock)
}
#define __raw_read_lock(p) __read_lock(p)
+#define __raw_read_trylock(p) __read_trylock(p)
#define __raw_read_unlock(p) __read_unlock(p)
#define __raw_write_lock(p) __write_lock(p)
#define __raw_write_unlock(p) __write_unlock(p)
#define __raw_write_trylock(p) __write_trylock(p)
-#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
#define __raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw) (!(rw)->lock)
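
In C terms the new __read_trylock() is a classic try-acquire loop on a signed lock word: a negative value means a writer holds it (fail immediately), otherwise attempt to compare-and-swap count+1 into place and retry only if another CPU raced in between. A hedged standalone model, using GCC's __sync_val_compare_and_swap() in place of the sparc cas instruction:

/* C model of the sparc64 __read_trylock() added above: readers increment a
 * signed counter, a writer makes it negative. Returns 1 on success, 0 if a
 * writer currently holds the lock. */
static int read_trylock_model(volatile int *lock)
{
	int old;

	for (;;) {
		old = *lock;
		if (old < 0)			/* write-locked: fail */
			return 0;
		/* cas [lock], old, old + 1 */
		if (__sync_val_compare_and_swap(lock, old, old + 1) == old)
			return 1;		/* read hold acquired */
		/* raced with another CPU: reload and try again */
	}
}

int main(void)
{
	volatile int lock = 0;

	return !(read_trylock_model(&lock) == 1 && lock == 1);
}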
diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h
index c94d8b3..ac9d068 100644
--- a/include/asm-sparc64/thread_info.h
+++ b/include/asm-sparc64/thread_info.h
@@ -221,7 +221,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
* nop
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
-#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_RESTORE_SIGMASK 1 /* restore signal mask in do_signal() */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_PERFCTR 4 /* performance counters active */
@@ -241,7 +241,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define TIF_POLLING_NRFLAG 14
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_PERFCTR (1<<TIF_PERFCTR)
@@ -250,11 +249,12 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_32BIT (1<<TIF_32BIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
- (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
+ (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | \
_TIF_NEED_RESCHED | _TIF_PERFCTR))
#endif /* __KERNEL__ */
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index 51ec287..84ac2bd 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -302,11 +302,26 @@
#define __NR_add_key 281
#define __NR_request_key 282
#define __NR_keyctl 283
+#define __NR_openat 284
+#define __NR_mkdirat 285
+#define __NR_mknodat 286
+#define __NR_fchownat 287
+#define __NR_futimesat 288
+#define __NR_newfstatat 289
+#define __NR_unlinkat 290
+#define __NR_renameat 291
+#define __NR_linkat 292
+#define __NR_symlinkat 293
+#define __NR_readlinkat 294
+#define __NR_fchmodat 295
+#define __NR_faccessat 296
+#define __NR_pselect6 297
+#define __NR_ppoll 298
-/* WARNING: You MAY NOT add syscall numbers larger than 283, since
+/* WARNING: You MAY NOT add syscall numbers larger than 298, since
* all of the syscall tables in the Sparc kernel are
- * sized to have 283 entries (starting at zero). Therefore
- * find a free slot in the 0-282 range.
+ * sized to have 298 entries (starting at zero). Therefore
+ * find a free slot in the 0-298 range.
*/
#define _syscall0(type,name) \
@@ -501,6 +516,8 @@ asmlinkage long sys_rt_sigaction(int sig,
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
+#define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
#endif
/*
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 02a585f..860e7a4 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -392,8 +392,8 @@ struct request_queue
unsigned int nr_congestion_off;
unsigned int nr_batching;
- unsigned short max_sectors;
- unsigned short max_hw_sectors;
+ unsigned int max_sectors;
+ unsigned int max_hw_sectors;
unsigned short max_phys_segments;
unsigned short max_hw_segments;
unsigned short hardsect_size;
@@ -697,7 +697,7 @@ extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void blk_queue_bounce_limit(request_queue_t *, u64);
-extern void blk_queue_max_sectors(request_queue_t *, unsigned short);
+extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 472f048..6500d4e 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -19,7 +19,21 @@ struct xt_get_revision
/* For standard target */
#define XT_RETURN (-NF_REPEAT - 1)
-#define XT_ALIGN(s) (((s) + (__alignof__(void *)-1)) & ~(__alignof__(void *)-1))
+/* this is a dummy structure to find out the alignment requirement for a struct
+ * containing all the fundamental data types that are used in ipt_entry,
+ * ip6t_entry and arpt_entry. This sucks, and it is a hack. It will be my
+ * personal pleasure to remove it -HW
+ */
+struct _xt_align
+{
+ u_int8_t u8;
+ u_int16_t u16;
+ u_int32_t u32;
+ u_int64_t u64;
+};
+
+#define XT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) \
+ & ~(__alignof__(struct _xt_align)-1))
/* Standard return verdict, or do jump. */
#define XT_STANDARD_TARGET ""
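
What the _xt_align trick buys is that __alignof__ on a struct containing every fundamental type used in the entry structures evaluates to the strictest member alignment (normally that of the 64-bit field), so XT_ALIGN() rounds sizes up to a boundary that is safe for all of them rather than to pointer alignment as before. A minimal standalone check of the rounding:

/* XT_ALIGN() rounds a size up to the alignment of the most strictly
 * aligned fundamental member (usually the 64-bit one). */
#include <stdio.h>
#include <stdint.h>

struct _xt_align {
	uint8_t  u8;
	uint16_t u16;
	uint32_t u32;
	uint64_t u64;
};

#define XT_ALIGN(s) (((s) + (__alignof__(struct _xt_align) - 1)) \
			& ~(__alignof__(struct _xt_align) - 1))

int main(void)
{
	size_t s;

	/* On most ABIs __alignof__(struct _xt_align) is 8, so 1..8 -> 8,
	 * 9..16 -> 16, and so on. */
	for (s = 1; s <= 12; s++)
		printf("XT_ALIGN(%zu) = %zu\n", s, (size_t)XT_ALIGN(s));
	return 0;
}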
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index df05f46..9a92aef 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -803,9 +803,9 @@ enum ieee80211_state {
#define IEEE80211_24GHZ_MAX_CHANNEL 14
#define IEEE80211_24GHZ_CHANNELS 14
-#define IEEE80211_52GHZ_MIN_CHANNEL 36
+#define IEEE80211_52GHZ_MIN_CHANNEL 34
#define IEEE80211_52GHZ_MAX_CHANNEL 165
-#define IEEE80211_52GHZ_CHANNELS 32
+#define IEEE80211_52GHZ_CHANNELS 131
enum {
IEEE80211_CH_PASSIVE_ONLY = (1 << 0),
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index a553f39..e673b2c 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -175,6 +175,8 @@ void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
void sctp_icmp_proto_unreachable(struct sock *sk,
struct sctp_association *asoc,
struct sctp_transport *t);
+void sctp_backlog_migrate(struct sctp_association *assoc,
+ struct sock *oldsk, struct sock *newsk);
/*
* Section: Macros, externs, and inlines
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index f5c22d7..8c522ae 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -127,9 +127,9 @@ extern struct sctp_globals {
* RTO.Alpha - 1/8 (3 when converted to right shifts.)
* RTO.Beta - 1/4 (2 when converted to right shifts.)
*/
- __u32 rto_initial;
- __u32 rto_min;
- __u32 rto_max;
+ unsigned long rto_initial;
+ unsigned long rto_min;
+ unsigned long rto_max;
/* Note: rto_alpha and rto_beta are really defined as inverse
* powers of two to facilitate integer operations.
@@ -140,12 +140,18 @@ extern struct sctp_globals {
/* Max.Burst - 4 */
int max_burst;
- /* Valid.Cookie.Life - 60 seconds */
- int valid_cookie_life;
-
/* Whether Cookie Preservative is enabled(1) or not(0) */
int cookie_preserve_enable;
+ /* Valid.Cookie.Life - 60 seconds */
+ unsigned long valid_cookie_life;
+
+ /* Delayed SACK timeout 200ms default*/
+ unsigned long sack_timeout;
+
+ /* HB.interval - 30 seconds */
+ unsigned long hb_interval;
+
/* Association.Max.Retrans - 10 attempts
* Path.Max.Retrans - 5 attempts (per destination address)
* Max.Init.Retransmits - 8 attempts
@@ -168,12 +174,6 @@ extern struct sctp_globals {
*/
int rcvbuf_policy;
- /* Delayed SACK timeout 200ms default*/
- int sack_timeout;
-
- /* HB.interval - 30 seconds */
- int hb_interval;
-
/* The following variables are implementation specific. */
/* Default initialization values to be applied to new associations. */
@@ -405,8 +405,9 @@ struct sctp_cookie {
/* The format of our cookie that we send to our peer. */
struct sctp_signed_cookie {
__u8 signature[SCTP_SECRET_SIZE];
+ __u32 __pad; /* force sctp_cookie alignment to 64 bits */
struct sctp_cookie c;
-};
+} __attribute__((packed));
/* This is another convenience type to allocate memory for address
* params for the maximum size and pass such structures around
@@ -827,7 +828,7 @@ struct sctp_transport {
__u32 rtt; /* This is the most recent RTT. */
/* RTO : The current retransmission timeout value. */
- __u32 rto;
+ unsigned long rto;
/* RTTVAR : The current RTT variation. */
__u32 rttvar;
@@ -877,22 +878,10 @@ struct sctp_transport {
/* Heartbeat interval: The endpoint sends out a Heartbeat chunk to
* the destination address every heartbeat interval.
*/
- __u32 hbinterval;
-
- /* This is the max_retrans value for the transport and will
- * be initialized from the assocs value. This can be changed
- * using SCTP_SET_PEER_ADDR_PARAMS socket option.
- */
- __u16 pathmaxrxt;
-
- /* PMTU : The current known path MTU. */
- __u32 pathmtu;
+ unsigned long hbinterval;
/* SACK delay timeout */
- __u32 sackdelay;
-
- /* Flags controling Heartbeat, SACK delay, and Path MTU Discovery. */
- __u32 param_flags;
+ unsigned long sackdelay;
/* When was the last time (in jiffies) that we heard from this
* transport? We use this to pick new active and retran paths.
@@ -904,6 +893,18 @@ struct sctp_transport {
*/
unsigned long last_time_ecne_reduced;
+ /* This is the max_retrans value for the transport and will
+ * be initialized from the assocs value. This can be changed
+ * using SCTP_SET_PEER_ADDR_PARAMS socket option.
+ */
+ __u16 pathmaxrxt;
+
+ /* PMTU : The current known path MTU. */
+ __u32 pathmtu;
+
+ /* Flags controling Heartbeat, SACK delay, and Path MTU Discovery. */
+ __u32 param_flags;
+
/* The number of times INIT has been sent on this transport. */
int init_sent_count;
@@ -1249,6 +1250,14 @@ struct sctp_endpoint {
int last_key;
int key_changed_at;
+ /* digest: This is a digest of the sctp cookie. This field is
+ * only used on the receive path when we try to validate
+ * that the cookie has not been tampered with. We put
+ * this here so we pre-allocate this once and can re-use
+ * on every receive.
+ */
+ __u8 digest[SCTP_SIGNATURE_SIZE];
+
/* sendbuf acct. policy. */
__u32 sndbuf_policy;
@@ -1499,9 +1508,9 @@ struct sctp_association {
* These values will be initialized by system defaults, but can
* be modified via the SCTP_RTOINFO socket option.
*/
- __u32 rto_initial;
- __u32 rto_max;
- __u32 rto_min;
+ unsigned long rto_initial;
+ unsigned long rto_max;
+ unsigned long rto_min;
/* Maximum number of new data packets that can be sent in a burst. */
int max_burst;
@@ -1519,13 +1528,13 @@ struct sctp_association {
__u16 init_retries;
/* The largest timeout or RTO value to use in attempting an INIT */
- __u16 max_init_timeo;
+ unsigned long max_init_timeo;
/* Heartbeat interval: The endpoint sends out a Heartbeat chunk to
* the destination address every heartbeat interval. This value
* will be inherited by all new transports.
*/
- __u32 hbinterval;
+ unsigned long hbinterval;
/* This is the max_retrans value for new transports in the
* association.
@@ -1537,13 +1546,14 @@ struct sctp_association {
*/
__u32 pathmtu;
- /* SACK delay timeout */
- __u32 sackdelay;
-
/* Flags controling Heartbeat, SACK delay, and Path MTU Discovery. */
__u32 param_flags;
- int timeouts[SCTP_NUM_TIMEOUT_TYPES];
+ /* SACK delay timeout */
+ unsigned long sackdelay;
+
+
+ unsigned long timeouts[SCTP_NUM_TIMEOUT_TYPES];
struct timer_list timers[SCTP_NUM_TIMEOUT_TYPES];
/* Transport to which SHUTDOWN chunk was last sent. */
@@ -1648,7 +1658,10 @@ struct sctp_association {
/* How many duplicated TSNs have we seen? */
int numduptsns;
- /* Number of seconds of idle time before an association is closed. */
+ /* Number of seconds of idle time before an association is closed.
+ * In the association context, this is really used as a boolean
+ * since the real timeout is stored in the timeouts array
+ */
__u32 autoclose;
/* These are to support
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 467274a..8279929 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -554,7 +554,6 @@ struct Scsi_Host {
/*
* ordered write support
*/
- unsigned ordered_flush:1;
unsigned ordered_tag:1;
/*
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 7732199..7712912 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -114,16 +114,16 @@ rcu_torture_alloc(void)
{
struct list_head *p;
- spin_lock(&rcu_torture_lock);
+ spin_lock_bh(&rcu_torture_lock);
if (list_empty(&rcu_torture_freelist)) {
atomic_inc(&n_rcu_torture_alloc_fail);
- spin_unlock(&rcu_torture_lock);
+ spin_unlock_bh(&rcu_torture_lock);
return NULL;
}
atomic_inc(&n_rcu_torture_alloc);
p = rcu_torture_freelist.next;
list_del_init(p);
- spin_unlock(&rcu_torture_lock);
+ spin_unlock_bh(&rcu_torture_lock);
return container_of(p, struct rcu_torture, rtort_free);
}
@@ -134,9 +134,9 @@ static void
rcu_torture_free(struct rcu_torture *p)
{
atomic_inc(&n_rcu_torture_free);
- spin_lock(&rcu_torture_lock);
+ spin_lock_bh(&rcu_torture_lock);
list_add_tail(&p->rtort_free, &rcu_torture_freelist);
- spin_unlock(&rcu_torture_lock);
+ spin_unlock_bh(&rcu_torture_lock);
}
static void
diff --git a/kernel/sched.c b/kernel/sched.c
index 3ee2ae4..ec7fd9c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5141,7 +5141,7 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
#define SEARCH_SCOPE 2
#define MIN_CACHE_SIZE (64*1024U)
#define DEFAULT_CACHE_SIZE (5*1024*1024U)
-#define ITERATIONS 2
+#define ITERATIONS 1
#define SIZE_THRESH 130
#define COST_THRESH 130
@@ -5480,9 +5480,9 @@ static unsigned long long measure_migration_cost(int cpu1, int cpu2)
break;
}
/*
- * Increase the cachesize in 5% steps:
+ * Increase the cachesize in 10% steps:
*/
- size = size * 20 / 19;
+ size = size * 10 / 9;
}
if (migration_debug)
diff --git a/kernel/time.c b/kernel/time.c
index 7477b1d..1f23e68 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -155,7 +155,7 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
static int firsttime = 1;
int error = 0;
- if (!timespec_valid(tv))
+ if (tv && !timespec_valid(tv))
return -EINVAL;
error = security_settime(tv, tz);
diff --git a/kernel/user.c b/kernel/user.c
index 89e562f..d1ae234 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
+#include <linux/interrupt.h>
/*
* UID task count cache, to get fast user lookup in "alloc_uid"
@@ -27,6 +28,12 @@
static kmem_cache_t *uid_cachep;
static struct list_head uidhash_table[UIDHASH_SZ];
+
+/*
+ * The uidhash_lock is mostly taken from process context, but it is
+ * occasionally also taken from softirq/tasklet context, when
+ * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ */
static DEFINE_SPINLOCK(uidhash_lock);
struct user_struct root_user = {
@@ -83,14 +90,15 @@ struct user_struct *find_user(uid_t uid)
{
struct user_struct *ret;
- spin_lock(&uidhash_lock);
+ spin_lock_bh(&uidhash_lock);
ret = uid_hash_find(uid, uidhashentry(uid));
- spin_unlock(&uidhash_lock);
+ spin_unlock_bh(&uidhash_lock);
return ret;
}
void free_uid(struct user_struct *up)
{
+ local_bh_disable();
if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
uid_hash_remove(up);
key_put(up->uid_keyring);
@@ -98,6 +106,7 @@ void free_uid(struct user_struct *up)
kmem_cache_free(uid_cachep, up);
spin_unlock(&uidhash_lock);
}
+ local_bh_enable();
}
struct user_struct * alloc_uid(uid_t uid)
@@ -105,9 +114,9 @@ struct user_struct * alloc_uid(uid_t uid)
struct list_head *hashent = uidhashentry(uid);
struct user_struct *up;
- spin_lock(&uidhash_lock);
+ spin_lock_bh(&uidhash_lock);
up = uid_hash_find(uid, hashent);
- spin_unlock(&uidhash_lock);
+ spin_unlock_bh(&uidhash_lock);
if (!up) {
struct user_struct *new;
@@ -137,7 +146,7 @@ struct user_struct * alloc_uid(uid_t uid)
* Before adding this, check whether we raced
* on adding the same user already..
*/
- spin_lock(&uidhash_lock);
+ spin_lock_bh(&uidhash_lock);
up = uid_hash_find(uid, hashent);
if (up) {
key_put(new->uid_keyring);
@@ -147,7 +156,7 @@ struct user_struct * alloc_uid(uid_t uid)
uid_hash_insert(new, hashent);
up = new;
}
- spin_unlock(&uidhash_lock);
+ spin_unlock_bh(&uidhash_lock);
}
return up;
@@ -183,9 +192,9 @@ static int __init uid_cache_init(void)
INIT_LIST_HEAD(uidhash_table + n);
/* Insert the root user immediately (init already runs as root) */
- spin_lock(&uidhash_lock);
+ spin_lock_bh(&uidhash_lock);
uid_hash_insert(&root_user, uidhashentry(0));
- spin_unlock(&uidhash_lock);
+ spin_unlock_bh(&uidhash_lock);
return 0;
}
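
The new comment is the key to all the spin_lock_bh() conversions in this file: once free_uid() can be reached from the RCU (softirq) path, any process-context holder of uidhash_lock must keep bottom halves disabled, or a softirq on the same CPU can deadlock trying to take the lock again. The sketch below only shows the pairing used in free_uid(), local_bh_disable() around atomic_dec_and_lock(), with the kernel primitives replaced by trivial counting stubs; none of this is the real implementation.

/* Shape of the softirq-safe locking in kernel/user.c after this change.
 * The kernel primitives are stubbed as plain counters; only the pairing of
 * local_bh_disable()/spin_lock()/spin_unlock()/local_bh_enable() matters. */
#include <stdio.h>

static int lock_word;			/* stand-in for uidhash_lock */
static int bh_disabled;			/* stand-in for the per-CPU BH count */
static int refcount = 1;		/* stand-in for up->__count */

static void local_bh_disable(void) { bh_disabled++; }
static void local_bh_enable(void)  { bh_disabled--; }
static void spin_lock(int *l)      { (*l)++; }
static void spin_unlock(int *l)    { (*l)--; }

/* atomic_dec_and_lock(): decrement; take the lock only when it hits zero. */
static int atomic_dec_and_lock(int *count, int *lock)
{
	if (--*count == 0) {
		spin_lock(lock);
		return 1;
	}
	return 0;
}

static void free_uid_model(void)
{
	/* BHs stay off across the dec-and-lock, since the same lock is
	 * taken from softirq context when task structs get RCU-freed. */
	local_bh_disable();
	if (atomic_dec_and_lock(&refcount, &lock_word)) {
		/* uid_hash_remove(), key_put()s, kmem_cache_free() ... */
		spin_unlock(&lock_word);
	}
	local_bh_enable();
}

int main(void)
{
	free_uid_model();
	printf("refcount=%d lock=%d bh=%d\n", refcount, lock_word, bh_disabled);
	return 0;
}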
diff --git a/net/core/dev.c b/net/core/dev.c
index fd070a0..ffb8207 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2543,13 +2543,14 @@ int dev_ioctl(unsigned int cmd, void __user *arg)
case SIOCBONDENSLAVE:
case SIOCBONDRELEASE:
case SIOCBONDSETHWADDR:
- case SIOCBONDSLAVEINFOQUERY:
- case SIOCBONDINFOQUERY:
case SIOCBONDCHANGEACTIVE:
case SIOCBRADDIF:
case SIOCBRDELIF:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ /* fall through */
+ case SIOCBONDSLAVEINFOQUERY:
+ case SIOCBONDINFOQUERY:
dev_load(ifr.ifr_name);
rtnl_lock();
ret = dev_ifsioc(&ifr, cmd);
diff --git a/net/core/filter.c b/net/core/filter.c
index 9540946..93fbd01 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -64,7 +64,7 @@ static inline void *load_pointer(struct sk_buff *skb, int k,
}
/**
- * sk_run_filter - run a filter on a socket
+ * sk_run_filter - run a filter on a socket
* @skb: buffer to run the filter on
* @filter: filter to apply
* @flen: length of filter
@@ -78,8 +78,8 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
{
struct sock_filter *fentry; /* We walk down these */
void *ptr;
- u32 A = 0; /* Accumulator */
- u32 X = 0; /* Index Register */
+ u32 A = 0; /* Accumulator */
+ u32 X = 0; /* Index Register */
u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
u32 tmp;
int k;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d0732e9..6766f11 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -135,13 +135,15 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
int fclone)
{
+ kmem_cache_t *cache;
struct skb_shared_info *shinfo;
struct sk_buff *skb;
u8 *data;
+ cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
+
/* Get the HEAD */
- skb = kmem_cache_alloc(fclone ? skbuff_fclone_cache : skbuff_head_cache,
- gfp_mask & ~__GFP_DMA);
+ skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA);
if (!skb)
goto out;
@@ -180,7 +182,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
out:
return skb;
nodata:
- kmem_cache_free(skbuff_head_cache, skb);
+ kmem_cache_free(cache, skb);
skb = NULL;
goto out;
}
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 7a12180..960aa78 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -350,6 +350,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
u8 src[ETH_ALEN];
struct ieee80211_crypt_data *crypt = NULL;
int keyidx = 0;
+ int can_be_decrypted = 0;
hdr = (struct ieee80211_hdr_4addr *)skb->data;
stats = &ieee->stats;
@@ -410,12 +411,23 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
return 1;
}
- if (is_multicast_ether_addr(hdr->addr1)
- ? ieee->host_mc_decrypt : ieee->host_decrypt) {
+ can_be_decrypted = (is_multicast_ether_addr(hdr->addr1) ||
+ is_broadcast_ether_addr(hdr->addr2)) ?
+ ieee->host_mc_decrypt : ieee->host_decrypt;
+
+ if (can_be_decrypted) {
int idx = 0;
- if (skb->len >= hdrlen + 3)
+ if (skb->len >= hdrlen + 3) {
+ /* Top two-bits of byte 3 are the key index */
idx = skb->data[hdrlen + 3] >> 6;
+ }
+
+ /* ieee->crypt[] is WEP_KEY (4) in length. Given that idx
+ * is only allowed 2-bits of storage, no value of idx can
+ * be provided via above code that would result in idx
+ * being out of range */
crypt = ieee->crypt[idx];
+
#ifdef NOT_YET
sta = NULL;
@@ -553,7 +565,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
/* skb: hdr + (possibly fragmented, possibly encrypted) payload */
- if (ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
+ if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted &&
(keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
goto rx_dropped;
@@ -617,7 +629,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
/* skb: hdr + (possible reassembled) full MSDU payload; possibly still
* encrypted/authenticated */
- if (ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
+ if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted &&
ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
goto rx_dropped;
@@ -1439,7 +1451,7 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
break;
case IEEE80211_STYPE_PROBE_REQ:
- IEEE80211_DEBUG_MGMT("recieved auth (%d)\n",
+ IEEE80211_DEBUG_MGMT("received auth (%d)\n",
WLAN_FC_GET_STYPE(le16_to_cpu
(header->frame_ctl)));
@@ -1473,7 +1485,7 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
break;
case IEEE80211_STYPE_AUTH:
- IEEE80211_DEBUG_MGMT("recieved auth (%d)\n",
+ IEEE80211_DEBUG_MGMT("received auth (%d)\n",
WLAN_FC_GET_STYPE(le16_to_cpu
(header->frame_ctl)));
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index 23e1630..f87c6b8 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -232,15 +232,18 @@ static char *ipw2100_translate_scan(struct ieee80211_device *ieee,
return start;
}
+#define SCAN_ITEM_SIZE 128
+
int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct ieee80211_network *network;
unsigned long flags;
+ int err = 0;
char *ev = extra;
- char *stop = ev + IW_SCAN_MAX_DATA;
+ char *stop = ev + wrqu->data.length;
int i = 0;
IEEE80211_DEBUG_WX("Getting scan\n");
@@ -249,6 +252,11 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
list_for_each_entry(network, &ieee->network_list, list) {
i++;
+ if (stop - ev < SCAN_ITEM_SIZE) {
+ err = -E2BIG;
+ break;
+ }
+
if (ieee->scan_age == 0 ||
time_after(network->last_scanned + ieee->scan_age, jiffies))
ev = ipw2100_translate_scan(ieee, ev, stop, network);
@@ -270,7 +278,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i);
- return 0;
+ return err;
}
int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d8ce713..f70ba62 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -970,7 +970,6 @@ int igmp_rcv(struct sk_buff *skb)
case IGMP_MTRACE_RESP:
break;
default:
- NETDEBUG(KERN_DEBUG "New IGMP type=%d, why we do not know about it?\n", ih->type);
}
drop:
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 3284cfb..128de4d 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -230,7 +230,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
if (tp->snd_cwnd < tp->snd_cwnd_clamp)
tp->snd_cwnd++;
tp->snd_cwnd_cnt = 0;
- ca->ccount++;
}
}
}
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 6c05c79..4420948 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1252,8 +1252,7 @@ int igmp6_event_query(struct sk_buff *skb)
}
} else {
for (ma = idev->mc_list; ma; ma=ma->next) {
- if (group_type != IPV6_ADDR_ANY &&
- !ipv6_addr_equal(group, &ma->mca_addr))
+ if (!ipv6_addr_equal(group, &ma->mca_addr))
continue;
spin_lock_bh(&ma->mca_lock);
if (ma->mca_flags & MAF_TIMER_RUNNING) {
@@ -1268,11 +1267,10 @@ int igmp6_event_query(struct sk_buff *skb)
ma->mca_flags &= ~MAF_GSQUERY;
}
if (!(ma->mca_flags & MAF_GSQUERY) ||
- mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs))
+ mld_marksources(ma, ntohs(mlh2->nsrcs), mlh2->srcs))
igmp6_group_queried(ma, max_delay);
spin_unlock_bh(&ma->mca_lock);
- if (group_type != IPV6_ADDR_ANY)
- break;
+ break;
}
}
read_unlock_bh(&idev->lock);
@@ -1351,7 +1349,7 @@ static int is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
* in all filters
*/
if (psf->sf_count[MCAST_INCLUDE])
- return 0;
+ return type == MLD2_MODE_IS_INCLUDE;
return pmc->mca_sfcount[MCAST_EXCLUDE] ==
psf->sf_count[MCAST_EXCLUDE];
}
@@ -1966,7 +1964,7 @@ static void sf_markstate(struct ifmcaddr6 *pmc)
static int sf_setstate(struct ifmcaddr6 *pmc)
{
- struct ip6_sf_list *psf;
+ struct ip6_sf_list *psf, *dpsf;
int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
int qrv = pmc->idev->mc_qrv;
int new_in, rv;
@@ -1978,8 +1976,48 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
!psf->sf_count[MCAST_INCLUDE];
} else
new_in = psf->sf_count[MCAST_INCLUDE] != 0;
- if (new_in != psf->sf_oldin) {
- psf->sf_crcount = qrv;
+ if (new_in) {
+ if (!psf->sf_oldin) {
+ struct ip6_sf_list *prev = 0;
+
+ for (dpsf=pmc->mca_tomb; dpsf;
+ dpsf=dpsf->sf_next) {
+ if (ipv6_addr_equal(&dpsf->sf_addr,
+ &psf->sf_addr))
+ break;
+ prev = dpsf;
+ }
+ if (dpsf) {
+ if (prev)
+ prev->sf_next = dpsf->sf_next;
+ else
+ pmc->mca_tomb = dpsf->sf_next;
+ kfree(dpsf);
+ }
+ psf->sf_crcount = qrv;
+ rv++;
+ }
+ } else if (psf->sf_oldin) {
+ psf->sf_crcount = 0;
+ /*
+ * add or update "delete" records if an active filter
+ * is now inactive
+ */
+ for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next)
+ if (ipv6_addr_equal(&dpsf->sf_addr,
+ &psf->sf_addr))
+ break;
+ if (!dpsf) {
+ dpsf = (struct ip6_sf_list *)
+ kmalloc(sizeof(*dpsf), GFP_ATOMIC);
+ if (!dpsf)
+ continue;
+ *dpsf = *psf;
+ /* pmc->mca_lock held by callers */
+ dpsf->sf_next = pmc->mca_tomb;
+ pmc->mca_tomb = dpsf;
+ }
+ dpsf->sf_crcount = qrv;
rv++;
}
}
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 43f1ce7..ae86d23 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1620,6 +1620,7 @@ static int key_notify_sa_flush(struct km_event *c)
return -ENOBUFS;
hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto);
+ hdr->sadb_msg_type = SADB_FLUSH;
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->pid;
hdr->sadb_msg_version = PF_KEY_V2;
@@ -2385,6 +2386,7 @@ static int key_notify_policy_flush(struct km_event *c)
if (!skb_out)
return -ENOBUFS;
hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
+ hdr->sadb_msg_type = SADB_X_SPDFLUSH;
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->pid;
hdr->sadb_msg_version = PF_KEY_V2;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ee93abc..9db7dbd 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -365,7 +365,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
*/
err = -EMSGSIZE;
- if(len>dev->mtu+dev->hard_header_len)
+ if (len > dev->mtu + dev->hard_header_len)
goto out_unlock;
err = -ENOBUFS;
@@ -935,7 +935,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add
* Check legality
*/
- if(addr_len!=sizeof(struct sockaddr))
+ if (addr_len != sizeof(struct sockaddr))
return -EINVAL;
strlcpy(name,uaddr->sa_data,sizeof(name));
@@ -1092,7 +1092,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
* retries.
*/
- if(skb==NULL)
+ if (skb == NULL)
goto out;
/*
@@ -1392,8 +1392,8 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
if (level != SOL_PACKET)
return -ENOPROTOOPT;
- if (get_user(len,optlen))
- return -EFAULT;
+ if (get_user(len, optlen))
+ return -EFAULT;
if (len < 0)
return -EINVAL;
@@ -1419,9 +1419,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
return -ENOPROTOOPT;
}
- if (put_user(len, optlen))
- return -EFAULT;
- return 0;
+ if (put_user(len, optlen))
+ return -EFAULT;
+ return 0;
}
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 4aa6fc6..cb78b50 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -257,20 +257,26 @@ int sctp_rcv(struct sk_buff *skb)
*/
sctp_bh_lock_sock(sk);
+ /* It is possible that the association could have moved to a different
+ * socket if it is peeled off. If so, update the sk.
+ */
+ if (sk != rcvr->sk) {
+ sctp_bh_lock_sock(rcvr->sk);
+ sctp_bh_unlock_sock(sk);
+ sk = rcvr->sk;
+ }
+
if (sock_owned_by_user(sk))
sk_add_backlog(sk, skb);
else
sctp_backlog_rcv(sk, skb);
- /* Release the sock and any reference counts we took in the
- * lookup calls.
+ /* Release the sock and the sock ref we took in the lookup calls.
+ * The asoc/ep ref will be released in sctp_backlog_rcv.
*/
sctp_bh_unlock_sock(sk);
- if (asoc)
- sctp_association_put(asoc);
- else
- sctp_endpoint_put(ep);
sock_put(sk);
+
return ret;
discard_it:
@@ -296,12 +302,50 @@ discard_release:
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
- struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
-
- sctp_inq_push(inqueue, chunk);
+ struct sctp_inq *inqueue = NULL;
+ struct sctp_ep_common *rcvr = NULL;
+
+ rcvr = chunk->rcvr;
+
+ BUG_TRAP(rcvr->sk == sk);
+
+ if (rcvr->dead) {
+ sctp_chunk_free(chunk);
+ } else {
+ inqueue = &chunk->rcvr->inqueue;
+ sctp_inq_push(inqueue, chunk);
+ }
+
+ /* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */
+ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+ sctp_association_put(sctp_assoc(rcvr));
+ else
+ sctp_endpoint_put(sctp_ep(rcvr));
+
return 0;
}
+void sctp_backlog_migrate(struct sctp_association *assoc,
+ struct sock *oldsk, struct sock *newsk)
+{
+ struct sk_buff *skb;
+ struct sctp_chunk *chunk;
+
+ skb = oldsk->sk_backlog.head;
+ oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
+ while (skb != NULL) {
+ struct sk_buff *next = skb->next;
+
+ chunk = SCTP_INPUT_CB(skb)->chunk;
+ skb->next = NULL;
+ if (&assoc->base == chunk->rcvr)
+ sk_add_backlog(newsk, skb);
+ else
+ sk_add_backlog(oldsk, skb);
+ skb = next;
+ }
+}
+
/* Handle icmp frag needed error. */
void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
struct sctp_transport *t, __u32 pmtu)
@@ -544,10 +588,16 @@ int sctp_rcv_ootb(struct sk_buff *skb)
sctp_errhdr_t *err;
ch = (sctp_chunkhdr_t *) skb->data;
- ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length));
/* Scan through all the chunks in the packet. */
- while (ch_end > (__u8 *)ch && ch_end < skb->tail) {
+ do {
+ /* Break out if chunk length is less then minimal. */
+ if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
+ break;
+
+ ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+ if (ch_end > skb->tail)
+ break;
/* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
* receiver MUST silently discard the OOTB packet and take no
@@ -578,8 +628,7 @@ int sctp_rcv_ootb(struct sk_buff *skb)
}
ch = (sctp_chunkhdr_t *) ch_end;
- ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length));
- }
+ } while (ch_end < skb->tail);
return 0;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 2d33922..297b895 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -73,8 +73,10 @@ void sctp_inq_free(struct sctp_inq *queue)
/* If there is a packet which is currently being worked on,
* free it as well.
*/
- if (queue->in_progress)
+ if (queue->in_progress) {
sctp_chunk_free(queue->in_progress);
+ queue->in_progress = NULL;
+ }
if (queue->malloced) {
/* Dump the master memory segment. */
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 6e4dc28..d47a52c 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -176,7 +176,7 @@ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_associa
static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
{
- if (*pos > sctp_ep_hashsize)
+ if (*pos >= sctp_ep_hashsize)
return NULL;
if (*pos < 0)
@@ -185,8 +185,6 @@ static void * sctp_eps_seq_start(struct seq_file *seq, loff_t *pos)
if (*pos == 0)
seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS\n");
- ++*pos;
-
return (void *)pos;
}
@@ -198,11 +196,9 @@ static void sctp_eps_seq_stop(struct seq_file *seq, void *v)
static void * sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- if (*pos > sctp_ep_hashsize)
+ if (++*pos >= sctp_ep_hashsize)
return NULL;
- ++*pos;
-
return pos;
}
@@ -214,19 +210,19 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
struct sctp_ep_common *epb;
struct sctp_endpoint *ep;
struct sock *sk;
- int hash = *(int *)v;
+ int hash = *(loff_t *)v;
- if (hash > sctp_ep_hashsize)
+ if (hash >= sctp_ep_hashsize)
return -ENOMEM;
- head = &sctp_ep_hashtable[hash-1];
+ head = &sctp_ep_hashtable[hash];
sctp_local_bh_disable();
read_lock(&head->lock);
for (epb = head->chain; epb; epb = epb->next) {
ep = sctp_ep(epb);
sk = epb->sk;
seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
- sctp_sk(sk)->type, sk->sk_state, hash-1,
+ sctp_sk(sk)->type, sk->sk_state, hash,
epb->bind_addr.port,
sock_i_uid(sk), sock_i_ino(sk));
@@ -283,7 +279,7 @@ void sctp_eps_proc_exit(void)
static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
{
- if (*pos > sctp_assoc_hashsize)
+ if (*pos >= sctp_assoc_hashsize)
return NULL;
if (*pos < 0)
@@ -293,8 +289,6 @@ static void * sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos)
seq_printf(seq, " ASSOC SOCK STY SST ST HBKT ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT "
"RPORT LADDRS <-> RADDRS\n");
- ++*pos;
-
return (void *)pos;
}
@@ -306,11 +300,9 @@ static void sctp_assocs_seq_stop(struct seq_file *seq, void *v)
static void * sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- if (*pos > sctp_assoc_hashsize)
+ if (++*pos >= sctp_assoc_hashsize)
return NULL;
- ++*pos;
-
return pos;
}
@@ -321,12 +313,12 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
struct sctp_ep_common *epb;
struct sctp_association *assoc;
struct sock *sk;
- int hash = *(int *)v;
+ int hash = *(loff_t *)v;
- if (hash > sctp_assoc_hashsize)
+ if (hash >= sctp_assoc_hashsize)
return -ENOMEM;
- head = &sctp_assoc_hashtable[hash-1];
+ head = &sctp_assoc_hashtable[hash];
sctp_local_bh_disable();
read_lock(&head->lock);
for (epb = head->chain; epb; epb = epb->next) {
@@ -335,7 +327,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
seq_printf(seq,
"%8p %8p %-3d %-3d %-2d %-4d %4d %8d %8d %7d %5lu %-5d %5d ",
assoc, sk, sctp_sk(sk)->type, sk->sk_state,
- assoc->state, hash-1, assoc->assoc_id,
+ assoc->state, hash, assoc->assoc_id,
(sk->sk_rcvbuf - assoc->rwnd),
assoc->sndbuf_used,
sock_i_uid(sk), sock_i_ino(sk),
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 556c495..5e0de3c 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1275,7 +1275,12 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
unsigned int keylen;
char *key;
- headersize = sizeof(sctp_paramhdr_t) + SCTP_SECRET_SIZE;
+ /* Header size is static data prior to the actual cookie, including
+ * any padding.
+ */
+ headersize = sizeof(sctp_paramhdr_t) +
+ (sizeof(struct sctp_signed_cookie) -
+ sizeof(struct sctp_cookie));
bodysize = sizeof(struct sctp_cookie)
+ ntohs(init_chunk->chunk_hdr->length) + addrs_len;
@@ -1354,7 +1359,7 @@ struct sctp_association *sctp_unpack_cookie(
struct sctp_signed_cookie *cookie;
struct sctp_cookie *bear_cookie;
int headersize, bodysize, fixed_size;
- __u8 digest[SCTP_SIGNATURE_SIZE];
+ __u8 *digest = ep->digest;
struct scatterlist sg;
unsigned int keylen, len;
char *key;
@@ -1362,7 +1367,12 @@ struct sctp_association *sctp_unpack_cookie(
struct sk_buff *skb = chunk->skb;
struct timeval tv;
- headersize = sizeof(sctp_chunkhdr_t) + SCTP_SECRET_SIZE;
+ /* Header size is static data prior to the actual cookie, including
+ * any padding.
+ */
+ headersize = sizeof(sctp_chunkhdr_t) +
+ (sizeof(struct sctp_signed_cookie) -
+ sizeof(struct sctp_cookie));
bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
fixed_size = headersize + sizeof(struct sctp_cookie);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index b8b38ab..8d1dc24 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1300,7 +1300,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
"T1 INIT Timeout adjustment"
" init_err_counter: %d"
" cycle: %d"
- " timeout: %d\n",
+ " timeout: %ld\n",
asoc->init_err_counter,
asoc->init_cycle,
asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT]);
@@ -1328,7 +1328,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
SCTP_DEBUG_PRINTK(
"T1 COOKIE Timeout adjustment"
" init_err_counter: %d"
- " timeout: %d\n",
+ " timeout: %ld\n",
asoc->init_err_counter,
asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE]);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 477d7f8..2b9a832 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -884,7 +884,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
{
struct sctp_transport *transport = (struct sctp_transport *) arg;
- if (asoc->overall_error_count > asoc->max_retrans) {
+ if (asoc->overall_error_count >= asoc->max_retrans) {
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_U32(SCTP_ERROR_NO_ERROR));
@@ -2122,7 +2122,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
struct sctp_bind_addr *bp;
int attempts = asoc->init_err_counter + 1;
- if (attempts >= asoc->max_init_attempts) {
+ if (attempts > asoc->max_init_attempts) {
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_U32(SCTP_ERROR_STALE_COOKIE));
return SCTP_DISPOSITION_DELETE_TCB;
@@ -3090,6 +3090,8 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
break;
ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+ if (ch_end > skb->tail)
+ break;
if (SCTP_CID_SHUTDOWN_ACK == ch->type)
ootb_shut_ack = 1;
@@ -4638,7 +4640,7 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep,
SCTP_DEBUG_PRINTK("Timer T1 expired (INIT).\n");
- if (attempts < asoc->max_init_attempts) {
+ if (attempts <= asoc->max_init_attempts) {
bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0);
if (!repl)
@@ -4695,7 +4697,7 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep
SCTP_DEBUG_PRINTK("Timer T1 expired (COOKIE-ECHO).\n");
- if (attempts < asoc->max_init_attempts) {
+ if (attempts <= asoc->max_init_attempts) {
repl = sctp_make_cookie_echo(asoc, NULL);
if (!repl)
return SCTP_DISPOSITION_NOMEM;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c98ee375..0ea947e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2995,7 +2995,7 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
sp->hbinterval = jiffies_to_msecs(sctp_hb_interval);
sp->pathmaxrxt = sctp_max_retrans_path;
sp->pathmtu = 0; // allow default discovery
- sp->sackdelay = sctp_sack_timeout;
+ sp->sackdelay = jiffies_to_msecs(sctp_sack_timeout);
sp->param_flags = SPP_HB_ENABLE |
SPP_PMTUD_ENABLE |
SPP_SACKDELAY_ENABLE;
@@ -5426,7 +5426,7 @@ out:
return err;
do_error:
- if (asoc->init_err_counter + 1 >= asoc->max_init_attempts)
+ if (asoc->init_err_counter + 1 > asoc->max_init_attempts)
err = -ETIMEDOUT;
else
err = -ECONNREFUSED;
@@ -5602,8 +5602,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
*/
newsp->type = type;
+ spin_lock_bh(&oldsk->sk_lock.slock);
+ /* Migrate the backlog from oldsk to newsk. */
+ sctp_backlog_migrate(assoc, oldsk, newsk);
/* Migrate the association to the new socket. */
sctp_assoc_migrate(assoc, newsk);
+ spin_unlock_bh(&oldsk->sk_lock.slock);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index fcd7096..dc6f3ff 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -159,12 +159,9 @@ static ctl_table sctp_table[] = {
.ctl_name = NET_SCTP_PRESERVE_ENABLE,
.procname = "cookie_preserve_enable",
.data = &sctp_cookie_preserve_enable,
- .maxlen = sizeof(long),
+ .maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_doulongvec_ms_jiffies_minmax,
- .strategy = &sctp_sysctl_jiffies_ms,
- .extra1 = &rto_timer_min,
- .extra2 = &rto_timer_max
+ .proc_handler = &proc_dointvec
},
{
.ctl_name = NET_SCTP_RTO_ALPHA,
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 68d73e2..160f62a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -350,7 +350,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
tp->rto_pending = 0;
SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d "
- "rttvar: %d, rto: %d\n", __FUNCTION__,
+ "rttvar: %d, rto: %ld\n", __FUNCTION__,
tp, rtt, tp->srtt, tp->rttvar, tp->rto);
}
diff --git a/security/seclvl.c b/security/seclvl.c
index 1caac01..8529ea6 100644
--- a/security/seclvl.c
+++ b/security/seclvl.c
@@ -368,8 +368,8 @@ static int seclvl_capable(struct task_struct *tsk, int cap)
*/
static int seclvl_settime(struct timespec *tv, struct timezone *tz)
{
- struct timespec now;
- if (seclvl > 1) {
+ if (tv && seclvl > 1) {
+ struct timespec now;
now = current_kernel_time();
if (tv->tv_sec < now.tv_sec ||
(tv->tv_sec == now.tv_sec && tv->tv_nsec < now.tv_nsec)) {