-rw-r--r--  Documentation/scsi/link_power_management_policy.txt | 19
-rw-r--r--  arch/um/Kconfig.i386 | 2
-rw-r--r--  arch/um/Makefile-i386 | 5
-rw-r--r--  arch/um/Makefile-x86_64 | 5
-rw-r--r--  arch/um/kernel/mem.c | 2
-rw-r--r--  arch/um/sys-i386/ptrace.c | 8
-rw-r--r--  arch/um/sys-x86_64/ptrace.c | 4
-rw-r--r--  arch/x86/kernel/pci-gart_64.c | 2
-rw-r--r--  arch/x86/mm/init_64.c | 6
-rw-r--r--  block/cfq-iosched.c | 7
-rw-r--r--  block/compat_ioctl.c | 2
-rw-r--r--  block/ll_rw_blk.c | 41
-rw-r--r--  drivers/ata/ahci.c | 174
-rw-r--r--  drivers/ata/libata-core.c | 343
-rw-r--r--  drivers/ata/libata-eh.c | 29
-rw-r--r--  drivers/ata/libata-scsi.c | 91
-rw-r--r--  drivers/ata/libata.h | 2
-rw-r--r--  drivers/ata/pata_acpi.c | 4
-rw-r--r--  drivers/ata/pata_ns87415.c | 2
-rw-r--r--  drivers/ata/pata_optidma.c | 2
-rw-r--r--  drivers/ata/pata_pcmcia.c | 12
-rw-r--r--  drivers/ata/pata_pdc2027x.c | 2
-rw-r--r--  drivers/ata/pata_pdc202xx_old.c | 4
-rw-r--r--  drivers/ata/pata_scc.c | 13
-rw-r--r--  drivers/ata/pata_via.c | 2
-rw-r--r--  drivers/ata/pata_winbond.c | 2
-rw-r--r--  drivers/ata/pdc_adma.c | 12
-rw-r--r--  drivers/ata/sata_inic162x.c | 4
-rw-r--r--  drivers/ata/sata_mv.c | 7
-rw-r--r--  drivers/ata/sata_nv.c | 120
-rw-r--r--  drivers/ata/sata_promise.c | 38
-rw-r--r--  drivers/ata/sata_qstor.c | 3
-rw-r--r--  drivers/ata/sata_sil.c | 11
-rw-r--r--  drivers/ata/sata_sil24.c | 9
-rw-r--r--  drivers/ata/sata_sis.c | 64
-rw-r--r--  drivers/ata/sata_svw.c | 10
-rw-r--r--  drivers/ata/sata_sx4.c | 177
-rw-r--r--  drivers/ata/sata_uli.c | 18
-rw-r--r--  drivers/ata/sata_via.c | 20
-rw-r--r--  drivers/ata/sata_vsc.c | 15
-rw-r--r--  drivers/block/cciss.c | 14
-rw-r--r--  drivers/block/cciss_scsi.c | 14
-rw-r--r--  drivers/block/cciss_scsi.h | 14
-rw-r--r--  drivers/char/mem.c | 4
-rw-r--r--  drivers/isdn/capi/capidrv.c | 25
-rw-r--r--  drivers/md/raid6algos.c | 4
-rw-r--r--  drivers/md/raid6mmx.c | 2
-rw-r--r--  drivers/md/raid6sse1.c | 2
-rw-r--r--  drivers/md/raid6sse2.c | 4
-rw-r--r--  drivers/md/raid6x86.h | 2
-rw-r--r--  drivers/mmc/host/imxmmc.c | 2
-rw-r--r--  drivers/net/Kconfig | 37
-rw-r--r--  drivers/net/Makefile | 4
-rw-r--r--  drivers/net/e1000e/e1000.h | 5
-rw-r--r--  drivers/net/e1000e/netdev.c | 425
-rw-r--r--  drivers/net/ehea/ehea.h | 2
-rw-r--r--  drivers/net/ehea/ehea_main.c | 21
-rw-r--r--  drivers/net/fec_mpc52xx.c | 1112
-rw-r--r--  drivers/net/fec_mpc52xx.h | 313
-rw-r--r--  drivers/net/fec_mpc52xx_phy.c | 198
-rw-r--r--  drivers/net/irda/au1k_ir.c | 11
-rw-r--r--  drivers/net/pcmcia/3c574_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/3c589_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/smc91c92_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/xirc2ps_cs.c | 2
-rw-r--r--  drivers/net/r8169.c | 12
-rw-r--r--  drivers/net/ucc_geth.c | 15
-rw-r--r--  drivers/net/wireless/b43/main.c | 2
-rw-r--r--  drivers/net/wireless/b43legacy/main.c | 2
-rw-r--r--  drivers/net/wireless/ipw2100.c | 11
-rw-r--r--  drivers/net/wireless/ipw2200.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 56
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl4965-base.c | 56
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00lib.h | 2
-rw-r--r--  drivers/net/wireless/rtl8187_dev.c | 20
-rw-r--r--  drivers/pci/intel-iommu.c | 7
-rw-r--r--  drivers/pci/intel-iommu.h | 2
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 5
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 12
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 2
-rw-r--r--  drivers/scsi/aacraid/dpcsup.c | 2
-rw-r--r--  drivers/scsi/arcmsr/arcmsr.h | 39
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_attr.c | 6
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c | 205
-rw-r--r--  drivers/scsi/sg.c | 1
-rw-r--r--  drivers/serial/serial_cs.c | 2
-rw-r--r--  drivers/spi/spidev.c | 6
-rw-r--r--  fs/proc/array.c | 3
-rw-r--r--  include/asm-um/unistd.h | 1
-rw-r--r--  include/linux/ata.h | 22
-rw-r--r--  include/linux/blkdev.h | 2
-rw-r--r--  include/linux/bootmem.h | 1
-rw-r--r--  include/linux/eventpoll.h | 7
-rw-r--r--  include/linux/libata.h | 30
-rw-r--r--  include/linux/scatterlist.h | 10
-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  include/linux/sunrpc/rpc_rdma.h | 32
-rw-r--r--  include/linux/types.h | 2
-rw-r--r--  include/net/sctp/auth.h | 2
-rw-r--r--  kernel/fork.c | 1
-rw-r--r--  kernel/hrtimer.c | 2
-rw-r--r--  kernel/sched.c | 70
-rw-r--r--  kernel/sched_fair.c | 2
-rw-r--r--  kernel/signal.c | 2
-rw-r--r--  kernel/time/tick-sched.c | 2
-rw-r--r--  kernel/time/timer_list.c | 6
-rw-r--r--  mm/filemap.c | 1
-rw-r--r--  mm/nommu.c | 1
-rw-r--r--  mm/slub.c | 2
-rw-r--r--  mm/sparse.c | 11
-rw-r--r--  net/sctp/auth.c | 4
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c | 24
116 files changed, 3160 insertions, 1080 deletions
diff --git a/Documentation/scsi/link_power_management_policy.txt b/Documentation/scsi/link_power_management_policy.txt
new file mode 100644
index 0000000..d18993d
--- /dev/null
+++ b/Documentation/scsi/link_power_management_policy.txt
@@ -0,0 +1,19 @@
+This parameter allows the user to set the link (interface) power management policy.
+There are 3 possible options:
+
+Value			Effect
+----------------------------------------------------------------------------
+min_power		Tell the controller to make the link use the
+			least possible power whenever possible. This may
+			sacrifice some performance due to increased latency
+			when coming out of lower power states.
+
+max_performance		Generally, this means no power management. Tell
+			the controller to have performance be a priority
+			over power management.
+
+medium_power		Tell the controller to enter a lower power state
+			when possible, but do not enter the lowest power
+			state, thus improving latency over the min_power setting.
+
+
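For illustration only (an editor's sketch, not part of this patch): the policy above is exposed through the SCSI host class attribute added in drivers/ata/libata-scsi.c further down, so it can be changed at runtime by writing one of the three strings to that attribute. A minimal userspace example in C, assuming the controller is host0 (the actual hostN number depends on probe order):

#include <stdio.h>

int main(void)
{
	/* Assumed path; the "host0" component varies per system. */
	const char *attr =
		"/sys/class/scsi_host/host0/link_power_management_policy";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Any of "min_power", "medium_power" or "max_performance". */
	fputs("min_power\n", f);
	return fclose(f) ? 1 : 0;
}

Reading the same attribute back reports the policy currently in effect.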
diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386
index 9876d80..e0ac74e 100644
--- a/arch/um/Kconfig.i386
+++ b/arch/um/Kconfig.i386
@@ -1,6 +1,6 @@
menu "Host processor type and features"
-source "arch/i386/Kconfig.cpu"
+source "arch/x86/Kconfig.cpu"
endmenu
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386
index 0178df3..b01dfb0 100644
--- a/arch/um/Makefile-i386
+++ b/arch/um/Makefile-i386
@@ -9,6 +9,7 @@ ELF_ARCH := $(SUBARCH)
ELF_FORMAT := elf32-$(SUBARCH)
OBJCOPYFLAGS := -O binary -R .note -R .comment -S
HEADER_ARCH := x86
+CHECKFLAGS += -D__i386__
ifeq ("$(origin SUBARCH)", "command line")
ifneq ("$(shell uname -m | sed -e s/i.86/i386/)", "$(SUBARCH)")
@@ -26,10 +27,8 @@ AFLAGS += -DCONFIG_X86_32
CONFIG_X86_32 := y
export CONFIG_X86_32
-ARCH_KERNEL_DEFINES += -U__$(SUBARCH)__ -U$(SUBARCH)
-
# First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
-include $(srctree)/arch/i386/Makefile.cpu
+include $(srctree)/arch/x86/Makefile_32.cpu
# prevent gcc from keeping the stack 16 byte aligned. Taken from i386.
cflags-y += $(call cc-option,-mpreferred-stack-boundary=2)
diff --git a/arch/um/Makefile-x86_64 b/arch/um/Makefile-x86_64
index fe5316f..8ed362f 100644
--- a/arch/um/Makefile-x86_64
+++ b/arch/um/Makefile-x86_64
@@ -6,12 +6,9 @@ START := 0x60000000
_extra_flags_ = -fno-builtin -m64
-#We #undef __x86_64__ for kernelspace, not for userspace where
-#it's needed for headers to work!
-ARCH_KERNEL_DEFINES = -U__$(SUBARCH)__
KBUILD_CFLAGS += $(_extra_flags_)
-CHECKFLAGS += -m64
+CHECKFLAGS += -m64 -D__x86_64__
KBUILD_AFLAGS += -m64
LDFLAGS += -m elf_x86_64
KBUILD_CPPFLAGS += -m64
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 8456397..59822dee 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -165,7 +165,7 @@ static void __init kmap_init(void)
kmap_prot = PAGE_KERNEL;
}
-static void init_highmem(void)
+static void __init init_highmem(void)
{
pgd_t *pgd;
pud_t *pud;
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c
index 9657c89..bd3da8a 100644
--- a/arch/um/sys-i386/ptrace.c
+++ b/arch/um/sys-i386/ptrace.c
@@ -155,7 +155,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
if (err)
return err;
- n = copy_to_user((void *) buf, fpregs, sizeof(fpregs));
+ n = copy_to_user(buf, fpregs, sizeof(fpregs));
if(n > 0)
return -EFAULT;
@@ -168,7 +168,7 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
long fpregs[HOST_FP_SIZE];
BUG_ON(sizeof(*buf) != sizeof(fpregs));
- n = copy_from_user(fpregs, (void *) buf, sizeof(fpregs));
+ n = copy_from_user(fpregs, buf, sizeof(fpregs));
if (n > 0)
return -EFAULT;
@@ -185,7 +185,7 @@ int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
if (err)
return err;
- n = copy_to_user((void *) buf, fpregs, sizeof(fpregs));
+ n = copy_to_user(buf, fpregs, sizeof(fpregs));
if(n > 0)
return -EFAULT;
@@ -198,7 +198,7 @@ int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
long fpregs[HOST_XFP_SIZE];
BUG_ON(sizeof(*buf) != sizeof(fpregs));
- n = copy_from_user(fpregs, (void *) buf, sizeof(fpregs));
+ n = copy_from_user(fpregs, buf, sizeof(fpregs));
if (n > 0)
return -EFAULT;
diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c
index a3cfeed..b7631b0 100644
--- a/arch/um/sys-x86_64/ptrace.c
+++ b/arch/um/sys-x86_64/ptrace.c
@@ -154,7 +154,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
if (err)
return err;
- n = copy_to_user((void *) buf, fpregs, sizeof(fpregs));
+ n = copy_to_user(buf, fpregs, sizeof(fpregs));
if(n > 0)
return -EFAULT;
@@ -167,7 +167,7 @@ int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
long fpregs[HOST_FP_SIZE];
BUG_ON(sizeof(*buf) != sizeof(fpregs));
- n = copy_from_user(fpregs, (void *) buf, sizeof(fpregs));
+ n = copy_from_user(fpregs, buf, sizeof(fpregs));
if (n > 0)
return -EFAULT;
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index ae7e016..79b514b 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -435,7 +435,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
error:
flush_gart();
- gart_unmap_sg(dev, sg, nents, dir);
+ gart_unmap_sg(dev, sg, out, dir);
/* When it was forced or merged try again in a dumb way */
if (force_iommu || iommu_merge) {
out = dma_map_sg_nonforce(dev, sg, nents, dir);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 1e3862e..a7308b2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -728,12 +728,6 @@ int in_gate_area_no_task(unsigned long addr)
return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}
-void * __init alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
-{
- return __alloc_bootmem_core(pgdat->bdata, size,
- SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0);
-}
-
const char *arch_vma_name(struct vm_area_struct *vma)
{
if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 54dc054..e47a930 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1443,8 +1443,11 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
cfqq = *async_cfqq;
}
- if (!cfqq)
+ if (!cfqq) {
cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
+ if (!cfqq)
+ return NULL;
+ }
/*
* pin the queue now that it's allocated, scheduler exit will prune it
@@ -2053,7 +2056,7 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
del_timer_sync(&cfqd->idle_slice_timer);
del_timer_sync(&cfqd->idle_class_timer);
- blk_sync_queue(cfqd->queue);
+ kblockd_flush_work(&cfqd->unplug_work);
}
static void cfq_put_async_queues(struct cfq_data *cfqd)
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index f84093b..cae0a85 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -581,7 +581,7 @@ static int compat_blkdev_driver_ioctl(struct inode *inode, struct file *file,
{
int ret;
- switch (arg) {
+ switch (cmd) {
case HDIO_GET_UNMASKINTR:
case HDIO_GET_MULTCOUNT:
case HDIO_GET_KEEPSETTINGS:
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index b01dee3ae..56f26466 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -39,7 +39,7 @@
static void blk_unplug_work(struct work_struct *work);
static void blk_unplug_timeout(unsigned long data);
-static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
+static void drive_stat_acct(struct request *rq, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
static int __make_request(struct request_queue *q, struct bio *bio);
static struct io_context *current_io_context(gfp_t gfp_flags, int node);
@@ -791,7 +791,6 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
retval = atomic_dec_and_test(&bqt->refcnt);
if (retval) {
BUG_ON(bqt->busy);
- BUG_ON(!list_empty(&bqt->busy_list));
kfree(bqt->tag_index);
bqt->tag_index = NULL;
@@ -903,7 +902,6 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
if (init_tag_map(q, tags, depth))
goto fail;
- INIT_LIST_HEAD(&tags->busy_list);
tags->busy = 0;
atomic_set(&tags->refcnt, 1);
return tags;
@@ -954,6 +952,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
*/
q->queue_tags = tags;
q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+ INIT_LIST_HEAD(&q->tag_busy_list);
return 0;
fail:
kfree(tags);
@@ -1057,18 +1056,16 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
bqt->tag_index[tag] = NULL;
- /*
- * We use test_and_clear_bit's memory ordering properties here.
- * The tag_map bit acts as a lock for tag_index[bit], so we need
- * a barrer before clearing the bit (precisely: release semantics).
- * Could use clear_bit_unlock when it is merged.
- */
- if (unlikely(!test_and_clear_bit(tag, bqt->tag_map))) {
+ if (unlikely(!test_bit(tag, bqt->tag_map))) {
printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
__FUNCTION__, tag);
return;
}
-
+ /*
+ * The tag_map bit acts as a lock for tag_index[bit], so we need
+ * unlock memory barrier semantics.
+ */
+ clear_bit_unlock(tag, bqt->tag_map);
bqt->busy--;
}
@@ -1114,17 +1111,17 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
if (tag >= bqt->max_depth)
return 1;
- } while (test_and_set_bit(tag, bqt->tag_map));
+ } while (test_and_set_bit_lock(tag, bqt->tag_map));
/*
- * We rely on test_and_set_bit providing lock memory ordering semantics
- * (could use test_and_set_bit_lock when it is merged).
+ * We need lock ordering semantics given by test_and_set_bit_lock.
+ * See blk_queue_end_tag for details.
*/
rq->cmd_flags |= REQ_QUEUED;
rq->tag = tag;
bqt->tag_index[tag] = rq;
blkdev_dequeue_request(rq);
- list_add(&rq->queuelist, &bqt->busy_list);
+ list_add(&rq->queuelist, &q->tag_busy_list);
bqt->busy++;
return 0;
}
@@ -1145,11 +1142,10 @@ EXPORT_SYMBOL(blk_queue_start_tag);
**/
void blk_queue_invalidate_tags(struct request_queue *q)
{
- struct blk_queue_tag *bqt = q->queue_tags;
struct list_head *tmp, *n;
struct request *rq;
- list_for_each_safe(tmp, n, &bqt->busy_list) {
+ list_for_each_safe(tmp, n, &q->tag_busy_list) {
rq = list_entry_rq(tmp);
if (rq->tag == -1) {
@@ -1738,6 +1734,7 @@ EXPORT_SYMBOL(blk_stop_queue);
void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->unplug_timer);
+ kblockd_flush_work(&q->unplug_work);
}
EXPORT_SYMBOL(blk_sync_queue);
@@ -2341,7 +2338,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
if (blk_rq_tagged(rq))
blk_queue_end_tag(q, rq);
- drive_stat_acct(rq, rq->nr_sectors, 1);
+ drive_stat_acct(rq, 1);
__elv_add_request(q, rq, where, 0);
blk_start_queueing(q);
spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2736,7 +2733,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
EXPORT_SYMBOL(blkdev_issue_flush);
-static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
+static void drive_stat_acct(struct request *rq, int new_io)
{
int rw = rq_data_dir(rq);
@@ -2758,7 +2755,7 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
*/
static inline void add_request(struct request_queue * q, struct request * req)
{
- drive_stat_acct(req, req->nr_sectors, 1);
+ drive_stat_acct(req, 1);
/*
* elevator indicated where it wants this request to be
@@ -3015,7 +3012,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
req->biotail = bio;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
req->ioprio = ioprio_best(req->ioprio, prio);
- drive_stat_acct(req, nr_sectors, 0);
+ drive_stat_acct(req, 0);
if (!attempt_back_merge(q, req))
elv_merged_request(q, req, el_ret);
goto out;
@@ -3042,7 +3039,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
req->sector = req->hard_sector = bio->bi_sector;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
req->ioprio = ioprio_best(req->ioprio, prio);
- drive_stat_acct(req, nr_sectors, 0);
+ drive_stat_acct(req, 0);
if (!attempt_front_merge(q, req))
elv_merged_request(q, req, el_ret);
goto out;
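For reference (an editor's sketch, not code from this series): the tagging changes above replace the open-coded comment about test_and_clear_bit's memory ordering with explicit test_and_set_bit_lock()/clear_bit_unlock() calls, so each tag_map bit behaves like a tiny lock protecting the matching tag_index[] slot. The pattern, reduced to a minimal sketch with hypothetical names:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Each bit in 'map' guards the corresponding slot[] entry. */
struct tiny_tag_table {
	unsigned long	map;
	void		*slot[BITS_PER_LONG];
};

static int tiny_tag_acquire(struct tiny_tag_table *t, unsigned int tag, void *data)
{
	/* Acquire: later accesses to slot[tag] cannot move before this test. */
	if (test_and_set_bit_lock(tag, &t->map))
		return -EBUSY;	/* tag already owned by someone else */
	t->slot[tag] = data;
	return 0;
}

static void tiny_tag_release(struct tiny_tag_table *t, unsigned int tag)
{
	t->slot[tag] = NULL;
	/* Release: the store above is visible before the bit reads as clear. */
	clear_bit_unlock(tag, &t->map);
}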
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 49cf4cf..ed9b407 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -49,6 +49,9 @@
#define DRV_NAME "ahci"
#define DRV_VERSION "3.0"
+static int ahci_enable_alpm(struct ata_port *ap,
+ enum link_pm policy);
+static void ahci_disable_alpm(struct ata_port *ap);
enum {
AHCI_PCI_BAR = 5,
@@ -99,6 +102,7 @@ enum {
HOST_CAP_SSC = (1 << 14), /* Slumber capable */
HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
HOST_CAP_CLO = (1 << 24), /* Command List Override support */
+ HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
HOST_CAP_SNTF = (1 << 29), /* SNotification register */
HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
@@ -155,6 +159,8 @@ enum {
PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
/* PORT_CMD bits */
+ PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
+ PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
PORT_CMD_PMP = (1 << 17), /* PMP attached */
PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
@@ -178,13 +184,14 @@ enum {
AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
+ AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
/* ap->flags bits */
- AHCI_FLAG_NO_HOTPLUG = (1 << 24), /* ignore PxSERR.DIAG.N */
AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
- ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
+ ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
+ ATA_FLAG_IPM,
AHCI_LFLAG_COMMON = ATA_LFLAG_SKIP_D2H_BSY,
};
@@ -254,6 +261,11 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif
+static struct class_device_attribute *ahci_shost_attrs[] = {
+ &class_device_attr_link_power_management_policy,
+ NULL
+};
+
static struct scsi_host_template ahci_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
@@ -271,6 +283,7 @@ static struct scsi_host_template ahci_sht = {
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
+ .shost_attrs = ahci_shost_attrs,
};
static const struct ata_port_operations ahci_ops = {
@@ -302,6 +315,8 @@ static const struct ata_port_operations ahci_ops = {
.port_suspend = ahci_port_suspend,
.port_resume = ahci_port_resume,
#endif
+ .enable_pm = ahci_enable_alpm,
+ .disable_pm = ahci_disable_alpm,
.port_start = ahci_port_start,
.port_stop = ahci_port_stop,
@@ -836,6 +851,130 @@ static void ahci_power_up(struct ata_port *ap)
writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}
+static void ahci_disable_alpm(struct ata_port *ap)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ /* IPM bits should be disabled by libata-core */
+ /* get the existing command bits */
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* disable ALPM and ASP */
+ cmd &= ~PORT_CMD_ASP;
+ cmd &= ~PORT_CMD_ALPE;
+
+ /* force the interface back to active */
+ cmd |= PORT_CMD_ICC_ACTIVE;
+
+ /* write out new cmd value */
+ writel(cmd, port_mmio + PORT_CMD);
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* wait 10ms to be sure we've come out of any low power state */
+ msleep(10);
+
+ /* clear out any PhyRdy stuff from interrupt status */
+ writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
+
+ /* go ahead and clean out PhyRdy Change from Serror too */
+ ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
+
+ /*
+ * Clear flag to indicate that we should ignore all PhyRdy
+ * state changes
+ */
+ hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
+
+ /*
+ * Enable interrupts on Phy Ready.
+ */
+ pp->intr_mask |= PORT_IRQ_PHYRDY;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+ /*
+ * don't change the link pm policy - we can be called
+ * just to turn of link pm temporarily
+ */
+}
+
+static int ahci_enable_alpm(struct ata_port *ap,
+ enum link_pm policy)
+{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd;
+ struct ahci_port_priv *pp = ap->private_data;
+ u32 asp;
+
+ /* Make sure the host is capable of link power management */
+ if (!(hpriv->cap & HOST_CAP_ALPM))
+ return -EINVAL;
+
+ switch (policy) {
+ case MAX_PERFORMANCE:
+ case NOT_AVAILABLE:
+ /*
+ * if we came here with NOT_AVAILABLE,
+ * it just means this is the first time we
+ * have tried to enable - default to max performance,
+ * and let the user go to lower power modes on request.
+ */
+ ahci_disable_alpm(ap);
+ return 0;
+ case MIN_POWER:
+ /* configure HBA to enter SLUMBER */
+ asp = PORT_CMD_ASP;
+ break;
+ case MEDIUM_POWER:
+ /* configure HBA to enter PARTIAL */
+ asp = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Disable interrupts on Phy Ready. This keeps us from
+ * getting woken up due to spurious phy ready interrupts
+ * TBD - Hot plug should be done via polling now, is
+ * that even supported?
+ */
+ pp->intr_mask &= ~PORT_IRQ_PHYRDY;
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+ /*
+ * Set a flag to indicate that we should ignore all PhyRdy
+ * state changes since these can happen now whenever we
+ * change link state
+ */
+ hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
+
+ /* get the existing command bits */
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /*
+ * Set ASP based on Policy
+ */
+ cmd |= asp;
+
+ /*
+ * Setting this bit will instruct the HBA to aggressively
+ * enter a lower power link state when it's appropriate and
+ * based on the value set above for ASP
+ */
+ cmd |= PORT_CMD_ALPE;
+
+ /* write out new cmd value */
+ writel(cmd, port_mmio + PORT_CMD);
+ cmd = readl(port_mmio + PORT_CMD);
+
+ /* IPM bits should be set by libata-core */
+ return 0;
+}
+
#ifdef CONFIG_PM
static void ahci_power_down(struct ata_port *ap)
{
@@ -898,8 +1037,10 @@ static int ahci_reset_controller(struct ata_host *host)
* AHCI-specific, such as HOST_RESET.
*/
tmp = readl(mmio + HOST_CTL);
- if (!(tmp & HOST_AHCI_EN))
- writel(tmp | HOST_AHCI_EN, mmio + HOST_CTL);
+ if (!(tmp & HOST_AHCI_EN)) {
+ tmp |= HOST_AHCI_EN;
+ writel(tmp, mmio + HOST_CTL);
+ }
/* global controller reset */
if ((tmp & HOST_RESET) == 0) {
@@ -1153,15 +1294,8 @@ static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
tf.ctl &= ~ATA_SRST;
ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
- /* spec mandates ">= 2ms" before checking status.
- * We wait 150ms, because that was the magic delay used for
- * ATAPI devices in Hale Landis's ATADRVR, for the period of time
- * between when the ATA command register is written, and then
- * status is checked. Because waiting for "a while" before
- * checking status is fine, post SRST, we perform this magic
- * delay here as well.
- */
- msleep(150);
+ /* wait a while before checking status */
+ ata_wait_after_reset(ap, deadline);
rc = ata_wait_ready(ap, deadline);
/* link occupied, -ENODEV too is an error */
@@ -1509,6 +1643,17 @@ static void ahci_port_intr(struct ata_port *ap)
if (unlikely(resetting))
status &= ~PORT_IRQ_BAD_PMP;
+ /* If we are getting PhyRdy, this is
+ * just a power state change, we should
+ * clear out this, plus the PhyRdy/Comm
+ * Wake bits from Serror
+ */
+ if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
+ (status & PORT_IRQ_PHYRDY)) {
+ status &= ~PORT_IRQ_PHYRDY;
+ ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
+ }
+
if (unlikely(status & PORT_IRQ_ERROR)) {
ahci_error_intr(ap, status);
return;
@@ -2156,6 +2301,9 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ata_port_pbar_desc(ap, AHCI_PCI_BAR,
0x100 + ap->port_no * 0x80, "port");
+ /* set initial link pm policy */
+ ap->pm_policy = NOT_AVAILABLE;
+
/* standard SATA port setup */
if (hpriv->port_map & (1 << i))
ap->ioaddr.cmd_addr = port_mmio;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 081e3df..63035d7 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -620,6 +620,177 @@ void ata_dev_disable(struct ata_device *dev)
}
}
+static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
+{
+ struct ata_link *link = dev->link;
+ struct ata_port *ap = link->ap;
+ u32 scontrol;
+ unsigned int err_mask;
+ int rc;
+
+ /*
+ * disallow DIPM for drivers which haven't set
+ * ATA_FLAG_IPM. This is because when DIPM is enabled,
+ * phy ready will be set in the interrupt status on
+ * state changes, which will cause some drivers to
+ * think there are errors - additionally drivers will
+ * need to disable hot plug.
+ */
+ if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
+ ap->pm_policy = NOT_AVAILABLE;
+ return -EINVAL;
+ }
+
+ /*
+ * For DIPM, we will only enable it for the
+ * min_power setting.
+ *
+ * Why? Because disks are too stupid to know that
+ * if the host rejects a request to go to SLUMBER
+ * they should retry at PARTIAL, and instead they
+ * just give up. So, for medium_power to work at
+ * all, we need to only allow HIPM.
+ */
+ rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
+ if (rc)
+ return rc;
+
+ switch (policy) {
+ case MIN_POWER:
+ /* no restrictions on IPM transitions */
+ scontrol &= ~(0x3 << 8);
+ rc = sata_scr_write(link, SCR_CONTROL, scontrol);
+ if (rc)
+ return rc;
+
+ /* enable DIPM */
+ if (dev->flags & ATA_DFLAG_DIPM)
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_ENABLE, SATA_DIPM);
+ break;
+ case MEDIUM_POWER:
+ /* allow IPM to PARTIAL */
+ scontrol &= ~(0x1 << 8);
+ scontrol |= (0x2 << 8);
+ rc = sata_scr_write(link, SCR_CONTROL, scontrol);
+ if (rc)
+ return rc;
+
+ /* disable DIPM */
+ if (ata_dev_enabled(dev) && (dev->flags & ATA_DFLAG_DIPM))
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_DISABLE, SATA_DIPM);
+ break;
+ case NOT_AVAILABLE:
+ case MAX_PERFORMANCE:
+ /* disable all IPM transitions */
+ scontrol |= (0x3 << 8);
+ rc = sata_scr_write(link, SCR_CONTROL, scontrol);
+ if (rc)
+ return rc;
+
+ /* disable DIPM */
+ if (ata_dev_enabled(dev) && (dev->flags & ATA_DFLAG_DIPM))
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_DISABLE, SATA_DIPM);
+ break;
+ }
+
+ /* FIXME: handle SET FEATURES failure */
+ (void) err_mask;
+
+ return 0;
+}
+
+/**
+ * ata_dev_enable_pm - enable SATA interface power management
+ * @device - device to enable ipm for
+ * @policy - the link power management policy
+ *
+ * Enable SATA Interface power management. This will enable
+ * Device Interface Power Management (DIPM) for min_power
+ * policy, and then call driver specific callbacks for
+ * enabling Host Initiated Power management.
+ *
+ * Locking: Caller.
+ * Returns: -EINVAL if IPM is not supported, 0 otherwise.
+ */
+void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
+{
+ int rc = 0;
+ struct ata_port *ap = dev->link->ap;
+
+ /* set HIPM first, then DIPM */
+ if (ap->ops->enable_pm)
+ rc = ap->ops->enable_pm(ap, policy);
+ if (rc)
+ goto enable_pm_out;
+ rc = ata_dev_set_dipm(dev, policy);
+
+enable_pm_out:
+ if (rc)
+ ap->pm_policy = MAX_PERFORMANCE;
+ else
+ ap->pm_policy = policy;
+ return /* rc */; /* hopefully we can use 'rc' eventually */
+}
+
+/**
+ * ata_dev_disable_pm - disable SATA interface power management
+ * @device - device to enable ipm for
+ *
+ * Disable SATA Interface power management. This will disable
+ * Device Interface Power Management (DIPM) without changing
+ * policy, call driver specific callbacks for disabling Host
+ * Initiated Power management.
+ *
+ * Locking: Caller.
+ * Returns: void
+ */
+static void ata_dev_disable_pm(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+
+ ata_dev_set_dipm(dev, MAX_PERFORMANCE);
+ if (ap->ops->disable_pm)
+ ap->ops->disable_pm(ap);
+}
+
+void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
+{
+ ap->pm_policy = policy;
+ ap->link.eh_info.action |= ATA_EHI_LPM;
+ ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
+ ata_port_schedule_eh(ap);
+}
+
+static void ata_lpm_enable(struct ata_host *host)
+{
+ struct ata_link *link;
+ struct ata_port *ap;
+ struct ata_device *dev;
+ int i;
+
+ for (i = 0; i < host->n_ports; i++) {
+ ap = host->ports[i];
+ ata_port_for_each_link(link, ap) {
+ ata_link_for_each_dev(dev, link)
+ ata_dev_disable_pm(dev);
+ }
+ }
+}
+
+static void ata_lpm_disable(struct ata_host *host)
+{
+ int i;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+ ata_lpm_schedule(ap, ap->pm_policy);
+ }
+}
+
+
/**
* ata_devchk - PATA device presence detection
* @ap: ATA channel to examine
@@ -2101,6 +2272,13 @@ int ata_dev_configure(struct ata_device *dev)
if (dev->flags & ATA_DFLAG_LBA48)
dev->max_sectors = ATA_MAX_SECTORS_LBA48;
+ if (!(dev->horkage & ATA_HORKAGE_IPM)) {
+ if (ata_id_has_hipm(dev->id))
+ dev->flags |= ATA_DFLAG_HIPM;
+ if (ata_id_has_dipm(dev->id))
+ dev->flags |= ATA_DFLAG_DIPM;
+ }
+
if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
/* Let the user know. We don't want to disallow opens for
rescue purposes, or in case the vendor is just a blithering
@@ -2126,6 +2304,13 @@ int ata_dev_configure(struct ata_device *dev)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
dev->max_sectors);
+ if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
+ dev->horkage |= ATA_HORKAGE_IPM;
+
+ /* reset link pm_policy for this port to no pm */
+ ap->pm_policy = MAX_PERFORMANCE;
+ }
+
if (ap->ops->dev_config)
ap->ops->dev_config(dev);
@@ -2219,6 +2404,25 @@ int ata_bus_probe(struct ata_port *ap)
tries[dev->devno] = ATA_PROBE_MAX_TRIES;
retry:
+ ata_link_for_each_dev(dev, &ap->link) {
+ /* If we issue an SRST then an ATA drive (not ATAPI)
+ * may change configuration and be in PIO0 timing. If
+ * we do a hard reset (or are coming from power on)
+ * this is true for ATA or ATAPI. Until we've set a
+ * suitable controller mode we should not touch the
+ * bus as we may be talking too fast.
+ */
+ dev->pio_mode = XFER_PIO_0;
+
+ /* If the controller has a pio mode setup function
+ * then use it to set the chipset to rights. Don't
+ * touch the DMA setup as that will be dealt with when
+ * configuring devices.
+ */
+ if (ap->ops->set_piomode)
+ ap->ops->set_piomode(ap, dev);
+ }
+
/* reset and determine device classes */
ap->ops->phy_reset(ap);
@@ -2234,12 +2438,6 @@ int ata_bus_probe(struct ata_port *ap)
ata_port_probe(ap);
- /* after the reset the device state is PIO 0 and the controller
- state is undefined. Record the mode */
-
- ata_link_for_each_dev(dev, &ap->link)
- dev->pio_mode = XFER_PIO_0;
-
/* read IDENTIFY page and configure devices. We have to do the identify
specific sequence bass-ackwards so that PDIAG- is released by
the slave device */
@@ -3118,6 +3316,55 @@ int ata_busy_sleep(struct ata_port *ap,
}
/**
+ * ata_wait_after_reset - wait before checking status after reset
+ * @ap: port containing status register to be polled
+ * @deadline: deadline jiffies for the operation
+ *
+ * After reset, we need to pause a while before reading status.
+ * Also, certain combination of controller and device report 0xff
+ * for some duration (e.g. until SATA PHY is up and running)
+ * which is interpreted as empty port in ATA world. This
+ * function also waits for such devices to get out of 0xff
+ * status.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
+{
+ unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
+
+ if (time_before(until, deadline))
+ deadline = until;
+
+ /* Spec mandates ">= 2ms" before checking status. We wait
+ * 150ms, because that was the magic delay used for ATAPI
+ * devices in Hale Landis's ATADRVR, for the period of time
+ * between when the ATA command register is written, and then
+ * status is checked. Because waiting for "a while" before
+ * checking status is fine, post SRST, we perform this magic
+ * delay here as well.
+ *
+ * Old drivers/ide uses the 2mS rule and then waits for ready.
+ */
+ msleep(150);
+
+ /* Wait for 0xff to clear. Some SATA devices take a long time
+ * to clear 0xff after reset. For example, HHD424020F7SV00
+ * iVDR needs >= 800ms, while Quantum GoVault needs even more
+ * than that.
+ */
+ while (1) {
+ u8 status = ata_chk_status(ap);
+
+ if (status != 0xff || time_after(jiffies, deadline))
+ return;
+
+ msleep(50);
+ }
+}
+
+/**
* ata_wait_ready - sleep until BSY clears, or timeout
* @ap: port containing status register to be polled
* @deadline: deadline jiffies for the operation
@@ -3223,8 +3470,6 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
unsigned long deadline)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- struct ata_device *dev;
- int i = 0;
DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
@@ -3235,36 +3480,8 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
udelay(20); /* FIXME: flush */
iowrite8(ap->ctl, ioaddr->ctl_addr);
- /* If we issued an SRST then an ATA drive (not ATAPI)
- * may have changed configuration and be in PIO0 timing. If
- * we did a hard reset (or are coming from power on) this is
- * true for ATA or ATAPI. Until we've set a suitable controller
- * mode we should not touch the bus as we may be talking too fast.
- */
-
- ata_link_for_each_dev(dev, &ap->link)
- dev->pio_mode = XFER_PIO_0;
-
- /* If the controller has a pio mode setup function then use
- it to set the chipset to rights. Don't touch the DMA setup
- as that will be dealt with when revalidating */
- if (ap->ops->set_piomode) {
- ata_link_for_each_dev(dev, &ap->link)
- if (devmask & (1 << i++))
- ap->ops->set_piomode(ap, dev);
- }
-
- /* spec mandates ">= 2ms" before checking status.
- * We wait 150ms, because that was the magic delay used for
- * ATAPI devices in Hale Landis's ATADRVR, for the period of time
- * between when the ATA command register is written, and then
- * status is checked. Because waiting for "a while" before
- * checking status is fine, post SRST, we perform this magic
- * delay here as well.
- *
- * Old drivers/ide uses the 2mS rule and then waits for ready
- */
- msleep(150);
+ /* wait a while before checking status */
+ ata_wait_after_reset(ap, deadline);
/* Before we perform post reset processing we want to see if
* the bus shows 0xFF because the odd clown forgets the D7
@@ -3691,8 +3908,8 @@ int sata_std_hardreset(struct ata_link *link, unsigned int *class,
return 0;
}
- /* wait a while before checking status, see SRST for more info */
- msleep(150);
+ /* wait a while before checking status */
+ ata_wait_after_reset(ap, deadline);
/* If PMP is supported, we have to do follow-up SRST. Note
* that some PMPs don't send D2H Reg FIS after hardreset at
@@ -3992,6 +4209,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, },
{ "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, },
{ "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
+ { "Maxtor 7V300F0", "VA111900", ATA_HORKAGE_NONCQ, },
/* devices which puke on READ_NATIVE_MAX */
{ "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
@@ -4689,6 +4907,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
* data in this function or read data in ata_sg_clean.
*/
offset = lsg->offset + lsg->length - qc->pad_len;
+ sg_init_table(psg, 1);
sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
qc->pad_len, offset_in_page(offset));
@@ -5594,6 +5813,9 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
* taken care of.
*/
if (ap->ops->error_handler) {
+ struct ata_device *dev = qc->dev;
+ struct ata_eh_info *ehi = &dev->link->eh_info;
+
WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
if (unlikely(qc->err_mask))
@@ -5612,6 +5834,27 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
if (qc->flags & ATA_QCFLAG_RESULT_TF)
fill_result_tf(qc);
+ /* Some commands need post-processing after successful
+ * completion.
+ */
+ switch (qc->tf.command) {
+ case ATA_CMD_SET_FEATURES:
+ if (qc->tf.feature != SETFEATURES_WC_ON &&
+ qc->tf.feature != SETFEATURES_WC_OFF)
+ break;
+ /* fall through */
+ case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
+ case ATA_CMD_SET_MULTI: /* multi_count changed */
+ /* revalidate device */
+ ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
+ ata_port_schedule_eh(ap);
+ break;
+
+ case ATA_CMD_SLEEP:
+ dev->flags |= ATA_DFLAG_SLEEPING;
+ break;
+ }
+
__ata_qc_complete(qc);
} else {
if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
@@ -5749,6 +5992,14 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
qc->flags &= ~ATA_QCFLAG_DMAMAP;
}
+ /* if device is sleeping, schedule softreset and abort the link */
+ if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
+ link->eh_info.action |= ATA_EH_SOFTRESET;
+ ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
+ ata_link_abort(link);
+ return;
+ }
+
ap->ops->qc_prep(qc);
qc->err_mask |= ap->ops->qc_issue(qc);
@@ -6296,6 +6547,12 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
int rc;
+ /*
+ * disable link pm on all ports before requesting
+ * any pm activity
+ */
+ ata_lpm_enable(host);
+
rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
if (rc == 0)
host->dev->power.power_state = mesg;
@@ -6318,6 +6575,9 @@ void ata_host_resume(struct ata_host *host)
ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
host->dev->power.power_state = PMSG_ON;
+
+ /* reenable link pm */
+ ata_lpm_disable(host);
}
#endif
@@ -6860,6 +7120,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
struct ata_port *ap = host->ports[i];
ata_scsi_scan_host(ap, 1);
+ ata_lpm_schedule(ap, ap->pm_policy);
}
return 0;
@@ -7256,7 +7517,6 @@ const struct ata_port_info ata_dummy_port_info = {
* likely to change as new drivers are added and updated.
* Do not depend on ABI/API stability.
*/
-
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
@@ -7326,6 +7586,7 @@ EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
+EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
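As a reading aid (an editor's sketch, not part of the patch): ata_dev_set_dipm() above restricts link power states through bits 9:8 of SControl's IPM field (bit 8 set disallows transitions to PARTIAL, bit 9 set disallows transitions to SLUMBER, 0 allows both) and only then enables or disables the device-side DIPM feature. The SControl mapping it implements, condensed, using the enum link_pm values introduced elsewhere in this series:

/* 'scontrol' would come from sata_scr_read(link, SCR_CONTROL, &scontrol). */
static u32 lpm_scontrol_ipm(u32 scontrol, enum link_pm policy)
{
	scontrol &= ~(0x3 << 8);	/* clear bits 9:8 of the IPM field */

	switch (policy) {
	case MIN_POWER:
		/* 0: the link may enter both PARTIAL and SLUMBER */
		break;
	case MEDIUM_POWER:
		scontrol |= (0x2 << 8);	/* PARTIAL allowed, SLUMBER disallowed */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
	default:
		scontrol |= (0x3 << 8);	/* neither low-power state allowed */
		break;
	}
	return scontrol;
}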
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 8cb35bb..fefea74 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2083,6 +2083,25 @@ int ata_eh_reset(struct ata_link *link, int classify,
ata_eh_about_to_do(link, NULL, ehc->i.action & ATA_EH_RESET_MASK);
+ ata_link_for_each_dev(dev, link) {
+ /* If we issue an SRST then an ATA drive (not ATAPI)
+ * may change configuration and be in PIO0 timing. If
+ * we do a hard reset (or are coming from power on)
+ * this is true for ATA or ATAPI. Until we've set a
+ * suitable controller mode we should not touch the
+ * bus as we may be talking too fast.
+ */
+ dev->pio_mode = XFER_PIO_0;
+
+ /* If the controller has a pio mode setup function
+ * then use it to set the chipset to rights. Don't
+ * touch the DMA setup as that will be dealt with when
+ * configuring devices.
+ */
+ if (ap->ops->set_piomode)
+ ap->ops->set_piomode(ap, dev);
+ }
+
/* Determine which reset to use and record in ehc->i.action.
* prereset() may examine and modify it.
*/
@@ -2208,9 +2227,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
ata_link_for_each_dev(dev, link) {
/* After the reset, the device state is PIO 0
* and the controller state is undefined.
- * Record the mode.
+ * Reset also wakes up drives from sleeping
+ * mode.
*/
dev->pio_mode = XFER_PIO_0;
+ dev->flags &= ~ATA_DFLAG_SLEEPING;
if (ata_link_offline(link))
continue;
@@ -2416,7 +2437,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
/* give it just one more chance */
ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
case -EIO:
- if (ehc->tries[dev->devno] == 1) {
+ if (ehc->tries[dev->devno] == 1 && dev->pio_mode > XFER_PIO_0) {
/* This is the last chance, better to slow
* down than lose it.
*/
@@ -2607,6 +2628,10 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
ehc->i.flags &= ~ATA_EHI_SETMODE;
}
+ if (ehc->i.action & ATA_EHI_LPM)
+ ata_link_for_each_dev(dev, link)
+ ata_dev_enable_pm(dev, ap->pm_policy);
+
/* this link is okay now */
ehc->i.flags = 0;
continue;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f5d5420..93bd36c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -110,6 +110,74 @@ static struct scsi_transport_template ata_scsi_transport_template = {
};
+static const struct {
+ enum link_pm value;
+ const char *name;
+} link_pm_policy[] = {
+ { NOT_AVAILABLE, "max_performance" },
+ { MIN_POWER, "min_power" },
+ { MAX_PERFORMANCE, "max_performance" },
+ { MEDIUM_POWER, "medium_power" },
+};
+
+const char *ata_scsi_lpm_get(enum link_pm policy)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(link_pm_policy); i++)
+ if (link_pm_policy[i].value == policy)
+ return link_pm_policy[i].name;
+
+ return NULL;
+}
+
+static ssize_t ata_scsi_lpm_put(struct class_device *class_dev,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(class_dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ enum link_pm policy = 0;
+ int i;
+
+ /*
+ * we are skipping array location 0 on purpose - this
+ * is because a value of NOT_AVAILABLE is displayed
+ * to the user as max_performance, but when the user
+ * writes "max_performance", they actually want the
+ * value to match MAX_PERFORMANCE.
+ */
+ for (i = 1; i < ARRAY_SIZE(link_pm_policy); i++) {
+ const int len = strlen(link_pm_policy[i].name);
+ if (strncmp(link_pm_policy[i].name, buf, len) == 0 &&
+ buf[len] == '\n') {
+ policy = link_pm_policy[i].value;
+ break;
+ }
+ }
+ if (!policy)
+ return -EINVAL;
+
+ ata_lpm_schedule(ap, policy);
+ return count;
+}
+
+static ssize_t
+ata_scsi_lpm_show(struct class_device *class_dev, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(class_dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ const char *policy =
+ ata_scsi_lpm_get(ap->pm_policy);
+
+ if (!policy)
+ return -EINVAL;
+
+ return snprintf(buf, 23, "%s\n", policy);
+}
+CLASS_DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
+ ata_scsi_lpm_show, ata_scsi_lpm_put);
+EXPORT_SYMBOL_GPL(class_device_attr_link_power_management_policy);
+
static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
void (*done)(struct scsi_cmnd *))
{
@@ -1361,33 +1429,10 @@ nothing_to_do:
static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- struct ata_eh_info *ehi = &qc->dev->link->eh_info;
struct scsi_cmnd *cmd = qc->scsicmd;
u8 *cdb = cmd->cmnd;
int need_sense = (qc->err_mask != 0);
- /* We snoop the SET_FEATURES - Write Cache ON/OFF command, and
- * schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE
- * cache
- */
- if (ap->ops->error_handler && !need_sense) {
- switch (qc->tf.command) {
- case ATA_CMD_SET_FEATURES:
- if ((qc->tf.feature == SETFEATURES_WC_ON) ||
- (qc->tf.feature == SETFEATURES_WC_OFF)) {
- ehi->action |= ATA_EH_REVALIDATE;
- ata_port_schedule_eh(ap);
- }
- break;
-
- case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
- case ATA_CMD_SET_MULTI: /* multi_count changed */
- ehi->action |= ATA_EH_REVALIDATE;
- ata_port_schedule_eh(ap);
- break;
- }
- }
-
/* For ATA pass thru (SAT) commands, generate a sense block if
* user mandated it or if there's an error. Note that if we
* generate because the user forced us to, a check condition
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 90df58a..0e6cf3a 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -101,6 +101,8 @@ extern int sata_link_init_spd(struct ata_link *link);
extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
extern struct ata_port *ata_port_alloc(struct ata_host *host);
+extern void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy);
+extern void ata_lpm_schedule(struct ata_port *ap, enum link_pm);
/* libata-acpi.c */
#ifdef CONFIG_ATA_ACPI
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 0f6f7bc..e4542ab 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -181,7 +181,7 @@ static void pacpi_set_piomode(struct ata_port *ap, struct ata_device *adev)
int unit = adev->devno;
struct pata_acpi *acpi = ap->private_data;
- if(!(acpi->gtm.flags & 0x10))
+ if (!(acpi->gtm.flags & 0x10))
unit = 0;
/* Now stuff the nS values into the structure */
@@ -202,7 +202,7 @@ static void pacpi_set_dmamode(struct ata_port *ap, struct ata_device *adev)
int unit = adev->devno;
struct pata_acpi *acpi = ap->private_data;
- if(!(acpi->gtm.flags & 0x10))
+ if (!(acpi->gtm.flags & 0x10))
unit = 0;
/* Now stuff the nS values into the structure */
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index b9a17eb..d0e2e50 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -215,6 +215,8 @@ static int ns87415_check_atapi_dma(struct ata_queued_cmd *qc)
#include <asm/superio.h>
+#define SUPERIO_IDE_MAX_RETRIES 25
+
/**
* ns87560_read_buggy - workaround buggy Super I/O chip
* @port: Port to read
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 6b07b5b..f9b485a 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -449,7 +449,7 @@ static int optiplus_with_udma(struct pci_dev *pdev)
/* Find function 1 */
dev1 = pci_get_device(0x1045, 0xC701, NULL);
- if(dev1 == NULL)
+ if (dev1 == NULL)
return 0;
/* Rev must be >= 0x10 */
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 5db2013..fd36099 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -74,8 +74,7 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d
return ata_do_set_mode(link, r_failed_dev);
if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV,
- ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0)
- {
+ ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) {
/* Suspicious match, but could be two cards from
the same vendor - check serial */
if (memcmp(master->id + ATA_ID_SERNO, slave->id + ATA_ID_SERNO,
@@ -248,7 +247,8 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
goto next_entry;
io_base = pdev->io.BasePort1;
ctl_base = pdev->io.BasePort1 + 0x0e;
- } else goto next_entry;
+ } else
+ goto next_entry;
/* If we've got this far, we're done */
break;
}
@@ -285,8 +285,8 @@ next_entry:
printk(KERN_WARNING DRV_NAME ": second channel not yet supported.\n");
/*
- * Having done the PCMCIA plumbing the ATA side is relatively
- * sane.
+ * Having done the PCMCIA plumbing the ATA side is relatively
+ * sane.
*/
ret = -ENOMEM;
host = ata_host_alloc(&pdev->dev, 1);
@@ -363,7 +363,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
- PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
+ PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0100), /* Viking CFA */
PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar, Viking CFA */
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 3d3f155..2622577 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -348,7 +348,7 @@ static unsigned long pdc2027x_mode_filter(struct ata_device *adev, unsigned long
ata_id_c_string(pair->id, model_num, ATA_ID_PROD,
ATA_ID_PROD_LEN + 1);
/* If the master is a maxtor in UDMA6 then the slave should not use UDMA 6 */
- if(strstr(model_num, "Maxtor") == 0 && pair->dma_mode == XFER_UDMA_6)
+ if (strstr(model_num, "Maxtor") == 0 && pair->dma_mode == XFER_UDMA_6)
mask &= ~ (1 << (6 + ATA_SHIFT_UDMA));
return ata_pci_default_filter(adev, mask);
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 65d9516..bc7c2d5 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -351,9 +351,9 @@ static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id
struct pci_dev *bridge = dev->bus->self;
/* Don't grab anything behind a Promise I2O RAID */
if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
- if( bridge->device == PCI_DEVICE_ID_INTEL_I960)
+ if (bridge->device == PCI_DEVICE_ID_INTEL_I960)
return -ENODEV;
- if( bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
+ if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
return -ENODEV;
}
}
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 5557613..ea2ef9f 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -570,17 +570,8 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
udelay(20);
out_be32(ioaddr->ctl_addr, ap->ctl);
- /* spec mandates ">= 2ms" before checking status.
- * We wait 150ms, because that was the magic delay used for
- * ATAPI devices in Hale Landis's ATADRVR, for the period of time
- * between when the ATA command register is written, and then
- * status is checked. Because waiting for "a while" before
- * checking status is fine, post SRST, we perform this magic
- * delay here as well.
- *
- * Old drivers/ide uses the 2mS rule and then waits for ready
- */
- msleep(150);
+ /* wait a while before checking status */
+ ata_wait_after_reset(ap, deadline);
/* Before we perform post reset processing we want to see if
* the bus shows 0xFF because the odd clown forgets the D7
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index ea7a9a6..a4175fb 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -176,7 +176,7 @@ static int via_cable_detect(struct ata_port *ap) {
if ((config->flags & VIA_UDMA) < VIA_UDMA_66)
return ATA_CBL_PATA40;
/* UDMA 66 chips have only drive side logic */
- else if((config->flags & VIA_UDMA) < VIA_UDMA_100)
+ else if ((config->flags & VIA_UDMA) < VIA_UDMA_100)
return ATA_CBL_PATA_UNK;
/* UDMA 100 or later */
pci_read_config_dword(pdev, 0x50, &ata66);
diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
index 549cbbe..311cdb3 100644
--- a/drivers/ata/pata_winbond.c
+++ b/drivers/ata/pata_winbond.c
@@ -279,7 +279,7 @@ static __init int winbond_init(void)
if (request_region(port, 2, "pata_winbond")) {
ret = winbond_init_one(port);
- if(ret <= 0)
+ if (ret <= 0)
release_region(port, 2);
else ct+= ret;
}
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 199f7e1..bd4c2a3 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -47,10 +47,10 @@
#define DRV_VERSION "1.0"
/* macro to calculate base address for ATA regs */
-#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))
+#define ADMA_ATA_REGS(base, port_no) ((base) + ((port_no) * 0x40))
/* macro to calculate base address for ADMA regs */
-#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20))
+#define ADMA_REGS(base, port_no) ((base) + 0x80 + ((port_no) * 0x20))
/* macro to obtain addresses from ata_port */
#define ADMA_PORT_REGS(ap) \
@@ -128,7 +128,7 @@ struct adma_port_priv {
adma_state_t state;
};
-static int adma_ata_init_one (struct pci_dev *pdev,
+static int adma_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
static int adma_port_start(struct ata_port *ap);
static void adma_host_stop(struct ata_host *host);
@@ -340,8 +340,8 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
buf[i++] = 0; /* pPKLW */
buf[i++] = 0; /* reserved */
- *(__le32 *)(buf + i)
- = (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
+ *(__le32 *)(buf + i) =
+ (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
i += 4;
VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4,
@@ -617,7 +617,7 @@ static int adma_port_start(struct ata_port *ap)
return -ENOMEM;
/* paranoia? */
if ((pp->pkt_dma & 7) != 0) {
- printk("bad alignment for pp->pkt_dma: %08x\n",
+ printk(KERN_ERR "bad alignment for pp->pkt_dma: %08x\n",
(u32)pp->pkt_dma);
return -ENOMEM;
}
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 08595f3..323c087 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -143,7 +143,7 @@ static const int scr_map[] = {
[SCR_CONTROL] = 2,
};
-static void __iomem * inic_port_base(struct ata_port *ap)
+static void __iomem *inic_port_base(struct ata_port *ap)
{
return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
}
@@ -448,7 +448,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
struct ata_taskfile tf;
/* wait a while before checking status */
- msleep(150);
+ ata_wait_after_reset(ap, deadline);
rc = ata_wait_ready(ap, deadline);
/* link occupied, -ENODEV too is an error */
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index b39648f..a43f64d 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1156,7 +1156,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
-static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
+static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
(last ? CRQB_CMD_LAST : 0);
@@ -2429,7 +2429,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
struct mv_host_priv *hpriv = host->private_data;
u32 hp_flags = hpriv->hp_flags;
- switch(board_idx) {
+ switch (board_idx) {
case chip_5080:
hpriv->ops = &mv5xxx_ops;
hp_flags |= MV_HP_GEN_I;
@@ -2510,7 +2510,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
break;
default:
- printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
+ dev_printk(KERN_ERR, &pdev->dev,
+ "BUG: invalid board index %u\n", board_idx);
return 1;
}
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index f1b422f..35b2df2 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -291,7 +291,7 @@ struct nv_swncq_port_priv {
};
-#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
+#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
@@ -884,8 +884,9 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
/* Notifier bits set without a command may indicate the drive
is misbehaving. Raise host state machine violation on this
condition. */
- ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
- cpb_num);
+ ata_port_printk(ap, KERN_ERR,
+ "notifier for tag %d with no cmd?\n",
+ cpb_num);
ehi->err_mask |= AC_ERR_HSM;
ehi->action |= ATA_EH_SOFTRESET;
ata_port_freeze(ap);
@@ -1012,7 +1013,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
u32 check_commands;
int pos, error = 0;
- if(ata_tag_valid(ap->link.active_tag))
+ if (ata_tag_valid(ap->link.active_tag))
check_commands = 1 << ap->link.active_tag;
else
check_commands = ap->link.sactive;
@@ -1021,14 +1022,14 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
while ((pos = ffs(check_commands)) && !error) {
pos--;
error = nv_adma_check_cpb(ap, pos,
- notifier_error & (1 << pos) );
- check_commands &= ~(1 << pos );
+ notifier_error & (1 << pos));
+ check_commands &= ~(1 << pos);
}
}
}
}
- if(notifier_clears[0] || notifier_clears[1]) {
+ if (notifier_clears[0] || notifier_clears[1]) {
/* Note: Both notifier clear registers must be written
if either is set, even if one is zero, according to NVIDIA. */
struct nv_adma_port_priv *pp = host->ports[0]->private_data;
@@ -1061,7 +1062,7 @@ static void nv_adma_freeze(struct ata_port *ap)
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
mmio + NV_ADMA_CTL);
- readw(mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
}
static void nv_adma_thaw(struct ata_port *ap)
@@ -1079,7 +1080,7 @@ static void nv_adma_thaw(struct ata_port *ap)
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
mmio + NV_ADMA_CTL);
- readw(mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
}
static void nv_adma_irq_clear(struct ata_port *ap)
@@ -1119,7 +1120,7 @@ static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
struct nv_adma_port_priv *pp = qc->ap->private_data;
- if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
+ if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
ata_bmdma_post_internal_cmd(qc);
}
@@ -1165,7 +1166,7 @@ static int nv_adma_port_start(struct ata_port *ap)
pp->cpb_dma = mem_dma;
writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
- writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
+ writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
@@ -1189,15 +1190,15 @@ static int nv_adma_port_start(struct ata_port *ap)
/* clear GO for register mode, enable interrupt */
tmp = readw(mmio + NV_ADMA_CTL);
- writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
- NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
+ writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
+ NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readw( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
udelay(1);
writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readw( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
return 0;
}
@@ -1237,7 +1238,7 @@ static int nv_adma_port_resume(struct ata_port *ap)
/* set CPB block location */
writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
- writel((pp->cpb_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
+ writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
/* clear any outstanding interrupt conditions */
writew(0xffff, mmio + NV_ADMA_STAT);
@@ -1250,15 +1251,15 @@ static int nv_adma_port_resume(struct ata_port *ap)
/* clear GO for register mode, enable interrupt */
tmp = readw(mmio + NV_ADMA_CTL);
- writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
- NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
+ writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
+ NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readw( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
udelay(1);
writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readw( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
return 0;
}
@@ -1342,7 +1343,8 @@ static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
idx = 0;
ata_for_each_sg(sg, qc) {
- aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
+ aprd = (idx < 5) ? &cpb->aprd[idx] :
+ &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
nv_adma_fill_aprd(qc, sg, idx, aprd);
idx++;
}
@@ -1359,12 +1361,12 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
/* ADMA engine can only be used for non-ATAPI DMA commands,
or interrupt-driven no-data commands, where a result taskfile
is not required. */
- if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
+ if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
(qc->tf.flags & ATA_TFLAG_POLLING) ||
(qc->flags & ATA_QCFLAG_RESULT_TF))
return 1;
- if((qc->flags & ATA_QCFLAG_DMAMAP) ||
+ if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
(qc->tf.protocol == ATA_PROT_NODATA))
return 0;
@@ -1401,14 +1403,14 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
- if(qc->flags & ATA_QCFLAG_DMAMAP) {
+ if (qc->flags & ATA_QCFLAG_DMAMAP) {
nv_adma_fill_sg(qc, cpb);
ctl_flags |= NV_CPB_CTL_APRD_VALID;
} else
memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
- /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
- finished filling in all of the contents */
+ /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
+ until we are finished filling in all of the contents */
wmb();
cpb->ctl_flags = ctl_flags;
wmb();
@@ -1435,16 +1437,16 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
and (number of cpbs to append -1) in top 8 bits */
wmb();
- if(curr_ncq != pp->last_issue_ncq) {
- /* Seems to need some delay before switching between NCQ and non-NCQ
- commands, else we get command timeouts and such. */
+ if (curr_ncq != pp->last_issue_ncq) {
+ /* Seems to need some delay before switching between NCQ and
+ non-NCQ commands, else we get command timeouts and such. */
udelay(20);
pp->last_issue_ncq = curr_ncq;
}
writew(qc->tag, mmio + NV_ADMA_APPEND);
- DPRINTK("Issued tag %u\n",qc->tag);
+ DPRINTK("Issued tag %u\n", qc->tag);
return 0;
}
@@ -1641,12 +1643,12 @@ static void nv_error_handler(struct ata_port *ap)
static void nv_adma_error_handler(struct ata_port *ap)
{
struct nv_adma_port_priv *pp = ap->private_data;
- if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
+ if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
void __iomem *mmio = pp->ctl_block;
int i;
u16 tmp;
- if(ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
+ if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
@@ -1654,16 +1656,17 @@ static void nv_adma_error_handler(struct ata_port *ap)
u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
- ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
+ ata_port_printk(ap, KERN_ERR,
+ "EH in ADMA mode, notifier 0x%X "
"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
"next cpb count 0x%X next cpb idx 0x%x\n",
notifier, notifier_error, gen_ctl, status,
cpb_count, next_cpb_idx);
- for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
+ for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
struct nv_adma_cpb *cpb = &pp->cpb[i];
- if( (ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
- ap->link.sactive & (1 << i) )
+ if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
+ ap->link.sactive & (1 << i))
ata_port_printk(ap, KERN_ERR,
"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
i, cpb->ctl_flags, cpb->resp_flags);
@@ -1673,8 +1676,9 @@ static void nv_adma_error_handler(struct ata_port *ap)
/* Push us back into port register mode for error handling. */
nv_adma_register_mode(ap);
- /* Mark all of the CPBs as invalid to prevent them from being executed */
- for( i=0;i<NV_ADMA_MAX_CPBS;i++)
+ /* Mark all of the CPBs as invalid to prevent them from
+ being executed */
+ for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
/* clear CPB fetch count */
@@ -1683,10 +1687,10 @@ static void nv_adma_error_handler(struct ata_port *ap)
/* Reset channel */
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readw( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
udelay(1);
writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
- readw( mmio + NV_ADMA_CTL ); /* flush posted write */
+ readw(mmio + NV_ADMA_CTL); /* flush posted write */
}
ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
@@ -2350,9 +2354,9 @@ static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
return IRQ_RETVAL(handled);
}
-static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version = 0;
+ static int printed_version;
const struct ata_port_info *ppi[] = { NULL, NULL };
struct ata_host *host;
struct nv_host_priv *hpriv;
@@ -2364,7 +2368,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
// Make sure this is a SATA controller by counting the number of bars
// (NVIDIA SATA controllers will always have six bars). Otherwise,
// it's an IDE controller and we ignore it.
- for (bar=0; bar<6; bar++)
+ for (bar = 0; bar < 6; bar++)
if (pci_resource_start(pdev, bar) == 0)
return -ENODEV;
@@ -2381,6 +2385,14 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
type = ADMA;
}
+ if (type == SWNCQ) {
+ if (swncq_enabled)
+ dev_printk(KERN_NOTICE, &pdev->dev,
+ "Using SWNCQ mode\n");
+ else
+ type = GENERIC;
+ }
+
ppi[0] = &nv_port_info[type];
rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
if (rc)
@@ -2422,10 +2434,8 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
rc = nv_adma_host_init(host);
if (rc)
return rc;
- } else if (type == SWNCQ && swncq_enabled) {
- dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
+ } else if (type == SWNCQ)
nv_swncq_host_init(host);
- }
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
@@ -2440,37 +2450,37 @@ static int nv_pci_device_resume(struct pci_dev *pdev)
int rc;
rc = ata_pci_device_do_resume(pdev);
- if(rc)
+ if (rc)
return rc;
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
- if(hpriv->type >= CK804) {
+ if (hpriv->type >= CK804) {
u8 regval;
pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
- if(hpriv->type == ADMA) {
+ if (hpriv->type == ADMA) {
u32 tmp32;
struct nv_adma_port_priv *pp;
/* enable/disable ADMA on the ports appropriately */
pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
pp = host->ports[0]->private_data;
- if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
- NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
+ NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
else
tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
- NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
+ NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
pp = host->ports[1]->private_data;
- if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
+ if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
- NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+ NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
else
tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
- NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+ NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
}
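/*
 * Illustrative aside on the sata_nv.c hunks above (not part of the
 * patch): the CPB base-address writes keep the form
 * "(mem_dma >> 16) >> 16" for the high register rather than a single
 * ">> 32".  If dma_addr_t happens to be a 32-bit type, shifting it by
 * 32 is undefined and draws a compiler warning, while two 16-bit
 * shifts are well defined and simply yield 0.  A minimal standalone
 * sketch, assuming a 32-bit address type purely for demonstration:
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t demo_dma_addr_t;	/* assumption: 32-bit DMA addresses */

int main(void)
{
	demo_dma_addr_t mem_dma = 0xDEADBEEF;
	uint32_t base_low  = mem_dma & 0xFFFFFFFF;
	uint32_t base_high = (mem_dma >> 16) >> 16;	/* 0 here; upper word on a 64-bit type */

	printf("CPB_BASE_LOW=0x%08x CPB_BASE_HIGH=0x%08x\n", base_low, base_high);
	return 0;
}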
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 9032131..deb26f0 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -83,10 +83,12 @@ enum {
PDC_PCI_SYS_ERR = (1 << 22), /* PCI system error */
PDC1_PCI_PARITY_ERR = (1 << 23), /* PCI parity error (from SATA150 driver) */
PDC1_ERR_MASK = PDC1_PCI_PARITY_ERR,
- PDC2_ERR_MASK = PDC2_HTO_ERR | PDC2_ATA_HBA_ERR | PDC2_ATA_DMA_CNT_ERR,
- PDC_ERR_MASK = (PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR | PDC_OVERRUN_ERR
- | PDC_UNDERRUN_ERR | PDC_DRIVE_ERR | PDC_PCI_SYS_ERR
- | PDC1_ERR_MASK | PDC2_ERR_MASK),
+ PDC2_ERR_MASK = PDC2_HTO_ERR | PDC2_ATA_HBA_ERR |
+ PDC2_ATA_DMA_CNT_ERR,
+ PDC_ERR_MASK = PDC_PH_ERR | PDC_SH_ERR | PDC_DH_ERR |
+ PDC_OVERRUN_ERR | PDC_UNDERRUN_ERR |
+ PDC_DRIVE_ERR | PDC_PCI_SYS_ERR |
+ PDC1_ERR_MASK | PDC2_ERR_MASK,
board_2037x = 0, /* FastTrak S150 TX2plus */
board_2037x_pata = 1, /* FastTrak S150 TX2plus PATA port */
@@ -695,19 +697,20 @@ static void pdc_irq_clear(struct ata_port *ap)
readl(mmio + PDC_INT_SEQMASK);
}
-static inline int pdc_is_sataii_tx4(unsigned long flags)
+static int pdc_is_sataii_tx4(unsigned long flags)
{
const unsigned long mask = PDC_FLAG_GEN_II | PDC_FLAG_4_PORTS;
return (flags & mask) == mask;
}
-static inline unsigned int pdc_port_no_to_ata_no(unsigned int port_no, int is_sataii_tx4)
+static unsigned int pdc_port_no_to_ata_no(unsigned int port_no,
+ int is_sataii_tx4)
{
static const unsigned char sataii_tx4_port_remap[4] = { 3, 1, 0, 2};
return is_sataii_tx4 ? sataii_tx4_port_remap[port_no] : port_no;
}
-static irqreturn_t pdc_interrupt (int irq, void *dev_instance)
+static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct ata_port *ap;
@@ -839,15 +842,16 @@ static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
- WARN_ON (tf->protocol == ATA_PROT_DMA ||
- tf->protocol == ATA_PROT_ATAPI_DMA);
+ WARN_ON(tf->protocol == ATA_PROT_DMA ||
+ tf->protocol == ATA_PROT_ATAPI_DMA);
ata_tf_load(ap, tf);
}
-static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+static void pdc_exec_command_mmio(struct ata_port *ap,
+ const struct ata_taskfile *tf)
{
- WARN_ON (tf->protocol == ATA_PROT_DMA ||
- tf->protocol == ATA_PROT_ATAPI_DMA);
+ WARN_ON(tf->protocol == ATA_PROT_DMA ||
+ tf->protocol == ATA_PROT_ATAPI_DMA);
ata_exec_command(ap, tf);
}
@@ -870,8 +874,11 @@ static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
}
/* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
if (scsicmd[0] == WRITE_10) {
- unsigned int lba;
- lba = (scsicmd[2] << 24) | (scsicmd[3] << 16) | (scsicmd[4] << 8) | scsicmd[5];
+ unsigned int lba =
+ (scsicmd[2] << 24) |
+ (scsicmd[3] << 16) |
+ (scsicmd[4] << 8) |
+ scsicmd[5];
if (lba >= 0xFFFF4FA2)
pio = 1;
}
@@ -956,7 +963,8 @@ static void pdc_host_init(struct ata_host *host)
writel(tmp, mmio + PDC_SLEW_CTL);
}
-static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int pdc_ata_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static int printed_version;
const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
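/*
 * Illustrative aside on the pdc_check_atapi_dma() hunk above (not part
 * of the patch): the reformatted code assembles the WRITE(10) LBA
 * big-endian from CDB bytes 2..5 and forces PIO for the last 45150
 * sectors (LBA >= 0xFFFF4FA2).  A minimal standalone sketch of that
 * check; the helper name is made up for illustration:
 */
#include <stdint.h>
#include <stdio.h>

static int pdc_demo_write10_needs_pio(const uint8_t *scsicmd)
{
	uint32_t lba = ((uint32_t)scsicmd[2] << 24) |
		       ((uint32_t)scsicmd[3] << 16) |
		       ((uint32_t)scsicmd[4] << 8)  |
			(uint32_t)scsicmd[5];

	return lba >= 0xFFFF4FA2;	/* -45150 .. -1 viewed as a signed 32-bit LBA */
}

int main(void)
{
	uint8_t cdb[10] = { 0x2A /* WRITE_10 */, 0, 0xFF, 0xFF, 0x4F, 0xA2, 0, 0, 8, 0 };

	printf("force PIO: %d\n", pdc_demo_write10_needs_pio(cdb));
	return 0;
}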
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index c4c4cd2..6d43ba7 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -113,7 +113,7 @@ struct qs_port_priv {
static int qs_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int qs_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
-static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host *host);
static void qs_phy_reset(struct ata_port *ap);
@@ -135,7 +135,6 @@ static struct scsi_host_template qs_ata_sht = {
.sg_tablesize = QS_MAX_PRD,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
- //FIXME .use_clustering = ATA_SHT_USE_CLUSTERING,
.use_clustering = ENABLE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = QS_DMA_BOUNDARY,
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index ea3a0ab..4e6e381 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -111,7 +111,7 @@ enum {
SIL_QUIRK_UDMA5MAX = (1 << 1),
};
-static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
@@ -138,7 +138,7 @@ static const struct pci_device_id sil_pci_tbl[] = {
/* TODO firmware versions should be added - eric */
static const struct sil_drivelist {
- const char * product;
+ const char *product;
unsigned int quirk;
} sil_blacklist [] = {
{ "ST320012AS", SIL_QUIRK_MOD15WRITE },
@@ -279,7 +279,7 @@ MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
MODULE_VERSION(DRV_VERSION);
-static int slow_down = 0;
+static int slow_down;
module_param(slow_down, int, 0444);
MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
@@ -332,7 +332,8 @@ static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
return 0;
}
-static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
+static inline void __iomem *sil_scr_addr(struct ata_port *ap,
+ unsigned int sc_reg)
{
void __iomem *offset = ap->ioaddr.scr_addr;
@@ -643,7 +644,7 @@ static void sil_init_controller(struct ata_host *host)
}
}
-static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
int board_id = ent->driver_data;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 26ebffc..3c481e0 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -674,7 +674,7 @@ static int sil24_do_softreset(struct ata_link *link, unsigned int *class,
/* put the port into known state */
if (sil24_init_port(ap)) {
- reason ="port not ready";
+ reason = "port not ready";
goto err;
}
@@ -756,7 +756,8 @@ static int sil24_hardreset(struct ata_link *link, unsigned int *class,
writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
tmp = ata_wait_register(port + PORT_CTRL_STAT,
- PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, tout_msec);
+ PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10,
+ tout_msec);
/* SStatus oscillates between zero and valid status after
* DEV_RST, debounce it.
@@ -1270,7 +1271,7 @@ static void sil24_init_controller(struct ata_host *host)
PORT_CS_PORT_RST, 10, 100);
if (tmp & PORT_CS_PORT_RST)
dev_printk(KERN_ERR, host->dev,
- "failed to clear port RST\n");
+ "failed to clear port RST\n");
}
/* configure port */
@@ -1283,7 +1284,7 @@ static void sil24_init_controller(struct ata_host *host)
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version = 0;
+ static int printed_version;
struct ata_port_info pi = sil24_port_info[ent->driver_data];
const struct ata_port_info *ppi[] = { &pi, NULL };
void __iomem * const *iomap;
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index f147dc7..a01260a 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -63,17 +63,17 @@ enum {
GENCTL_IOMAPPED_SCR = (1 << 26), /* if set, SCRs are in IO space */
};
-static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static int sis_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val);
-static int sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int sis_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static const struct pci_device_id sis_pci_tbl[] = {
- { PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */
- { PCI_VDEVICE(SI, 0x0181), sis_180 }, /* SiS 964/180 */
- { PCI_VDEVICE(SI, 0x0182), sis_180 }, /* SiS 965/965L */
- { PCI_VDEVICE(SI, 0x0183), sis_180 }, /* SiS 965/965L */
- { PCI_VDEVICE(SI, 0x1182), sis_180 }, /* SiS 966/680 */
- { PCI_VDEVICE(SI, 0x1183), sis_180 }, /* SiS 966/966L/968/680 */
+ { PCI_VDEVICE(SI, 0x0180), sis_180 }, /* SiS 964/180 */
+ { PCI_VDEVICE(SI, 0x0181), sis_180 }, /* SiS 964/180 */
+ { PCI_VDEVICE(SI, 0x0182), sis_180 }, /* SiS 965/965L */
+ { PCI_VDEVICE(SI, 0x0183), sis_180 }, /* SiS 965/965L */
+ { PCI_VDEVICE(SI, 0x1182), sis_180 }, /* SiS 966/680 */
+ { PCI_VDEVICE(SI, 0x1183), sis_180 }, /* SiS 966/966L/968/680 */
{ } /* terminate list */
};
@@ -149,24 +149,24 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
if (ap->port_no) {
switch (pdev->device) {
- case 0x0180:
- case 0x0181:
- pci_read_config_byte(pdev, SIS_PMR, &pmr);
- if ((pmr & SIS_PMR_COMBINED) == 0)
- addr += SIS180_SATA1_OFS;
- break;
-
- case 0x0182:
- case 0x0183:
- case 0x1182:
- addr += SIS182_SATA1_OFS;
- break;
+ case 0x0180:
+ case 0x0181:
+ pci_read_config_byte(pdev, SIS_PMR, &pmr);
+ if ((pmr & SIS_PMR_COMBINED) == 0)
+ addr += SIS180_SATA1_OFS;
+ break;
+
+ case 0x0182:
+ case 0x0183:
+ case 0x1182:
+ addr += SIS182_SATA1_OFS;
+ break;
}
}
return addr;
}
-static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg, u32 *val)
+static u32 sis_scr_cfg_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
@@ -190,7 +190,7 @@ static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg, u32 *val)
return 0;
}
-static void sis_scr_cfg_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+static void sis_scr_cfg_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
@@ -253,7 +253,7 @@ static int sis_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
return 0;
}
-static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
struct ata_port_info pi = sis_port_info;
@@ -309,29 +309,33 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
dev_printk(KERN_INFO, &pdev->dev,
"Detected SiS 180/181 chipset in combined mode\n");
- port2_start=0;
+ port2_start = 0;
pi.flags |= ATA_FLAG_SLAVE_POSS;
}
break;
case 0x0182:
case 0x0183:
- pci_read_config_dword ( pdev, 0x6C, &val);
+ pci_read_config_dword(pdev, 0x6C, &val);
if (val & (1L << 31)) {
- dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965 chipset\n");
+ dev_printk(KERN_INFO, &pdev->dev,
+ "Detected SiS 182/965 chipset\n");
pi.flags |= ATA_FLAG_SLAVE_POSS;
} else {
- dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 182/965L chipset\n");
+ dev_printk(KERN_INFO, &pdev->dev,
+ "Detected SiS 182/965L chipset\n");
}
break;
case 0x1182:
- dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 1182/966/680 SATA controller\n");
+ dev_printk(KERN_INFO, &pdev->dev,
+ "Detected SiS 1182/966/680 SATA controller\n");
pi.flags |= ATA_FLAG_SLAVE_POSS;
break;
case 0x1183:
- dev_printk(KERN_INFO, &pdev->dev, "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n");
+ dev_printk(KERN_INFO, &pdev->dev,
+ "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n");
ppi[0] = &sis_info133_for_sata;
ppi[1] = &sis_info133_for_sata;
break;
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 12d613c..69f651e 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -182,7 +182,7 @@ static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
tf->hob_lbal = lbal >> 8;
tf->hob_lbam = lbam >> 8;
tf->hob_lbah = lbah >> 8;
- }
+ }
}
/**
@@ -193,7 +193,7 @@ static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
* spin_lock_irqsave(host lock)
*/
-static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
+static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
@@ -224,7 +224,7 @@ static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
* spin_lock_irqsave(host lock)
*/
-static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
+static void k2_bmdma_start_mmio(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *mmio = ap->ioaddr.bmdma_addr;
@@ -255,7 +255,7 @@ static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
static u8 k2_stat_check_status(struct ata_port *ap)
{
- return readl(ap->ioaddr.status_addr);
+ return readl(ap->ioaddr.status_addr);
}
#ifdef CONFIG_PPC_OF
@@ -395,7 +395,7 @@ static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base)
}
-static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
const struct ata_port_info *ppi[] =
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index b6026bc..4d85718 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -212,9 +212,9 @@ struct pdc_host_priv {
};
-static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void pdc_eng_timeout(struct ata_port *ap);
-static void pdc_20621_phy_reset (struct ata_port *ap);
+static void pdc_20621_phy_reset(struct ata_port *ap);
static int pdc_port_start(struct ata_port *ap);
static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
@@ -320,16 +320,16 @@ static int pdc_port_start(struct ata_port *ap)
return 0;
}
-static void pdc_20621_phy_reset (struct ata_port *ap)
+static void pdc_20621_phy_reset(struct ata_port *ap)
{
VPRINTK("ENTER\n");
- ap->cbl = ATA_CBL_SATA;
- ata_port_probe(ap);
- ata_bus_reset(ap);
+ ap->cbl = ATA_CBL_SATA;
+ ata_port_probe(ap);
+ ata_bus_reset(ap);
}
static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
- unsigned int portno,
+ unsigned int portno,
unsigned int total_len)
{
u32 addr;
@@ -351,7 +351,7 @@ static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
}
static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
- unsigned int portno,
+ unsigned int portno,
unsigned int total_len)
{
u32 addr;
@@ -711,8 +711,8 @@ static unsigned int pdc20621_qc_issue_prot(struct ata_queued_cmd *qc)
return ata_qc_issue_prot(qc);
}
-static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
- struct ata_queued_cmd *qc,
+static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
+ struct ata_queued_cmd *qc,
unsigned int doing_hdma,
void __iomem *mmio)
{
@@ -803,7 +803,7 @@ static void pdc20621_irq_clear(struct ata_port *ap)
readl(mmio + PDC_20621_SEQMASK);
}
-static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance)
+static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct ata_port *ap;
@@ -836,9 +836,9 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance)
return IRQ_NONE;
}
- spin_lock(&host->lock);
+ spin_lock(&host->lock);
- for (i = 1; i < 9; i++) {
+ for (i = 1; i < 9; i++) {
port_no = i - 1;
if (port_no > 3)
port_no -= 4;
@@ -859,7 +859,7 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance)
}
}
- spin_unlock(&host->lock);
+ spin_unlock(&host->lock);
VPRINTK("mask == 0x%x\n", mask);
@@ -906,16 +906,16 @@ static void pdc_eng_timeout(struct ata_port *ap)
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
- WARN_ON (tf->protocol == ATA_PROT_DMA ||
- tf->protocol == ATA_PROT_NODATA);
+ WARN_ON(tf->protocol == ATA_PROT_DMA ||
+ tf->protocol == ATA_PROT_NODATA);
ata_tf_load(ap, tf);
}
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
- WARN_ON (tf->protocol == ATA_PROT_DMA ||
- tf->protocol == ATA_PROT_NODATA);
+ WARN_ON(tf->protocol == ATA_PROT_DMA ||
+ tf->protocol == ATA_PROT_NODATA);
ata_exec_command(ap, tf);
}
@@ -953,7 +953,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
mmio += PDC_CHIP0_OFS;
page_mask = 0x00;
- window_size = 0x2000 * 4; /* 32K byte uchar size */
+ window_size = 0x2000 * 4; /* 32K byte uchar size */
idx = (u16) (offset / window_size);
writel(0x01, mmio + PDC_GENERAL_CTLR);
@@ -979,7 +979,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
window_size / 4);
psource += window_size;
size -= window_size;
- idx ++;
+ idx++;
}
if (size) {
@@ -1008,7 +1008,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
mmio += PDC_CHIP0_OFS;
page_mask = 0x00;
- window_size = 0x2000 * 4; /* 32K byte uchar size */
+ window_size = 0x2000 * 4; /* 32K byte uchar size */
idx = (u16) (offset / window_size);
writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
@@ -1031,7 +1031,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
readl(mmio + PDC_GENERAL_CTLR);
psource += window_size;
size -= window_size;
- idx ++;
+ idx++;
}
if (size) {
@@ -1050,7 +1050,7 @@ static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
u32 i2creg = 0;
u32 status;
- u32 count =0;
+ u32 count = 0;
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
@@ -1082,21 +1082,21 @@ static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
static int pdc20621_detect_dimm(struct ata_host *host)
{
- u32 data=0 ;
+ u32 data = 0;
if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
- if (data == 100)
+ if (data == 100)
return 100;
- } else
+ } else
return 0;
if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
- if(data <= 0x75)
+ if (data <= 0x75)
return 133;
- } else
+ } else
return 0;
- return 0;
+ return 0;
}
@@ -1104,8 +1104,8 @@ static int pdc20621_prog_dimm0(struct ata_host *host)
{
u32 spd0[50];
u32 data = 0;
- int size, i;
- u8 bdimmsize;
+ int size, i;
+ u8 bdimmsize;
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
static const struct {
unsigned int reg;
@@ -1128,40 +1128,40 @@ static int pdc20621_prog_dimm0(struct ata_host *host)
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
- for(i=0; i<ARRAY_SIZE(pdc_i2c_read_data); i++)
+ for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
pdc_i2c_read_data[i].reg,
&spd0[pdc_i2c_read_data[i].ofs]);
- data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
- data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
+ data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
+ data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
((((spd0[27] + 9) / 10) - 1) << 8) ;
- data |= (((((spd0[29] > spd0[28])
+ data |= (((((spd0[29] > spd0[28])
? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
- data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
+ data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
- if (spd0[18] & 0x08)
+ if (spd0[18] & 0x08)
data |= ((0x03) << 14);
- else if (spd0[18] & 0x04)
+ else if (spd0[18] & 0x04)
data |= ((0x02) << 14);
- else if (spd0[18] & 0x01)
+ else if (spd0[18] & 0x01)
data |= ((0x01) << 14);
- else
+ else
data |= (0 << 14);
- /*
+ /*
Calculate the size of bDIMMSize (power of 2) and
merge the DIMM size by program start/end address.
*/
- bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
- size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
- data |= (((size / 16) - 1) << 16);
- data |= (0 << 23);
+ bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
+ size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
+ data |= (((size / 16) - 1) << 16);
+ data |= (0 << 23);
data |= 8;
- writel(data, mmio + PDC_DIMM0_CONTROL);
+ writel(data, mmio + PDC_DIMM0_CONTROL);
readl(mmio + PDC_DIMM0_CONTROL);
- return size;
+ return size;
}
@@ -1172,9 +1172,9 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
/* hard-code chip #0 */
- mmio += PDC_CHIP0_OFS;
+ mmio += PDC_CHIP0_OFS;
- /*
+ /*
Set To Default : DIMM Module Global Control Register (0x022259F1)
DIMM Arbitration Disable (bit 20)
DIMM Data/Control Output Driving Selection (bit12 - bit15)
@@ -1193,40 +1193,40 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
writel(data, mmio + PDC_SDRAM_CONTROL);
readl(mmio + PDC_SDRAM_CONTROL);
printk(KERN_ERR "Local DIMM ECC Enabled\n");
- }
+ }
- /* DIMM Initialization Select/Enable (bit 18/19) */
- data &= (~(1<<18));
- data |= (1<<19);
- writel(data, mmio + PDC_SDRAM_CONTROL);
+ /* DIMM Initialization Select/Enable (bit 18/19) */
+ data &= (~(1<<18));
+ data |= (1<<19);
+ writel(data, mmio + PDC_SDRAM_CONTROL);
- error = 1;
- for (i = 1; i <= 10; i++) { /* polling ~5 secs */
+ error = 1;
+ for (i = 1; i <= 10; i++) { /* polling ~5 secs */
data = readl(mmio + PDC_SDRAM_CONTROL);
if (!(data & (1<<19))) {
- error = 0;
- break;
+ error = 0;
+ break;
}
msleep(i*100);
- }
- return error;
+ }
+ return error;
}
static unsigned int pdc20621_dimm_init(struct ata_host *host)
{
int speed, size, length;
- u32 addr,spd0,pci_status;
- u32 tmp=0;
- u32 time_period=0;
- u32 tcount=0;
- u32 ticks=0;
- u32 clock=0;
- u32 fparam=0;
+ u32 addr, spd0, pci_status;
+ u32 tmp = 0;
+ u32 time_period = 0;
+ u32 tcount = 0;
+ u32 ticks = 0;
+ u32 clock = 0;
+ u32 fparam = 0;
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
/* hard-code chip #0 */
- mmio += PDC_CHIP0_OFS;
+ mmio += PDC_CHIP0_OFS;
/* Initialize PLL based upon PCI Bus Frequency */
@@ -1254,7 +1254,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
If SX4 is on PCI-X bus, after 3 seconds, the timer counter
register should be >= (0xffffffff - 3x10^8).
*/
- if(tcount >= PCI_X_TCOUNT) {
+ if (tcount >= PCI_X_TCOUNT) {
ticks = (time_period - tcount);
VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
@@ -1285,41 +1285,43 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
if (!(speed = pdc20621_detect_dimm(host))) {
printk(KERN_ERR "Detect Local DIMM Fail\n");
return 1; /* DIMM error */
- }
- VPRINTK("Local DIMM Speed = %d\n", speed);
+ }
+ VPRINTK("Local DIMM Speed = %d\n", speed);
- /* Programming DIMM0 Module Control Register (index_CID0:80h) */
+ /* Programming DIMM0 Module Control Register (index_CID0:80h) */
size = pdc20621_prog_dimm0(host);
- VPRINTK("Local DIMM Size = %dMB\n",size);
+ VPRINTK("Local DIMM Size = %dMB\n", size);
- /* Programming DIMM Module Global Control Register (index_CID0:88h) */
+ /* Programming DIMM Module Global Control Register (index_CID0:88h) */
if (pdc20621_prog_dimm_global(host)) {
printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
return 1;
- }
+ }
#ifdef ATA_VERBOSE_DEBUG
{
- u8 test_parttern1[40] = {0x55,0xAA,'P','r','o','m','i','s','e',' ',
- 'N','o','t',' ','Y','e','t',' ','D','e','f','i','n','e','d',' ',
- '1','.','1','0',
- '9','8','0','3','1','6','1','2',0,0};
+ u8 test_parttern1[40] =
+ {0x55,0xAA,'P','r','o','m','i','s','e',' ',
+ 'N','o','t',' ','Y','e','t',' ',
+ 'D','e','f','i','n','e','d',' ',
+ '1','.','1','0',
+ '9','8','0','3','1','6','1','2',0,0};
u8 test_parttern2[40] = {0};
- pdc20621_put_to_dimm(host, (void *) test_parttern2, 0x10040, 40);
- pdc20621_put_to_dimm(host, (void *) test_parttern2, 0x40, 40);
+ pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
+ pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
- pdc20621_put_to_dimm(host, (void *) test_parttern1, 0x10040, 40);
- pdc20621_get_from_dimm(host, (void *) test_parttern2, 0x40, 40);
+ pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
+ pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
test_parttern2[1], &(test_parttern2[2]));
- pdc20621_get_from_dimm(host, (void *) test_parttern2, 0x10040,
+ pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
40);
printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
test_parttern2[1], &(test_parttern2[2]));
- pdc20621_put_to_dimm(host, (void *) test_parttern1, 0x40, 40);
- pdc20621_get_from_dimm(host, (void *) test_parttern2, 0x40, 40);
+ pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
+ pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
test_parttern2[1], &(test_parttern2[2]));
}
@@ -1375,7 +1377,8 @@ static void pdc_20621_init(struct ata_host *host)
readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
}
-static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int pdc_sata_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static int printed_version;
const struct ata_port_info *ppi[] =
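/*
 * Illustrative aside on the pdc20621_prog_dimm0() hunks above (not
 * part of the patch): the DIMM capacity is derived as a power of two
 * from SPD bytes and then packed into the control word as
 * ((size / 16) - 1) << 16.  A worked example with a made-up exponent
 * of 28, i.e. a 256 MB module:
 */
#include <stdio.h>

int main(void)
{
	unsigned int bdimmsize = 28;			/* assumption: example SPD-derived exponent */
	unsigned int size  = (1u << bdimmsize) >> 20;	/* bytes -> MB: 256 */
	unsigned int field = ((size / 16) - 1) << 16;	/* as packed into PDC_DIMM0_CONTROL */

	printf("DIMM size %u MB, control field 0x%08x\n", size, field);
	return 0;
}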
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index d394da0..e710e71 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -56,9 +56,9 @@ struct uli_priv {
unsigned int scr_cfg_addr[uli_max_ports];
};
-static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
-static int uli_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val);
-static int uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
+static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int uli_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
+static int uli_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static const struct pci_device_id uli_pci_tbl[] = {
{ PCI_VDEVICE(AL, 0x5289), uli_5289 },
@@ -143,7 +143,7 @@ static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
}
-static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
+static u32 uli_scr_cfg_read(struct ata_port *ap, unsigned int sc_reg)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
@@ -153,7 +153,7 @@ static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
return val;
}
-static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
+static void uli_scr_cfg_write(struct ata_port *ap, unsigned int scr, u32 val)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(ap, scr);
@@ -161,7 +161,7 @@ static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
pci_write_config_dword(pdev, cfg_addr, val);
}
-static int uli_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val)
+static int uli_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
if (sc_reg > SCR_CONTROL)
return -EINVAL;
@@ -170,16 +170,16 @@ static int uli_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val)
return 0;
}
-static int uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
+static int uli_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
- if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0
+ if (sc_reg > SCR_CONTROL) //SCR_CONTROL=2, SCR_ERROR=1, SCR_STATUS=0
return -EINVAL;
uli_scr_cfg_write(ap, sc_reg, val);
return 0;
}
-static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
const struct ata_port_info *ppi[] = { &uli_port_info, NULL };
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index cc6ee08..3ef072ff 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -3,7 +3,7 @@
*
* Maintained by: Jeff Garzik <jgarzik@pobox.com>
* Please ALWAYS copy linux-ide@vger.kernel.org
- on emails.
+ * on emails.
*
* Copyright 2003-2004 Red Hat, Inc. All rights reserved.
* Copyright 2003-2004 Jeff Garzik
@@ -69,7 +69,7 @@ enum {
SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */
};
-static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static void svia_noop_freeze(struct ata_port *ap);
@@ -372,12 +372,12 @@ static const unsigned int vt6421_bar_sizes[] = {
16, 16, 16, 16, 32, 128
};
-static void __iomem * svia_scr_addr(void __iomem *addr, unsigned int port)
+static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
{
return addr + (port * 128);
}
-static void __iomem * vt6421_scr_addr(void __iomem *addr, unsigned int port)
+static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
{
return addr + (port * 64);
}
@@ -472,7 +472,7 @@ static void svia_configure(struct pci_dev *pdev)
if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
dev_printk(KERN_DEBUG, &pdev->dev,
"enabling SATA channels (0x%x)\n",
- (int) tmp8);
+ (int) tmp8);
tmp8 |= ALL_PORTS;
pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
}
@@ -482,7 +482,7 @@ static void svia_configure(struct pci_dev *pdev)
if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
dev_printk(KERN_DEBUG, &pdev->dev,
"enabling SATA channel interrupts (0x%x)\n",
- (int) tmp8);
+ (int) tmp8);
tmp8 |= ALL_PORTS;
pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
}
@@ -492,13 +492,13 @@ static void svia_configure(struct pci_dev *pdev)
if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
dev_printk(KERN_DEBUG, &pdev->dev,
"enabling SATA channel native mode (0x%x)\n",
- (int) tmp8);
+ (int) tmp8);
tmp8 |= NATIVE_MODE_ALL;
pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
}
}
-static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
unsigned int i;
@@ -525,8 +525,8 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
dev_printk(KERN_ERR, &pdev->dev,
"invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
i,
- (unsigned long long)pci_resource_start(pdev, i),
- (unsigned long long)pci_resource_len(pdev, i));
+ (unsigned long long)pci_resource_start(pdev, i),
+ (unsigned long long)pci_resource_len(pdev, i));
return -ENODEV;
}
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 0d9be16..95ae3ed 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -162,7 +162,8 @@ static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
/*
* The only thing the ctl register is used for is SRST.
* That is not enabled or disabled via tf_load.
- * However, if ATA_NIEN is changed, then we need to change the interrupt register.
+ * However, if ATA_NIEN is changed, then we need to change
+ * the interrupt register.
*/
if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) {
ap->last_ctl = tf->ctl;
@@ -219,7 +220,7 @@ static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
tf->hob_lbal = lbal >> 8;
tf->hob_lbam = lbam >> 8;
tf->hob_lbah = lbah >> 8;
- }
+ }
}
static inline void vsc_error_intr(u8 port_status, struct ata_port *ap)
@@ -256,9 +257,10 @@ static void vsc_port_intr(u8 port_status, struct ata_port *ap)
/*
* vsc_sata_interrupt
*
- * Read the interrupt register and process for the devices that have them pending.
+ * Read the interrupt register and process for the devices that have
+ * them pending.
*/
-static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance)
+static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
unsigned int i;
@@ -287,7 +289,7 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance)
handled++;
} else
dev_printk(KERN_ERR, host->dev,
- ": interrupt from disabled port %d\n", i);
+ "interrupt from disabled port %d\n", i);
}
}
@@ -363,7 +365,8 @@ static void __devinit vsc_sata_setup_port(struct ata_ioports *port,
}
-static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit vsc_sata_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static const struct ata_port_info pi = {
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 5a6fe17..7d70496 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1,20 +1,20 @@
/*
- * Disk Array driver for HP SA 5xxx and 6xxx Controllers
- * Copyright 2000, 2006 Hewlett-Packard Development Company, L.P.
+ * Disk Array driver for HP Smart Array controllers.
+ * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA.
*
* Questions/Comments/Bugfixes to iss_storagedev@hp.com
*
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 4aca7dd..63ee6c0 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -1,20 +1,20 @@
/*
- * Disk Array driver for Compaq SA53xx Controllers, SCSI Tape module
- * Copyright 2001 Compaq Computer Corporation
+ * Disk Array driver for HP Smart Array controllers, SCSI Tape module.
+ * (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA
+ * 02111-1307, USA.
*
* Questions/Comments/Bugfixes to iss_storagedev@hp.com
*
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h
index 5e7e06c..d9c2c58 100644
--- a/drivers/block/cciss_scsi.h
+++ b/drivers/block/cciss_scsi.h
@@ -1,20 +1,20 @@
/*
- * Disk Array driver for Compaq SA53xx Controllers, SCSI Tape module
- * Copyright 2001 Compaq Computer Corporation
+ * Disk Array driver for HP Smart Array controllers, SCSI Tape module.
+ * (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more details.
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA
+ * 02111-1307, USA.
*
* Questions/Comments/Bugfixes to iss_storagedev@hp.com
*
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 0e937f6..20070b7 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -41,7 +41,7 @@
*/
static inline int uncached_access(struct file *file, unsigned long addr)
{
-#if defined(__i386__)
+#if defined(__i386__) && !defined(__arch_um__)
/*
* On the PPro and successors, the MTRRs are used to set
* memory types for physical addresses outside main memory,
@@ -57,7 +57,7 @@ static inline int uncached_access(struct file *file, unsigned long addr)
test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
&& addr >= __pa(high_memory);
-#elif defined(__x86_64__)
+#elif defined(__x86_64__) && !defined(__arch_um__)
/*
* This is broken because it can generate memory type aliases,
* which can cause cache corruptions
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 476012b..48c1775 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -1843,6 +1843,7 @@ static int if_sendbuf(int id, int channel, int doack, struct sk_buff *skb)
int msglen;
u16 errcode;
u16 datahandle;
+ u32 data;
if (!card) {
printk(KERN_ERR "capidrv: if_sendbuf called with invalid driverId %d!\n",
@@ -1860,9 +1861,26 @@ static int if_sendbuf(int id, int channel, int doack, struct sk_buff *skb)
return 0;
}
datahandle = nccip->datahandle;
+
+ /*
+ * Here we copy pointer skb->data into the 32-bit 'Data' field.
+ * The 'Data' field is not used in practice in linux kernel
+ * (neither in 32 or 64 bit), but should have some value,
+ * since a CAPI message trace will display it.
+ *
+ * The correct value in the 32 bit case is the address of the
+ * data, in 64 bit it makes no sense, we use 0 there.
+ */
+
+#ifdef CONFIG_64BIT
+ data = 0;
+#else
+ data = (unsigned long) skb->data;
+#endif
+
capi_fill_DATA_B3_REQ(&sendcmsg, global.ap.applid, card->msgid++,
nccip->ncci, /* adr */
- (u32) skb->data, /* Data */
+ data, /* Data */
skb->len, /* DataLength */
datahandle, /* DataHandle */
0 /* Flags */
@@ -2123,7 +2141,10 @@ static int capidrv_delcontr(u16 contr)
printk(KERN_ERR "capidrv: delcontr: no contr %u\n", contr);
return -1;
}
- #warning FIXME: maybe a race condition the card should be removed here from global list /kkeil
+
+ /* FIXME: maybe a race condition the card should be removed
+ * here from global list /kkeil
+ */
spin_unlock_irqrestore(&global_lock, flags);
del_timer(&card->listentimer);
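/*
 * Illustrative aside on the capidrv DATA_B3_REQ hunk above (not part
 * of the patch): the 32-bit CAPI 'Data' field can only hold a full
 * pointer on 32-bit kernels; on a 64-bit build the cast would silently
 * truncate the upper half, which is why the patch passes 0 there.  A
 * minimal standalone sketch of the same decision:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	char payload[16];
	uint32_t data;

	if (sizeof(void *) > sizeof(uint32_t))
		data = 0;				/* 64-bit: no meaningful 32-bit value exists */
	else
		data = (uint32_t)(uintptr_t)payload;	/* 32-bit: the buffer address itself */

	printf("CAPI Data field: 0x%08x\n", data);
	return 0;
}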
diff --git a/drivers/md/raid6algos.c b/drivers/md/raid6algos.c
index 9265761..77a6e4bf 100644
--- a/drivers/md/raid6algos.c
+++ b/drivers/md/raid6algos.c
@@ -52,7 +52,7 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_intx16,
&raid6_intx32,
#endif
-#if defined(__i386__)
+#if defined(__i386__) && !defined(__arch_um__)
&raid6_mmxx1,
&raid6_mmxx2,
&raid6_sse1x1,
@@ -60,7 +60,7 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_sse2x1,
&raid6_sse2x2,
#endif
-#if defined(__x86_64__)
+#if defined(__x86_64__) && !defined(__arch_um__)
&raid6_sse2x1,
&raid6_sse2x2,
&raid6_sse2x4,
diff --git a/drivers/md/raid6mmx.c b/drivers/md/raid6mmx.c
index 6181a5a..d4e4a1b 100644
--- a/drivers/md/raid6mmx.c
+++ b/drivers/md/raid6mmx.c
@@ -16,7 +16,7 @@
* MMX implementation of RAID-6 syndrome functions
*/
-#if defined(__i386__)
+#if defined(__i386__) && !defined(__arch_um__)
#include "raid6.h"
#include "raid6x86.h"
diff --git a/drivers/md/raid6sse1.c b/drivers/md/raid6sse1.c
index f0a1ba8..0666237 100644
--- a/drivers/md/raid6sse1.c
+++ b/drivers/md/raid6sse1.c
@@ -21,7 +21,7 @@
* worthwhile as a separate implementation.
*/
-#if defined(__i386__)
+#if defined(__i386__) && !defined(__arch_um__)
#include "raid6.h"
#include "raid6x86.h"
diff --git a/drivers/md/raid6sse2.c b/drivers/md/raid6sse2.c
index 0f01976..b034ad8 100644
--- a/drivers/md/raid6sse2.c
+++ b/drivers/md/raid6sse2.c
@@ -17,7 +17,7 @@
*
*/
-#if defined(__i386__) || defined(__x86_64__)
+#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
#include "raid6.h"
#include "raid6x86.h"
@@ -161,7 +161,7 @@ const struct raid6_calls raid6_sse2x2 = {
#endif
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(__arch_um__)
/*
* Unrolled-by-4 SSE2 implementation
diff --git a/drivers/md/raid6x86.h b/drivers/md/raid6x86.h
index 9111950..99fea7a 100644
--- a/drivers/md/raid6x86.h
+++ b/drivers/md/raid6x86.h
@@ -19,7 +19,7 @@
#ifndef LINUX_RAID_RAID6X86_H
#define LINUX_RAID_RAID6X86_H
-#if defined(__i386__) || defined(__x86_64__)
+#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
#ifdef __KERNEL__ /* Real code */
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index fc72e1f..f2070a1 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -262,7 +262,7 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
}
/* Convert back to virtual address */
- host->data_ptr = (u16*)sg_virt(sg);
+ host->data_ptr = (u16*)sg_virt(data->sg);
host->data_cnt = 0;
clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 86b8641..867cb73 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -166,13 +166,14 @@ config NET_SB1000
If you don't have this card, of course say N.
config IP1000
- tristate "IP1000 Gigabit Ethernet support"
- depends on PCI && EXPERIMENTAL
- ---help---
- This driver supports IP1000 gigabit Ethernet cards.
+ tristate "IP1000 Gigabit Ethernet support"
+ depends on PCI && EXPERIMENTAL
+ select MII
+ ---help---
+ This driver supports IP1000 gigabit Ethernet cards.
- To compile this driver as a module, choose M here: the module
- will be called ipg. This is recommended.
+ To compile this driver as a module, choose M here: the module
+ will be called ipg. This is recommended.
source "drivers/net/arcnet/Kconfig"
@@ -1880,6 +1881,30 @@ config FEC2
Say Y here if you want to use the second built-in 10/100 Fast
ethernet controller on some Motorola ColdFire processors.
+config FEC_MPC52xx
+ tristate "MPC52xx FEC driver"
+ depends on PPC_MPC52xx
+ select PPC_BESTCOMM
+ select PPC_BESTCOMM_FEC
+ select CRC32
+ select PHYLIB
+ ---help---
+ This option enables support for the MPC5200's on-chip
+ Fast Ethernet Controller
+ If compiled as module, it will be called 'fec_mpc52xx.ko'.
+
+config FEC_MPC52xx_MDIO
+ bool "MPC52xx FEC MDIO bus driver"
+ depends on FEC_MPC52xx
+ default y
+ ---help---
+ The MPC5200's FEC can connect to the Ethernet either with
+ an external MII PHY chip or 10 Mbps 7-wire interface
+ (Motorola? industry standard).
+ If your board uses an external PHY connected to FEC, enable this.
+ If not sure, enable.
+ If compiled as module, it will be called 'fec_mpc52xx_phy.ko'.
+
config NE_H8300
tristate "NE2000 compatible support for H8/300"
depends on H8300
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 5932620..0e5fde4 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -96,6 +96,10 @@ obj-$(CONFIG_SHAPER) += shaper.o
obj-$(CONFIG_HP100) += hp100.o
obj-$(CONFIG_SMC9194) += smc9194.o
obj-$(CONFIG_FEC) += fec.o
+obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
+ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
+ obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
+endif
obj-$(CONFIG_68360_ENET) += 68360enet.o
obj-$(CONFIG_WD80x3) += wd.o 8390.o
obj-$(CONFIG_EL2) += 3c503.o 8390.o
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index d2499bb..473f78d 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -122,7 +122,8 @@ struct e1000_buffer {
u16 next_to_watch;
};
/* RX */
- struct page *page;
+ /* arrays of page information for packet split */
+ struct e1000_ps_page *ps_pages;
};
};
@@ -142,8 +143,6 @@ struct e1000_ring {
/* array of buffer information structs */
struct e1000_buffer *buffer_info;
- /* arrays of page information for packet split */
- struct e1000_ps_page *ps_pages;
struct sk_buff *rx_skb_top;
struct e1000_queue_stats stats;
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 033e124..4fd2e23 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -245,37 +245,36 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
for (j = 0; j < PS_PAGE_BUFFERS; j++) {
- ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
- + j];
- if (j < adapter->rx_ps_pages) {
+ ps_page = &buffer_info->ps_pages[j];
+ if (j >= adapter->rx_ps_pages) {
+ /* all unused desc entries get hw null ptr */
+ rx_desc->read.buffer_addr[j+1] = ~0;
+ continue;
+ }
+ if (!ps_page->page) {
+ ps_page->page = alloc_page(GFP_ATOMIC);
if (!ps_page->page) {
- ps_page->page = alloc_page(GFP_ATOMIC);
- if (!ps_page->page) {
- adapter->alloc_rx_buff_failed++;
- goto no_buffers;
- }
- ps_page->dma = pci_map_page(pdev,
- ps_page->page,
- 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(
- ps_page->dma)) {
- dev_err(&adapter->pdev->dev,
- "RX DMA page map failed\n");
- adapter->rx_dma_failed++;
- goto no_buffers;
- }
+ adapter->alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+ ps_page->dma = pci_map_page(pdev,
+ ps_page->page,
+ 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(ps_page->dma)) {
+ dev_err(&adapter->pdev->dev,
+ "RX DMA page map failed\n");
+ adapter->rx_dma_failed++;
+ goto no_buffers;
}
- /*
- * Refresh the desc even if buffer_addrs
- * didn't change because each write-back
- * erases this info.
- */
- rx_desc->read.buffer_addr[j+1] =
- cpu_to_le64(ps_page->dma);
- } else {
- rx_desc->read.buffer_addr[j+1] = ~0;
}
+ /*
+ * Refresh the desc even if buffer_addrs
+ * didn't change because each write-back
+ * erases this info.
+ */
+ rx_desc->read.buffer_addr[j+1] =
+ cpu_to_le64(ps_page->dma);
}
skb = netdev_alloc_skb(netdev,
@@ -334,94 +333,6 @@ no_buffers:
}
/**
- * e1000_alloc_rx_buffers_jumbo - Replace used jumbo receive buffers
- *
- * @adapter: address of board private structure
- * @cleaned_count: number of buffers to allocate this pass
- **/
-static void e1000_alloc_rx_buffers_jumbo(struct e1000_adapter *adapter,
- int cleaned_count)
-{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_ring *rx_ring = adapter->rx_ring;
- struct e1000_rx_desc *rx_desc;
- struct e1000_buffer *buffer_info;
- struct sk_buff *skb;
- unsigned int i;
- unsigned int bufsz = 256 -
- 16 /*for skb_reserve */ -
- NET_IP_ALIGN;
-
- i = rx_ring->next_to_use;
- buffer_info = &rx_ring->buffer_info[i];
-
- while (cleaned_count--) {
- skb = buffer_info->skb;
- if (skb) {
- skb_trim(skb, 0);
- goto check_page;
- }
-
- skb = netdev_alloc_skb(netdev, bufsz);
- if (!skb) {
- /* Better luck next round */
- adapter->alloc_rx_buff_failed++;
- break;
- }
-
- /* Make buffer alignment 2 beyond a 16 byte boundary
- * this will result in a 16 byte aligned IP header after
- * the 14 byte MAC header is removed
- */
- skb_reserve(skb, NET_IP_ALIGN);
-
- buffer_info->skb = skb;
-check_page:
- /* allocate a new page if necessary */
- if (!buffer_info->page) {
- buffer_info->page = alloc_page(GFP_ATOMIC);
- if (!buffer_info->page) {
- adapter->alloc_rx_buff_failed++;
- break;
- }
- }
-
- if (!buffer_info->dma)
- buffer_info->dma = pci_map_page(pdev,
- buffer_info->page, 0,
- PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(buffer_info->dma)) {
- dev_err(&adapter->pdev->dev, "RX DMA page map failed\n");
- adapter->rx_dma_failed++;
- break;
- }
-
- rx_desc = E1000_RX_DESC(*rx_ring, i);
- rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
-
- i++;
- if (i == rx_ring->count)
- i = 0;
- buffer_info = &rx_ring->buffer_info[i];
- }
-
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64). */
- wmb();
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
- }
-}
-
-/**
* e1000_clean_rx_irq - Send received data up the network stack; legacy
* @adapter: board private structure
*
@@ -495,10 +406,6 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
goto next_desc;
}
- /* adjust length to remove Ethernet CRC */
- length -= 4;
-
- /* probably a little skewed due to removing CRC */
total_rx_bytes += length;
total_rx_packets++;
@@ -554,15 +461,6 @@ next_desc:
return cleaned;
}
-static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
- u16 length)
-{
- bi->page = NULL;
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
-}
-
static void e1000_put_txbuf(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info)
{
@@ -699,174 +597,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
}
/**
- * e1000_clean_rx_irq_jumbo - Send received data up the network stack; legacy
- * @adapter: board private structure
- *
- * the return value indicates whether actual cleaning was done, there
- * is no guarantee that everything was cleaned
- **/
-static bool e1000_clean_rx_irq_jumbo(struct e1000_adapter *adapter,
- int *work_done, int work_to_do)
-{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_ring *rx_ring = adapter->rx_ring;
- struct e1000_rx_desc *rx_desc, *next_rxd;
- struct e1000_buffer *buffer_info, *next_buffer;
- u32 length;
- unsigned int i;
- int cleaned_count = 0;
- bool cleaned = 0;
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-
- i = rx_ring->next_to_clean;
- rx_desc = E1000_RX_DESC(*rx_ring, i);
- buffer_info = &rx_ring->buffer_info[i];
-
- while (rx_desc->status & E1000_RXD_STAT_DD) {
- struct sk_buff *skb;
- u8 status;
-
- if (*work_done >= work_to_do)
- break;
- (*work_done)++;
-
- status = rx_desc->status;
- skb = buffer_info->skb;
- buffer_info->skb = NULL;
-
- i++;
- if (i == rx_ring->count)
- i = 0;
- next_rxd = E1000_RX_DESC(*rx_ring, i);
- prefetch(next_rxd);
-
- next_buffer = &rx_ring->buffer_info[i];
-
- cleaned = 1;
- cleaned_count++;
- pci_unmap_page(pdev,
- buffer_info->dma,
- PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- buffer_info->dma = 0;
-
- length = le16_to_cpu(rx_desc->length);
-
- /* errors is only valid for DD + EOP descriptors */
- if ((status & E1000_RXD_STAT_EOP) &&
- (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
- /* recycle both page and skb */
- buffer_info->skb = skb;
- /* an error means any chain goes out the window too */
- if (rx_ring->rx_skb_top)
- dev_kfree_skb(rx_ring->rx_skb_top);
- rx_ring->rx_skb_top = NULL;
- goto next_desc;
- }
-
-#define rxtop rx_ring->rx_skb_top
- if (!(status & E1000_RXD_STAT_EOP)) {
- /* this descriptor is only the beginning (or middle) */
- if (!rxtop) {
- /* this is the beginning of a chain */
- rxtop = skb;
- skb_fill_page_desc(rxtop, 0, buffer_info->page,
- 0, length);
- } else {
- /* this is the middle of a chain */
- skb_fill_page_desc(rxtop,
- skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0,
- length);
- /* re-use the skb, only consumed the page */
- buffer_info->skb = skb;
- }
- e1000_consume_page(buffer_info, rxtop, length);
- goto next_desc;
- } else {
- if (rxtop) {
- /* end of the chain */
- skb_fill_page_desc(rxtop,
- skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0, length);
- /* re-use the current skb, we only consumed the
- * page */
- buffer_info->skb = skb;
- skb = rxtop;
- rxtop = NULL;
- e1000_consume_page(buffer_info, skb, length);
- } else {
- /* no chain, got EOP, this buf is the packet
- * copybreak to save the put_page/alloc_page */
- if (length <= copybreak &&
- skb_tailroom(skb) >= length) {
- u8 *vaddr;
- vaddr = kmap_atomic(buffer_info->page,
- KM_SKB_DATA_SOFTIRQ);
- memcpy(skb_tail_pointer(skb),
- vaddr, length);
- kunmap_atomic(vaddr,
- KM_SKB_DATA_SOFTIRQ);
- /* re-use the page, so don't erase
- * buffer_info->page */
- skb_put(skb, length);
- } else {
- skb_fill_page_desc(skb, 0,
- buffer_info->page, 0,
- length);
- e1000_consume_page(buffer_info, skb,
- length);
- }
- }
- }
-
- /* Receive Checksum Offload XXX recompute due to CRC strip? */
- e1000_rx_checksum(adapter,
- (u32)(status) |
- ((u32)(rx_desc->errors) << 24),
- le16_to_cpu(rx_desc->csum), skb);
-
- pskb_trim(skb, skb->len - 4);
-
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
- total_rx_packets++;
-
- /* eth type trans needs skb->data to point to something */
- if (!pskb_may_pull(skb, ETH_HLEN)) {
- ndev_err(netdev, "__pskb_pull_tail failed.\n");
- dev_kfree_skb(skb);
- goto next_desc;
- }
-
- e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special);
-
-next_desc:
- rx_desc->status = 0;
-
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
- adapter->alloc_rx_buf(adapter, cleaned_count);
- cleaned_count = 0;
- }
-
- /* use prefetched values */
- rx_desc = next_rxd;
- buffer_info = next_buffer;
- }
- rx_ring->next_to_clean = i;
-
- cleaned_count = e1000_desc_unused(rx_ring);
- if (cleaned_count)
- adapter->alloc_rx_buf(adapter, cleaned_count);
-
- adapter->total_rx_packets += total_rx_packets;
- adapter->total_rx_bytes += total_rx_bytes;
- return cleaned;
-}
-
-/**
* e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
* @adapter: board private structure
*
@@ -953,7 +683,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
((length + l1) <= adapter->rx_ps_bsize0)) {
u8 *vaddr;
- ps_page = &rx_ring->ps_pages[i * PS_PAGE_BUFFERS];
+ ps_page = &buffer_info->ps_pages[0];
/* there is no documentation about how to call
* kmap_atomic, so we can't hold the mapping
@@ -965,8 +695,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
pci_dma_sync_single_for_device(pdev, ps_page->dma,
PAGE_SIZE, PCI_DMA_FROMDEVICE);
- /* remove the CRC */
- l1 -= 4;
+
skb_put(skb, l1);
goto copydone;
} /* if */
@@ -977,7 +706,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
if (!length)
break;
- ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + j];
+ ps_page = &buffer_info->ps_pages[j];
pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
ps_page->dma = 0;
@@ -988,10 +717,6 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
skb->truesize += length;
}
- /* strip the ethernet crc, problem is we're using pages now so
- * this whole operation can get a little cpu intensive */
- pskb_trim(skb, skb->len - 4);
-
copydone:
total_rx_bytes += skb->len;
total_rx_packets++;
@@ -1043,7 +768,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
struct e1000_buffer *buffer_info;
struct e1000_ps_page *ps_page;
struct pci_dev *pdev = adapter->pdev;
- unsigned long size;
unsigned int i, j;
/* Free all the Rx ring sk_buffs */
@@ -1054,9 +778,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
- else if (adapter->clean_rx == e1000_clean_rx_irq_jumbo)
- pci_unmap_page(pdev, buffer_info->dma,
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_ps_bsize0,
@@ -1064,19 +785,13 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
buffer_info->dma = 0;
}
- if (buffer_info->page) {
- put_page(buffer_info->page);
- buffer_info->page = NULL;
- }
-
if (buffer_info->skb) {
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
}
for (j = 0; j < PS_PAGE_BUFFERS; j++) {
- ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
- + j];
+ ps_page = &buffer_info->ps_pages[j];
if (!ps_page->page)
break;
pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
@@ -1093,12 +808,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
rx_ring->rx_skb_top = NULL;
}
- size = sizeof(struct e1000_buffer) * rx_ring->count;
- memset(rx_ring->buffer_info, 0, size);
- size = sizeof(struct e1000_ps_page)
- * (rx_ring->count * PS_PAGE_BUFFERS);
- memset(rx_ring->ps_pages, 0, size);
-
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
@@ -1421,7 +1130,8 @@ err:
int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
{
struct e1000_ring *rx_ring = adapter->rx_ring;
- int size, desc_len, err = -ENOMEM;
+ struct e1000_buffer *buffer_info;
+ int i, size, desc_len, err = -ENOMEM;
size = sizeof(struct e1000_buffer) * rx_ring->count;
rx_ring->buffer_info = vmalloc(size);
@@ -1429,11 +1139,14 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
goto err;
memset(rx_ring->buffer_info, 0, size);
- rx_ring->ps_pages = kcalloc(rx_ring->count * PS_PAGE_BUFFERS,
- sizeof(struct e1000_ps_page),
- GFP_KERNEL);
- if (!rx_ring->ps_pages)
- goto err;
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
+ sizeof(struct e1000_ps_page),
+ GFP_KERNEL);
+ if (!buffer_info->ps_pages)
+ goto err_pages;
+ }
desc_len = sizeof(union e1000_rx_desc_packet_split);
@@ -1443,16 +1156,21 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
err = e1000_alloc_ring_dma(adapter, rx_ring);
if (err)
- goto err;
+ goto err_pages;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
rx_ring->rx_skb_top = NULL;
return 0;
+
+err_pages:
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ kfree(buffer_info->ps_pages);
+ }
err:
vfree(rx_ring->buffer_info);
- kfree(rx_ring->ps_pages);
ndev_err(adapter->netdev,
"Unable to allocate memory for the transmit descriptor ring\n");
return err;
@@ -1518,15 +1236,17 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct e1000_ring *rx_ring = adapter->rx_ring;
+ int i;
e1000_clean_rx_ring(adapter);
+ for (i = 0; i < rx_ring->count; i++) {
+ kfree(rx_ring->buffer_info[i].ps_pages);
+ }
+
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- kfree(rx_ring->ps_pages);
- rx_ring->ps_pages = NULL;
-
dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
rx_ring->dma);
rx_ring->desc = NULL;
@@ -2032,9 +1752,11 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
ew32(RFCTL, rfctl);
- /* disable the stripping of CRC because it breaks
- * BMC firmware connected over SMBUS */
- rctl |= E1000_RCTL_DTYP_PS /* | E1000_RCTL_SECRC */;
+ /* Enable Packet split descriptors */
+ rctl |= E1000_RCTL_DTYP_PS;
+
+ /* Enable hardware CRC frame stripping */
+ rctl |= E1000_RCTL_SECRC;
psrctl |= adapter->rx_ps_bsize0 >>
E1000_PSRCTL_BSIZE0_SHIFT;
@@ -2077,11 +1799,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
sizeof(union e1000_rx_desc_packet_split);
adapter->clean_rx = e1000_clean_rx_irq_ps;
adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
- } else if (adapter->netdev->mtu > ETH_FRAME_LEN + VLAN_HLEN + 4) {
- rdlen = rx_ring->count *
- sizeof(struct e1000_rx_desc);
- adapter->clean_rx = e1000_clean_rx_irq_jumbo;
- adapter->alloc_rx_buf = e1000_alloc_rx_buffers_jumbo;
} else {
rdlen = rx_ring->count *
sizeof(struct e1000_rx_desc);
@@ -2326,8 +2043,11 @@ void e1000e_reset(struct e1000_adapter *adapter)
struct e1000_mac_info *mac = &adapter->hw.mac;
struct e1000_hw *hw = &adapter->hw;
u32 tx_space, min_tx_space, min_rx_space;
+ u32 pba;
u16 hwm;
+ ew32(PBA, adapter->pba);
+
if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN ) {
/* To maintain wire speed transmits, the Tx FIFO should be
* large enough to accommodate two full transmit packets,
@@ -2335,11 +2055,11 @@ void e1000e_reset(struct e1000_adapter *adapter)
* the Rx FIFO should be large enough to accommodate at least
* one full receive packet and is similarly rounded up and
* expressed in KB. */
- adapter->pba = er32(PBA);
+ pba = er32(PBA);
/* upper 16 bits has Tx packet buffer allocation size in KB */
- tx_space = adapter->pba >> 16;
+ tx_space = pba >> 16;
/* lower 16 bits has Rx packet buffer allocation size in KB */
- adapter->pba &= 0xffff;
+ pba &= 0xffff;
/* the tx fifo also stores 16 bytes of information about the tx
* but don't include ethernet FCS because hardware appends it */
min_tx_space = (mac->max_frame_size +
@@ -2355,20 +2075,21 @@ void e1000e_reset(struct e1000_adapter *adapter)
/* If current Tx allocation is less than the min Tx FIFO size,
* and the min Tx FIFO size is less than the current Rx FIFO
* allocation, take space away from current Rx allocation */
- if (tx_space < min_tx_space &&
- ((min_tx_space - tx_space) < adapter->pba)) {
- adapter->pba -= - (min_tx_space - tx_space);
+ if ((tx_space < min_tx_space) &&
+ ((min_tx_space - tx_space) < pba)) {
+ pba -= min_tx_space - tx_space;
/* if short on rx space, rx wins and must trump tx
* adjustment or use Early Receive if available */
- if ((adapter->pba < min_rx_space) &&
+ if ((pba < min_rx_space) &&
(!(adapter->flags & FLAG_HAS_ERT)))
/* ERT enabled in e1000_configure_rx */
- adapter->pba = min_rx_space;
+ pba = min_rx_space;
}
+
+ ew32(PBA, pba);
}
- ew32(PBA, adapter->pba);
/* flow control settings */
/* The high water mark must be low enough to fit one full frame
@@ -3624,9 +3345,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
* larger slab size.
- * i.e. RXBUFFER_2048 --> size-4096 slab
- * however with the new *_jumbo* routines, jumbo receives will use
- * fragmented skbs */
+ * i.e. RXBUFFER_2048 --> size-4096 slab */
if (max_frame <= 256)
adapter->rx_buffer_len = 256;
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 4b4b74e..f78e5bf 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0079"
+#define DRV_VERSION "EHEA_0080"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 0a7e789..f0319f1 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -33,6 +33,9 @@
#include <linux/if.h>
#include <linux/list.h>
#include <linux/if_ether.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+
#include <net/ip.h>
#include "ehea.h"
@@ -3295,6 +3298,20 @@ static int __devexit ehea_remove(struct of_device *dev)
return 0;
}
+static int ehea_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ if (action == SYS_RESTART) {
+ ehea_info("Reboot: freeing all eHEA resources");
+ ibmebus_unregister_driver(&ehea_driver);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ehea_reboot_nb = {
+ .notifier_call = ehea_reboot_notifier,
+};
+
static int check_module_parm(void)
{
int ret = 0;
@@ -3351,6 +3368,8 @@ int __init ehea_module_init(void)
if (ret)
goto out;
+ register_reboot_notifier(&ehea_reboot_nb);
+
ret = ibmebus_register_driver(&ehea_driver);
if (ret) {
ehea_error("failed registering eHEA device driver on ebus");
@@ -3362,6 +3381,7 @@ int __init ehea_module_init(void)
if (ret) {
ehea_error("failed to register capabilities attribute, ret=%d",
ret);
+ unregister_reboot_notifier(&ehea_reboot_nb);
ibmebus_unregister_driver(&ehea_driver);
goto out;
}
@@ -3375,6 +3395,7 @@ static void __exit ehea_module_exit(void)
flush_scheduled_work();
driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
ibmebus_unregister_driver(&ehea_driver);
+ unregister_reboot_notifier(&ehea_reboot_nb);
ehea_destroy_busmap();
}
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
new file mode 100644
index 0000000..fc1cf0b
--- /dev/null
+++ b/drivers/net/fec_mpc52xx.c
@@ -0,0 +1,1112 @@
+/*
+ * Driver for the MPC5200 Fast Ethernet Controller
+ *
+ * Originally written by Dale Farnsworth <dfarnsworth@mvista.com> and
+ * now maintained by Sylvain Munaut <tnt@246tNt.com>
+ *
+ * Copyright (C) 2007 Domen Puncer, Telargo, Inc.
+ * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/hardirq.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/mpc52xx.h>
+
+#include <sysdev/bestcomm/bestcomm.h>
+#include <sysdev/bestcomm/fec.h>
+
+#include "fec_mpc52xx.h"
+
+#define DRIVER_NAME "mpc52xx-fec"
+
+static irqreturn_t mpc52xx_fec_interrupt(int, void *);
+static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *);
+static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *);
+static void mpc52xx_fec_stop(struct net_device *dev);
+static void mpc52xx_fec_start(struct net_device *dev);
+static void mpc52xx_fec_reset(struct net_device *dev);
+
+static u8 mpc52xx_fec_mac_addr[6];
+module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0);
+MODULE_PARM_DESC(mac, "six hex octets, e.g. 0x1,0x2,0xc0,0x01,0xba,0xbe");
+
+#define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+		NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN )
+static int debug = -1; /* the above default */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "debugging messages level");
+
+static void mpc52xx_fec_tx_timeout(struct net_device *dev)
+{
+ dev_warn(&dev->dev, "transmit timed out\n");
+
+ mpc52xx_fec_reset(dev);
+
+ dev->stats.tx_errors++;
+
+ netif_wake_queue(dev);
+}
+
+static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ out_be32(&fec->paddr1, *(u32 *)(&mac[0]));
+ out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
+}
+
+static void mpc52xx_fec_get_paddr(struct net_device *dev, u8 *mac)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ *(u32 *)(&mac[0]) = in_be32(&fec->paddr1);
+ *(u16 *)(&mac[4]) = in_be32(&fec->paddr2) >> 16;
+}
+
+static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sock = addr;
+
+ memcpy(dev->dev_addr, sock->sa_data, dev->addr_len);
+
+ mpc52xx_fec_set_paddr(dev, sock->sa_data);
+ return 0;
+}
+
+static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task *s)
+{
+ while (!bcom_queue_empty(s)) {
+ struct bcom_fec_bd *bd;
+ struct sk_buff *skb;
+
+ skb = bcom_retrieve_buffer(s, NULL, (struct bcom_bd **)&bd);
+		dma_unmap_single(&dev->dev, bd->skb_pa, FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+ kfree_skb(skb);
+ }
+}
+
+static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task *rxtsk)
+{
+ while (!bcom_queue_full(rxtsk)) {
+ struct sk_buff *skb;
+ struct bcom_fec_bd *bd;
+
+ skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
+ if (skb == NULL)
+ return -EAGAIN;
+
+ /* zero out the initial receive buffers to aid debugging */
+ memset(skb->data, 0, FEC_RX_BUFFER_SIZE);
+
+ bd = (struct bcom_fec_bd *)bcom_prepare_next_buffer(rxtsk);
+
+ bd->status = FEC_RX_BUFFER_SIZE;
+ bd->skb_pa = dma_map_single(&dev->dev, skb->data,
+ FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+
+ bcom_submit_next_buffer(rxtsk, skb);
+ }
+
+ return 0;
+}
+
+/* based on generic_adjust_link from fs_enet-main.c */
+static void mpc52xx_fec_adjust_link(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ int new_state = 0;
+
+ if (phydev->link != PHY_DOWN) {
+ if (phydev->duplex != priv->duplex) {
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 rcntrl;
+ u32 tcntrl;
+
+ new_state = 1;
+ priv->duplex = phydev->duplex;
+
+ rcntrl = in_be32(&fec->r_cntrl);
+ tcntrl = in_be32(&fec->x_cntrl);
+
+ rcntrl &= ~FEC_RCNTRL_DRT;
+ tcntrl &= ~FEC_TCNTRL_FDEN;
+ if (phydev->duplex == DUPLEX_FULL)
+ tcntrl |= FEC_TCNTRL_FDEN; /* FD enable */
+ else
+ rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */
+
+ out_be32(&fec->r_cntrl, rcntrl);
+ out_be32(&fec->x_cntrl, tcntrl);
+ }
+
+ if (phydev->speed != priv->speed) {
+ new_state = 1;
+ priv->speed = phydev->speed;
+ }
+
+ if (priv->link == PHY_DOWN) {
+ new_state = 1;
+ priv->link = phydev->link;
+ netif_schedule(dev);
+ netif_carrier_on(dev);
+ netif_start_queue(dev);
+ }
+
+ } else if (priv->link) {
+ new_state = 1;
+ priv->link = PHY_DOWN;
+ priv->speed = 0;
+ priv->duplex = -1;
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+}
+
+static int mpc52xx_fec_init_phy(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev;
+ char phy_id[BUS_ID_SIZE];
+
+ snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT,
+ (unsigned int)dev->base_addr, priv->phy_addr);
+
+ priv->link = PHY_DOWN;
+ priv->speed = 0;
+ priv->duplex = -1;
+
+ phydev = phy_connect(dev, phy_id, &mpc52xx_fec_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phydev)) {
+ dev_err(&dev->dev, "phy_connect failed\n");
+ return PTR_ERR(phydev);
+ }
+ dev_info(&dev->dev, "attached phy %i to driver %s\n",
+ phydev->addr, phydev->drv->name);
+
+ priv->phydev = phydev;
+
+ return 0;
+}
+
+static int mpc52xx_fec_phy_start(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ int err;
+
+ if (!priv->has_phy)
+ return 0;
+
+ err = mpc52xx_fec_init_phy(dev);
+ if (err) {
+ dev_err(&dev->dev, "mpc52xx_fec_init_phy failed\n");
+ return err;
+ }
+
+ /* reset phy - this also wakes it from PDOWN */
+ phy_write(priv->phydev, MII_BMCR, BMCR_RESET);
+ phy_start(priv->phydev);
+
+ return 0;
+}
+
+static void mpc52xx_fec_phy_stop(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ if (!priv->has_phy)
+ return;
+
+ phy_disconnect(priv->phydev);
+ /* power down phy */
+ phy_stop(priv->phydev);
+ phy_write(priv->phydev, MII_BMCR, BMCR_PDOWN);
+}
+
+static int mpc52xx_fec_phy_mii_ioctl(struct mpc52xx_fec_priv *priv,
+ struct mii_ioctl_data *mii_data, int cmd)
+{
+ if (!priv->has_phy)
+ return -ENOTSUPP;
+
+ return phy_mii_ioctl(priv->phydev, mii_data, cmd);
+}
+
+static void mpc52xx_fec_phy_hw_init(struct mpc52xx_fec_priv *priv)
+{
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ if (!priv->has_phy)
+ return;
+
+ out_be32(&fec->mii_speed, priv->phy_speed);
+}
+
+static int mpc52xx_fec_open(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ int err = -EBUSY;
+
+ if (request_irq(dev->irq, &mpc52xx_fec_interrupt, IRQF_SHARED,
+ DRIVER_NAME "_ctrl", dev)) {
+ dev_err(&dev->dev, "ctrl interrupt request failed\n");
+ goto out;
+ }
+ if (request_irq(priv->r_irq, &mpc52xx_fec_rx_interrupt, 0,
+ DRIVER_NAME "_rx", dev)) {
+ dev_err(&dev->dev, "rx interrupt request failed\n");
+ goto free_ctrl_irq;
+ }
+ if (request_irq(priv->t_irq, &mpc52xx_fec_tx_interrupt, 0,
+ DRIVER_NAME "_tx", dev)) {
+ dev_err(&dev->dev, "tx interrupt request failed\n");
+ goto free_2irqs;
+ }
+
+ bcom_fec_rx_reset(priv->rx_dmatsk);
+ bcom_fec_tx_reset(priv->tx_dmatsk);
+
+ err = mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);
+ if (err) {
+ dev_err(&dev->dev, "mpc52xx_fec_alloc_rx_buffers failed\n");
+ goto free_irqs;
+ }
+
+ err = mpc52xx_fec_phy_start(dev);
+ if (err)
+ goto free_skbs;
+
+ bcom_enable(priv->rx_dmatsk);
+ bcom_enable(priv->tx_dmatsk);
+
+ mpc52xx_fec_start(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+
+ free_skbs:
+ mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);
+
+ free_irqs:
+ free_irq(priv->t_irq, dev);
+ free_2irqs:
+ free_irq(priv->r_irq, dev);
+ free_ctrl_irq:
+ free_irq(dev->irq, dev);
+ out:
+
+ return err;
+}
+
+static int mpc52xx_fec_close(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ mpc52xx_fec_stop(dev);
+
+ mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);
+
+ free_irq(dev->irq, dev);
+ free_irq(priv->r_irq, dev);
+ free_irq(priv->t_irq, dev);
+
+ mpc52xx_fec_phy_stop(dev);
+
+ return 0;
+}
+
+/* The network core only invokes this function while the device is _not_ in
+ * the XOFF state, so there is no need to check for that here; this invariant
+ * holds as long as the netif_*_queue() calls are made at the proper times.
+ */
+static int mpc52xx_fec_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct bcom_fec_bd *bd;
+
+ if (bcom_queue_full(priv->tx_dmatsk)) {
+ if (net_ratelimit())
+ dev_err(&dev->dev, "transmit queue overrun\n");
+ return 1;
+ }
+
+ spin_lock_irq(&priv->lock);
+ dev->trans_start = jiffies;
+
+ bd = (struct bcom_fec_bd *)
+ bcom_prepare_next_buffer(priv->tx_dmatsk);
+
+ bd->status = skb->len | BCOM_FEC_TX_BD_TFD | BCOM_FEC_TX_BD_TC;
+ bd->skb_pa = dma_map_single(&dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
+
+ bcom_submit_next_buffer(priv->tx_dmatsk, skb);
+
+ if (bcom_queue_full(priv->tx_dmatsk)) {
+ netif_stop_queue(dev);
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ return 0;
+}
+
+/* This handles BestComm transmit task interrupts
+ */
+static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+
+ while (bcom_buffer_done(priv->tx_dmatsk)) {
+ struct sk_buff *skb;
+ struct bcom_fec_bd *bd;
+ skb = bcom_retrieve_buffer(priv->tx_dmatsk, NULL,
+ (struct bcom_bd **)&bd);
+ dma_unmap_single(&dev->dev, bd->skb_pa, skb->len, DMA_TO_DEVICE);
+
+ dev_kfree_skb_irq(skb);
+ }
+
+ netif_wake_queue(dev);
+
+ spin_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ while (bcom_buffer_done(priv->rx_dmatsk)) {
+ struct sk_buff *skb;
+ struct sk_buff *rskb;
+ struct bcom_fec_bd *bd;
+ u32 status;
+
+ rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status,
+ (struct bcom_bd **)&bd);
+		dma_unmap_single(&dev->dev, bd->skb_pa, FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+
+ /* Test for errors in received frame */
+ if (status & BCOM_FEC_RX_BD_ERRORS) {
+ /* Drop packet and reuse the buffer */
+ bd = (struct bcom_fec_bd *)
+ bcom_prepare_next_buffer(priv->rx_dmatsk);
+
+ bd->status = FEC_RX_BUFFER_SIZE;
+ bd->skb_pa = dma_map_single(&dev->dev, rskb->data,
+ FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+
+ bcom_submit_next_buffer(priv->rx_dmatsk, rskb);
+
+ dev->stats.rx_dropped++;
+
+ continue;
+ }
+
+ /* skbs are allocated on open, so now we allocate a new one,
+ * and remove the old (with the packet) */
+ skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
+ if (skb) {
+ /* Process the received skb */
+ int length = status & BCOM_FEC_RX_BD_LEN_MASK;
+
+ skb_put(rskb, length - 4); /* length without CRC32 */
+
+ rskb->dev = dev;
+ rskb->protocol = eth_type_trans(rskb, dev);
+
+ netif_rx(rskb);
+ dev->last_rx = jiffies;
+ } else {
+			/* Can't get a new one: reuse the same buffer and drop the packet */
+ dev_notice(&dev->dev, "Memory squeeze, dropping packet.\n");
+ dev->stats.rx_dropped++;
+
+ skb = rskb;
+ }
+
+ bd = (struct bcom_fec_bd *)
+ bcom_prepare_next_buffer(priv->rx_dmatsk);
+
+ bd->status = FEC_RX_BUFFER_SIZE;
+ bd->skb_pa = dma_map_single(&dev->dev, rskb->data,
+ FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+
+ bcom_submit_next_buffer(priv->rx_dmatsk, skb);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 ievent;
+
+ ievent = in_be32(&fec->ievent);
+
+ ievent &= ~FEC_IEVENT_MII; /* mii is handled separately */
+ if (!ievent)
+ return IRQ_NONE;
+
+ out_be32(&fec->ievent, ievent); /* clear pending events */
+
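+	/* Events other than FIFO errors are only logged (TFINT alone is routine);
+	 * the reset below is triggered only when nothing but FIFO errors is
+	 * pending. */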
+ if (ievent & ~(FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) {
+ if (ievent & ~FEC_IEVENT_TFINT)
+ dev_dbg(&dev->dev, "ievent: %08x\n", ievent);
+ return IRQ_HANDLED;
+ }
+
+ if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR))
+ dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n");
+ if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
+ dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
+
+ mpc52xx_fec_reset(dev);
+
+ netif_wake_queue(dev);
+ return IRQ_HANDLED;
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *mpc52xx_fec_get_stats(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ stats->rx_bytes = in_be32(&fec->rmon_r_octets);
+ stats->rx_packets = in_be32(&fec->rmon_r_packets);
+ stats->rx_errors = in_be32(&fec->rmon_r_crc_align) +
+ in_be32(&fec->rmon_r_undersize) +
+ in_be32(&fec->rmon_r_oversize) +
+ in_be32(&fec->rmon_r_frag) +
+ in_be32(&fec->rmon_r_jab);
+
+ stats->tx_bytes = in_be32(&fec->rmon_t_octets);
+ stats->tx_packets = in_be32(&fec->rmon_t_packets);
+ stats->tx_errors = in_be32(&fec->rmon_t_crc_align) +
+ in_be32(&fec->rmon_t_undersize) +
+ in_be32(&fec->rmon_t_oversize) +
+ in_be32(&fec->rmon_t_frag) +
+ in_be32(&fec->rmon_t_jab);
+
+ stats->multicast = in_be32(&fec->rmon_r_mc_pkt);
+ stats->collisions = in_be32(&fec->rmon_t_col);
+
+ /* detailed rx_errors: */
+ stats->rx_length_errors = in_be32(&fec->rmon_r_undersize)
+ + in_be32(&fec->rmon_r_oversize)
+ + in_be32(&fec->rmon_r_frag)
+ + in_be32(&fec->rmon_r_jab);
+ stats->rx_over_errors = in_be32(&fec->r_macerr);
+ stats->rx_crc_errors = in_be32(&fec->ieee_r_crc);
+ stats->rx_frame_errors = in_be32(&fec->ieee_r_align);
+ stats->rx_fifo_errors = in_be32(&fec->rmon_r_drop);
+ stats->rx_missed_errors = in_be32(&fec->rmon_r_drop);
+
+ /* detailed tx_errors: */
+ stats->tx_aborted_errors = 0;
+ stats->tx_carrier_errors = in_be32(&fec->ieee_t_cserr);
+ stats->tx_fifo_errors = in_be32(&fec->rmon_t_drop);
+ stats->tx_heartbeat_errors = in_be32(&fec->ieee_t_sqe);
+ stats->tx_window_errors = in_be32(&fec->ieee_t_lcol);
+
+ return stats;
+}
+
+/*
+ * Read MIB counters in order to reset them,
+ * then zero all the stats fields in memory
+ */
+static void mpc52xx_fec_reset_stats(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ out_be32(&fec->mib_control, FEC_MIB_DISABLE);
+ memset_io(&fec->rmon_t_drop, 0, (__force u32)&fec->reserved10 -
+ (__force u32)&fec->rmon_t_drop);
+ out_be32(&fec->mib_control, 0);
+
+ memset(&dev->stats, 0, sizeof(dev->stats));
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 rx_control;
+
+ rx_control = in_be32(&fec->r_cntrl);
+
+ if (dev->flags & IFF_PROMISC) {
+ rx_control |= FEC_RCNTRL_PROM;
+ out_be32(&fec->r_cntrl, rx_control);
+ } else {
+ rx_control &= ~FEC_RCNTRL_PROM;
+ out_be32(&fec->r_cntrl, rx_control);
+
+ if (dev->flags & IFF_ALLMULTI) {
+ out_be32(&fec->gaddr1, 0xffffffff);
+ out_be32(&fec->gaddr2, 0xffffffff);
+ } else {
+ u32 crc;
+ int i;
+ struct dev_mc_list *dmi;
+ u32 gaddr1 = 0x00000000;
+ u32 gaddr2 = 0x00000000;
+
+ dmi = dev->mc_list;
+ for (i=0; i<dev->mc_count; i++) {
+ crc = ether_crc_le(6, dmi->dmi_addr) >> 26;
+ if (crc >= 32)
+ gaddr1 |= 1 << (crc-32);
+ else
+ gaddr2 |= 1 << crc;
+ dmi = dmi->next;
+ }
+ out_be32(&fec->gaddr1, gaddr1);
+ out_be32(&fec->gaddr2, gaddr2);
+ }
+ }
+}
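
The loop above reduces each multicast address to a 6-bit hash (the top six bits of a little-endian CRC-32) that selects one of the 64 filter bits split across gaddr1 and gaddr2. A minimal user-space sketch of the same computation, assuming ether_crc_le() behaves as a reflected CRC-32 (polynomial 0xedb88320, initial value 0xffffffff, no final inversion) and using a purely illustrative multicast address:

    #include <stdint.h>
    #include <stdio.h>

    /* Reflected CRC-32 as ether_crc_le() computes it: polynomial 0xedb88320,
     * initial value 0xffffffff, no final inversion. */
    static uint32_t ether_crc_le(int len, const uint8_t *data)
    {
        uint32_t crc = 0xffffffff;
        int i, bit;

        for (i = 0; i < len; i++) {
            crc ^= data[i];
            for (bit = 0; bit < 8; bit++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
        }
        return crc;
    }

    int main(void)
    {
        /* example multicast address, purely illustrative */
        const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint32_t hash = ether_crc_le(6, mac) >> 26;  /* top 6 CRC bits: 0..63 */

        if (hash >= 32)
            printf("set bit %u of GADDR1\n", hash - 32);
        else
            printf("set bit %u of GADDR2\n", hash);
        return 0;
    }

As with any hash-based filter, collisions mean the hardware can pass frames the stack never subscribed to; the upper layers discard those false positives in software.
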
+
+/**
+ * mpc52xx_fec_hw_init
+ * @dev: network device
+ *
+ * Set up various hardware settings; only needed once at startup
+ */
+static void mpc52xx_fec_hw_init(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ int i;
+
+ /* Whack a reset. We should wait for this. */
+ out_be32(&fec->ecntrl, FEC_ECNTRL_RESET);
+ for (i = 0; i < FEC_RESET_DELAY; ++i) {
+ if ((in_be32(&fec->ecntrl) & FEC_ECNTRL_RESET) == 0)
+ break;
+ udelay(1);
+ }
+ if (i == FEC_RESET_DELAY)
+ dev_err(&dev->dev, "FEC Reset timeout!\n");
+
+ /* set pause to 0x20 frames */
+ out_be32(&fec->op_pause, FEC_OP_PAUSE_OPCODE | 0x20);
+
+ /* high service request will be deasserted when there's < 7 bytes in fifo
+ * low service request will be deasserted when there's < 4*7 bytes in fifo
+ */
+ out_be32(&fec->rfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);
+ out_be32(&fec->tfifo_cntrl, FEC_FIFO_CNTRL_FRAME | FEC_FIFO_CNTRL_LTG_7);
+
+ /* alarm when <= x bytes in FIFO */
+ out_be32(&fec->rfifo_alarm, 0x0000030c);
+ out_be32(&fec->tfifo_alarm, 0x00000100);
+
+	/* begin transmission when 256 bytes are in FIFO (or EOF or FIFO full) */
+ out_be32(&fec->x_wmrk, FEC_FIFO_WMRK_256B);
+
+ /* enable crc generation */
+ out_be32(&fec->xmit_fsm, FEC_XMIT_FSM_APPEND_CRC | FEC_XMIT_FSM_ENABLE_CRC);
+ out_be32(&fec->iaddr1, 0x00000000); /* No individual filter */
+ out_be32(&fec->iaddr2, 0x00000000); /* No individual filter */
+
+	/* set phy speed.
+	 * this can't be done in the phy driver, since it needs to be set up
+	 * before the rest of the FEC initialization (even on resume) */
+ mpc52xx_fec_phy_hw_init(priv);
+}
+
+/**
+ * mpc52xx_fec_start
+ * @dev: network device
+ *
+ * This function is called to start or restart the FEC during a link
+ * change. This happens on fifo errors or when switching between half
+ * and full duplex.
+ */
+static void mpc52xx_fec_start(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ u32 rcntrl;
+ u32 tcntrl;
+ u32 tmp;
+
+ /* clear sticky error bits */
+ tmp = FEC_FIFO_STATUS_ERR | FEC_FIFO_STATUS_UF | FEC_FIFO_STATUS_OF;
+ out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status) & tmp);
+ out_be32(&fec->tfifo_status, in_be32(&fec->tfifo_status) & tmp);
+
+ /* FIFOs will reset on mpc52xx_fec_enable */
+ out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_ENABLE_IS_RESET);
+
+ /* Set station address. */
+ mpc52xx_fec_set_paddr(dev, dev->dev_addr);
+
+ mpc52xx_fec_set_multicast_list(dev);
+
+ /* set max frame len, enable flow control, select mii mode */
+ rcntrl = FEC_RX_BUFFER_SIZE << 16; /* max frame length */
+ rcntrl |= FEC_RCNTRL_FCE;
+
+ if (priv->has_phy)
+ rcntrl |= FEC_RCNTRL_MII_MODE;
+
+ if (priv->duplex == DUPLEX_FULL)
+ tcntrl = FEC_TCNTRL_FDEN; /* FD enable */
+ else {
+ rcntrl |= FEC_RCNTRL_DRT; /* disable Rx on Tx (HD) */
+ tcntrl = 0;
+ }
+ out_be32(&fec->r_cntrl, rcntrl);
+ out_be32(&fec->x_cntrl, tcntrl);
+
+ /* Clear any outstanding interrupt. */
+ out_be32(&fec->ievent, 0xffffffff);
+
+ /* Enable interrupts we wish to service. */
+ out_be32(&fec->imask, FEC_IMASK_ENABLE);
+
+ /* And last, enable the transmit and receive processing. */
+ out_be32(&fec->ecntrl, FEC_ECNTRL_ETHER_EN);
+ out_be32(&fec->r_des_active, 0x01000000);
+}
+
+/**
+ * mpc52xx_fec_stop
+ * @dev: network device
+ *
+ * stop all activity on fec and empty dma buffers
+ */
+static void mpc52xx_fec_stop(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+ unsigned long timeout;
+
+ /* disable all interrupts */
+ out_be32(&fec->imask, 0);
+
+ /* Disable the rx task. */
+ bcom_disable(priv->rx_dmatsk);
+
+ /* Wait for tx queue to drain, but only if we're in process context */
+ if (!in_interrupt()) {
+ timeout = jiffies + msecs_to_jiffies(2000);
+ while (time_before(jiffies, timeout) &&
+ !bcom_queue_empty(priv->tx_dmatsk))
+ msleep(100);
+
+		if (time_after_eq(jiffies, timeout)) {
+			dev_err(&dev->dev, "queues didn't drain\n");
+			dev_err(&dev->dev, "  tx: index: %i, outdex: %i\n",
+					priv->tx_dmatsk->index,
+					priv->tx_dmatsk->outdex);
+			dev_err(&dev->dev, "  rx: index: %i, outdex: %i\n",
+					priv->rx_dmatsk->index,
+					priv->rx_dmatsk->outdex);
+		}
+ }
+
+ bcom_disable(priv->tx_dmatsk);
+
+ /* Stop FEC */
+ out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~FEC_ECNTRL_ETHER_EN);
+}
+
+/* reset fec and bestcomm tasks */
+static void mpc52xx_fec_reset(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ mpc52xx_fec_stop(dev);
+
+ out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status));
+ out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_RESET_FIFO);
+
+ mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);
+
+ mpc52xx_fec_hw_init(dev);
+
+	if (priv->phydev) {
+		phy_stop(priv->phydev);
+		phy_write(priv->phydev, MII_BMCR, BMCR_RESET);
+		phy_start(priv->phydev);
+	}
+
+ bcom_fec_rx_reset(priv->rx_dmatsk);
+ bcom_fec_tx_reset(priv->tx_dmatsk);
+
+ mpc52xx_fec_alloc_rx_buffers(dev, priv->rx_dmatsk);
+
+ bcom_enable(priv->rx_dmatsk);
+ bcom_enable(priv->tx_dmatsk);
+
+ mpc52xx_fec_start(dev);
+}
+
+
+/* ethtool interface */
+static void mpc52xx_fec_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, DRIVER_NAME);
+}
+
+static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ priv->msg_enable = level;
+}
+
+static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
+ .get_drvinfo = mpc52xx_fec_get_drvinfo,
+ .get_settings = mpc52xx_fec_get_settings,
+ .set_settings = mpc52xx_fec_set_settings,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = mpc52xx_fec_get_msglevel,
+ .set_msglevel = mpc52xx_fec_set_msglevel,
+};
+
+
+static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ return mpc52xx_fec_phy_mii_ioctl(priv, if_mii(rq), cmd);
+}
+
+/* ======================================================================== */
+/* OF Driver */
+/* ======================================================================== */
+
+static int __devinit
+mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
+{
+ int rv;
+ struct net_device *ndev;
+ struct mpc52xx_fec_priv *priv = NULL;
+ struct resource mem;
+ const phandle *ph;
+
+ phys_addr_t rx_fifo;
+ phys_addr_t tx_fifo;
+
+	/* Get the ether ndev & its private zone */
+ ndev = alloc_etherdev(sizeof(struct mpc52xx_fec_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+
+ /* Reserve FEC control zone */
+ rv = of_address_to_resource(op->node, 0, &mem);
+ if (rv) {
+ printk(KERN_ERR DRIVER_NAME ": "
+ "Error while parsing device node resource\n" );
+ return rv;
+ }
+ if ((mem.end - mem.start + 1) != sizeof(struct mpc52xx_fec)) {
+ printk(KERN_ERR DRIVER_NAME
+ " - invalid resource size (%lx != %x), check mpc52xx_devices.c\n",
+ (unsigned long)(mem.end - mem.start + 1), sizeof(struct mpc52xx_fec));
+ return -EINVAL;
+ }
+
+ if (!request_mem_region(mem.start, sizeof(struct mpc52xx_fec), DRIVER_NAME))
+ return -EBUSY;
+
+ /* Init ether ndev with what we have */
+ ndev->open = mpc52xx_fec_open;
+ ndev->stop = mpc52xx_fec_close;
+ ndev->hard_start_xmit = mpc52xx_fec_hard_start_xmit;
+ ndev->do_ioctl = mpc52xx_fec_ioctl;
+ ndev->ethtool_ops = &mpc52xx_fec_ethtool_ops;
+ ndev->get_stats = mpc52xx_fec_get_stats;
+ ndev->set_mac_address = mpc52xx_fec_set_mac_address;
+ ndev->set_multicast_list = mpc52xx_fec_set_multicast_list;
+ ndev->tx_timeout = mpc52xx_fec_tx_timeout;
+ ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT;
+ ndev->base_addr = mem.start;
+
+	priv->t_irq = priv->r_irq = ndev->irq = NO_IRQ; /* IRQs are free for now */
+
+ spin_lock_init(&priv->lock);
+
+ /* ioremap the zones */
+ priv->fec = ioremap(mem.start, sizeof(struct mpc52xx_fec));
+
+ if (!priv->fec) {
+ rv = -ENOMEM;
+ goto probe_error;
+ }
+
+ /* Bestcomm init */
+ rx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, rfifo_data);
+ tx_fifo = ndev->base_addr + offsetof(struct mpc52xx_fec, tfifo_data);
+
+ priv->rx_dmatsk = bcom_fec_rx_init(FEC_RX_NUM_BD, rx_fifo, FEC_RX_BUFFER_SIZE);
+ priv->tx_dmatsk = bcom_fec_tx_init(FEC_TX_NUM_BD, tx_fifo);
+
+ if (!priv->rx_dmatsk || !priv->tx_dmatsk) {
+ printk(KERN_ERR DRIVER_NAME ": Can not init SDMA tasks\n" );
+ rv = -ENOMEM;
+ goto probe_error;
+ }
+
+ /* Get the IRQ we need one by one */
+ /* Control */
+ ndev->irq = irq_of_parse_and_map(op->node, 0);
+
+ /* RX */
+ priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
+
+ /* TX */
+ priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk);
+
+ /* MAC address init */
+ if (!is_zero_ether_addr(mpc52xx_fec_mac_addr))
+ memcpy(ndev->dev_addr, mpc52xx_fec_mac_addr, 6);
+ else
+ mpc52xx_fec_get_paddr(ndev, ndev->dev_addr);
+
+ priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT);
+ priv->duplex = DUPLEX_FULL;
+
+ /* is the phy present in device tree? */
+ ph = of_get_property(op->node, "phy-handle", NULL);
+ if (ph) {
+ const unsigned int *prop;
+ struct device_node *phy_dn;
+ priv->has_phy = 1;
+
+ phy_dn = of_find_node_by_phandle(*ph);
+ prop = of_get_property(phy_dn, "reg", NULL);
+ priv->phy_addr = *prop;
+
+ of_node_put(phy_dn);
+
+		/* PHY speed: MII management clock divider derived from the IPB bus clock */
+ priv->phy_speed = ((mpc52xx_find_ipb_freq(op->node) >> 20) / 5) << 1;
+ } else {
+ dev_info(&ndev->dev, "can't find \"phy-handle\" in device"
+ " tree, using 7-wire mode\n");
+ }
+
+ /* Hardware init */
+ mpc52xx_fec_hw_init(ndev);
+
+ mpc52xx_fec_reset_stats(ndev);
+
+ /* Register the new network device */
+ rv = register_netdev(ndev);
+ if (rv < 0)
+ goto probe_error;
+
+	/* We're done! */
+ dev_set_drvdata(&op->dev, ndev);
+
+ return 0;
+
+
+ /* Error handling - free everything that might be allocated */
+probe_error:
+
+ irq_dispose_mapping(ndev->irq);
+
+ if (priv->rx_dmatsk)
+ bcom_fec_rx_release(priv->rx_dmatsk);
+ if (priv->tx_dmatsk)
+ bcom_fec_tx_release(priv->tx_dmatsk);
+
+ if (priv->fec)
+ iounmap(priv->fec);
+
+ release_mem_region(mem.start, sizeof(struct mpc52xx_fec));
+
+ free_netdev(ndev);
+
+ return rv;
+}
+
+static int
+mpc52xx_fec_remove(struct of_device *op)
+{
+ struct net_device *ndev;
+ struct mpc52xx_fec_priv *priv;
+
+ ndev = dev_get_drvdata(&op->dev);
+ priv = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+
+ irq_dispose_mapping(ndev->irq);
+
+ bcom_fec_rx_release(priv->rx_dmatsk);
+ bcom_fec_tx_release(priv->tx_dmatsk);
+
+ iounmap(priv->fec);
+
+ release_mem_region(ndev->base_addr, sizeof(struct mpc52xx_fec));
+
+ free_netdev(ndev);
+
+ dev_set_drvdata(&op->dev, NULL);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int mpc52xx_fec_of_suspend(struct of_device *op, pm_message_t state)
+{
+ struct net_device *dev = dev_get_drvdata(&op->dev);
+
+ if (netif_running(dev))
+ mpc52xx_fec_close(dev);
+
+ return 0;
+}
+
+static int mpc52xx_fec_of_resume(struct of_device *op)
+{
+ struct net_device *dev = dev_get_drvdata(&op->dev);
+
+ mpc52xx_fec_hw_init(dev);
+ mpc52xx_fec_reset_stats(dev);
+
+ if (netif_running(dev))
+ mpc52xx_fec_open(dev);
+
+ return 0;
+}
+#endif
+
+static struct of_device_id mpc52xx_fec_match[] = {
+ {
+ .type = "network",
+ .compatible = "mpc5200-fec",
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, mpc52xx_fec_match);
+
+static struct of_platform_driver mpc52xx_fec_driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .match_table = mpc52xx_fec_match,
+ .probe = mpc52xx_fec_probe,
+ .remove = mpc52xx_fec_remove,
+#ifdef CONFIG_PM
+ .suspend = mpc52xx_fec_of_suspend,
+ .resume = mpc52xx_fec_of_resume,
+#endif
+};
+
+
+/* ======================================================================== */
+/* Module */
+/* ======================================================================== */
+
+static int __init
+mpc52xx_fec_init(void)
+{
+#ifdef CONFIG_FEC_MPC52xx_MDIO
+ int ret;
+ ret = of_register_platform_driver(&mpc52xx_fec_mdio_driver);
+ if (ret) {
+ printk(KERN_ERR DRIVER_NAME ": failed to register mdio driver\n");
+ return ret;
+ }
+#endif
+ return of_register_platform_driver(&mpc52xx_fec_driver);
+}
+
+static void __exit
+mpc52xx_fec_exit(void)
+{
+ of_unregister_platform_driver(&mpc52xx_fec_driver);
+#ifdef CONFIG_FEC_MPC52xx_MDIO
+ of_unregister_platform_driver(&mpc52xx_fec_mdio_driver);
+#endif
+}
+
+
+module_init(mpc52xx_fec_init);
+module_exit(mpc52xx_fec_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dale Farnsworth");
+MODULE_DESCRIPTION("Ethernet driver for the Freescale MPC52xx FEC");
diff --git a/drivers/net/fec_mpc52xx.h b/drivers/net/fec_mpc52xx.h
new file mode 100644
index 0000000..8b1f753
--- /dev/null
+++ b/drivers/net/fec_mpc52xx.h
@@ -0,0 +1,313 @@
+/*
+ * drivers/net/fec_mpc52xx.h
+ *
+ * Driver for the MPC5200 Fast Ethernet Controller
+ *
+ * Author: Dale Farnsworth <dfarnsworth@mvista.com>
+ *
+ * 2003-2004 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#ifndef __DRIVERS_NET_MPC52XX_FEC_H__
+#define __DRIVERS_NET_MPC52XX_FEC_H__
+
+#include <linux/phy.h>
+
+/* Tunable constants */
+/* FEC_RX_BUFFER_SIZE includes 4 bytes for CRC32 */
+#define FEC_RX_BUFFER_SIZE 1522 /* max receive packet size */
+#define FEC_RX_NUM_BD 256
+#define FEC_TX_NUM_BD 64
+
+#define FEC_RESET_DELAY 50 /* uS */
+
+#define FEC_WATCHDOG_TIMEOUT ((400*HZ)/1000)
+
+struct mpc52xx_fec_priv {
+ int duplex;
+ int r_irq;
+ int t_irq;
+ struct mpc52xx_fec __iomem *fec;
+ struct bcom_task *rx_dmatsk;
+ struct bcom_task *tx_dmatsk;
+ spinlock_t lock;
+ int msg_enable;
+
+ int has_phy;
+ unsigned int phy_speed;
+ unsigned int phy_addr;
+ struct phy_device *phydev;
+ enum phy_state link;
+ int speed;
+};
+
+
+/* ======================================================================== */
+/* Hardware register sets & bits */
+/* ======================================================================== */
+
+struct mpc52xx_fec {
+ u32 fec_id; /* FEC + 0x000 */
+ u32 ievent; /* FEC + 0x004 */
+ u32 imask; /* FEC + 0x008 */
+
+ u32 reserved0[1]; /* FEC + 0x00C */
+ u32 r_des_active; /* FEC + 0x010 */
+ u32 x_des_active; /* FEC + 0x014 */
+ u32 r_des_active_cl; /* FEC + 0x018 */
+ u32 x_des_active_cl; /* FEC + 0x01C */
+ u32 ivent_set; /* FEC + 0x020 */
+ u32 ecntrl; /* FEC + 0x024 */
+
+ u32 reserved1[6]; /* FEC + 0x028-03C */
+ u32 mii_data; /* FEC + 0x040 */
+ u32 mii_speed; /* FEC + 0x044 */
+ u32 mii_status; /* FEC + 0x048 */
+
+ u32 reserved2[5]; /* FEC + 0x04C-05C */
+ u32 mib_data; /* FEC + 0x060 */
+ u32 mib_control; /* FEC + 0x064 */
+
+ u32 reserved3[6]; /* FEC + 0x068-7C */
+ u32 r_activate; /* FEC + 0x080 */
+ u32 r_cntrl; /* FEC + 0x084 */
+ u32 r_hash; /* FEC + 0x088 */
+ u32 r_data; /* FEC + 0x08C */
+ u32 ar_done; /* FEC + 0x090 */
+ u32 r_test; /* FEC + 0x094 */
+ u32 r_mib; /* FEC + 0x098 */
+ u32 r_da_low; /* FEC + 0x09C */
+ u32 r_da_high; /* FEC + 0x0A0 */
+
+ u32 reserved4[7]; /* FEC + 0x0A4-0BC */
+ u32 x_activate; /* FEC + 0x0C0 */
+ u32 x_cntrl; /* FEC + 0x0C4 */
+ u32 backoff; /* FEC + 0x0C8 */
+ u32 x_data; /* FEC + 0x0CC */
+ u32 x_status; /* FEC + 0x0D0 */
+ u32 x_mib; /* FEC + 0x0D4 */
+ u32 x_test; /* FEC + 0x0D8 */
+ u32 fdxfc_da1; /* FEC + 0x0DC */
+ u32 fdxfc_da2; /* FEC + 0x0E0 */
+ u32 paddr1; /* FEC + 0x0E4 */
+ u32 paddr2; /* FEC + 0x0E8 */
+ u32 op_pause; /* FEC + 0x0EC */
+
+ u32 reserved5[4]; /* FEC + 0x0F0-0FC */
+ u32 instr_reg; /* FEC + 0x100 */
+ u32 context_reg; /* FEC + 0x104 */
+ u32 test_cntrl; /* FEC + 0x108 */
+ u32 acc_reg; /* FEC + 0x10C */
+ u32 ones; /* FEC + 0x110 */
+ u32 zeros; /* FEC + 0x114 */
+ u32 iaddr1; /* FEC + 0x118 */
+ u32 iaddr2; /* FEC + 0x11C */
+ u32 gaddr1; /* FEC + 0x120 */
+ u32 gaddr2; /* FEC + 0x124 */
+ u32 random; /* FEC + 0x128 */
+ u32 rand1; /* FEC + 0x12C */
+ u32 tmp; /* FEC + 0x130 */
+
+ u32 reserved6[3]; /* FEC + 0x134-13C */
+ u32 fifo_id; /* FEC + 0x140 */
+ u32 x_wmrk; /* FEC + 0x144 */
+ u32 fcntrl; /* FEC + 0x148 */
+ u32 r_bound; /* FEC + 0x14C */
+ u32 r_fstart; /* FEC + 0x150 */
+ u32 r_count; /* FEC + 0x154 */
+ u32 r_lag; /* FEC + 0x158 */
+ u32 r_read; /* FEC + 0x15C */
+ u32 r_write; /* FEC + 0x160 */
+ u32 x_count; /* FEC + 0x164 */
+ u32 x_lag; /* FEC + 0x168 */
+ u32 x_retry; /* FEC + 0x16C */
+ u32 x_write; /* FEC + 0x170 */
+ u32 x_read; /* FEC + 0x174 */
+
+ u32 reserved7[2]; /* FEC + 0x178-17C */
+ u32 fm_cntrl; /* FEC + 0x180 */
+ u32 rfifo_data; /* FEC + 0x184 */
+ u32 rfifo_status; /* FEC + 0x188 */
+ u32 rfifo_cntrl; /* FEC + 0x18C */
+ u32 rfifo_lrf_ptr; /* FEC + 0x190 */
+ u32 rfifo_lwf_ptr; /* FEC + 0x194 */
+ u32 rfifo_alarm; /* FEC + 0x198 */
+ u32 rfifo_rdptr; /* FEC + 0x19C */
+ u32 rfifo_wrptr; /* FEC + 0x1A0 */
+ u32 tfifo_data; /* FEC + 0x1A4 */
+ u32 tfifo_status; /* FEC + 0x1A8 */
+ u32 tfifo_cntrl; /* FEC + 0x1AC */
+ u32 tfifo_lrf_ptr; /* FEC + 0x1B0 */
+ u32 tfifo_lwf_ptr; /* FEC + 0x1B4 */
+ u32 tfifo_alarm; /* FEC + 0x1B8 */
+ u32 tfifo_rdptr; /* FEC + 0x1BC */
+ u32 tfifo_wrptr; /* FEC + 0x1C0 */
+
+ u32 reset_cntrl; /* FEC + 0x1C4 */
+ u32 xmit_fsm; /* FEC + 0x1C8 */
+
+ u32 reserved8[3]; /* FEC + 0x1CC-1D4 */
+ u32 rdes_data0; /* FEC + 0x1D8 */
+ u32 rdes_data1; /* FEC + 0x1DC */
+ u32 r_length; /* FEC + 0x1E0 */
+ u32 x_length; /* FEC + 0x1E4 */
+ u32 x_addr; /* FEC + 0x1E8 */
+ u32 cdes_data; /* FEC + 0x1EC */
+ u32 status; /* FEC + 0x1F0 */
+ u32 dma_control; /* FEC + 0x1F4 */
+ u32 des_cmnd; /* FEC + 0x1F8 */
+ u32 data; /* FEC + 0x1FC */
+
+ u32 rmon_t_drop; /* FEC + 0x200 */
+ u32 rmon_t_packets; /* FEC + 0x204 */
+ u32 rmon_t_bc_pkt; /* FEC + 0x208 */
+ u32 rmon_t_mc_pkt; /* FEC + 0x20C */
+ u32 rmon_t_crc_align; /* FEC + 0x210 */
+ u32 rmon_t_undersize; /* FEC + 0x214 */
+ u32 rmon_t_oversize; /* FEC + 0x218 */
+ u32 rmon_t_frag; /* FEC + 0x21C */
+ u32 rmon_t_jab; /* FEC + 0x220 */
+ u32 rmon_t_col; /* FEC + 0x224 */
+ u32 rmon_t_p64; /* FEC + 0x228 */
+ u32 rmon_t_p65to127; /* FEC + 0x22C */
+ u32 rmon_t_p128to255; /* FEC + 0x230 */
+ u32 rmon_t_p256to511; /* FEC + 0x234 */
+ u32 rmon_t_p512to1023; /* FEC + 0x238 */
+ u32 rmon_t_p1024to2047; /* FEC + 0x23C */
+ u32 rmon_t_p_gte2048; /* FEC + 0x240 */
+ u32 rmon_t_octets; /* FEC + 0x244 */
+ u32 ieee_t_drop; /* FEC + 0x248 */
+ u32 ieee_t_frame_ok; /* FEC + 0x24C */
+ u32 ieee_t_1col; /* FEC + 0x250 */
+ u32 ieee_t_mcol; /* FEC + 0x254 */
+ u32 ieee_t_def; /* FEC + 0x258 */
+ u32 ieee_t_lcol; /* FEC + 0x25C */
+ u32 ieee_t_excol; /* FEC + 0x260 */
+ u32 ieee_t_macerr; /* FEC + 0x264 */
+ u32 ieee_t_cserr; /* FEC + 0x268 */
+ u32 ieee_t_sqe; /* FEC + 0x26C */
+ u32 t_fdxfc; /* FEC + 0x270 */
+ u32 ieee_t_octets_ok; /* FEC + 0x274 */
+
+ u32 reserved9[2]; /* FEC + 0x278-27C */
+ u32 rmon_r_drop; /* FEC + 0x280 */
+ u32 rmon_r_packets; /* FEC + 0x284 */
+ u32 rmon_r_bc_pkt; /* FEC + 0x288 */
+ u32 rmon_r_mc_pkt; /* FEC + 0x28C */
+ u32 rmon_r_crc_align; /* FEC + 0x290 */
+ u32 rmon_r_undersize; /* FEC + 0x294 */
+ u32 rmon_r_oversize; /* FEC + 0x298 */
+ u32 rmon_r_frag; /* FEC + 0x29C */
+ u32 rmon_r_jab; /* FEC + 0x2A0 */
+
+ u32 rmon_r_resvd_0; /* FEC + 0x2A4 */
+
+ u32 rmon_r_p64; /* FEC + 0x2A8 */
+ u32 rmon_r_p65to127; /* FEC + 0x2AC */
+ u32 rmon_r_p128to255; /* FEC + 0x2B0 */
+ u32 rmon_r_p256to511; /* FEC + 0x2B4 */
+ u32 rmon_r_p512to1023; /* FEC + 0x2B8 */
+ u32 rmon_r_p1024to2047; /* FEC + 0x2BC */
+ u32 rmon_r_p_gte2048; /* FEC + 0x2C0 */
+ u32 rmon_r_octets; /* FEC + 0x2C4 */
+ u32 ieee_r_drop; /* FEC + 0x2C8 */
+ u32 ieee_r_frame_ok; /* FEC + 0x2CC */
+ u32 ieee_r_crc; /* FEC + 0x2D0 */
+ u32 ieee_r_align; /* FEC + 0x2D4 */
+ u32 r_macerr; /* FEC + 0x2D8 */
+ u32 r_fdxfc; /* FEC + 0x2DC */
+ u32 ieee_r_octets_ok; /* FEC + 0x2E0 */
+
+ u32 reserved10[7]; /* FEC + 0x2E4-2FC */
+
+ u32 reserved11[64]; /* FEC + 0x300-3FF */
+};
+
+#define FEC_MIB_DISABLE 0x80000000
+
+#define FEC_IEVENT_HBERR 0x80000000
+#define FEC_IEVENT_BABR 0x40000000
+#define FEC_IEVENT_BABT 0x20000000
+#define FEC_IEVENT_GRA 0x10000000
+#define FEC_IEVENT_TFINT 0x08000000
+#define FEC_IEVENT_MII 0x00800000
+#define FEC_IEVENT_LATE_COL 0x00200000
+#define FEC_IEVENT_COL_RETRY_LIM 0x00100000
+#define FEC_IEVENT_XFIFO_UN 0x00080000
+#define FEC_IEVENT_XFIFO_ERROR 0x00040000
+#define FEC_IEVENT_RFIFO_ERROR 0x00020000
+
+#define FEC_IMASK_HBERR 0x80000000
+#define FEC_IMASK_BABR 0x40000000
+#define FEC_IMASK_BABT 0x20000000
+#define FEC_IMASK_GRA 0x10000000
+#define FEC_IMASK_MII 0x00800000
+#define FEC_IMASK_LATE_COL 0x00200000
+#define FEC_IMASK_COL_RETRY_LIM 0x00100000
+#define FEC_IMASK_XFIFO_UN 0x00080000
+#define FEC_IMASK_XFIFO_ERROR 0x00040000
+#define FEC_IMASK_RFIFO_ERROR 0x00020000
+
+/* all but MII, which is enabled separately */
+#define FEC_IMASK_ENABLE (FEC_IMASK_HBERR | FEC_IMASK_BABR | \
+ FEC_IMASK_BABT | FEC_IMASK_GRA | FEC_IMASK_LATE_COL | \
+ FEC_IMASK_COL_RETRY_LIM | FEC_IMASK_XFIFO_UN | \
+ FEC_IMASK_XFIFO_ERROR | FEC_IMASK_RFIFO_ERROR)
+
+#define FEC_RCNTRL_MAX_FL_SHIFT 16
+#define FEC_RCNTRL_LOOP 0x01
+#define FEC_RCNTRL_DRT 0x02
+#define FEC_RCNTRL_MII_MODE 0x04
+#define FEC_RCNTRL_PROM 0x08
+#define FEC_RCNTRL_BC_REJ 0x10
+#define FEC_RCNTRL_FCE 0x20
+
+#define FEC_TCNTRL_GTS 0x00000001
+#define FEC_TCNTRL_HBC 0x00000002
+#define FEC_TCNTRL_FDEN 0x00000004
+#define FEC_TCNTRL_TFC_PAUSE 0x00000008
+#define FEC_TCNTRL_RFC_PAUSE 0x00000010
+
+#define FEC_ECNTRL_RESET 0x00000001
+#define FEC_ECNTRL_ETHER_EN 0x00000002
+
+#define FEC_MII_DATA_ST 0x40000000 /* Start frame */
+#define FEC_MII_DATA_OP_RD 0x20000000 /* Perform read */
+#define FEC_MII_DATA_OP_WR 0x10000000 /* Perform write */
+#define FEC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address mask */
+#define FEC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register mask */
+#define FEC_MII_DATA_TA 0x00020000 /* Turnaround */
+#define FEC_MII_DATA_DATAMSK 0x0000ffff /* PHY data mask */
+
+#define FEC_MII_READ_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA)
+#define FEC_MII_WRITE_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR | FEC_MII_DATA_TA)
+
+#define FEC_MII_DATA_RA_SHIFT 0x12 /* MII reg addr bits */
+#define FEC_MII_DATA_PA_SHIFT 0x17 /* MII PHY addr bits */
+
+#define FEC_PADDR2_TYPE 0x8808
+
+#define FEC_OP_PAUSE_OPCODE 0x00010000
+
+#define FEC_FIFO_WMRK_256B 0x3
+
+#define FEC_FIFO_STATUS_ERR 0x00400000
+#define FEC_FIFO_STATUS_UF 0x00200000
+#define FEC_FIFO_STATUS_OF 0x00100000
+
+#define FEC_FIFO_CNTRL_FRAME 0x08000000
+#define FEC_FIFO_CNTRL_LTG_7 0x07000000
+
+#define FEC_RESET_CNTRL_RESET_FIFO 0x02000000
+#define FEC_RESET_CNTRL_ENABLE_IS_RESET 0x01000000
+
+#define FEC_XMIT_FSM_APPEND_CRC 0x02000000
+#define FEC_XMIT_FSM_ENABLE_CRC 0x01000000
+
+
+extern struct of_platform_driver mpc52xx_fec_mdio_driver;
+
+#endif /* __DRIVERS_NET_MPC52XX_FEC_H__ */
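[Note: the mii_data register encodes a complete MII management transaction in one 32-bit word: start bits, opcode, PHY address, register address and turnaround, using the FEC_MII_* masks and shifts defined above. A minimal stand-alone sketch of how a read request word is composed (plain user-space C, constants copied from this header for illustration; not part of the driver):

        /* Standalone sketch: compose an FEC MII management read frame.
         * Constants mirror the FEC_MII_* defines from fec_mpc52xx.h above. */
        #include <stdio.h>
        #include <stdint.h>

        #define FEC_MII_DATA_ST       0x40000000u  /* start of frame */
        #define FEC_MII_DATA_OP_RD    0x20000000u  /* read opcode */
        #define FEC_MII_DATA_TA       0x00020000u  /* turnaround */
        #define FEC_MII_DATA_PA_MSK   0x0f800000u  /* PHY address field */
        #define FEC_MII_DATA_RA_MSK   0x007c0000u  /* register address field */
        #define FEC_MII_DATA_PA_SHIFT 0x17         /* 23 */
        #define FEC_MII_DATA_RA_SHIFT 0x12         /* 18 */

        #define FEC_MII_READ_FRAME (FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA)

        static uint32_t mii_read_request(int phy_id, int reg)
        {
            uint32_t request = FEC_MII_READ_FRAME;

            request |= ((uint32_t)phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
            request |= ((uint32_t)reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
            return request;
        }

        int main(void)
        {
            /* e.g. PHY 1, register 2 */
            printf("mii_data = 0x%08x\n", mii_read_request(1, 2));
            return 0;
        }

Compiled and run this prints 0x608a0000 for PHY 1, register 2 -- the same word mpc52xx_fec_mdio_read() below composes before polling for completion.]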
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
new file mode 100644
index 0000000..ba6e8b2
--- /dev/null
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -0,0 +1,198 @@
+/*
+ * Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver
+ *
+ * Copyright (C) 2007 Domen Puncer, Telargo, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/of_platform.h>
+#include <asm/io.h>
+#include <asm/mpc52xx.h>
+#include "fec_mpc52xx.h"
+
+struct mpc52xx_fec_mdio_priv {
+ struct mpc52xx_fec __iomem *regs;
+};
+
+static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct mpc52xx_fec_mdio_priv *priv = bus->priv;
+ struct mpc52xx_fec __iomem *fec;
+ int tries = 100;
+ u32 request = FEC_MII_READ_FRAME;
+
+ fec = priv->regs;
+ out_be32(&fec->ievent, FEC_IEVENT_MII);
+
+ request |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
+ request |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
+
+ out_be32(&priv->regs->mii_data, request);
+
+ /* wait for it to finish, this takes about 23 us on lite5200b */
+ while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
+ udelay(5);
+
+ if (tries == 0)
+ return -ETIMEDOUT;
+
+ return in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK;
+}
+
+static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data)
+{
+ struct mpc52xx_fec_mdio_priv *priv = bus->priv;
+ struct mpc52xx_fec __iomem *fec;
+ u32 value = data;
+ int tries = 100;
+
+ fec = priv->regs;
+ out_be32(&fec->ievent, FEC_IEVENT_MII);
+
+ value |= FEC_MII_WRITE_FRAME;
+ value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
+ value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
+
+ out_be32(&priv->regs->mii_data, value);
+
+ /* wait for request to finish */
+ while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
+ udelay(5);
+
+ if (tries == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_id *match)
+{
+ struct device *dev = &of->dev;
+ struct device_node *np = of->node;
+ struct device_node *child = NULL;
+ struct mii_bus *bus;
+ struct mpc52xx_fec_mdio_priv *priv;
+ struct resource res = {};
+ int err;
+ int i;
+
+ bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+ if (bus == NULL)
+ return -ENOMEM;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ bus->name = "mpc52xx MII bus";
+ bus->read = mpc52xx_fec_mdio_read;
+ bus->write = mpc52xx_fec_mdio_write;
+
+ /* setup irqs */
+ bus->irq = kmalloc(sizeof(bus->irq[0]) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (bus->irq == NULL) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ for (i=0; i<PHY_MAX_ADDR; i++)
+ bus->irq[i] = PHY_POLL;
+
+ while ((child = of_get_next_child(np, child)) != NULL) {
+ int irq = irq_of_parse_and_map(child, 0);
+ if (irq != NO_IRQ) {
+ const u32 *id = of_get_property(child, "reg", NULL);
+ bus->irq[*id] = irq;
+ }
+ }
+
+ /* setup registers */
+ err = of_address_to_resource(np, 0, &res);
+ if (err)
+ goto out_free;
+ priv->regs = ioremap(res.start, res.end - res.start + 1);
+ if (priv->regs == NULL) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ bus->id = res.start;
+ bus->priv = priv;
+
+ bus->dev = dev;
+ dev_set_drvdata(dev, bus);
+
+ /* set MII speed */
+ out_be32(&priv->regs->mii_speed, ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1);
+
+ /* enable MII interrupt */
+ out_be32(&priv->regs->imask, in_be32(&priv->regs->imask) | FEC_IMASK_MII);
+
+ err = mdiobus_register(bus);
+ if (err)
+ goto out_unmap;
+
+ return 0;
+
+ out_unmap:
+ iounmap(priv->regs);
+ out_free:
+ for (i=0; i<PHY_MAX_ADDR; i++)
+ if (bus->irq[i] != PHY_POLL)
+ irq_dispose_mapping(bus->irq[i]);
+ kfree(bus->irq);
+ kfree(priv);
+ kfree(bus);
+
+ return err;
+}
+
+static int mpc52xx_fec_mdio_remove(struct of_device *of)
+{
+ struct device *dev = &of->dev;
+ struct mii_bus *bus = dev_get_drvdata(dev);
+ struct mpc52xx_fec_mdio_priv *priv = bus->priv;
+ int i;
+
+ mdiobus_unregister(bus);
+ dev_set_drvdata(dev, NULL);
+
+ iounmap(priv->regs);
+ for (i=0; i<PHY_MAX_ADDR; i++)
+ if (bus->irq[i])
+ irq_dispose_mapping(bus->irq[i]);
+ kfree(priv);
+ kfree(bus->irq);
+ kfree(bus);
+
+ return 0;
+}
+
+
+static struct of_device_id mpc52xx_fec_mdio_match[] = {
+ {
+ .type = "mdio",
+ .compatible = "mpc5200b-fec-phy",
+ },
+ {},
+};
+
+struct of_platform_driver mpc52xx_fec_mdio_driver = {
+ .name = "mpc5200b-fec-phy",
+ .probe = mpc52xx_fec_mdio_probe,
+ .remove = mpc52xx_fec_mdio_remove,
+ .match_table = mpc52xx_fec_mdio_match,
+};
+
+/* let fec driver call it, since this has to be registered before it */
+EXPORT_SYMBOL_GPL(mpc52xx_fec_mdio_driver);
+
+
+MODULE_LICENSE("Dual BSD/GPL");
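[Note: both MDIO paths above follow the same completion pattern: acknowledge the MII event bit, write the request, then poll ievent with a bounded number of udelay(5) steps and fail with -ETIMEDOUT if the bit never sets. A small user-space sketch of that bounded-poll pattern, with a fake event word standing in for the FEC ievent register (the names and the simulated "hardware" are illustrative only):

        /* Bounded-poll sketch mirroring the MDIO completion wait above. */
        #include <stdio.h>
        #include <errno.h>
        #include <stdint.h>

        #define FAKE_EVENT_MII 0x00800000u

        static uint32_t fake_ievent;        /* stands in for a device register */

        static uint32_t read_ievent(void)   /* models in_be32(&fec->ievent) */
        {
            /* pretend the hardware completes after a few polls */
            static int polls;
            if (++polls == 4)
                fake_ievent |= FAKE_EVENT_MII;
            return fake_ievent;
        }

        static int wait_for_mii_done(void)
        {
            int tries = 100;

            while (!(read_ievent() & FAKE_EVENT_MII) && --tries)
                ;                           /* the driver uses udelay(5) here */

            return tries ? 0 : -ETIMEDOUT;
        }

        int main(void)
        {
            printf("wait_for_mii_done() = %d\n", wait_for_mii_done());
            return 0;
        }

The important property is that the loop terminates either on completion or after a fixed worst-case wait (100 polls, roughly 500 us in the driver), never hanging the MDIO bus.]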
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 4dbdfaa..a1e4508 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -627,19 +627,16 @@ static int au1k_irda_rx(struct net_device *dev)
}
-void au1k_irda_interrupt(int irq, void *dev_id)
+static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
{
- struct net_device *dev = (struct net_device *) dev_id;
-
- if (dev == NULL) {
- printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name);
- return;
- }
+ struct net_device *dev = dev_id;
writel(0, IR_INT_CLEAR); /* ack irda interrupts */
au1k_irda_rx(dev);
au1k_tx_ack(dev);
+
+ return IRQ_HANDLED;
}
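[Note: the au1k_ir change above moves the handler to the irqreturn_t prototype expected by request_irq(), so the handler reports whether it serviced the interrupt instead of returning void. A user-space analogy of why that return value matters on a shared line (the dispatcher and handler names here are made up for illustration; this is not kernel code):

        /* Analogy of irqreturn_t dispatch on a shared line: each handler
         * reports whether it consumed the event, so the dispatcher can
         * detect an interrupt that nobody claimed. */
        #include <stdio.h>

        typedef enum { IRQ_NONE, IRQ_HANDLED } irqreturn_t;

        static irqreturn_t handler_a(void *dev_id) { (void)dev_id; return IRQ_NONE; }    /* not mine */
        static irqreturn_t handler_b(void *dev_id) { (void)dev_id; return IRQ_HANDLED; } /* processed */

        int main(void)
        {
            irqreturn_t (*handlers[])(void *) = { handler_a, handler_b };
            int handled = 0;

            for (unsigned i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
                if (handlers[i](NULL) == IRQ_HANDLED)
                    handled = 1;

            printf(handled ? "line serviced\n" : "spurious interrupt\n");
            return 0;
        }

In the kernel the genirq core uses the same information to detect spurious or stuck interrupt lines.]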
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 73dcbb7..ad134a6 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -274,7 +274,7 @@ static int tc574_probe(struct pcmcia_device *link)
spin_lock_init(&lp->window_lock);
link->io.NumPorts1 = 32;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->irq.Handler = &el3_interrupt;
link->irq.Instance = dev;
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 32076ca..a98fe07c 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -188,7 +188,7 @@ static int tc589_probe(struct pcmcia_device *link)
spin_lock_init(&lp->lock);
link->io.NumPorts1 = 16;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->irq.Handler = &el3_interrupt;
link->irq.Instance = dev;
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index a95a2ca..8d910a3 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -158,7 +158,7 @@ static int axnet_probe(struct pcmcia_device *link)
info = PRIV(dev);
info->p_dev = link;
link->priv = dev;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 6284467..8c719b4 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -249,7 +249,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
link->io.IOAddrLines = 5;
/* Interrupt setup */
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->irq.Handler = &fjn_interrupt;
link->irq.Instance = dev;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 9d45e96..db6a97d 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -254,7 +254,7 @@ static int pcnet_probe(struct pcmcia_device *link)
info->p_dev = link;
link->priv = dev;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.IntType = INT_MEMORY_AND_IO;
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 58d716f..c9868e9 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -328,7 +328,7 @@ static int smc91c92_probe(struct pcmcia_device *link)
link->io.NumPorts1 = 16;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
link->io.IOAddrLines = 4;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->irq.Handler = &smc_interrupt;
link->irq.Instance = dev;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index c3b6960..1f09bea 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -886,7 +886,7 @@ xirc2ps_config(struct pcmcia_device * link)
}
printk(KNOT_XIRC "no ports available\n");
} else {
- link->irq.Attributes |= IRQ_TYPE_EXCLUSIVE;
+ link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
link->io.NumPorts1 = 16;
for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
link->io.BasePort1 = ioaddr;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index e8960f2..b94fa7e 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -392,7 +392,9 @@ struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev; /* Index of PCI device */
struct net_device *dev;
+#ifdef CONFIG_R8169_NAPI
struct napi_struct napi;
+#endif
spinlock_t lock; /* spin lock flag */
u32 msg_enable;
int chipset;
@@ -2989,13 +2991,16 @@ static void rtl8169_down(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
- unsigned int poll_locked = 0;
unsigned int intrmask;
rtl8169_delete_timer(dev);
netif_stop_queue(dev);
+#ifdef CONFIG_R8169_NAPI
+ napi_disable(&tp->napi);
+#endif
+
core_down:
spin_lock_irq(&tp->lock);
@@ -3009,11 +3014,6 @@ core_down:
synchronize_irq(dev->irq);
- if (!poll_locked) {
- napi_disable(&tp->napi);
- poll_locked++;
- }
-
/* Give a racing hard_start_xmit a few cycles to complete. */
synchronize_sched(); /* FIXME: should this be synchronize_irq()? */
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 9741d61..a3ff270 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2214,9 +2214,7 @@ static void ucc_geth_set_multi(struct net_device *dev)
struct dev_mc_list *dmi;
struct ucc_fast *uf_regs;
struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
- u8 tempaddr[6];
- u8 *mcptr, *tdptr;
- int i, j;
+ int i;
ugeth = netdev_priv(dev);
@@ -2255,19 +2253,10 @@ static void ucc_geth_set_multi(struct net_device *dev)
if (!(dmi->dmi_addr[0] & 1))
continue;
- /* The address in dmi_addr is LSB first,
- * and taddr is MSB first. We have to
- * copy bytes MSB first from dmi_addr.
- */
- mcptr = (u8 *) dmi->dmi_addr + 5;
- tdptr = (u8 *) tempaddr;
- for (j = 0; j < 6; j++)
- *tdptr++ = *mcptr--;
-
/* Ask CPM to run CRC and set bit in
* filter mask.
*/
- hw_add_addr_in_hash(ugeth, tempaddr);
+ hw_add_addr_in_hash(ugeth, dmi->dmi_addr);
}
}
}
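[Note: the ucc_geth hunk above deletes the MSB-first temporary copy of dmi_addr; the driver now passes dmi->dmi_addr straight to hw_add_addr_in_hash(). For comparison, a stand-alone sketch of what the removed loop did (illustration only; the sample address is arbitrary):

        /* Sketch of the byte-reversal the patch removes: the 6-byte multicast
         * address was copied MSB-first into a temporary before hashing
         * (the driver walked a decrementing pointer; indexing is equivalent). */
        #include <stdio.h>
        #include <stdint.h>

        static void reverse_mac(uint8_t *dst, const uint8_t *src)
        {
            for (int j = 0; j < 6; j++)
                dst[j] = src[5 - j];
        }

        int main(void)
        {
            uint8_t dmi_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
            uint8_t tempaddr[6];

            reverse_mac(tempaddr, dmi_addr);

            for (int i = 0; i < 6; i++)
                printf("%02x%s", tempaddr[i], i < 5 ? ":" : "\n");
            /* the fixed driver hashes dmi_addr unmodified instead */
            return 0;
        }
]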
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 9d9ff76..5058e60 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2391,7 +2391,7 @@ out_requeue:
if (b43_debug(dev, B43_DBG_PWORK_FAST))
delay = msecs_to_jiffies(50);
else
- delay = round_jiffies(HZ * 15);
+ delay = round_jiffies_relative(HZ * 15);
queue_delayed_work(wl->hw->workqueue, &dev->periodic_work, delay);
out:
mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index d09479e..f0e56df 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2260,7 +2260,7 @@ out_requeue:
if (b43legacy_debug(dev, B43legacy_DBG_PWORK_FAST))
delay = msecs_to_jiffies(50);
else
- delay = round_jiffies(HZ);
+ delay = round_jiffies_relative(HZ);
queue_delayed_work(dev->wl->hw->workqueue,
&dev->periodic_work, delay);
out:
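[Note: the b43 and b43legacy hunks above (and the ipw2100, ipw2200 and rt2x00 hunks below) switch round_jiffies() to round_jiffies_relative(). queue_delayed_work() takes a delay relative to now, while round_jiffies() rounds an absolute jiffies timestamp to a whole second; round_jiffies_relative() converts the delay to absolute time, rounds, and converts back. A simplified user-space model of the two helpers (the rounding here is deliberately crude -- always up to the next second -- and ignores the per-CPU skew the real helpers apply):

        /* Simplified model of round_jiffies() vs round_jiffies_relative(). */
        #include <stdio.h>

        #define HZ 250UL

        static unsigned long jiffies = 123456789UL;   /* pretend current time */

        static unsigned long model_round_jiffies(unsigned long j)
        {
            return j - (j % HZ) + HZ;                 /* round an absolute value */
        }

        static unsigned long model_round_jiffies_relative(unsigned long delay)
        {
            /* convert to absolute, round, convert back to relative */
            return model_round_jiffies(jiffies + delay) - jiffies;
        }

        int main(void)
        {
            printf("round_jiffies(HZ)          = %lu (absolute; rounded against the wrong origin)\n",
                   model_round_jiffies(HZ));
            printf("round_jiffies_relative(HZ) = %lu ticks from now\n",
                   model_round_jiffies_relative(HZ));
            return 0;
        }
]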
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index a6c7904..8d53d08 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -1769,7 +1769,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
if (priv->stop_rf_kill) {
priv->stop_rf_kill = 0;
queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies(HZ));
+ round_jiffies_relative(HZ));
}
deferred = 1;
@@ -2086,7 +2086,8 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
/* Make sure the RF Kill check timer is running */
priv->stop_rf_kill = 0;
cancel_delayed_work(&priv->rf_kill);
- queue_delayed_work(priv->workqueue, &priv->rf_kill, round_jiffies(HZ));
+ queue_delayed_work(priv->workqueue, &priv->rf_kill,
+ round_jiffies_relative(HZ));
}
static void send_scan_event(void *data)
@@ -2123,7 +2124,7 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
if (!delayed_work_pending(&priv->scan_event_later))
queue_delayed_work(priv->workqueue,
&priv->scan_event_later,
- round_jiffies(msecs_to_jiffies(4000)));
+ round_jiffies_relative(msecs_to_jiffies(4000)));
} else {
priv->user_requested_scan = 0;
cancel_delayed_work(&priv->scan_event_later);
@@ -4242,7 +4243,7 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
priv->stop_rf_kill = 0;
cancel_delayed_work(&priv->rf_kill);
queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies(HZ));
+ round_jiffies_relative(HZ));
} else
schedule_reset(priv);
}
@@ -5981,7 +5982,7 @@ static void ipw2100_rf_kill(struct work_struct *work)
IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
if (!priv->stop_rf_kill)
queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies(HZ));
+ round_jiffies_relative(HZ));
goto exit_unlock;
}
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index e3c8284..54f44e5 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -1753,7 +1753,7 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
/* Make sure the RF_KILL check timer is running */
cancel_delayed_work(&priv->rf_kill);
queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies(2 * HZ));
+ round_jiffies_relative(2 * HZ));
} else
queue_work(priv->workqueue, &priv->up);
}
@@ -4364,7 +4364,7 @@ static void handle_scan_event(struct ipw_priv *priv)
if (!priv->user_requested_scan) {
if (!delayed_work_pending(&priv->scan_event))
queue_delayed_work(priv->workqueue, &priv->scan_event,
- round_jiffies(msecs_to_jiffies(4000)));
+ round_jiffies_relative(msecs_to_jiffies(4000)));
} else {
union iwreq_data wrqu;
@@ -4728,7 +4728,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
&& priv->status & STATUS_ASSOCIATED)
queue_delayed_work(priv->workqueue,
&priv->request_scan,
- round_jiffies(HZ));
+ round_jiffies_relative(HZ));
/* Send an empty event to user space.
* We don't send the received data on the event because
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 557deeb..891f90d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -3232,9 +3232,7 @@ int iwl4965_tx_cmd(struct iwl_priv *priv, struct iwl_cmd *out_cmd,
tx->rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[rate_index].plcp,
rate_flags);
- if (ieee80211_is_probe_request(fc))
- tx->tx_flags |= TX_CMD_FLG_TSF_MSK;
- else if (ieee80211_is_back_request(fc))
+ if (ieee80211_is_back_request(fc))
tx->tx_flags |= TX_CMD_FLG_ACK_MSK |
TX_CMD_FLG_IMM_BA_RSP_MASK;
#ifdef CONFIG_IWLWIFI_HT
@@ -3872,7 +3870,7 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
*/
case IEEE80211_STYPE_ASSOC_RESP:
case IEEE80211_STYPE_REASSOC_RESP:
- if (network_packet && iwl_is_associated(priv)) {
+ if (network_packet) {
#ifdef CONFIG_IWLWIFI_HT
u8 *pos = NULL;
struct ieee802_11_elems elems;
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 83019d1..4f22a71 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -6478,8 +6478,9 @@ static void iwl_bg_scan_check(struct work_struct *data)
IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
"Scan completion watchdog resetting adapter (%dms)\n",
jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
+
if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
- queue_work(priv->workqueue, &priv->restart);
+ iwl_send_scan_abort(priv);
}
mutex_unlock(&priv->mutex);
}
@@ -6575,7 +6576,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
spin_unlock_irqrestore(&priv->lock, flags);
scan->suspend_time = 0;
- scan->max_out_time = cpu_to_le32(600 * 1024);
+ scan->max_out_time = cpu_to_le32(200 * 1024);
if (!interval)
interval = suspend_time;
/*
@@ -6605,7 +6606,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
memcpy(scan->direct_scan[0].ssid,
priv->direct_ssid, priv->direct_ssid_len);
direct_mask = 1;
- } else if (!iwl_is_associated(priv)) {
+ } else if (!iwl_is_associated(priv) && priv->essid_len) {
scan->direct_scan[0].id = WLAN_EID_SSID;
scan->direct_scan[0].len = priv->essid_len;
memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
@@ -6744,6 +6745,12 @@ static void iwl_bg_post_associate(struct work_struct *data)
mutex_lock(&priv->mutex);
+ if (!priv->interface_id || !priv->is_open) {
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+ iwl_scan_cancel_timeout(priv, 200);
+
conf = ieee80211_get_hw_conf(priv->hw);
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
@@ -6882,9 +6889,19 @@ static void iwl_mac_stop(struct ieee80211_hw *hw)
struct iwl_priv *priv = hw->priv;
IWL_DEBUG_MAC80211("enter\n");
+
+
+ mutex_lock(&priv->mutex);
+ /* stop mac, cancel any scan request and clear
+ * RXON_FILTER_ASSOC_MSK BIT
+ */
priv->is_open = 0;
- /*netif_stop_queue(dev); */
- flush_workqueue(priv->workqueue);
+ iwl_scan_cancel_timeout(priv, 100);
+ cancel_delayed_work(&priv->post_associate);
+ priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwl_commit_rxon(priv);
+ mutex_unlock(&priv->mutex);
+
IWL_DEBUG_MAC80211("leave\n");
}
@@ -7169,8 +7186,6 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id,
if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
iwl_config_ap(priv);
else {
- priv->staging_rxon.filter_flags |=
- RXON_FILTER_ASSOC_MSK;
rc = iwl_commit_rxon(priv);
if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
iwl_add_station(priv,
@@ -7178,6 +7193,7 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id,
}
} else {
+ iwl_scan_cancel_timeout(priv, 100);
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwl_commit_rxon(priv);
}
@@ -7217,6 +7233,12 @@ static void iwl_mac_remove_interface(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211("enter\n");
mutex_lock(&priv->mutex);
+
+ iwl_scan_cancel_timeout(priv, 100);
+ cancel_delayed_work(&priv->post_associate);
+ priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwl_commit_rxon(priv);
+
if (priv->interface_id == conf->if_id) {
priv->interface_id = 0;
memset(priv->bssid, 0, ETH_ALEN);
@@ -7238,6 +7260,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
IWL_DEBUG_MAC80211("enter\n");
+ mutex_lock(&priv->mutex);
spin_lock_irqsave(&priv->lock, flags);
if (!iwl_is_ready_rf(priv)) {
@@ -7268,7 +7291,8 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
priv->direct_ssid_len = (u8)
min((u8) len, (u8) IW_ESSID_MAX_SIZE);
memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
- }
+ } else
+ priv->one_direct_scan = 0;
rc = iwl_scan_initiate(priv);
@@ -7276,6 +7300,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
out_unlock:
spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_unlock(&priv->mutex);
return rc;
}
@@ -7310,6 +7335,8 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mutex_lock(&priv->mutex);
+ iwl_scan_cancel_timeout(priv, 100);
+
switch (cmd) {
case SET_KEY:
rc = iwl_update_sta_key_info(priv, key, sta_id);
@@ -7479,8 +7506,18 @@ static void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
spin_unlock_irqrestore(&priv->lock, flags);
+ /* we are restarting association process
+ * clear RXON_FILTER_ASSOC_MSK bit
+ */
+ if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
+ iwl_scan_cancel_timeout(priv, 100);
+ priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwl_commit_rxon(priv);
+ }
+
/* Per mac80211.h: This is only used in IBSS mode... */
if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
+
IWL_DEBUG_MAC80211("leave - not in IBSS\n");
mutex_unlock(&priv->mutex);
return;
@@ -8558,6 +8595,9 @@ static void iwl_pci_remove(struct pci_dev *pdev)
iwl_rate_control_unregister(priv->hw);
}
+ /*netif_stop_queue(dev); */
+ flush_workqueue(priv->workqueue);
+
/* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
* priv->workqueue... so we can't take down the workqueue
* until now... */
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c
index 5e12792..d60adcb 100644
--- a/drivers/net/wireless/iwlwifi/iwl4965-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c
@@ -6845,8 +6845,9 @@ static void iwl_bg_scan_check(struct work_struct *data)
IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN,
"Scan completion watchdog resetting adapter (%dms)\n",
jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
+
if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
- queue_work(priv->workqueue, &priv->restart);
+ iwl_send_scan_abort(priv);
}
mutex_unlock(&priv->mutex);
}
@@ -6942,7 +6943,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
spin_unlock_irqrestore(&priv->lock, flags);
scan->suspend_time = 0;
- scan->max_out_time = cpu_to_le32(600 * 1024);
+ scan->max_out_time = cpu_to_le32(200 * 1024);
if (!interval)
interval = suspend_time;
@@ -6965,7 +6966,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
memcpy(scan->direct_scan[0].ssid,
priv->direct_ssid, priv->direct_ssid_len);
direct_mask = 1;
- } else if (!iwl_is_associated(priv)) {
+ } else if (!iwl_is_associated(priv) && priv->essid_len) {
scan->direct_scan[0].id = WLAN_EID_SSID;
scan->direct_scan[0].len = priv->essid_len;
memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
@@ -7118,6 +7119,12 @@ static void iwl_bg_post_associate(struct work_struct *data)
mutex_lock(&priv->mutex);
+ if (!priv->interface_id || !priv->is_open) {
+ mutex_unlock(&priv->mutex);
+ return;
+ }
+ iwl_scan_cancel_timeout(priv, 200);
+
conf = ieee80211_get_hw_conf(priv->hw);
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
@@ -7271,9 +7278,19 @@ static void iwl_mac_stop(struct ieee80211_hw *hw)
struct iwl_priv *priv = hw->priv;
IWL_DEBUG_MAC80211("enter\n");
+
+
+ mutex_lock(&priv->mutex);
+ /* stop mac, cancel any scan request and clear
+ * RXON_FILTER_ASSOC_MSK BIT
+ */
priv->is_open = 0;
- /*netif_stop_queue(dev); */
- flush_workqueue(priv->workqueue);
+ iwl_scan_cancel_timeout(priv, 100);
+ cancel_delayed_work(&priv->post_associate);
+ priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwl_commit_rxon(priv);
+ mutex_unlock(&priv->mutex);
+
IWL_DEBUG_MAC80211("leave\n");
}
@@ -7573,8 +7590,6 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id,
if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
iwl_config_ap(priv);
else {
- priv->staging_rxon.filter_flags |=
- RXON_FILTER_ASSOC_MSK;
rc = iwl_commit_rxon(priv);
if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc)
iwl_rxon_add_station(
@@ -7582,6 +7597,7 @@ static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id,
}
} else {
+ iwl_scan_cancel_timeout(priv, 100);
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwl_commit_rxon(priv);
}
@@ -7621,6 +7637,12 @@ static void iwl_mac_remove_interface(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211("enter\n");
mutex_lock(&priv->mutex);
+
+ iwl_scan_cancel_timeout(priv, 100);
+ cancel_delayed_work(&priv->post_associate);
+ priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwl_commit_rxon(priv);
+
if (priv->interface_id == conf->if_id) {
priv->interface_id = 0;
memset(priv->bssid, 0, ETH_ALEN);
@@ -7642,6 +7664,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
IWL_DEBUG_MAC80211("enter\n");
+ mutex_lock(&priv->mutex);
spin_lock_irqsave(&priv->lock, flags);
if (!iwl_is_ready_rf(priv)) {
@@ -7672,7 +7695,8 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
priv->direct_ssid_len = (u8)
min((u8) len, (u8) IW_ESSID_MAX_SIZE);
memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
- }
+ } else
+ priv->one_direct_scan = 0;
rc = iwl_scan_initiate(priv);
@@ -7680,6 +7704,7 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
out_unlock:
spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_unlock(&priv->mutex);
return rc;
}
@@ -7713,6 +7738,8 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mutex_lock(&priv->mutex);
+ iwl_scan_cancel_timeout(priv, 100);
+
switch (cmd) {
case SET_KEY:
rc = iwl_update_sta_key_info(priv, key, sta_id);
@@ -7903,8 +7930,18 @@ static void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
spin_unlock_irqrestore(&priv->lock, flags);
+ /* we are restarting association process
+ * clear RXON_FILTER_ASSOC_MSK bit
+ */
+ if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
+ iwl_scan_cancel_timeout(priv, 100);
+ priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwl_commit_rxon(priv);
+ }
+
/* Per mac80211.h: This is only used in IBSS mode... */
if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
+
IWL_DEBUG_MAC80211("leave - not in IBSS\n");
mutex_unlock(&priv->mutex);
return;
@@ -9152,6 +9189,9 @@ static void iwl_pci_remove(struct pci_dev *pdev)
iwl_rate_control_unregister(priv->hw);
}
+ /*netif_stop_queue(dev); */
+ flush_workqueue(priv->workqueue);
+
/* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
* priv->workqueue... so we can't take down the workqueue
* until now... */
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 298faa9d..06d9bc0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -30,7 +30,7 @@
* Interval defines
* Both the link tuner as the rfkill will be called once per second.
*/
-#define LINK_TUNE_INTERVAL ( round_jiffies(HZ) )
+#define LINK_TUNE_INTERVAL ( round_jiffies_relative(HZ) )
#define RFKILL_POLL_INTERVAL ( 1000 )
/*
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index de61c8f..e454ae8 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -433,6 +433,9 @@ static int rtl8187_start(struct ieee80211_hw *dev)
rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF);
+ rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0);
+ rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0);
+
rtl8187_init_urbs(dev);
reg = RTL818X_RX_CONF_ONLYERLPKT |
@@ -582,32 +585,31 @@ static int rtl8187_config_interface(struct ieee80211_hw *dev, int if_id,
static void rtl8187_configure_filter(struct ieee80211_hw *dev,
unsigned int changed_flags,
unsigned int *total_flags,
- int mc_count, struct dev_addr_list *mc_list)
+ int mc_count, struct dev_addr_list *mclist)
{
struct rtl8187_priv *priv = dev->priv;
- *total_flags = 0;
-
- if (changed_flags & FIF_ALLMULTI)
- priv->rx_conf ^= RTL818X_RX_CONF_MULTICAST;
if (changed_flags & FIF_FCSFAIL)
priv->rx_conf ^= RTL818X_RX_CONF_FCS;
if (changed_flags & FIF_CONTROL)
priv->rx_conf ^= RTL818X_RX_CONF_CTRL;
if (changed_flags & FIF_OTHER_BSS)
priv->rx_conf ^= RTL818X_RX_CONF_MONITOR;
-
- if (mc_count > 0)
+ if (*total_flags & FIF_ALLMULTI || mc_count > 0)
priv->rx_conf |= RTL818X_RX_CONF_MULTICAST;
+ else
+ priv->rx_conf &= ~RTL818X_RX_CONF_MULTICAST;
+
+ *total_flags = 0;
- if (priv->rx_conf & RTL818X_RX_CONF_MULTICAST)
- *total_flags |= FIF_ALLMULTI;
if (priv->rx_conf & RTL818X_RX_CONF_FCS)
*total_flags |= FIF_FCSFAIL;
if (priv->rx_conf & RTL818X_RX_CONF_CTRL)
*total_flags |= FIF_CONTROL;
if (priv->rx_conf & RTL818X_RX_CONF_MONITOR)
*total_flags |= FIF_OTHER_BSS;
+ if (priv->rx_conf & RTL818X_RX_CONF_MULTICAST)
+ *total_flags |= FIF_ALLMULTI;
rtl818x_iowrite32_async(priv, &priv->map->RX_CONF, priv->rx_conf);
}
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 0c4ab3b..9b35259 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -745,7 +745,7 @@ static char *fault_reason_strings[] =
"non-zero reserved fields in PTE",
"Unknown"
};
-#define MAX_FAULT_REASON_IDX ARRAY_SIZE(fault_reason_strings)
+#define MAX_FAULT_REASON_IDX ARRAY_SIZE(fault_reason_strings) - 1
char *dmar_get_fault_reason(u8 fault_reason)
{
@@ -995,7 +995,6 @@ static struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd)
return iommu;
error_unmap:
iounmap(iommu->reg);
- iommu->reg = 0;
error:
kfree(iommu);
return NULL;
@@ -1808,7 +1807,7 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
if (!domain) {
printk(KERN_ERR
"Allocating domain for %s failed", pci_name(pdev));
- return 0;
+ return NULL;
}
/* make sure context mapping is ok */
@@ -1818,7 +1817,7 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
printk(KERN_ERR
"Domain context map for %s failed",
pci_name(pdev));
- return 0;
+ return NULL;
}
}
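[Note: the MAX_FAULT_REASON_IDX change above accounts for ARRAY_SIZE() returning the element count, which is one past the highest valid index, so the catch-all "Unknown" entry is reached without reading past fault_reason_strings[]. A small stand-alone illustration of the difference:

        /* ARRAY_SIZE gives the number of elements; the last valid index is one less. */
        #include <stdio.h>

        #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

        static const char *fault_reason_strings[] = {
            "reason 0",
            "reason 1",
            "Unknown",              /* catch-all, as in the driver */
        };

        int main(void)
        {
            size_t count   = ARRAY_SIZE(fault_reason_strings);
            size_t max_idx = ARRAY_SIZE(fault_reason_strings) - 1;

            printf("count = %zu, highest valid index = %zu -> \"%s\"\n",
                   count, max_idx, fault_reason_strings[max_idx]);
            return 0;
        }
]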
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h
index ee88dd2..459ad1f 100644
--- a/drivers/pci/intel-iommu.h
+++ b/drivers/pci/intel-iommu.h
@@ -58,7 +58,7 @@
hi = readl(dmar + reg + 4); \
(((u64) hi) << 32) + lo; })
*/
-static inline u64 dmar_readq(void *addr)
+static inline u64 dmar_readq(void __iomem *addr)
{
u32 lo, hi;
lo = readl(addr);
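[Note: dmar_readq() assembles a 64-bit register value from two 32-bit reads, low word first and high word at offset +4, as the commented-out macro above it shows. A stand-alone sketch of the recombination, with an ordinary byte buffer standing in for the __iomem window (read32() here just memcpy()s, which matches readl() semantics on a little-endian host):

        /* Recombining a 64-bit register from two 32-bit reads, as dmar_readq() does. */
        #include <stdio.h>
        #include <stdint.h>
        #include <string.h>

        static uint32_t read32(const void *addr)        /* models readl() */
        {
            uint32_t v;
            memcpy(&v, addr, sizeof(v));
            return v;
        }

        static uint64_t dmar_readq_sketch(const void *addr)
        {
            uint32_t lo = read32(addr);
            uint32_t hi = read32((const uint8_t *)addr + 4);

            return ((uint64_t)hi << 32) | lo;
        }

        int main(void)
        {
            uint8_t reg[8] = { 0x78, 0x56, 0x34, 0x12, 0xef, 0xbe, 0xad, 0xde };

            printf("q = 0x%016llx\n",
                   (unsigned long long)dmar_readq_sketch(reg));
            return 0;
        }

On a little-endian host this prints q = 0xdeadbeef12345678 for the sample bytes.]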
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 0754542..e268f79 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -70,11 +70,12 @@ zfcp_sg_to_address(struct scatterlist *list)
* zfcp_address_to_sg - set up struct scatterlist from kernel address
* @address: kernel address
* @list: struct scatterlist
+ * @size: buffer size
*/
static inline void
-zfcp_address_to_sg(void *address, struct scatterlist *list)
+zfcp_address_to_sg(void *address, struct scatterlist *list, unsigned int size)
{
- sg_set_buf(list, address, 0);
+ sg_set_buf(list, address, size);
}
#define REQUEST_LIST_SIZE 128
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 72b0393..1e6d7a9 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -391,7 +391,7 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
/*
* Extract the fibctx from the input parameters
*/
- if (fibctx->unique == (u32)(ptrdiff_t)arg) /* We found a winner */
+ if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
break;
entry = entry->next;
fibctx = NULL;
@@ -590,7 +590,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
}
addr = (u64)upsg->sg[i].addr[0];
addr += ((u64)upsg->sg[i].addr[1]) << 32;
- sg_user[i] = (void __user *)(ptrdiff_t)addr;
+ sg_user[i] = (void __user *)(uintptr_t)addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
@@ -633,7 +633,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -ENOMEM;
goto cleanup;
}
- sg_user[i] = (void __user *)(ptrdiff_t)usg->sg[i].addr;
+ sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
@@ -664,7 +664,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
if (actual_fibsize64 == fibsize) {
struct user_sgmap64* usg = (struct user_sgmap64 *)upsg;
for (i = 0; i < upsg->count; i++) {
- u64 addr;
+ uintptr_t addr;
void* p;
/* Does this really need to be GFP_DMA? */
p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
@@ -676,7 +676,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
}
addr = (u64)usg->sg[i].addr[0];
addr += ((u64)usg->sg[i].addr[1]) << 32;
- sg_user[i] = (void __user *)(ptrdiff_t)addr;
+ sg_user[i] = (void __user *)addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
@@ -704,7 +704,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -ENOMEM;
goto cleanup;
}
- sg_user[i] = (void __user *)(ptrdiff_t)upsg->sg[i].addr;
+ sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
sg_list[i] = p; // save so we can clean up later
sg_indx = i;
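[Note: the commctrl.c changes above replace (ptrdiff_t) casts with (uintptr_t) when turning integer handles back into user pointers: ptrdiff_t is the type of a difference between two pointers, while uintptr_t is the unsigned integer type defined to round-trip a pointer value without loss. A minimal stand-alone example of that round trip:

        /* uintptr_t is the integer type meant for carrying a pointer value;
         * ptrdiff_t is only defined for the difference of two pointers. */
        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
            int object = 42;
            int *p = &object;

            uintptr_t handle = (uintptr_t)p;    /* pointer -> integer handle */
            int *q = (int *)handle;             /* integer handle -> pointer */

            printf("round-trip ok: %s, *q = %d\n", (q == p) ? "yes" : "no", *q);
            return 0;
        }
]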
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 3009ad8..8736813 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -110,7 +110,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
/*
* Align the beginning of Headers to commalign
*/
- align = (commalign - ((ptrdiff_t)(base) & (commalign - 1)));
+ align = (commalign - ((uintptr_t)(base) & (commalign - 1)));
base = base + align;
phys = phys + align;
/*
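[Note: the comminit.c hunk computes the padding needed to bring the comm area up to a commalign boundary as commalign - ((uintptr_t)base & (commalign - 1)), which is valid whenever commalign is a power of two (and advances a full commalign even when base is already aligned). A stand-alone sketch of the same calculation with arbitrary sample values:

        /* Aligning a buffer start to a power-of-two boundary, as in aac_alloc_comm(). */
        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
            unsigned char buffer[256];
            unsigned char *base = buffer + 3;   /* deliberately misaligned */
            uintptr_t commalign = 16;           /* must be a power of two */

            uintptr_t align = commalign - ((uintptr_t)base & (commalign - 1));
            unsigned char *aligned = base + align;

            printf("base %% %lu = %lu, aligned %% %lu = %lu\n",
                   (unsigned long)commalign, (unsigned long)((uintptr_t)base % commalign),
                   (unsigned long)commalign, (unsigned long)((uintptr_t)aligned % commalign));
            return 0;
        }
]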
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index fcd25f7..e6032ff 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -254,7 +254,7 @@ unsigned int aac_intr_normal(struct aac_dev * dev, u32 Index)
kfree (fib);
return 1;
}
- memcpy(hw_fib, (struct hw_fib *)(((ptrdiff_t)(dev->regs.sa)) +
+ memcpy(hw_fib, (struct hw_fib *)(((uintptr_t)(dev->regs.sa)) +
(index & ~0x00000002L)), sizeof(struct hw_fib));
INIT_LIST_HEAD(&fib->fiblink);
fib->type = FSAFS_NTC_FIB_CONTEXT;
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index ace7a15..a67e29f 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -141,14 +141,14 @@ struct CMD_MESSAGE_FIELD
#define IS_SG64_ADDR 0x01000000 /* bit24 */
struct SG32ENTRY
{
- uint32_t length;
- uint32_t address;
+ __le32 length;
+ __le32 address;
};
struct SG64ENTRY
{
- uint32_t length;
- uint32_t address;
- uint32_t addresshigh;
+ __le32 length;
+ __le32 address;
+ __le32 addresshigh;
};
struct SGENTRY_UNION
{
@@ -339,23 +339,15 @@ struct MessageUnit_B
uint32_t done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
uint32_t postq_index;
uint32_t doneq_index;
- uint32_t *drv2iop_doorbell_reg;
- uint32_t *drv2iop_doorbell_mask_reg;
- uint32_t *iop2drv_doorbell_reg;
- uint32_t *iop2drv_doorbell_mask_reg;
- uint32_t *msgcode_rwbuffer_reg;
- uint32_t *ioctl_wbuffer_reg;
- uint32_t *ioctl_rbuffer_reg;
+ uint32_t __iomem *drv2iop_doorbell_reg;
+ uint32_t __iomem *drv2iop_doorbell_mask_reg;
+ uint32_t __iomem *iop2drv_doorbell_reg;
+ uint32_t __iomem *iop2drv_doorbell_mask_reg;
+ uint32_t __iomem *msgcode_rwbuffer_reg;
+ uint32_t __iomem *ioctl_wbuffer_reg;
+ uint32_t __iomem *ioctl_rbuffer_reg;
};
-struct MessageUnit
-{
- union
- {
- struct MessageUnit_A pmu_A;
- struct MessageUnit_B pmu_B;
- } u;
-};
/*
*******************************************************************************
** Adapter Control Block
@@ -374,7 +366,10 @@ struct AdapterControlBlock
/* Offset is used in making arc cdb physical to virtual calculations */
uint32_t outbound_int_enable;
- struct MessageUnit * pmu;
+ union {
+ struct MessageUnit_A __iomem * pmuA;
+ struct MessageUnit_B * pmuB;
+ };
/* message unit ATU inbound base address0 */
uint32_t acb_flags;
@@ -558,7 +553,7 @@ struct SENSE_DATA
extern void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *);
extern void arcmsr_iop_message_read(struct AdapterControlBlock *);
-extern struct QBUFFER *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *);
+extern struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *);
extern struct class_device_attribute *arcmsr_host_attrs[];
extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *);
void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb);
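[Note: marking the SG entry fields __le32 (and, in the arcmsr_hba.c hunk further down, OR-ing IS_SG64_ADDR through cpu_to_le32()) documents that the IOP consumes these descriptors in little-endian byte order regardless of host endianness, and lets sparse check the conversions. A user-space sketch of laying a 32-bit descriptor field out in little-endian order; put_le32() is an illustrative helper, not a kernel API:

        /* Writing a 32-bit descriptor field in little-endian byte order,
         * independent of host endianness. */
        #include <stdio.h>
        #include <stdint.h>

        #define IS_SG64_ADDR 0x01000000u        /* bit 24, as in arcmsr.h */

        static void put_le32(uint8_t *dst, uint32_t v)
        {
            dst[0] = v & 0xff;                  /* least significant byte first */
            dst[1] = (v >> 8) & 0xff;
            dst[2] = (v >> 16) & 0xff;
            dst[3] = (v >> 24) & 0xff;
        }

        int main(void)
        {
            uint32_t length = 0x1000 | IS_SG64_ADDR;    /* 4 KiB, 64-bit SG entry */
            uint8_t wire[4];

            put_le32(wire, length);
            printf("wire bytes: %02x %02x %02x %02x\n",
                   wire[0], wire[1], wire[2], wire[3]);
            return 0;
        }
]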
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index d04d1aa..7d7b0a5 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -85,13 +85,13 @@ static ssize_t arcmsr_sysfs_iop_message_read(struct kobject *kobj,
allxfer_len++;
}
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
- struct QBUFFER *prbuffer;
- uint8_t *iop_data;
+ struct QBUFFER __iomem *prbuffer;
+ uint8_t __iomem *iop_data;
int32_t iop_len;
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
prbuffer = arcmsr_get_iop_rqbuffer(acb);
- iop_data = (uint8_t *)prbuffer->data;
+ iop_data = prbuffer->data;
iop_len = readl(&prbuffer->data_len);
while (iop_len > 0) {
acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f7a2528..d466a2d 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -236,18 +236,22 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
uint32_t intmask_org;
int i, j;
- acb->pmu = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
- if (!acb->pmu) {
+ acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+ if (!acb->pmuA) {
printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
acb->host->host_no);
+ return -ENOMEM;
}
dma_coherent = dma_alloc_coherent(&pdev->dev,
ARCMSR_MAX_FREECCB_NUM *
sizeof (struct CommandControlBlock) + 0x20,
&dma_coherent_handle, GFP_KERNEL);
- if (!dma_coherent)
+
+ if (!dma_coherent) {
+ iounmap(acb->pmuA);
return -ENOMEM;
+ }
acb->dma_coherent = dma_coherent;
acb->dma_coherent_handle = dma_coherent_handle;
@@ -287,7 +291,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
struct pci_dev *pdev = acb->pdev;
struct MessageUnit_B *reg;
- void *mem_base0, *mem_base1;
+ void __iomem *mem_base0, *mem_base1;
void *dma_coherent;
dma_addr_t dma_coherent_handle, dma_addr;
uint32_t intmask_org;
@@ -328,25 +332,28 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
reg = (struct MessageUnit_B *)(dma_coherent +
ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
- acb->pmu = (struct MessageUnit *)reg;
+ acb->pmuB = reg;
mem_base0 = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
+ if (!mem_base0)
+ goto out;
+
mem_base1 = ioremap(pci_resource_start(pdev, 2),
pci_resource_len(pdev, 2));
- reg->drv2iop_doorbell_reg = (uint32_t *)((char *)mem_base0 +
- ARCMSR_DRV2IOP_DOORBELL);
- reg->drv2iop_doorbell_mask_reg = (uint32_t *)((char *)mem_base0 +
- ARCMSR_DRV2IOP_DOORBELL_MASK);
- reg->iop2drv_doorbell_reg = (uint32_t *)((char *)mem_base0 +
- ARCMSR_IOP2DRV_DOORBELL);
- reg->iop2drv_doorbell_mask_reg = (uint32_t *)((char *)mem_base0 +
- ARCMSR_IOP2DRV_DOORBELL_MASK);
- reg->ioctl_wbuffer_reg = (uint32_t *)((char *)mem_base1 +
- ARCMSR_IOCTL_WBUFFER);
- reg->ioctl_rbuffer_reg = (uint32_t *)((char *)mem_base1 +
- ARCMSR_IOCTL_RBUFFER);
- reg->msgcode_rwbuffer_reg = (uint32_t *)((char *)mem_base1 +
- ARCMSR_MSGCODE_RWBUFFER);
+ if (!mem_base1) {
+ iounmap(mem_base0);
+ goto out;
+ }
+
+ reg->drv2iop_doorbell_reg = mem_base0 + ARCMSR_DRV2IOP_DOORBELL;
+ reg->drv2iop_doorbell_mask_reg = mem_base0 +
+ ARCMSR_DRV2IOP_DOORBELL_MASK;
+ reg->iop2drv_doorbell_reg = mem_base0 + ARCMSR_IOP2DRV_DOORBELL;
+ reg->iop2drv_doorbell_mask_reg = mem_base0 +
+ ARCMSR_IOP2DRV_DOORBELL_MASK;
+ reg->ioctl_wbuffer_reg = mem_base1 + ARCMSR_IOCTL_WBUFFER;
+ reg->ioctl_rbuffer_reg = mem_base1 + ARCMSR_IOCTL_RBUFFER;
+ reg->msgcode_rwbuffer_reg = mem_base1 + ARCMSR_MSGCODE_RWBUFFER;
acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
@@ -362,6 +369,12 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
break;
}
return 0;
+
+out:
+ dma_free_coherent(&acb->pdev->dev,
+ ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20,
+ acb->dma_coherent, acb->dma_coherent_handle);
+ return -ENOMEM;
}
static int arcmsr_probe(struct pci_dev *pdev,
@@ -454,7 +467,6 @@ static int arcmsr_probe(struct pci_dev *pdev,
free_irq(pdev->irq, acb);
out_free_ccb_pool:
arcmsr_free_ccb_pool(acb);
- iounmap(acb->pmu);
out_release_regions:
pci_release_regions(pdev);
out_host_put:
@@ -467,7 +479,7 @@ static int arcmsr_probe(struct pci_dev *pdev,
static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
{
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
uint32_t Index;
uint8_t Retries = 0x00;
@@ -488,7 +500,7 @@ static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
{
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
uint32_t Index;
uint8_t Retries = 0x00;
@@ -509,7 +521,7 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
{
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
if (arcmsr_hba_wait_msgint_ready(acb))
@@ -520,7 +532,7 @@ static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
{
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg);
if (arcmsr_hbb_wait_msgint_ready(acb))
@@ -566,7 +578,7 @@ static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
{
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
int retry_count = 30;
writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
@@ -583,7 +595,7 @@ static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
{
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
int retry_count = 30;
writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg);
@@ -637,7 +649,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A : {
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
orig_mask = readl(&reg->outbound_intmask)|\
ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
@@ -646,7 +658,7 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_B : {
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \
(~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
writel(0, reg->iop2drv_doorbell_mask_reg);
@@ -748,14 +760,13 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- struct MessageUnit_A __iomem *reg = \
- (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
uint32_t outbound_intstatus;
- outbound_intstatus = readl(&reg->outbound_intstatus) & \
+ outbound_intstatus = readl(&reg->outbound_intstatus) &
acb->outbound_int_enable;
/*clear and abort all outbound posted Q*/
writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
- while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) \
+ while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
&& (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
arcmsr_drain_donequeue(acb, flag_ccb);
}
@@ -763,7 +774,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_B: {
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
/*clear all outbound posted Q*/
for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
@@ -816,7 +827,6 @@ static void arcmsr_remove(struct pci_dev *pdev)
}
free_irq(pdev->irq, acb);
- iounmap(acb->pmu);
arcmsr_free_ccb_pool(acb);
pci_release_regions(pdev);
@@ -859,7 +869,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A : {
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
writel(mask, &reg->outbound_intmask);
@@ -868,7 +878,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
break;
case ACB_ADAPTER_TYPE_B : {
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \
ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE);
writel(mask, reg->iop2drv_doorbell_mask_reg);
@@ -882,7 +892,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
{
struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
int8_t *psge = (int8_t *)&arcmsr_cdb->u;
- uint32_t address_lo, address_hi;
+ __le32 address_lo, address_hi;
int arccdbsize = 0x30;
int nseg;
@@ -900,7 +910,8 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
BUG_ON(nseg < 0);
if (nseg) {
- int length, i, cdb_sgcount = 0;
+ __le32 length;
+ int i, cdb_sgcount = 0;
struct scatterlist *sg;
/* map stor port SG list to our iop SG List. */
@@ -921,7 +932,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
pdma_sg->addresshigh = address_hi;
pdma_sg->address = address_lo;
- pdma_sg->length = length|IS_SG64_ADDR;
+ pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
psge += sizeof (struct SG64ENTRY);
arccdbsize += sizeof (struct SG64ENTRY);
}
@@ -947,7 +958,7 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- struct MessageUnit_A *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
@@ -959,7 +970,7 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
break;
case ACB_ADAPTER_TYPE_B: {
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
uint32_t ending_index, index = reg->postq_index;
ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
@@ -982,7 +993,7 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
{
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
@@ -995,7 +1006,7 @@ static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
{
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg);
@@ -1023,6 +1034,17 @@ static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ iounmap(acb->pmuA);
+ break;
+ }
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = acb->pmuB;
+ iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
+ iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
+ }
+ }
dma_free_coherent(&acb->pdev->dev,
ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
acb->dma_coherent,
@@ -1033,13 +1055,13 @@ void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
}
break;
case ACB_ADAPTER_TYPE_B: {
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
}
break;
@@ -1050,7 +1072,7 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
/*
** push inbound doorbell tell iop, driver data write ok
** and wait reply on next hwinterrupt for next Qbuffer post
@@ -1060,7 +1082,7 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_B: {
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
/*
** push inbound doorbell tell iop, driver data write ok
** and wait reply on next hwinterrupt for next Qbuffer post
@@ -1071,41 +1093,41 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
}
}
-struct QBUFFER *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
+struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
{
- static struct QBUFFER *qbuffer;
+ struct QBUFFER __iomem *qbuffer = NULL;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
- qbuffer = (struct QBUFFER __iomem *) &reg->message_rbuffer;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
}
break;
case ACB_ADAPTER_TYPE_B: {
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
- qbuffer = (struct QBUFFER __iomem *) reg->ioctl_rbuffer_reg;
+ struct MessageUnit_B *reg = acb->pmuB;
+ qbuffer = (struct QBUFFER __iomem *)reg->ioctl_rbuffer_reg;
}
break;
}
return qbuffer;
}
-static struct QBUFFER *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
+static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
{
- static struct QBUFFER *pqbuffer;
+ struct QBUFFER __iomem *pqbuffer = NULL;
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
- pqbuffer = (struct QBUFFER *) &reg->message_wbuffer;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
+ pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
}
break;
case ACB_ADAPTER_TYPE_B: {
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
pqbuffer = (struct QBUFFER __iomem *)reg->ioctl_wbuffer_reg;
}
break;
@@ -1115,15 +1137,15 @@ static struct QBUFFER *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
- struct QBUFFER *prbuffer;
+ struct QBUFFER __iomem *prbuffer;
struct QBUFFER *pQbuffer;
- uint8_t *iop_data;
+ uint8_t __iomem *iop_data;
int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
rqbuf_lastindex = acb->rqbuf_lastindex;
rqbuf_firstindex = acb->rqbuf_firstindex;
prbuffer = arcmsr_get_iop_rqbuffer(acb);
- iop_data = (uint8_t *)prbuffer->data;
+ iop_data = (uint8_t __iomem *)prbuffer->data;
iop_len = prbuffer->data_len;
my_empty_len = (rqbuf_firstindex - rqbuf_lastindex -1)&(ARCMSR_MAX_QBUFFER -1);
@@ -1151,8 +1173,8 @@ static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
uint8_t *pQbuffer;
- struct QBUFFER *pwbuffer;
- uint8_t *iop_data;
+ struct QBUFFER __iomem *pwbuffer;
+ uint8_t __iomem *iop_data;
int32_t allxfer_len = 0;
acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
@@ -1181,7 +1203,7 @@ static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
{
uint32_t outbound_doorbell;
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
outbound_doorbell = readl(&reg->outbound_doorbell);
writel(outbound_doorbell, &reg->outbound_doorbell);
@@ -1197,7 +1219,7 @@ static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
{
uint32_t flag_ccb;
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
arcmsr_drain_donequeue(acb, flag_ccb);
@@ -1208,7 +1230,7 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
{
uint32_t index;
uint32_t flag_ccb;
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
index = reg->doneq_index;
@@ -1224,7 +1246,7 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
{
uint32_t outbound_intstatus;
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
outbound_intstatus = readl(&reg->outbound_intstatus) & \
acb->outbound_int_enable;
@@ -1244,7 +1266,7 @@ static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
{
uint32_t outbound_doorbell;
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \
acb->outbound_int_enable;
@@ -1305,8 +1327,8 @@ void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
{
int32_t wqbuf_firstindex, wqbuf_lastindex;
uint8_t *pQbuffer;
- struct QBUFFER *pwbuffer;
- uint8_t *iop_data;
+ struct QBUFFER __iomem *pwbuffer;
+ uint8_t __iomem *iop_data;
int32_t allxfer_len = 0;
pwbuffer = arcmsr_get_iop_wqbuffer(acb);
@@ -1380,13 +1402,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
}
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
- struct QBUFFER *prbuffer;
- uint8_t *iop_data;
+ struct QBUFFER __iomem *prbuffer;
+ uint8_t __iomem *iop_data;
int32_t iop_len;
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
prbuffer = arcmsr_get_iop_rqbuffer(acb);
- iop_data = (uint8_t *)prbuffer->data;
+ iop_data = prbuffer->data;
iop_len = readl(&prbuffer->data_len);
while (iop_len > 0) {
acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
@@ -1669,11 +1691,11 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
{
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
char *acb_firm_model = acb->firm_model;
char *acb_firm_version = acb->firm_version;
- char *iop_firm_model = (char *) (&reg->message_rwbuffer[15]);
- char *iop_firm_version = (char *) (&reg->message_rwbuffer[17]);
+ char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
+ char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
int count;
writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
@@ -1710,13 +1732,13 @@ static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
{
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
- uint32_t *lrwbuffer = reg->msgcode_rwbuffer_reg;
+ struct MessageUnit_B *reg = acb->pmuB;
+ uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg;
char *acb_firm_model = acb->firm_model;
char *acb_firm_version = acb->firm_version;
- char *iop_firm_model = (char *) (&lrwbuffer[15]);
+ char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]);
/*firm_model,15,60-67*/
- char *iop_firm_version = (char *) (&lrwbuffer[17]);
+ char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]);
/*firm_version,17,68-83*/
int count;
@@ -1777,7 +1799,7 @@ static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
struct CommandControlBlock *ccb;
uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
@@ -1826,7 +1848,7 @@ static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
struct CommandControlBlock *poll_ccb)
{
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
struct CommandControlBlock *ccb;
uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
int index;
@@ -1918,8 +1940,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_A: {
if (ccb_phyaddr_hi32 != 0) {
- struct MessageUnit_A __iomem *reg = \
- (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
uint32_t intmask_org;
intmask_org = arcmsr_disable_outbound_ints(acb);
writel(ARCMSR_SIGNATURE_SET_CONFIG, \
@@ -1940,9 +1961,9 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
case ACB_ADAPTER_TYPE_B: {
unsigned long post_queue_phyaddr;
- uint32_t *rwbuffer;
+ uint32_t __iomem *rwbuffer;
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
uint32_t intmask_org;
intmask_org = arcmsr_disable_outbound_ints(acb);
reg->postq_index = 0;
@@ -1994,7 +2015,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
do {
firmware_state = readl(&reg->outbound_msgaddr1);
} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
@@ -2002,7 +2023,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_B: {
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
do {
firmware_state = readl(reg->iop2drv_doorbell_reg);
} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
@@ -2013,7 +2034,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
{
- struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
acb->acb_flags |= ACB_F_MSG_START_BGRB;
writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
if (arcmsr_hba_wait_msgint_ready(acb)) {
@@ -2024,7 +2045,7 @@ static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
{
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
acb->acb_flags |= ACB_F_MSG_START_BGRB;
writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell_reg);
if (arcmsr_hbb_wait_msgint_ready(acb)) {
@@ -2049,7 +2070,7 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
- struct MessageUnit_A *reg = (struct MessageUnit_A *)acb->pmu;
+ struct MessageUnit_A __iomem *reg = acb->pmuA;
uint32_t outbound_doorbell;
/* empty doorbell Qbuffer if door bell ringed */
outbound_doorbell = readl(&reg->outbound_doorbell);
@@ -2060,7 +2081,7 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
break;
case ACB_ADAPTER_TYPE_B: {
- struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ struct MessageUnit_B *reg = acb->pmuB;
/*clear interrupt and message state*/
writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index b5fa4f0..f1871ea 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1652,6 +1652,7 @@ sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
schp->buffer = kzalloc(sg_bufflen, gfp_flags);
if (!schp->buffer)
return -ENOMEM;
+ sg_init_table(schp->buffer, tablesize);
schp->sglist_len = sg_bufflen;
return tablesize; /* number of scat_gath elements allocated */
}
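The sg.c hunk above wires in the initialization step the reworked scatterlist API expects: a freshly allocated table must pass through sg_init_table() so its end marker (and, under CONFIG_DEBUG_SG, the SG_MAGIC value) is set before first use. A minimal allocation sketch along the same lines, with a hypothetical helper name and trimmed error handling, not code from this patch:

static struct scatterlist *alloc_sg_table(unsigned int nents, gfp_t gfp)
{
	struct scatterlist *sgl;

	sgl = kcalloc(nents, sizeof(*sgl), gfp);
	if (!sgl)
		return NULL;
	/* zero every entry and mark the last one as the end of the list */
	sg_init_table(sgl, nents);
	return sgl;
}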
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 5afcb2f..d8b6600 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -345,7 +345,7 @@ static int serial_probe(struct pcmcia_device *link)
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
- link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->irq.IRQInfo1 = IRQ_LEVEL_ID;
link->conf.Attributes = CONF_ENABLE_IRQ;
if (do_sound) {
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index c55459c..b3518ca 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -184,14 +184,14 @@ static int spidev_message(struct spidev_data *spidev,
if (u_tmp->rx_buf) {
k_tmp->rx_buf = buf;
if (!access_ok(VERIFY_WRITE, (u8 __user *)
- (ptrdiff_t) u_tmp->rx_buf,
+ (uintptr_t) u_tmp->rx_buf,
u_tmp->len))
goto done;
}
if (u_tmp->tx_buf) {
k_tmp->tx_buf = buf;
if (copy_from_user(buf, (const u8 __user *)
- (ptrdiff_t) u_tmp->tx_buf,
+ (uintptr_t) u_tmp->tx_buf,
u_tmp->len))
goto done;
}
@@ -224,7 +224,7 @@ static int spidev_message(struct spidev_data *spidev,
for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
if (u_tmp->rx_buf) {
if (__copy_to_user((u8 __user *)
- (ptrdiff_t) u_tmp->rx_buf, buf,
+ (uintptr_t) u_tmp->rx_buf, buf,
u_tmp->len)) {
status = -EFAULT;
goto done;
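The spidev hunks swap ptrdiff_t for uintptr_t when the u64 rx_buf/tx_buf fields are turned back into user pointers: uintptr_t is the unsigned integer type meant to carry a pointer value, while ptrdiff_t is a signed type for pointer differences, and the matching typedef is added to include/linux/types.h further down in this series. A small sketch of the round trip, using a hypothetical helper name rather than anything in the patch:

static void __user *u64_to_user_ptr(u64 addr)
{
	/* u64 -> uintptr_t -> pointer: uintptr_t (unsigned long in the
	 * kernel) is the type intended for holding pointer values, so
	 * this is the semantically correct conversion. */
	return (void __user *)(uintptr_t)addr;
}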
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 63c95af..d80baaa 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -358,7 +358,8 @@ static cputime_t task_utime(struct task_struct *p)
}
utime = (clock_t)temp;
- return clock_t_to_cputime(utime);
+ p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
+ return p->prev_utime;
}
static cputime_t task_stime(struct task_struct *p)
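The task_utime() change clamps the value reported through /proc so it never moves backwards: the user-time figure is derived from sum_exec_runtime on every read and can shift between samples, so the last reported value is cached in prev_utime and only ever raised. The same idiom in isolation, with illustrative names only:

static u64 report_monotonic(u64 *last, u64 sample)
{
	/* remember and return the largest value seen so far */
	if (sample > *last)
		*last = sample;
	return *last;
}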
diff --git a/include/asm-um/unistd.h b/include/asm-um/unistd.h
index 732c83f..38bd9d9 100644
--- a/include/asm-um/unistd.h
+++ b/include/asm-um/unistd.h
@@ -14,7 +14,6 @@ extern int um_execve(const char *file, char *const argv[], char *const env[]);
#ifdef __KERNEL__
/* We get __ARCH_WANT_OLD_STAT and __ARCH_WANT_STAT64 from the base arch */
-#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 8263a7b..128dc7a 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -180,6 +180,7 @@ enum {
ATA_CMD_VERIFY_EXT = 0x42,
ATA_CMD_STANDBYNOW1 = 0xE0,
ATA_CMD_IDLEIMMEDIATE = 0xE1,
+ ATA_CMD_SLEEP = 0xE6,
ATA_CMD_INIT_DEV_PARAMS = 0x91,
ATA_CMD_READ_NATIVE_MAX = 0xF8,
ATA_CMD_READ_NATIVE_MAX_EXT = 0x27,
@@ -235,6 +236,7 @@ enum {
/* SETFEATURE Sector counts for SATA features */
SATA_AN = 0x05, /* Asynchronous Notification */
+ SATA_DIPM = 0x03, /* Device Initiated Power Management */
/* ATAPI stuff */
ATAPI_PKT_DMA = (1 << 0),
@@ -377,6 +379,26 @@ struct ata_taskfile {
#define ata_id_cdb_intr(id) (((id)[0] & 0x60) == 0x20)
+static inline bool ata_id_has_hipm(const u16 *id)
+{
+ u16 val = id[76];
+
+ if (val == 0 || val == 0xffff)
+ return false;
+
+ return val & (1 << 9);
+}
+
+static inline bool ata_id_has_dipm(const u16 *id)
+{
+ u16 val = id[78];
+
+ if (val == 0 || val == 0xffff)
+ return false;
+
+ return val & (1 << 3);
+}
+
static inline int ata_id_has_fua(const u16 *id)
{
if ((id[84] & 0xC000) != 0x4000)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bbf906a..8396db2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -341,7 +341,6 @@ enum blk_queue_state {
struct blk_queue_tag {
struct request **tag_index; /* map of busy tags */
unsigned long *tag_map; /* bit map of free/busy tags */
- struct list_head busy_list; /* fifo list of busy tags */
int busy; /* current depth */
int max_depth; /* what we will send to device */
int real_max_depth; /* what the array can hold */
@@ -435,6 +434,7 @@ struct request_queue
unsigned int dma_alignment;
struct blk_queue_tag *queue_tags;
+ struct list_head tag_busy_list;
unsigned int nr_sorted;
unsigned int in_flight;
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index c83534e..0365ec9 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -59,7 +59,6 @@ extern void *__alloc_bootmem_core(struct bootmem_data *bdata,
unsigned long align,
unsigned long goal,
unsigned long limit);
-extern void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size);
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
extern void reserve_bootmem(unsigned long addr, unsigned long size);
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index d2a96cb..cf79853 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -32,18 +32,13 @@
* On x86-64 make the 64bit structure have the same alignment as the
* 32bit structure. This makes 32bit emulation easier.
*
- * UML/x86_64 needs the same packing as x86_64 - UML + UML_X86 +
- * 64_BIT adds up to UML/x86_64.
+ * UML/x86_64 needs the same packing as x86_64
*/
#ifdef __x86_64__
#define EPOLL_PACKED __attribute__((packed))
#else
-#if defined(CONFIG_UML) && defined(CONFIG_UML_X86) && defined(CONFIG_64BIT)
-#define EPOLL_PACKED __attribute__((packed))
-#else
#define EPOLL_PACKED
#endif
-#endif
struct epoll_event {
__u32 events;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 6fd24e0..147ccc4 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -133,11 +133,14 @@ enum {
ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */
ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */
ATA_DFLAG_AN = (1 << 7), /* AN configured */
+ ATA_DFLAG_HIPM = (1 << 8), /* device supports HIPM */
+ ATA_DFLAG_DIPM = (1 << 9), /* device supports DIPM */
ATA_DFLAG_CFG_MASK = (1 << 12) - 1,
ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */
ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */
ATA_DFLAG_SPUNDOWN = (1 << 14), /* XXX: for spindown_compat */
+ ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
ATA_DFLAG_INIT_MASK = (1 << 16) - 1,
ATA_DFLAG_DETACH = (1 << 16),
@@ -185,6 +188,7 @@ enum {
ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */
ATA_FLAG_AN = (1 << 18), /* controller supports AN */
ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */
+ ATA_FLAG_IPM = (1 << 20), /* driver can handle IPM */
/* The following flag belongs to ap->pflags but is kept in
* ap->flags because it's referenced in many LLDs and will be
@@ -234,6 +238,13 @@ enum {
ATA_TMOUT_INTERNAL = 30 * HZ,
ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,
+ /* FIXME: GoVault needs 2s but we can't afford that without
+ * parallel probing. 800ms is enough for iVDR disk
+ * HHD424020F7SV00. Increase to 2secs when parallel probing
+ * is in place.
+ */
+ ATA_TMOUT_FF_WAIT = 4 * HZ / 5,
+
/* ATA bus states */
BUS_UNKNOWN = 0,
BUS_DMA = 1,
@@ -294,6 +305,7 @@ enum {
ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */
ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
ATA_EHI_QUIET = (1 << 3), /* be quiet */
+ ATA_EHI_LPM = (1 << 4), /* link power management action */
ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */
ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */
@@ -325,6 +337,7 @@ enum {
ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */
ATA_HORKAGE_SKIP_PM = (1 << 5), /* Skip PM operations */
ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */
+ ATA_HORKAGE_IPM = (1 << 7), /* Link PM problems */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
@@ -370,6 +383,18 @@ typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes,
unsigned long deadline);
typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes);
+/*
+ * host pm policy: If you alter this, you also need to alter libata-scsi.c
+ * (for the ascii descriptions)
+ */
+enum link_pm {
+ NOT_AVAILABLE,
+ MIN_POWER,
+ MAX_PERFORMANCE,
+ MEDIUM_POWER,
+};
+extern struct class_device_attribute class_device_attr_link_power_management_policy;
+
struct ata_ioports {
void __iomem *cmd_addr;
void __iomem *data_addr;
@@ -616,6 +641,7 @@ struct ata_port {
pm_message_t pm_mesg;
int *pm_result;
+ enum link_pm pm_policy;
struct timer_list fastdrain_timer;
unsigned long fastdrain_cnt;
@@ -683,7 +709,8 @@ struct ata_port_operations {
int (*port_suspend) (struct ata_port *ap, pm_message_t mesg);
int (*port_resume) (struct ata_port *ap);
-
+ int (*enable_pm) (struct ata_port *ap, enum link_pm policy);
+ void (*disable_pm) (struct ata_port *ap);
int (*port_start) (struct ata_port *ap);
void (*port_stop) (struct ata_port *ap);
@@ -799,6 +826,7 @@ extern void ata_host_resume(struct ata_host *host);
extern int ata_ratelimit(void);
extern int ata_busy_sleep(struct ata_port *ap,
unsigned long timeout_pat, unsigned long timeout);
+extern void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline);
extern int ata_wait_ready(struct ata_port *ap, unsigned long deadline);
extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn,
void *data, unsigned long delay);
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 4571231..32326c2 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -150,7 +150,7 @@ static inline struct scatterlist *sg_last(struct scatterlist *sgl,
struct scatterlist *ret = &sgl[nents - 1];
#else
struct scatterlist *sg, *ret = NULL;
- int i;
+ unsigned int i;
for_each_sg(sgl, sg, nents, i)
ret = sg;
@@ -179,7 +179,11 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
#ifndef ARCH_HAS_SG_CHAIN
BUG();
#endif
- prv[prv_nents - 1].page_link = (unsigned long) sgl | 0x01;
+ /*
+ * Set lowest bit to indicate a link pointer, and make sure to clear
+ * the termination bit if it happens to be set.
+ */
+ prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
}
/**
@@ -239,7 +243,7 @@ static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
sg_mark_end(sgl, nents);
#ifdef CONFIG_DEBUG_SG
{
- int i;
+ unsigned int i;
for (i = 0; i < nents; i++)
sgl[i].sg_magic = SG_MAGIC;
}
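The sg_chain() fix spells out the tag bits kept in page_link: bit 0 marks an entry as a chain link to the next table and bit 1 marks list termination, so a valid link entry needs bit 0 set and bit 1 clear, which is exactly what the new expression enforces. A hedged sketch of how such a tagged entry would be decoded (hypothetical helper, not part of this patch):

static struct scatterlist *chain_target(unsigned long page_link)
{
	/* bit 0 = chain link, bit 1 = end of list */
	if (!(page_link & 0x01) || (page_link & 0x02))
		return NULL;	/* not a chain entry */
	/* mask off both flag bits to recover the pointer */
	return (struct scatterlist *)(page_link & ~0x03UL);
}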
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3c07d59..b0b1fe6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1009,6 +1009,7 @@ struct task_struct {
unsigned int rt_priority;
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
+ cputime_t prev_utime;
unsigned long nvcsw, nivcsw; /* context switch counts */
struct timespec start_time; /* monotonic time */
struct timespec real_start_time; /* boot based time */
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index 0013a0d..87b895d 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -41,17 +41,17 @@
#define _LINUX_SUNRPC_RPC_RDMA_H
struct rpcrdma_segment {
- uint32_t rs_handle; /* Registered memory handle */
- uint32_t rs_length; /* Length of the chunk in bytes */
- uint64_t rs_offset; /* Chunk virtual address or offset */
+ __be32 rs_handle; /* Registered memory handle */
+ __be32 rs_length; /* Length of the chunk in bytes */
+ __be64 rs_offset; /* Chunk virtual address or offset */
};
/*
* read chunk(s), encoded as a linked list.
*/
struct rpcrdma_read_chunk {
- uint32_t rc_discrim; /* 1 indicates presence */
- uint32_t rc_position; /* Position in XDR stream */
+ __be32 rc_discrim; /* 1 indicates presence */
+ __be32 rc_position; /* Position in XDR stream */
struct rpcrdma_segment rc_target;
};
@@ -66,29 +66,29 @@ struct rpcrdma_write_chunk {
* write chunk(s), encoded as a counted array.
*/
struct rpcrdma_write_array {
- uint32_t wc_discrim; /* 1 indicates presence */
- uint32_t wc_nchunks; /* Array count */
+ __be32 wc_discrim; /* 1 indicates presence */
+ __be32 wc_nchunks; /* Array count */
struct rpcrdma_write_chunk wc_array[0];
};
struct rpcrdma_msg {
- uint32_t rm_xid; /* Mirrors the RPC header xid */
- uint32_t rm_vers; /* Version of this protocol */
- uint32_t rm_credit; /* Buffers requested/granted */
- uint32_t rm_type; /* Type of message (enum rpcrdma_proc) */
+ __be32 rm_xid; /* Mirrors the RPC header xid */
+ __be32 rm_vers; /* Version of this protocol */
+ __be32 rm_credit; /* Buffers requested/granted */
+ __be32 rm_type; /* Type of message (enum rpcrdma_proc) */
union {
struct { /* no chunks */
- uint32_t rm_empty[3]; /* 3 empty chunk lists */
+ __be32 rm_empty[3]; /* 3 empty chunk lists */
} rm_nochunks;
struct { /* no chunks and padded */
- uint32_t rm_align; /* Padding alignment */
- uint32_t rm_thresh; /* Padding threshold */
- uint32_t rm_pempty[3]; /* 3 empty chunk lists */
+ __be32 rm_align; /* Padding alignment */
+ __be32 rm_thresh; /* Padding threshold */
+ __be32 rm_pempty[3]; /* 3 empty chunk lists */
} rm_padded;
- uint32_t rm_chunks[0]; /* read, write and reply chunks */
+ __be32 rm_chunks[0]; /* read, write and reply chunks */
} rm_body;
};
diff --git a/include/linux/types.h b/include/linux/types.h
index 4f0dad2..f4f8d19 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -37,6 +37,8 @@ typedef __kernel_gid32_t gid_t;
typedef __kernel_uid16_t uid16_t;
typedef __kernel_gid16_t gid16_t;
+typedef unsigned long uintptr_t;
+
#ifdef CONFIG_UID16
/* This is defined by include/asm-{arch}/posix_types.h */
typedef __kernel_old_uid_t old_uid_t;
diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h
index 9e8f13b..5db261a 100644
--- a/include/net/sctp/auth.h
+++ b/include/net/sctp/auth.h
@@ -103,7 +103,7 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc);
void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc,
struct sctp_hmac_algo_param *hmacs);
int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc,
- __u16 hmac_id);
+ __be16 hmac_id);
int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc);
int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc);
void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
diff --git a/kernel/fork.c b/kernel/fork.c
index ddafdfa..a65bfc4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1056,6 +1056,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->gtime = cputime_zero;
p->utimescaled = cputime_zero;
p->stimescaled = cputime_zero;
+ p->prev_utime = cputime_zero;
#ifdef CONFIG_TASK_XACCT
p->rchar = 0; /* I/O counter: bytes read */
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index b6d2ff7..22a2514 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -602,7 +602,7 @@ static int hrtimer_switch_to_hres(void)
/* "Retrigger" the interrupt to get things going */
retrigger_next_event(NULL);
local_irq_restore(flags);
- printk(KERN_INFO "Switched to high resolution mode on CPU %d\n",
+ printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
smp_processor_id());
return 1;
}
diff --git a/kernel/sched.c b/kernel/sched.c
index b4fbbc4..3f6bd11 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -172,6 +172,7 @@ struct task_group {
unsigned long shares;
/* spinlock to serialize modification to shares */
spinlock_t lock;
+ struct rcu_head rcu;
};
/* Default task group's sched entity on each cpu */
@@ -258,7 +259,6 @@ struct cfs_rq {
*/
struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
struct task_group *tg; /* group that "owns" this runqueue */
- struct rcu_head rcu;
#endif
};
@@ -3355,7 +3355,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in virtual machine since the last update
*/
-void account_guest_time(struct task_struct *p, cputime_t cputime)
+static void account_guest_time(struct task_struct *p, cputime_t cputime)
{
cputime64_t tmp;
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
@@ -5365,7 +5365,7 @@ static struct ctl_table sd_ctl_dir[] = {
.procname = "sched_domain",
.mode = 0555,
},
- {0,},
+ {0, },
};
static struct ctl_table sd_ctl_root[] = {
@@ -5375,7 +5375,7 @@ static struct ctl_table sd_ctl_root[] = {
.mode = 0555,
.child = sd_ctl_dir,
},
- {0,},
+ {0, },
};
static struct ctl_table *sd_alloc_ctl_entry(int n)
@@ -7019,8 +7019,8 @@ err:
/* rcu callback to free various structures associated with a task group */
static void free_sched_group(struct rcu_head *rhp)
{
- struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu);
- struct task_group *tg = cfs_rq->tg;
+ struct task_group *tg = container_of(rhp, struct task_group, rcu);
+ struct cfs_rq *cfs_rq;
struct sched_entity *se;
int i;
@@ -7041,7 +7041,7 @@ static void free_sched_group(struct rcu_head *rhp)
/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
- struct cfs_rq *cfs_rq;
+ struct cfs_rq *cfs_rq = NULL;
int i;
for_each_possible_cpu(i) {
@@ -7049,10 +7049,10 @@ void sched_destroy_group(struct task_group *tg)
list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
}
- cfs_rq = tg->cfs_rq[0];
+ BUG_ON(!cfs_rq);
/* wait for possible concurrent references to cfs_rqs complete */
- call_rcu(&cfs_rq->rcu, free_sched_group);
+ call_rcu(&tg->rcu, free_sched_group);
}
/* change task's runqueue when it moves between groups.
@@ -7211,25 +7211,53 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
return (u64) tg->shares;
}
-static struct cftype cpu_shares = {
- .name = "shares",
- .read_uint = cpu_shares_read_uint,
- .write_uint = cpu_shares_write_uint,
+static u64 cpu_usage_read(struct cgroup *cgrp, struct cftype *cft)
+{
+ struct task_group *tg = cgroup_tg(cgrp);
+ unsigned long flags;
+ u64 res = 0;
+ int i;
+
+ for_each_possible_cpu(i) {
+ /*
+ * Lock to prevent races with updating 64-bit counters
+ * on 32-bit arches.
+ */
+ spin_lock_irqsave(&cpu_rq(i)->lock, flags);
+ res += tg->se[i]->sum_exec_runtime;
+ spin_unlock_irqrestore(&cpu_rq(i)->lock, flags);
+ }
+ /* Convert from ns to ms */
+ do_div(res, 1000000);
+
+ return res;
+}
+
+static struct cftype cpu_files[] = {
+ {
+ .name = "shares",
+ .read_uint = cpu_shares_read_uint,
+ .write_uint = cpu_shares_write_uint,
+ },
+ {
+ .name = "usage",
+ .read_uint = cpu_usage_read,
+ },
};
static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
- return cgroup_add_file(cont, ss, &cpu_shares);
+ return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
}
struct cgroup_subsys cpu_cgroup_subsys = {
- .name = "cpu",
- .create = cpu_cgroup_create,
- .destroy = cpu_cgroup_destroy,
- .can_attach = cpu_cgroup_can_attach,
- .attach = cpu_cgroup_attach,
- .populate = cpu_cgroup_populate,
- .subsys_id = cpu_cgroup_subsys_id,
+ .name = "cpu",
+ .create = cpu_cgroup_create,
+ .destroy = cpu_cgroup_destroy,
+ .can_attach = cpu_cgroup_can_attach,
+ .attach = cpu_cgroup_attach,
+ .populate = cpu_cgroup_populate,
+ .subsys_id = cpu_cgroup_subsys_id,
.early_init = 1,
};
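The new cpu_usage_read() walks every possible CPU, takes that runqueue's lock so the 64-bit sum_exec_runtime counter is read consistently on 32-bit machines, and then converts the nanosecond total to milliseconds with do_div(), which divides a u64 in place and returns the remainder. The conversion step on its own, as a sketch with an assumed helper name:

static u64 total_ns_to_ms(u64 total_ns)
{
	/* do_div() (asm/div64.h) modifies its first argument in place;
	 * the remainder it returns is ignored here */
	do_div(total_ns, 1000000);	/* ns per ms */
	return total_ns;
}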
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9971831..01859f6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1025,7 +1025,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
}
}
-#define swap(a,b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
+#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)
/*
* Share the fairness runtime between parent and child, thus the
diff --git a/kernel/signal.c b/kernel/signal.c
index 1200630..4537bdd 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -732,7 +732,7 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
printk("%s/%d: potentially unexpected fatal signal %d.\n",
current->comm, task_pid_nr(current), signr);
-#ifdef __i386__
+#if defined(__i386__) && !defined(__arch_um__)
printk("code at %08lx: ", regs->eip);
{
int i;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 10a1347..5997456 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -320,8 +320,6 @@ ktime_t tick_nohz_get_sleep_length(void)
return ts->sleep_length;
}
-EXPORT_SYMBOL_GPL(tick_nohz_get_sleep_length);
-
/**
* nohz_restart_sched_tick - restart the idle tick from the idle task
*
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index fdb2e03..12c5f4c 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -129,7 +129,8 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
- SEQ_printf(m, "\ncpu: %d\n", cpu);
+ SEQ_printf(m, "\n");
+ SEQ_printf(m, "cpu: %d\n", cpu);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
SEQ_printf(m, " clock %d:\n", i);
print_base(m, cpu_base->clock_base + i, now);
@@ -184,7 +185,8 @@ print_tickdevice(struct seq_file *m, struct tick_device *td)
{
struct clock_event_device *dev = td->evtdev;
- SEQ_printf(m, "\nTick Device: mode: %d\n", td->mode);
+ SEQ_printf(m, "\n");
+ SEQ_printf(m, "Tick Device: mode: %d\n", td->mode);
SEQ_printf(m, "Clock Event Device: ");
if (!dev) {
diff --git a/mm/filemap.c b/mm/filemap.c
index 5209e47..7c86436 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -28,6 +28,7 @@
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
+#include <linux/backing-dev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/cpuset.h>
diff --git a/mm/nommu.c b/mm/nommu.c
index 8f09333..35622c5 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -12,6 +12,7 @@
* Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com>
*/
+#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
diff --git a/mm/slub.c b/mm/slub.c
index aac1dd3..bcdb2c8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2734,7 +2734,7 @@ static void slab_mem_offline_callback(void *arg)
* and offline_pages() function shoudn't call this
* callback. So, we must fail.
*/
- BUG_ON(atomic_read(&n->nr_slabs));
+ BUG_ON(atomic_long_read(&n->nr_slabs));
s->node[offline_node] = NULL;
kmem_cache_free(kmalloc_caches, n);
diff --git a/mm/sparse.c b/mm/sparse.c
index 08fb14f..e06f514 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -220,12 +220,6 @@ static int __meminit sparse_init_one_section(struct mem_section *ms,
return 1;
}
-__attribute__((weak)) __init
-void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
-{
- return NULL;
-}
-
static unsigned long usemap_size(void)
{
unsigned long size_bytes;
@@ -267,11 +261,6 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
if (map)
return map;
- map = alloc_bootmem_high_node(NODE_DATA(nid),
- sizeof(struct page) * PAGES_PER_SECTION);
- if (map)
- return map;
-
map = alloc_bootmem_node(NODE_DATA(nid),
sizeof(struct page) * PAGES_PER_SECTION);
return map;
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 8af1004..6d5fa6b 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -556,7 +556,7 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc)
return &sctp_hmac_list[id];
}
-static int __sctp_auth_find_hmacid(__u16 *hmacs, int n_elts, __u16 hmac_id)
+static int __sctp_auth_find_hmacid(__be16 *hmacs, int n_elts, __be16 hmac_id)
{
int found = 0;
int i;
@@ -573,7 +573,7 @@ static int __sctp_auth_find_hmacid(__u16 *hmacs, int n_elts, __u16 hmac_id)
/* See if the HMAC_ID is one that we claim as supported */
int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc,
- __u16 hmac_id)
+ __be16 hmac_id)
{
struct sctp_hmac_algo_param *hmacs;
__u16 n_elt;
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 12db635..f877b88 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -181,7 +181,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
struct rpcrdma_read_chunk *cur_rchunk = NULL;
struct rpcrdma_write_array *warray = NULL;
struct rpcrdma_write_chunk *cur_wchunk = NULL;
- u32 *iptr = headerp->rm_body.rm_chunks;
+ __be32 *iptr = headerp->rm_body.rm_chunks;
if (type == rpcrdma_readch || type == rpcrdma_areadch) {
/* a read chunk - server will RDMA Read our memory */
@@ -217,7 +217,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
xdr_encode_hyper(
- (u32 *)&cur_rchunk->rc_target.rs_offset,
+ (__be32 *)&cur_rchunk->rc_target.rs_offset,
seg->mr_base);
dprintk("RPC: %s: read chunk "
"elem %d@0x%llx:0x%x pos %d (%s)\n", __func__,
@@ -229,7 +229,7 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
xdr_encode_hyper(
- (u32 *)&cur_wchunk->wc_target.rs_offset,
+ (__be32 *)&cur_wchunk->wc_target.rs_offset,
seg->mr_base);
dprintk("RPC: %s: %s chunk "
"elem %d@0x%llx:0x%x (%s)\n", __func__,
@@ -257,14 +257,14 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
* finish off header. If write, marshal discrim and nchunks.
*/
if (cur_rchunk) {
- iptr = (u32 *) cur_rchunk;
+ iptr = (__be32 *) cur_rchunk;
*iptr++ = xdr_zero; /* finish the read chunk list */
*iptr++ = xdr_zero; /* encode a NULL write chunk list */
*iptr++ = xdr_zero; /* encode a NULL reply chunk */
} else {
warray->wc_discrim = xdr_one;
warray->wc_nchunks = htonl(nchunks);
- iptr = (u32 *) cur_wchunk;
+ iptr = (__be32 *) cur_wchunk;
if (type == rpcrdma_writech) {
*iptr++ = xdr_zero; /* finish the write chunk list */
*iptr++ = xdr_zero; /* encode a NULL reply chunk */
@@ -559,7 +559,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
* RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
*/
static int
-rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
+rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, __be32 **iptrp)
{
unsigned int i, total_len;
struct rpcrdma_write_chunk *cur_wchunk;
@@ -573,7 +573,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
ifdebug(FACILITY) {
u64 off;
- xdr_decode_hyper((u32 *)&seg->rs_offset, &off);
+ xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
__func__,
ntohl(seg->rs_length),
@@ -585,7 +585,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
}
/* check and adjust for properly terminated write chunk */
if (wrchunk) {
- u32 *w = (u32 *) cur_wchunk;
+ __be32 *w = (__be32 *) cur_wchunk;
if (*w++ != xdr_zero)
return -1;
cur_wchunk = (struct rpcrdma_write_chunk *) w;
@@ -593,7 +593,7 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int max, int wrchunk, u32 **iptrp)
if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
return -1;
- *iptrp = (u32 *) cur_wchunk;
+ *iptrp = (__be32 *) cur_wchunk;
return total_len;
}
@@ -721,7 +721,7 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
struct rpc_rqst *rqst;
struct rpc_xprt *xprt = rep->rr_xprt;
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
- u32 *iptr;
+ __be32 *iptr;
int i, rdmalen, status;
/* Check status. If bad, signal disconnect and return rep to pool */
@@ -801,7 +801,7 @@ repost:
r_xprt->rx_stats.total_rdma_reply += rdmalen;
} else {
/* else ordinary inline */
- iptr = (u32 *)((unsigned char *)headerp + 28);
+ iptr = (__be32 *)((unsigned char *)headerp + 28);
rep->rr_len -= 28; /*sizeof *headerp;*/
status = rep->rr_len;
}
@@ -816,7 +816,7 @@ repost:
headerp->rm_body.rm_chunks[2] != xdr_one ||
req->rl_nchunks == 0)
goto badheader;
- iptr = (u32 *)((unsigned char *)headerp + 28);
+ iptr = (__be32 *)((unsigned char *)headerp + 28);
rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
if (rdmalen < 0)
goto badheader;