author     Jeff Garzik <jeff@garzik.org>   2006-08-29 17:55:59 -0400
committer  Jeff Garzik <jeff@garzik.org>   2006-08-29 17:55:59 -0400
commit     b01e86fee6c821e4e003fd4e9f65453ac478a58e
tree       21695e10cbe7001d2ccc8c87cee5e7a7865b1025
parent     e889173c2c67dc288e9b050ab066cfae151b047e
parent     60d4684068ff1eec78f55b5888d0bd2d4cca1520
Merge /spare/repo/linux-2.6 into upstream
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/ac.c | 2
-rw-r--r--  drivers/acpi/acpi_memhotplug.c | 8
-rw-r--r--  drivers/acpi/battery.c | 3
-rw-r--r--  drivers/acpi/bus.c | 11
-rw-r--r--  drivers/acpi/hotkey.c | 281
-rw-r--r--  drivers/acpi/i2c_ec.c | 2
-rw-r--r--  drivers/acpi/osl.c | 10
-rw-r--r--  drivers/acpi/sbs.c | 3
-rw-r--r--  drivers/acpi/scan.c | 12
-rw-r--r--  drivers/acpi/utils.c | 2
-rw-r--r--  drivers/base/node.c | 2
-rw-r--r--  drivers/cdrom/gscd.c | 2
-rw-r--r--  drivers/char/moxa.c | 8
-rw-r--r--  drivers/char/tty_io.c | 808
-rw-r--r--  drivers/char/tty_ioctl.c | 59
-rw-r--r--  drivers/char/vt_ioctl.c | 2
-rw-r--r--  drivers/hwmon/abituguru.c | 99
-rw-r--r--  drivers/i2c/chips/tps65010.c | 12
-rw-r--r--  drivers/ieee1394/ohci1394.c | 4
-rw-r--r--  drivers/infiniband/core/cache.c | 3
-rw-r--r--  drivers/infiniband/core/sa_query.c | 3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c | 6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 11
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h | 4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 54
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 22
-rw-r--r--  drivers/input/keyboard/atkbd.c | 2
-rw-r--r--  drivers/input/misc/wistron_btns.c | 16
-rw-r--r--  drivers/input/mouse/psmouse-base.c | 7
-rw-r--r--  drivers/md/dm-raid1.c | 4
-rw-r--r--  drivers/md/md.c | 13
-rw-r--r--  drivers/md/raid1.c | 7
-rw-r--r--  drivers/message/fusion/mptbase.h | 1
-rw-r--r--  drivers/message/fusion/mptfc.c | 92
-rw-r--r--  drivers/mtd/nand/ams-delta.c | 10
-rw-r--r--  drivers/mtd/nand/nand_base.c | 6
-rw-r--r--  drivers/net/3c515.c | 3
-rw-r--r--  drivers/net/82596.c | 9
-rw-r--r--  drivers/net/Kconfig | 41
-rw-r--r--  drivers/net/Makefile | 3
-rw-r--r--  drivers/net/ac3200.c | 3
-rw-r--r--  drivers/net/appletalk/cops.c | 2
-rw-r--r--  drivers/net/at1700.c | 2
-rw-r--r--  drivers/net/cs89x0.c | 3
-rw-r--r--  drivers/net/dm9000.c | 14
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 89
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 32
-rw-r--r--  drivers/net/e1000/e1000_main.c | 2
-rw-r--r--  drivers/net/e2100.c | 4
-rw-r--r--  drivers/net/eepro.c | 3
-rw-r--r--  drivers/net/eexpress.c | 2
-rw-r--r--  drivers/net/es3210.c | 3
-rw-r--r--  drivers/net/eth16i.c | 2
-rw-r--r--  drivers/net/fealnx.c | 2
-rw-r--r--  drivers/net/fs_enet/Makefile | 6
-rw-r--r--  drivers/net/fs_enet/fec.h | 42
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 207
-rw-r--r--  drivers/net/fs_enet/fs_enet-mii.c | 505
-rw-r--r--  drivers/net/fs_enet/fs_enet.h | 40
-rw-r--r--  drivers/net/fs_enet/mac-fcc.c | 32
-rw-r--r--  drivers/net/fs_enet/mac-fec.c | 142
-rw-r--r--  drivers/net/fs_enet/mac-scc.c | 4
-rw-r--r--  drivers/net/fs_enet/mii-bitbang.c | 448
-rw-r--r--  drivers/net/fs_enet/mii-fec.c | 243
-rw-r--r--  drivers/net/fs_enet/mii-fixed.c | 91
-rw-r--r--  drivers/net/lance.c | 2
-rw-r--r--  drivers/net/lne390.c | 2
-rw-r--r--  drivers/net/ni52.c | 2
-rw-r--r--  drivers/net/ni65.c | 2
-rw-r--r--  drivers/net/pcmcia/xirc2ps_cs.c | 18
-rw-r--r--  drivers/net/pcnet32.c | 25
-rw-r--r--  drivers/net/phy/Kconfig | 17
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/fixed.c | 358
-rw-r--r--  drivers/net/phy/mdio_bus.c | 1
-rw-r--r--  drivers/net/phy/phy_device.c | 51
-rw-r--r--  drivers/net/s2io.c | 1
-rw-r--r--  drivers/net/seeq8005.c | 2
-rw-r--r--  drivers/net/skge.c | 2
-rw-r--r--  drivers/net/sky2.c | 5
-rw-r--r--  drivers/net/smc911x.c | 1
-rw-r--r--  drivers/net/smc91x.c | 8
-rw-r--r--  drivers/net/smc91x.h | 29
-rw-r--r--  drivers/net/spider_net.c | 12
-rw-r--r--  drivers/net/spider_net.h | 3
-rw-r--r--  drivers/net/spider_net_ethtool.c | 13
-rw-r--r--  drivers/net/sundance.c | 2
-rw-r--r--  drivers/net/tokenring/ibmtr.c | 4
-rw-r--r--  drivers/net/tokenring/smctr.c | 5
-rw-r--r--  drivers/net/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/tulip/xircom_cb.c | 3
-rw-r--r--  drivers/net/ucc_geth.c | 4278
-rw-r--r--  drivers/net/ucc_geth.h | 1339
-rw-r--r--  drivers/net/ucc_geth_phy.c | 801
-rw-r--r--  drivers/net/ucc_geth_phy.h | 217
-rw-r--r--  drivers/net/via-rhine.c | 90
-rw-r--r--  drivers/net/wan/c101.c | 9
-rw-r--r--  drivers/net/wd.c | 4
-rw-r--r--  drivers/net/wireless/hostap/hostap_hw.c | 3
-rw-r--r--  drivers/net/wireless/spectrum_cs.c | 2
-rw-r--r--  drivers/pci/hotplug/Kconfig | 2
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_pci.c | 54
-rw-r--r--  drivers/pci/pci-driver.c | 3
-rw-r--r--  drivers/pci/quirks.c | 59
-rw-r--r--  drivers/rtc/rtc-s3c.c | 124
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 8
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 14
-rw-r--r--  drivers/s390/net/qeth_main.c | 8
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 120
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c | 5
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 15
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 212
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 9
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 122
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 79
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 73
-rw-r--r--  drivers/scsi/esp.c | 3
-rw-r--r--  drivers/scsi/hptiop.c | 568
-rw-r--r--  drivers/scsi/ide-scsi.c | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 209
-rw-r--r--  drivers/scsi/iscsi_tcp.h | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 214
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 101
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 21
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 13
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 16
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 24
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 21
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 57
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 20
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/megaraid/mega_common.h | 6
-rw-r--r--  drivers/scsi/megaraid/megaraid_ioctl.h | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 42
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.h | 4
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 15
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/scsi_error.c | 18
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 15
-rw-r--r--  drivers/scsi/sg.c | 8
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 2
-rw-r--r--  drivers/serial/sunsab.c | 9
-rw-r--r--  drivers/serial/sunzilog.c | 3
-rw-r--r--  drivers/usb/misc/cypress_cy7c63.c | 2
-rw-r--r--  drivers/usb/serial/pl2303.c | 1
-rw-r--r--  drivers/usb/serial/pl2303.h | 4
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 2
-rw-r--r--  drivers/video/imacfb.c | 4
-rw-r--r--  drivers/video/matrox/g450_pll.c | 8
158 files changed, 10347 insertions, 2921 deletions
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 96309b9..11abc7b 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -285,6 +285,8 @@ static int __init acpi_ac_init(void)
{
int result;
+ if (acpi_disabled)
+ return -ENODEV;
acpi_ac_dir = acpi_lock_ac_dir();
if (!acpi_ac_dir)
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index b0d4b14..1dda370 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -484,10 +484,8 @@ acpi_memory_register_notify_handler(acpi_handle handle,
status = is_memory_device(handle);
- if (ACPI_FAILURE(status)){
- ACPI_EXCEPTION((AE_INFO, status, "handle is no memory device"));
+ if (ACPI_FAILURE(status))
return AE_OK; /* continue */
- }
status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
acpi_memory_device_notify, NULL);
@@ -503,10 +501,8 @@ acpi_memory_deregister_notify_handler(acpi_handle handle,
status = is_memory_device(handle);
- if (ACPI_FAILURE(status)){
- ACPI_EXCEPTION((AE_INFO, status, "handle is no memory device"));
+ if (ACPI_FAILURE(status))
return AE_OK; /* continue */
- }
status = acpi_remove_notify_handler(handle,
ACPI_SYSTEM_NOTIFY,
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 6e52217..9810e2a 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -757,6 +757,9 @@ static int __init acpi_battery_init(void)
{
int result;
+ if (acpi_disabled)
+ return -ENODEV;
+
acpi_battery_dir = acpi_lock_battery_dir();
if (!acpi_battery_dir)
return -ENODEV;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index b297769..279c4ba 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
+#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/pm.h>
@@ -68,7 +69,8 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device);
if (ACPI_FAILURE(status) || !*device) {
- ACPI_EXCEPTION((AE_INFO, status, "No context for object [%p]", handle));
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
+ handle));
return -ENODEV;
}
@@ -192,7 +194,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
/* Make sure this is a valid target state */
if (!device->flags.power_manageable) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n",
device->kobj.name));
return -ENODEV;
}
@@ -738,7 +740,10 @@ static int __init acpi_init(void)
return -ENODEV;
}
- firmware_register(&acpi_subsys);
+ result = firmware_register(&acpi_subsys);
+ if (result < 0)
+ printk(KERN_WARNING "%s: firmware_register error: %d\n",
+ __FUNCTION__, result);
result = acpi_bus_init();
diff --git a/drivers/acpi/hotkey.c b/drivers/acpi/hotkey.c
index 32c9d88..1ba2db6 100644
--- a/drivers/acpi/hotkey.c
+++ b/drivers/acpi/hotkey.c
@@ -91,6 +91,14 @@ enum {
HK_EVENT_ENTERRING_S5,
};
+enum conf_entry_enum {
+ bus_handle = 0,
+ bus_method = 1,
+ action_handle = 2,
+ method = 3,
+ LAST_CONF_ENTRY
+};
+
/* procdir we use */
static struct proc_dir_entry *hotkey_proc_dir;
static struct proc_dir_entry *hotkey_config;
@@ -244,19 +252,15 @@ static int hotkey_info_open_fs(struct inode *inode, struct file *file)
static char *format_result(union acpi_object *object)
{
- char *buf = NULL;
-
- buf = (char *)kmalloc(RESULT_STR_LEN, GFP_KERNEL);
- if (buf)
- memset(buf, 0, RESULT_STR_LEN);
- else
- goto do_fail;
+ char *buf;
+ buf = kzalloc(RESULT_STR_LEN, GFP_KERNEL);
+ if (!buf)
+ return NULL;
/* Now, just support integer type */
if (object->type == ACPI_TYPE_INTEGER)
sprintf(buf, "%d\n", (u32) object->integer.value);
- do_fail:
- return (buf);
+ return buf;
}
static int hotkey_polling_seq_show(struct seq_file *seq, void *offset)
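The format_result() rewrite above is the common kmalloc()+memset() to kzalloc() conversion, with an early return replacing the do_fail goto. For readers more at home in user space, the same simplification looks like this (illustrative only; the real value of RESULT_STR_LEN is defined elsewhere in the driver and is not shown here):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RESULT_STR_LEN 80	/* arbitrary size for the example */

static char *alloc_result_before(void)
{
	char *buf = malloc(RESULT_STR_LEN);

	if (buf)
		memset(buf, 0, RESULT_STR_LEN);	/* zero by hand */
	return buf;
}

static char *alloc_result_after(void)
{
	return calloc(1, RESULT_STR_LEN);	/* allocation and zeroing in one call */
}

int main(void)
{
	char *a = alloc_result_before();
	char *b = alloc_result_after();

	printf("both zeroed: %d\n", a && b && a[0] == 0 && b[0] == 0);
	free(a);
	free(b);
	return 0;
}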
@@ -486,98 +490,102 @@ static void free_hotkey_device(union acpi_hotkey *key)
static void free_hotkey_buffer(union acpi_hotkey *key)
{
+ /* key would never be null, action method could be */
kfree(key->event_hotkey.action_method);
}
static void free_poll_hotkey_buffer(union acpi_hotkey *key)
{
+ /* key would never be null, others could be*/
kfree(key->poll_hotkey.action_method);
kfree(key->poll_hotkey.poll_method);
kfree(key->poll_hotkey.poll_result);
}
static int
-init_hotkey_device(union acpi_hotkey *key, char *bus_str, char *action_str,
- char *method, int std_num, int external_num)
+init_hotkey_device(union acpi_hotkey *key, char **config_entry,
+ int std_num, int external_num)
{
acpi_handle tmp_handle;
acpi_status status = AE_OK;
-
if (std_num < 0 || IS_POLL(std_num) || !key)
goto do_fail;
- if (!bus_str || !action_str || !method)
+ if (!config_entry[bus_handle] || !config_entry[action_handle]
+ || !config_entry[method])
goto do_fail;
key->link.hotkey_type = ACPI_HOTKEY_EVENT;
key->link.hotkey_standard_num = std_num;
key->event_hotkey.flag = 0;
- key->event_hotkey.action_method = method;
+ key->event_hotkey.action_method = config_entry[method];
- status =
- acpi_get_handle(NULL, bus_str, &(key->event_hotkey.bus_handle));
+ status = acpi_get_handle(NULL, config_entry[bus_handle],
+ &(key->event_hotkey.bus_handle));
if (ACPI_FAILURE(status))
- goto do_fail;
+ goto do_fail_zero;
key->event_hotkey.external_hotkey_num = external_num;
- status =
- acpi_get_handle(NULL, action_str,
+ status = acpi_get_handle(NULL, config_entry[action_handle],
&(key->event_hotkey.action_handle));
if (ACPI_FAILURE(status))
- goto do_fail;
+ goto do_fail_zero;
status = acpi_get_handle(key->event_hotkey.action_handle,
- method, &tmp_handle);
+ config_entry[method], &tmp_handle);
if (ACPI_FAILURE(status))
- goto do_fail;
+ goto do_fail_zero;
return AE_OK;
- do_fail:
+do_fail_zero:
+ key->event_hotkey.action_method = NULL;
+do_fail:
return -ENODEV;
}
static int
-init_poll_hotkey_device(union acpi_hotkey *key,
- char *poll_str,
- char *poll_method,
- char *action_str, char *action_method, int std_num)
+init_poll_hotkey_device(union acpi_hotkey *key, char **config_entry,
+ int std_num)
{
acpi_status status = AE_OK;
acpi_handle tmp_handle;
-
if (std_num < 0 || IS_EVENT(std_num) || !key)
goto do_fail;
-
- if (!poll_str || !poll_method || !action_str || !action_method)
+ if (!config_entry[bus_handle] ||!config_entry[bus_method] ||
+ !config_entry[action_handle] || !config_entry[method])
goto do_fail;
key->link.hotkey_type = ACPI_HOTKEY_POLLING;
key->link.hotkey_standard_num = std_num;
key->poll_hotkey.flag = 0;
- key->poll_hotkey.poll_method = poll_method;
- key->poll_hotkey.action_method = action_method;
+ key->poll_hotkey.poll_method = config_entry[bus_method];
+ key->poll_hotkey.action_method = config_entry[method];
- status =
- acpi_get_handle(NULL, poll_str, &(key->poll_hotkey.poll_handle));
+ status = acpi_get_handle(NULL, config_entry[bus_handle],
+ &(key->poll_hotkey.poll_handle));
if (ACPI_FAILURE(status))
- goto do_fail;
+ goto do_fail_zero;
status = acpi_get_handle(key->poll_hotkey.poll_handle,
- poll_method, &tmp_handle);
+ config_entry[bus_method], &tmp_handle);
if (ACPI_FAILURE(status))
- goto do_fail;
+ goto do_fail_zero;
status =
- acpi_get_handle(NULL, action_str,
+ acpi_get_handle(NULL, config_entry[action_handle],
&(key->poll_hotkey.action_handle));
if (ACPI_FAILURE(status))
- goto do_fail;
+ goto do_fail_zero;
status = acpi_get_handle(key->poll_hotkey.action_handle,
- action_method, &tmp_handle);
+ config_entry[method], &tmp_handle);
if (ACPI_FAILURE(status))
- goto do_fail;
+ goto do_fail_zero;
key->poll_hotkey.poll_result =
(union acpi_object *)kmalloc(sizeof(union acpi_object), GFP_KERNEL);
if (!key->poll_hotkey.poll_result)
- goto do_fail;
+ goto do_fail_zero;
return AE_OK;
- do_fail:
+
+do_fail_zero:
+ key->poll_hotkey.poll_method = NULL;
+ key->poll_hotkey.action_method = NULL;
+do_fail:
return -ENODEV;
}
@@ -652,17 +660,18 @@ static int hotkey_poll_config_seq_show(struct seq_file *seq, void *offset)
}
static int
-get_parms(char *config_record,
- int *cmd,
- char **bus_handle,
- char **bus_method,
- char **action_handle,
- char **method, int *internal_event_num, int *external_event_num)
+get_parms(char *config_record, int *cmd, char **config_entry,
+ int *internal_event_num, int *external_event_num)
{
+/* the format of *config_record =
+ * "1:\d+:*" : "cmd:internal_event_num"
+ * "\d+:\w+:\w+:\w+:\w+:\d+:\d+" :
+ * "cmd:bus_handle:bus_method:action_handle:method:internal_event_num:external_event_num"
+ */
char *tmp, *tmp1, count;
+ int i;
sscanf(config_record, "%d", cmd);
-
if (*cmd == 1) {
if (sscanf(config_record, "%d:%d", cmd, internal_event_num) !=
2)
@@ -674,59 +683,27 @@ get_parms(char *config_record,
if (!tmp)
goto do_fail;
tmp++;
- tmp1 = strchr(tmp, ':');
- if (!tmp1)
- goto do_fail;
-
- count = tmp1 - tmp;
- *bus_handle = (char *)kmalloc(count + 1, GFP_KERNEL);
- if (!*bus_handle)
- goto do_fail;
- strncpy(*bus_handle, tmp, count);
- *(*bus_handle + count) = 0;
-
- tmp = tmp1;
- tmp++;
- tmp1 = strchr(tmp, ':');
- if (!tmp1)
- goto do_fail;
- count = tmp1 - tmp;
- *bus_method = (char *)kmalloc(count + 1, GFP_KERNEL);
- if (!*bus_method)
- goto do_fail;
- strncpy(*bus_method, tmp, count);
- *(*bus_method + count) = 0;
-
- tmp = tmp1;
- tmp++;
- tmp1 = strchr(tmp, ':');
- if (!tmp1)
- goto do_fail;
- count = tmp1 - tmp;
- *action_handle = (char *)kmalloc(count + 1, GFP_KERNEL);
- if (!*action_handle)
- goto do_fail;
- strncpy(*action_handle, tmp, count);
- *(*action_handle + count) = 0;
-
- tmp = tmp1;
- tmp++;
- tmp1 = strchr(tmp, ':');
- if (!tmp1)
- goto do_fail;
- count = tmp1 - tmp;
- *method = (char *)kmalloc(count + 1, GFP_KERNEL);
- if (!*method)
- goto do_fail;
- strncpy(*method, tmp, count);
- *(*method + count) = 0;
-
- if (sscanf(tmp1 + 1, "%d:%d", internal_event_num, external_event_num) <=
- 0)
- goto do_fail;
-
- return 6;
- do_fail:
+ for (i = 0; i < LAST_CONF_ENTRY; i++) {
+ tmp1 = strchr(tmp, ':');
+ if (!tmp1) {
+ goto do_fail;
+ }
+ count = tmp1 - tmp;
+ config_entry[i] = kzalloc(count + 1, GFP_KERNEL);
+ if (!config_entry[i])
+ goto handle_failure;
+ strncpy(config_entry[i], tmp, count);
+ tmp = tmp1 + 1;
+ }
+ if (sscanf(tmp, "%d:%d", internal_event_num, external_event_num) <= 0)
+ goto handle_failure;
+ if (!IS_OTHERS(*internal_event_num)) {
+ return 6;
+ }
+handle_failure:
+ while (i-- > 0)
+ kfree(config_entry[i]);
+do_fail:
return -1;
}
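The comment added at the top of get_parms() pins down the record format ("cmd:bus_handle:bus_method:action_handle:method:internal_event_num:external_event_num"), and the loop over the conf_entry_enum indices replaces four copy-pasted strchr()/kmalloc() blocks, freeing whatever was already allocated when parsing fails part-way through. A minimal user-space sketch of the same split-and-unwind pattern; the helper name and the sample record are invented for illustration:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUM_FIELDS 4	/* bus_handle, bus_method, action_handle, method */

static int split_record(const char *rec, char *fields[NUM_FIELDS],
			int *internal_ev, int *external_ev)
{
	int cmd, i = 0;
	const char *sep;

	if (sscanf(rec, "%d", &cmd) != 1 || !(rec = strchr(rec, ':')))
		return -1;
	rec++;
	for (i = 0; i < NUM_FIELDS; i++) {
		sep = strchr(rec, ':');
		if (!sep)
			goto fail;
		fields[i] = strndup(rec, (size_t)(sep - rec));
		if (!fields[i])
			goto fail;
		rec = sep + 1;
	}
	if (sscanf(rec, "%d:%d", internal_ev, external_ev) == 2)
		return cmd;
fail:
	while (i-- > 0)		/* free only what was allocated */
		free(fields[i]);
	return -1;
}

int main(void)
{
	char *f[NUM_FIELDS];
	int in_ev, ex_ev;

	if (split_record("3:\\_SB.PCI0:POLL:\\_SB.PCI0:ACT0:5:0",
			 f, &in_ev, &ex_ev) < 0)
		return 1;
	printf("%s %s %s %s %d %d\n", f[0], f[1], f[2], f[3], in_ev, ex_ev);
	return 0;
}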
@@ -736,50 +713,34 @@ static ssize_t hotkey_write_config(struct file *file,
size_t count, loff_t * data)
{
char *config_record = NULL;
- char *bus_handle = NULL;
- char *bus_method = NULL;
- char *action_handle = NULL;
- char *method = NULL;
+ char *config_entry[LAST_CONF_ENTRY];
int cmd, internal_event_num, external_event_num;
int ret = 0;
- union acpi_hotkey *key = NULL;
+ union acpi_hotkey *key = kzalloc(sizeof(union acpi_hotkey), GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
- config_record = (char *)kmalloc(count + 1, GFP_KERNEL);
- if (!config_record)
+ config_record = kzalloc(count + 1, GFP_KERNEL);
+ if (!config_record) {
+ kfree(key);
return -ENOMEM;
+ }
if (copy_from_user(config_record, buffer, count)) {
kfree(config_record);
+ kfree(key);
printk(KERN_ERR PREFIX "Invalid data\n");
return -EINVAL;
}
- config_record[count] = 0;
-
- ret = get_parms(config_record,
- &cmd,
- &bus_handle,
- &bus_method,
- &action_handle,
- &method, &internal_event_num, &external_event_num);
-
+ ret = get_parms(config_record, &cmd, config_entry,
+ &internal_event_num, &external_event_num);
kfree(config_record);
- if (IS_OTHERS(internal_event_num))
- goto do_fail;
if (ret != 6) {
- do_fail:
- kfree(bus_handle);
- kfree(bus_method);
- kfree(action_handle);
- kfree(method);
printk(KERN_ERR PREFIX "Invalid data format ret=%d\n", ret);
return -EINVAL;
}
- key = kmalloc(sizeof(union acpi_hotkey), GFP_KERNEL);
- if (!key)
- goto do_fail;
- memset(key, 0, sizeof(union acpi_hotkey));
if (cmd == 1) {
union acpi_hotkey *tmp = NULL;
tmp = get_hotkey_by_event(&global_hotkey_list,
@@ -791,34 +752,19 @@ static ssize_t hotkey_write_config(struct file *file,
goto cont_cmd;
}
if (IS_EVENT(internal_event_num)) {
- kfree(bus_method);
- ret = init_hotkey_device(key, bus_handle, action_handle, method,
- internal_event_num,
- external_event_num);
- } else
- ret = init_poll_hotkey_device(key, bus_handle, bus_method,
- action_handle, method,
- internal_event_num);
- if (ret) {
- kfree(bus_handle);
- kfree(action_handle);
- if (IS_EVENT(internal_event_num))
- free_hotkey_buffer(key);
- else
- free_poll_hotkey_buffer(key);
- kfree(key);
- printk(KERN_ERR PREFIX "Invalid hotkey\n");
- return -EINVAL;
+ if (init_hotkey_device(key, config_entry,
+ internal_event_num, external_event_num))
+ goto init_hotkey_fail;
+ } else {
+ if (init_poll_hotkey_device(key, config_entry,
+ internal_event_num))
+ goto init_poll_hotkey_fail;
}
-
- cont_cmd:
- kfree(bus_handle);
- kfree(action_handle);
-
+cont_cmd:
switch (cmd) {
case 0:
- if (get_hotkey_by_event
- (&global_hotkey_list, key->link.hotkey_standard_num))
+ if (get_hotkey_by_event(&global_hotkey_list,
+ key->link.hotkey_standard_num))
goto fail_out;
else
hotkey_add(key);
@@ -827,6 +773,7 @@ static ssize_t hotkey_write_config(struct file *file,
hotkey_remove(key);
break;
case 2:
+ /* key is kfree()ed if matched*/
if (hotkey_update(key))
goto fail_out;
break;
@@ -835,11 +782,22 @@ static ssize_t hotkey_write_config(struct file *file,
break;
}
return count;
- fail_out:
- if (IS_EVENT(internal_event_num))
- free_hotkey_buffer(key);
- else
- free_poll_hotkey_buffer(key);
+
+init_poll_hotkey_fail: /* failed init_poll_hotkey_device */
+ kfree(config_entry[bus_method]);
+ config_entry[bus_method] = NULL;
+init_hotkey_fail: /* failed init_hotkey_device */
+ kfree(config_entry[method]);
+fail_out:
+ kfree(config_entry[bus_handle]);
+ kfree(config_entry[action_handle]);
+ /* No double free since elements =NULL for error cases */
+ if (IS_EVENT(internal_event_num)) {
+ if (config_entry[bus_method])
+ kfree(config_entry[bus_method]);
+ free_hotkey_buffer(key); /* frees [method] */
+ } else
+ free_poll_hotkey_buffer(key); /* frees [bus_method]+[method] */
kfree(key);
printk(KERN_ERR PREFIX "invalid key\n");
return -EINVAL;
@@ -923,10 +881,9 @@ static ssize_t hotkey_execute_aml_method(struct file *file,
union acpi_hotkey *key;
- arg = (char *)kmalloc(count + 1, GFP_KERNEL);
+ arg = kzalloc(count + 1, GFP_KERNEL);
if (!arg)
return -ENOMEM;
- arg[count] = 0;
if (copy_from_user(arg, buffer, count)) {
kfree(arg);

diff --git a/drivers/acpi/i2c_ec.c b/drivers/acpi/i2c_ec.c
index 84239d5..6809c28 100644
--- a/drivers/acpi/i2c_ec.c
+++ b/drivers/acpi/i2c_ec.c
@@ -330,7 +330,7 @@ static int acpi_ec_hc_add(struct acpi_device *device)
status = acpi_evaluate_integer(ec_hc->handle, "_EC", NULL, &val);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Error obtaining _EC\n"));
- kfree(ec_hc->smbus);
+ kfree(ec_hc);
kfree(smbus);
return -EIO;
}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index b7d1514..507f051 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -746,6 +746,16 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
handle, units, timeout));
+ /*
+ * This can be called during resume with interrupts off.
+ * Like boot-time, we should be single threaded and will
+ * always get the lock if we try -- timeout or not.
+ * If this doesn't succeed, then we will oops courtesy of
+ * might_sleep() in down().
+ */
+ if (!down_trylock(sem))
+ return AE_OK;
+
switch (timeout) {
/*
* No Wait:
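The hunk above makes acpi_os_wait_semaphore() attempt down_trylock() before falling into the timeout logic, so resume-time callers running with interrupts off never reach a sleeping down(). A user-space sketch of the same try-first, block-later shape, using POSIX semaphores in place of the kernel primitives (sem_timedwait() stands in for the blocking wait; the function name is invented):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static int wait_sem(sem_t *sem, int timeout_ms)
{
	struct timespec ts;

	/* Fast path: never sleep if the semaphore is already available. */
	if (sem_trywait(sem) == 0)
		return 0;

	if (timeout_ms == 0)
		return -EAGAIN;		/* "no wait" semantics */

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}
	return sem_timedwait(sem, &ts) == 0 ? 0 : -errno;
}

int main(void)
{
	sem_t sem;

	sem_init(&sem, 0, 1);
	printf("first wait:  %d\n", wait_sem(&sem, 100));	/* trywait succeeds */
	printf("second wait: %d\n", wait_sem(&sem, 100));	/* times out */
	sem_destroy(&sem);
	return 0;
}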
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index db7b350..62bef0b 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -1714,6 +1714,9 @@ static int __init acpi_sbs_init(void)
{
int result = 0;
+ if (acpi_disabled)
+ return -ENODEV;
+
init_MUTEX(&sbs_sem);
if (capacity_mode != DEF_CAPACITY_UNIT
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 5fcb50c..698a154 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -4,6 +4,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/acpi.h>
#include <acpi/acpi_drivers.h>
@@ -113,6 +114,8 @@ static struct kset acpi_namespace_kset = {
static void acpi_device_register(struct acpi_device *device,
struct acpi_device *parent)
{
+ int err;
+
/*
* Linkage
* -------
@@ -138,7 +141,10 @@ static void acpi_device_register(struct acpi_device *device,
device->kobj.parent = &parent->kobj;
device->kobj.ktype = &ktype_acpi_ns;
device->kobj.kset = &acpi_namespace_kset;
- kobject_register(&device->kobj);
+ err = kobject_register(&device->kobj);
+ if (err < 0)
+ printk(KERN_WARNING "%s: kobject_register error: %d\n",
+ __FUNCTION__, err);
create_sysfs_device_files(device);
}
@@ -1450,7 +1456,9 @@ static int __init acpi_scan_init(void)
if (acpi_disabled)
return 0;
- kset_register(&acpi_namespace_kset);
+ result = kset_register(&acpi_namespace_kset);
+ if (result < 0)
+ printk(KERN_ERR PREFIX "kset_register error: %d\n", result);
result = bus_register(&acpi_bus_type);
if (result) {
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index f48227f..d0d84c4 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -262,7 +262,7 @@ acpi_evaluate_integer(acpi_handle handle,
if (!data)
return AE_BAD_PARAMETER;
- element = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+ element = kmalloc(sizeof(union acpi_object), irqs_disabled() ? GFP_ATOMIC: GFP_KERNEL);
if (!element)
return AE_NO_MEMORY;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index d7de1753..e9b0957 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -64,7 +64,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
"Node %d Mapped: %8lu kB\n"
"Node %d AnonPages: %8lu kB\n"
"Node %d PageTables: %8lu kB\n"
- "Node %d NFS Unstable: %8lu kB\n"
+ "Node %d NFS_Unstable: %8lu kB\n"
"Node %d Bounce: %8lu kB\n"
"Node %d Slab: %8lu kB\n",
nid, K(i.totalram),
diff --git a/drivers/cdrom/gscd.c b/drivers/cdrom/gscd.c
index b6ee50a..fa70824 100644
--- a/drivers/cdrom/gscd.c
+++ b/drivers/cdrom/gscd.c
@@ -266,7 +266,7 @@ repeat:
goto out;
if (req->cmd != READ) {
- printk("GSCD: bad cmd %lu\n", rq_data_dir(req));
+ printk("GSCD: bad cmd %u\n", rq_data_dir(req));
end_request(req, 0);
goto repeat;
}
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 4ea7bd5..a369dd6 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -142,6 +142,7 @@ typedef struct _moxa_board_conf {
static moxa_board_conf moxa_boards[MAX_BOARDS];
static void __iomem *moxaBaseAddr[MAX_BOARDS];
+static int loadstat[MAX_BOARDS];
struct moxa_str {
int type;
@@ -1688,6 +1689,8 @@ int MoxaDriverPoll(void)
if (moxaCard == 0)
return (-1);
for (card = 0; card < MAX_BOARDS; card++) {
+ if (loadstat[card] == 0)
+ continue;
if ((ports = moxa_boards[card].numPorts) == 0)
continue;
if (readb(moxaIntPend[card]) == 0xff) {
@@ -2903,6 +2906,7 @@ static int moxaloadcode(int cardno, unsigned char __user *tmp, int len)
}
break;
}
+ loadstat[cardno] = 1;
return (0);
}
@@ -2920,7 +2924,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
len1 = len >> 1;
ptr = (ushort *) moxaBuff;
for (i = 0; i < len1; i++)
- usum += *(ptr + i);
+ usum += le16_to_cpu(*(ptr + i));
retry = 0;
do {
len1 = len >> 1;
@@ -2992,7 +2996,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
wlen = len >> 1;
uptr = (ushort *) moxaBuff;
for (i = 0; i < wlen; i++)
- usum += uptr[i];
+ usum += le16_to_cpu(uptr[i]);
retry = 0;
j = 0;
do {
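The two moxa.c hunks wrap the firmware checksum words in le16_to_cpu() so the sum is computed over little-endian 16-bit words regardless of host byte order. A small stand-alone illustration of the same idea using glibc's le16toh(); the buffer contents and function name are made up for the example:

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t sum_le16_words(const unsigned char *buf, size_t len)
{
	uint16_t usum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2) {
		uint16_t w;

		memcpy(&w, buf + i, sizeof(w));	/* avoid unaligned access */
		usum += le16toh(w);		/* interpret as little endian */
	}
	return usum;
}

int main(void)
{
	/* Two LE words: 0x0201 and 0x0403; sum = 0x0604 on any host. */
	const unsigned char img[] = { 0x01, 0x02, 0x03, 0x04 };

	printf("checksum = 0x%04x\n", sum_le16_words(img, sizeof(img)));
	return 0;
}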
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index bfdb902..bb0d919 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -153,6 +153,15 @@ int tty_ioctl(struct inode * inode, struct file * file,
static int tty_fasync(int fd, struct file * filp, int on);
static void release_mem(struct tty_struct *tty, int idx);
+/**
+ * alloc_tty_struct - allocate a tty object
+ *
+ * Return a new empty tty structure. The data fields have not
+ * been initialized in any way but have been zeroed
+ *
+ * Locking: none
+ * FIXME: use kzalloc
+ */
static struct tty_struct *alloc_tty_struct(void)
{
@@ -166,6 +175,15 @@ static struct tty_struct *alloc_tty_struct(void)
static void tty_buffer_free_all(struct tty_struct *);
+/**
+ * free_tty_struct - free a disused tty
+ * @tty: tty struct to free
+ *
+ * Free the write buffers, tty queue and tty memory itself.
+ *
+ * Locking: none. Must be called after tty is definitely unused
+ */
+
static inline void free_tty_struct(struct tty_struct *tty)
{
kfree(tty->write_buf);
@@ -175,6 +193,17 @@ static inline void free_tty_struct(struct tty_struct *tty)
#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
+/**
+ * tty_name - return tty naming
+ * @tty: tty structure
+ * @buf: buffer for output
+ *
+ * Convert a tty structure into a name. The name reflects the kernel
+ * naming policy and if udev is in use may not reflect user space
+ *
+ * Locking: none
+ */
+
char *tty_name(struct tty_struct *tty, char *buf)
{
if (!tty) /* Hmm. NULL pointer. That's fun. */
@@ -235,6 +264,28 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
* Tty buffer allocation management
*/
+
+/**
+ * tty_buffer_free_all - free buffers used by a tty
+ * @tty: tty to free from
+ *
+ * Remove all the buffers pending on a tty whether queued with data
+ * or in the free ring. Must be called when the tty is no longer in use
+ *
+ * Locking: none
+ */
+
+
+/**
+ * tty_buffer_free_all - free buffers used by a tty
+ * @tty: tty to free from
+ *
+ * Remove all the buffers pending on a tty whether queued with data
+ * or in the free ring. Must be called when the tty is no longer in use
+ *
+ * Locking: none
+ */
+
static void tty_buffer_free_all(struct tty_struct *tty)
{
struct tty_buffer *thead;
@@ -247,19 +298,47 @@ static void tty_buffer_free_all(struct tty_struct *tty)
kfree(thead);
}
tty->buf.tail = NULL;
+ tty->buf.memory_used = 0;
}
+/**
+ * tty_buffer_init - prepare a tty buffer structure
+ * @tty: tty to initialise
+ *
+ * Set up the initial state of the buffer management for a tty device.
+ * Must be called before the other tty buffer functions are used.
+ *
+ * Locking: none
+ */
+
static void tty_buffer_init(struct tty_struct *tty)
{
spin_lock_init(&tty->buf.lock);
tty->buf.head = NULL;
tty->buf.tail = NULL;
tty->buf.free = NULL;
+ tty->buf.memory_used = 0;
}
-static struct tty_buffer *tty_buffer_alloc(size_t size)
+/**
+ * tty_buffer_alloc - allocate a tty buffer
+ * @tty: tty device
+ * @size: desired size (characters)
+ *
+ * Allocate a new tty buffer to hold the desired number of characters.
+ * Return NULL if out of memory or the allocation would exceed the
+ * per device queue
+ *
+ * Locking: Caller must hold tty->buf.lock
+ */
+
+static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size)
{
- struct tty_buffer *p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
+ struct tty_buffer *p;
+
+ if (tty->buf.memory_used + size > 65536)
+ return NULL;
+ p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
if(p == NULL)
return NULL;
p->used = 0;
@@ -269,17 +348,27 @@ static struct tty_buffer *tty_buffer_alloc(size_t size)
p->read = 0;
p->char_buf_ptr = (char *)(p->data);
p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
-/* printk("Flip create %p\n", p); */
+ tty->buf.memory_used += size;
return p;
}
-/* Must be called with the tty_read lock held. This needs to acquire strategy
- code to decide if we should kfree or relink a given expired buffer */
+/**
+ * tty_buffer_free - free a tty buffer
+ * @tty: tty owning the buffer
+ * @b: the buffer to free
+ *
+ * Free a tty buffer, or add it to the free list according to our
+ * internal strategy
+ *
+ * Locking: Caller must hold tty->buf.lock
+ */
static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
{
/* Dumb strategy for now - should keep some stats */
-/* printk("Flip dispose %p\n", b); */
+ tty->buf.memory_used -= b->size;
+ WARN_ON(tty->buf.memory_used < 0);
+
if(b->size >= 512)
kfree(b);
else {
@@ -288,6 +377,18 @@ static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b)
}
}
+/**
+ * tty_buffer_find - find a free tty buffer
+ * @tty: tty owning the buffer
+ * @size: characters wanted
+ *
+ * Locate an existing suitable tty buffer or if we are lacking one then
+ * allocate a new one. We round our buffers off in 256 character chunks
+ * to get better allocation behaviour.
+ *
+ * Locking: Caller must hold tty->buf.lock
+ */
+
static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
{
struct tty_buffer **tbh = &tty->buf.free;
@@ -299,20 +400,28 @@ static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
t->used = 0;
t->commit = 0;
t->read = 0;
- /* DEBUG ONLY */
-/* memset(t->data, '*', size); */
-/* printk("Flip recycle %p\n", t); */
+ tty->buf.memory_used += t->size;
return t;
}
tbh = &((*tbh)->next);
}
/* Round the buffer size out */
size = (size + 0xFF) & ~ 0xFF;
- return tty_buffer_alloc(size);
+ return tty_buffer_alloc(tty, size);
/* Should possibly check if this fails for the largest buffer we
have queued and recycle that ? */
}
+/**
+ * tty_buffer_request_room - grow tty buffer if needed
+ * @tty: tty structure
+ * @size: size desired
+ *
+ * Make at least size bytes of linear space available for the tty
+ * buffer. If we fail return the size we managed to find.
+ *
+ * Locking: Takes tty->buf.lock
+ */
int tty_buffer_request_room(struct tty_struct *tty, size_t size)
{
struct tty_buffer *b, *n;
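The tty buffer hunks above add per-tty accounting in tty->buf.memory_used, make tty_buffer_alloc() refuse to grow a device past 64 KiB, and let tty_buffer_free() park buffers smaller than 512 bytes on a free list for reuse by tty_buffer_find(). A stand-alone sketch of that budget-plus-free-list strategy; every name below is invented for the example and this is not the kernel implementation:

#include <stdio.h>
#include <stdlib.h>

#define POOL_LIMIT	65536
#define SMALL_BUF	512

struct buf {
	struct buf *next;
	size_t size;
	char data[];
};

struct pool {
	struct buf *free;	/* recycled small buffers */
	size_t used;		/* bytes currently charged */
};

static struct buf *pool_get(struct pool *p, size_t size)
{
	struct buf **link, *b;

	/* Reuse a parked buffer that is big enough. */
	for (link = &p->free; (b = *link) != NULL; link = &b->next) {
		if (b->size >= size) {
			*link = b->next;
			p->used += b->size;
			return b;
		}
	}
	if (p->used + size > POOL_LIMIT)
		return NULL;			/* budget exhausted */
	b = malloc(sizeof(*b) + size);
	if (!b)
		return NULL;
	b->size = size;
	p->used += size;
	return b;
}

static void pool_put(struct pool *p, struct buf *b)
{
	p->used -= b->size;
	if (b->size >= SMALL_BUF) {
		free(b);		/* big buffers go straight back */
	} else {
		b->next = p->free;	/* small ones are kept for reuse */
		p->free = b;
	}
}

int main(void)
{
	struct pool p = { NULL, 0 };
	struct buf *a = pool_get(&p, 256);
	struct buf *c = pool_get(&p, 65536);	/* exceeds the remaining budget */

	printf("a=%p c=%p used=%zu\n", (void *)a, (void *)c, p.used);
	pool_put(&p, a);
	printf("after put: used=%zu, free list %sempty\n",
	       p.used, p.free ? "not " : "");
	return 0;
}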
@@ -347,6 +456,18 @@ int tty_buffer_request_room(struct tty_struct *tty, size_t size)
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
+/**
+ * tty_insert_flip_string - Add characters to the tty buffer
+ * @tty: tty structure
+ * @chars: characters
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. All the characters
+ * passed are marked as without error. Returns the number added.
+ *
+ * Locking: Called functions may take tty->buf.lock
+ */
+
int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
size_t size)
{
@@ -370,6 +491,20 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
}
EXPORT_SYMBOL(tty_insert_flip_string);
+/**
+ * tty_insert_flip_string_flags - Add characters to the tty buffer
+ * @tty: tty structure
+ * @chars: characters
+ * @flags: flag bytes
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. For each character
+ * the flags array indicates the status of the character. Returns the
+ * number added.
+ *
+ * Locking: Called functions may take tty->buf.lock
+ */
+
int tty_insert_flip_string_flags(struct tty_struct *tty,
const unsigned char *chars, const char *flags, size_t size)
{
@@ -394,6 +529,17 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);
+/**
+ * tty_schedule_flip - push characters to ldisc
+ * @tty: tty to push from
+ *
+ * Takes any pending buffers and transfers their ownership to the
+ * ldisc side of the queue. It then schedules those characters for
+ * processing by the line discipline.
+ *
+ * Locking: Takes tty->buf.lock
+ */
+
void tty_schedule_flip(struct tty_struct *tty)
{
unsigned long flags;
@@ -405,12 +551,19 @@ void tty_schedule_flip(struct tty_struct *tty)
}
EXPORT_SYMBOL(tty_schedule_flip);
-/*
+/**
+ * tty_prepare_flip_string - make room for characters
+ * @tty: tty
+ * @chars: return pointer for character write area
+ * @size: desired size
+ *
* Prepare a block of space in the buffer for data. Returns the length
* available and buffer pointer to the space which is now allocated and
* accounted for as ready for normal characters. This is used for drivers
* that need their own block copy routines into the buffer. There is no
* guarantee the buffer is a DMA target!
+ *
+ * Locking: May call functions taking tty->buf.lock
*/
int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size)
@@ -427,12 +580,20 @@ int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
-/*
+/**
+ * tty_prepare_flip_string_flags - make room for characters
+ * @tty: tty
+ * @chars: return pointer for character write area
+ * @flags: return pointer for status flag write area
+ * @size: desired size
+ *
* Prepare a block of space in the buffer for data. Returns the length
* available and buffer pointer to the space which is now allocated and
* accounted for as ready for characters. This is used for drivers
* that need their own block copy routines into the buffer. There is no
* guarantee the buffer is a DMA target!
+ *
+ * Locking: May call functions taking tty->buf.lock
*/
int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size)
@@ -451,10 +612,16 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
-/*
+/**
+ * tty_set_termios_ldisc - set ldisc field
+ * @tty: tty structure
+ * @num: line discipline number
+ *
* This is probably overkill for real world processors but
* they are not on hot paths so a little discipline won't do
* any harm.
+ *
+ * Locking: takes termios_sem
*/
static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
@@ -474,6 +641,19 @@ static DEFINE_SPINLOCK(tty_ldisc_lock);
static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
static struct tty_ldisc tty_ldiscs[NR_LDISCS]; /* line disc dispatch table */
+/**
+ * tty_register_ldisc - install a line discipline
+ * @disc: ldisc number
+ * @new_ldisc: pointer to the ldisc object
+ *
+ * Installs a new line discipline into the kernel. The discipline
+ * is set up as unreferenced and then made available to the kernel
+ * from this point onwards.
+ *
+ * Locking:
+ * takes tty_ldisc_lock to guard against ldisc races
+ */
+
int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc)
{
unsigned long flags;
@@ -493,6 +673,18 @@ int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc)
}
EXPORT_SYMBOL(tty_register_ldisc);
+/**
+ * tty_unregister_ldisc - unload a line discipline
+ * @disc: ldisc number
+ * @new_ldisc: pointer to the ldisc object
+ *
+ * Remove a line discipline from the kernel providing it is not
+ * currently in use.
+ *
+ * Locking:
+ * takes tty_ldisc_lock to guard against ldisc races
+ */
+
int tty_unregister_ldisc(int disc)
{
unsigned long flags;
@@ -512,6 +704,19 @@ int tty_unregister_ldisc(int disc)
}
EXPORT_SYMBOL(tty_unregister_ldisc);
+/**
+ * tty_ldisc_get - take a reference to an ldisc
+ * @disc: ldisc number
+ *
+ * Takes a reference to a line discipline. Deals with refcounts and
+ * module locking counts. Returns NULL if the discipline is not available.
+ * Returns a pointer to the discipline and bumps the ref count if it is
+ * available
+ *
+ * Locking:
+ * takes tty_ldisc_lock to guard against ldisc races
+ */
+
struct tty_ldisc *tty_ldisc_get(int disc)
{
unsigned long flags;
@@ -540,6 +745,17 @@ struct tty_ldisc *tty_ldisc_get(int disc)
EXPORT_SYMBOL_GPL(tty_ldisc_get);
+/**
+ * tty_ldisc_put - drop ldisc reference
+ * @disc: ldisc number
+ *
+ * Drop a reference to a line discipline. Manage refcounts and
+ * module usage counts
+ *
+ * Locking:
+ * takes tty_ldisc_lock to guard against ldisc races
+ */
+
void tty_ldisc_put(int disc)
{
struct tty_ldisc *ld;
@@ -557,6 +773,19 @@ void tty_ldisc_put(int disc)
EXPORT_SYMBOL_GPL(tty_ldisc_put);
+/**
+ * tty_ldisc_assign - set ldisc on a tty
+ * @tty: tty to assign
+ * @ld: line discipline
+ *
+ * Install an instance of a line discipline into a tty structure. The
+ * ldisc must have a reference count above zero to ensure it remains/
+ * The tty instance refcount starts at zero.
+ *
+ * Locking:
+ * Caller must hold references
+ */
+
static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
{
tty->ldisc = *ld;
@@ -571,6 +800,8 @@ static void tty_ldisc_assign(struct tty_struct *tty, struct tty_ldisc *ld)
* the tty ldisc. Return 0 on failure or 1 on success. This is
* used to implement both the waiting and non waiting versions
* of tty_ldisc_ref
+ *
+ * Locking: takes tty_ldisc_lock
*/
static int tty_ldisc_try(struct tty_struct *tty)
@@ -602,6 +833,8 @@ static int tty_ldisc_try(struct tty_struct *tty)
* must also be careful not to hold other locks that will deadlock
* against a discipline change, such as an existing ldisc reference
* (which we check for)
+ *
+ * Locking: call functions take tty_ldisc_lock
*/
struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
@@ -622,6 +855,8 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
* Dereference the line discipline for the terminal and take a
* reference to it. If the line discipline is in flux then
* return NULL. Can be called from IRQ and timer functions.
+ *
+ * Locking: called functions take tty_ldisc_lock
*/
struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
@@ -639,6 +874,8 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref);
*
* Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May
* be called in IRQ context.
+ *
+ * Locking: takes tty_ldisc_lock
*/
void tty_ldisc_deref(struct tty_ldisc *ld)
@@ -683,6 +920,9 @@ static void tty_ldisc_enable(struct tty_struct *tty)
*
* Set the discipline of a tty line. Must be called from a process
* context.
+ *
+ * Locking: takes tty_ldisc_lock.
+ * called functions take termios_sem
*/
static int tty_set_ldisc(struct tty_struct *tty, int ldisc)
@@ -846,9 +1086,17 @@ restart:
return retval;
}
-/*
- * This routine returns a tty driver structure, given a device number
+/**
+ * get_tty_driver - find device of a tty
+ * @dev_t: device identifier
+ * @index: returns the index of the tty
+ *
+ * This routine returns a tty driver structure, given a device number
+ * and also passes back the index number.
+ *
+ * Locking: caller must hold tty_mutex
*/
+
static struct tty_driver *get_tty_driver(dev_t device, int *index)
{
struct tty_driver *p;
@@ -863,11 +1111,17 @@ static struct tty_driver *get_tty_driver(dev_t device, int *index)
return NULL;
}
-/*
- * If we try to write to, or set the state of, a terminal and we're
- * not in the foreground, send a SIGTTOU. If the signal is blocked or
- * ignored, go ahead and perform the operation. (POSIX 7.2)
+/**
+ * tty_check_change - check for POSIX terminal changes
+ * @tty: tty to check
+ *
+ * If we try to write to, or set the state of, a terminal and we're
+ * not in the foreground, send a SIGTTOU. If the signal is blocked or
+ * ignored, go ahead and perform the operation. (POSIX 7.2)
+ *
+ * Locking: none
*/
+
int tty_check_change(struct tty_struct * tty)
{
if (current->signal->tty != tty)
@@ -1005,10 +1259,27 @@ void tty_ldisc_flush(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_ldisc_flush);
-/*
- * This can be called by the "eventd" kernel thread. That is process synchronous,
- * but doesn't hold any locks, so we need to make sure we have the appropriate
- * locks for what we're doing..
+/**
+ * do_tty_hangup - actual handler for hangup events
+ * @data: tty device
+ *
+ * This can be called by the "eventd" kernel thread. That is process
+ * synchronous but doesn't hold any locks, so we need to make sure we
+ * have the appropriate locks for what we're doing.
+ *
+ * The hangup event clears any pending redirections onto the hung up
+ * device. It ensures future writes will error and it does the needed
+ * line discipline hangup and signal delivery. The tty object itself
+ * remains intact.
+ *
+ * Locking:
+ * BKL
+ * redirect lock for undoing redirection
+ * file list lock for manipulating list of ttys
+ * tty_ldisc_lock from called functions
+ * termios_sem resetting termios data
+ * tasklist_lock to walk task list for hangup event
+ *
*/
static void do_tty_hangup(void *data)
{
@@ -1133,6 +1404,14 @@ static void do_tty_hangup(void *data)
fput(f);
}
+/**
+ * tty_hangup - trigger a hangup event
+ * @tty: tty to hangup
+ *
+ * A carrier loss (virtual or otherwise) has occurred on this line;
+ * schedule a hangup sequence to run after this event.
+ */
+
void tty_hangup(struct tty_struct * tty)
{
#ifdef TTY_DEBUG_HANGUP
@@ -1145,6 +1424,15 @@ void tty_hangup(struct tty_struct * tty)
EXPORT_SYMBOL(tty_hangup);
+/**
+ * tty_vhangup - process vhangup
+ * @tty: tty to hangup
+ *
+ * The user has asked via system call for the terminal to be hung up.
+ * We do this synchronously so that when the syscall returns the process
+ * is complete. That guarantee is necessary for security reasons.
+ */
+
void tty_vhangup(struct tty_struct * tty)
{
#ifdef TTY_DEBUG_HANGUP
@@ -1156,6 +1444,14 @@ void tty_vhangup(struct tty_struct * tty)
}
EXPORT_SYMBOL(tty_vhangup);
+/**
+ * tty_hung_up_p - was tty hung up
+ * @filp: file pointer of tty
+ *
+ * Return true if the tty has been subject to a vhangup or a carrier
+ * loss
+ */
+
int tty_hung_up_p(struct file * filp)
{
return (filp->f_op == &hung_up_tty_fops);
@@ -1163,19 +1459,28 @@ int tty_hung_up_p(struct file * filp)
EXPORT_SYMBOL(tty_hung_up_p);
-/*
- * This function is typically called only by the session leader, when
- * it wants to disassociate itself from its controlling tty.
+/**
+ * disassociate_ctty - disconnect controlling tty
+ * @on_exit: true if exiting so need to "hang up" the session
+ *
+ * This function is typically called only by the session leader, when
+ * it wants to disassociate itself from its controlling tty.
*
- * It performs the following functions:
+ * It performs the following functions:
* (1) Sends a SIGHUP and SIGCONT to the foreground process group
* (2) Clears the tty from being controlling the session
* (3) Clears the controlling tty for all processes in the
* session group.
*
- * The argument on_exit is set to 1 if called when a process is
- * exiting; it is 0 if called by the ioctl TIOCNOTTY.
+ * The argument on_exit is set to 1 if called when a process is
+ * exiting; it is 0 if called by the ioctl TIOCNOTTY.
+ *
+ * Locking: tty_mutex is taken to protect current->signal->tty
+ * BKL is taken for hysterical raisins
+ * Tasklist lock is taken (under tty_mutex) to walk process
+ * lists for the session.
*/
+
void disassociate_ctty(int on_exit)
{
struct tty_struct *tty;
@@ -1222,6 +1527,25 @@ void disassociate_ctty(int on_exit)
unlock_kernel();
}
+
+/**
+ * stop_tty - propagate flow control
+ * @tty: tty to stop
+ *
+ * Perform flow control to the driver. For PTY/TTY pairs we
+ * must also propagate the TIOCPKT status. May be called
+ * on an already stopped device and will not re-call the driver
+ * method.
+ *
+ * This functionality is used by both the line disciplines for
+ * halting incoming flow and by the driver. It may therefore be
+ * called from any context, may be under the tty atomic_write_lock
+ * but not always.
+ *
+ * Locking:
+ * Broken. Relies on BKL which is unsafe here.
+ */
+
void stop_tty(struct tty_struct *tty)
{
if (tty->stopped)
@@ -1238,6 +1562,19 @@ void stop_tty(struct tty_struct *tty)
EXPORT_SYMBOL(stop_tty);
+/**
+ * start_tty - propagate flow control
+ * @tty: tty to start
+ *
+ * Start a tty that has been stopped if at all possible. Perform
+ * any necessary wakeups and propagate the TIOCPKT status. If the
+ * tty was previously stopped and is being started then the
+ * driver start method is invoked and the line discipline woken.
+ *
+ * Locking:
+ * Broken. Relies on BKL which is unsafe here.
+ */
+
void start_tty(struct tty_struct *tty)
{
if (!tty->stopped || tty->flow_stopped)
@@ -1258,6 +1595,23 @@ void start_tty(struct tty_struct *tty)
EXPORT_SYMBOL(start_tty);
+/**
+ * tty_read - read method for tty device files
+ * @file: pointer to tty file
+ * @buf: user buffer
+ * @count: size of user buffer
+ * @ppos: unused
+ *
+ * Perform the read system call function on this terminal device. Checks
+ * for hung up devices before calling the line discipline method.
+ *
+ * Locking:
+ * Locks the line discipline internally while needed
+ * For historical reasons the line discipline read method is
+ * invoked under the BKL. This will go away in time so do not rely on it
+ * in new code. Multiple read calls may be outstanding in parallel.
+ */
+
static ssize_t tty_read(struct file * file, char __user * buf, size_t count,
loff_t *ppos)
{
@@ -1302,6 +1656,7 @@ static inline ssize_t do_tty_write(
ssize_t ret = 0, written = 0;
unsigned int chunk;
+ /* FIXME: O_NDELAY ... */
if (mutex_lock_interruptible(&tty->atomic_write_lock)) {
return -ERESTARTSYS;
}
@@ -1318,6 +1673,9 @@ static inline ssize_t do_tty_write(
* layer has problems with bigger chunks. It will
* claim to be able to handle more characters than
* it actually does.
+ *
+ * FIXME: This can probably go away now except that 64K chunks
+ * are too likely to fail unless switched to vmalloc...
*/
chunk = 2048;
if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
@@ -1375,6 +1733,24 @@ static inline ssize_t do_tty_write(
}
+/**
+ * tty_write - write method for tty device file
+ * @file: tty file pointer
+ * @buf: user data to write
+ * @count: bytes to write
+ * @ppos: unused
+ *
+ * Write data to a tty device via the line discipline.
+ *
+ * Locking:
+ * Locks the line discipline as required
+ * Writes to the tty driver are serialized by the atomic_write_lock
+ * and are then processed in chunks to the device. The line discipline
+ * write method will not be invoked in parallel for each device.
+ * The line discipline write method is called under the big
+ * kernel lock for historical reasons. New code should not rely on this.
+ */
+
static ssize_t tty_write(struct file * file, const char __user * buf, size_t count,
loff_t *ppos)
{
@@ -1422,7 +1798,18 @@ ssize_t redirected_tty_write(struct file * file, const char __user * buf, size_t
static char ptychar[] = "pqrstuvwxyzabcde";
-static inline void pty_line_name(struct tty_driver *driver, int index, char *p)
+/**
+ * pty_line_name - generate name for a pty
+ * @driver: the tty driver in use
+ * @index: the minor number
+ * @p: output buffer of at least 6 bytes
+ *
+ * Generate a name from a driver reference and write it to the output
+ * buffer.
+ *
+ * Locking: None
+ */
+static void pty_line_name(struct tty_driver *driver, int index, char *p)
{
int i = index + driver->name_base;
/* ->name is initialized to "ttyp", but "tty" is expected */
@@ -1431,24 +1818,53 @@ static inline void pty_line_name(struct tty_driver *driver, int index, char *p)
ptychar[i >> 4 & 0xf], i & 0xf);
}
-static inline void tty_line_name(struct tty_driver *driver, int index, char *p)
+/**
+ * tty_line_name - generate name for a tty
+ * @driver: the tty driver in use
+ * @index: the minor number
+ * @p: output buffer of at least 7 bytes
+ *
+ * Generate a name from a driver reference and write it to the output
+ * buffer.
+ *
+ * Locking: None
+ */
+static void tty_line_name(struct tty_driver *driver, int index, char *p)
{
sprintf(p, "%s%d", driver->name, index + driver->name_base);
}
-/*
+/**
+ * init_dev - initialise a tty device
+ * @driver: tty driver we are opening a device on
+ * @idx: device index
+ * @tty: returned tty structure
+ *
+ * Prepare a tty device. This may not be a "new" clean device but
+ * could also be an active device. The pty drivers require special
+ * handling because of this.
+ *
+ * Locking:
+ * The function is called under the tty_mutex, which
+ * protects us from the tty struct or driver itself going away.
+ *
+ * On exit the tty device has the line discipline attached and
+ * a reference count of 1. If a pair was created for pty/tty use
+ * and the other was a pty master then it too has a reference count of 1.
+ *
* WSH 06/09/97: Rewritten to remove races and properly clean up after a
* failed open. The new code protects the open with a mutex, so it's
* really quite straightforward. The mutex locking can probably be
* relaxed for the (most common) case of reopening a tty.
*/
+
static int init_dev(struct tty_driver *driver, int idx,
struct tty_struct **ret_tty)
{
struct tty_struct *tty, *o_tty;
struct termios *tp, **tp_loc, *o_tp, **o_tp_loc;
struct termios *ltp, **ltp_loc, *o_ltp, **o_ltp_loc;
- int retval=0;
+ int retval = 0;
/* check whether we're reopening an existing tty */
if (driver->flags & TTY_DRIVER_DEVPTS_MEM) {
@@ -1662,10 +2078,20 @@ release_mem_out:
goto end_init;
}
-/*
- * Releases memory associated with a tty structure, and clears out the
- * driver table slots.
+/**
+ * release_mem - release tty structure memory
+ *
+ * Releases memory associated with a tty structure, and clears out the
+ * driver table slots. This function is called when a device is no longer
+ * in use. It also gets called when setup of a device fails.
+ *
+ * Locking:
+ * tty_mutex - sometimes only
+ * takes the file list lock internally when working on the list
+ * of ttys that the driver keeps.
+ * FIXME: should we require tty_mutex is held here ??
*/
+
static void release_mem(struct tty_struct *tty, int idx)
{
struct tty_struct *o_tty;
@@ -2006,18 +2432,27 @@ static void release_dev(struct file * filp)
}
-/*
- * tty_open and tty_release keep up the tty count that contains the
- * number of opens done on a tty. We cannot use the inode-count, as
- * different inodes might point to the same tty.
+/**
+ * tty_open - open a tty device
+ * @inode: inode of device file
+ * @filp: file pointer to tty
+ *
+ * tty_open and tty_release keep up the tty count that contains the
+ * number of opens done on a tty. We cannot use the inode-count, as
+ * different inodes might point to the same tty.
*
- * Open-counting is needed for pty masters, as well as for keeping
- * track of serial lines: DTR is dropped when the last close happens.
- * (This is not done solely through tty->count, now. - Ted 1/27/92)
+ * Open-counting is needed for pty masters, as well as for keeping
+ * track of serial lines: DTR is dropped when the last close happens.
+ * (This is not done solely through tty->count, now. - Ted 1/27/92)
*
- * The termios state of a pty is reset on first open so that
- * settings don't persist across reuse.
+ * The termios state of a pty is reset on first open so that
+ * settings don't persist across reuse.
+ *
+ * Locking: tty_mutex protects current->signal->tty, get_tty_driver and
+ * init_dev work. tty->count should protect the rest.
+ * task_lock is held to update task details for sessions
*/
+
static int tty_open(struct inode * inode, struct file * filp)
{
struct tty_struct *tty;
@@ -2132,6 +2567,18 @@ got_driver:
}
#ifdef CONFIG_UNIX98_PTYS
+/**
+ * ptmx_open - open a unix 98 pty master
+ * @inode: inode of device file
+ * @filp: file pointer to tty
+ *
+ * Allocate a unix98 pty master device from the ptmx driver.
+ *
+ * Locking: tty_mutex protects the init_dev work. tty->count should
+ * protect the rest.
+ * allocated_ptys_lock handles the list of free pty numbers
+ */
+
static int ptmx_open(struct inode * inode, struct file * filp)
{
struct tty_struct *tty;
@@ -2191,6 +2638,18 @@ out:
}
#endif
+/**
+ * tty_release - vfs callback for close
+ * @inode: inode of tty
+ * @filp: file pointer for handle to tty
+ *
+ * Called the last time each file handle is closed that references
+ * this tty. There may however be several such references.
+ *
+ * Locking:
+ * Takes bkl. See release_dev
+ */
+
static int tty_release(struct inode * inode, struct file * filp)
{
lock_kernel();
@@ -2199,7 +2658,18 @@ static int tty_release(struct inode * inode, struct file * filp)
return 0;
}
-/* No kernel lock held - fine */
+/**
+ * tty_poll - check tty status
+ * @filp: file being polled
+ * @wait: poll wait structures to update
+ *
+ * Call the line discipline polling method to obtain the poll
+ * status of the device.
+ *
+ * Locking: locks called line discipline but ldisc poll method
+ * may be re-entered freely by other callers.
+ */
+
static unsigned int tty_poll(struct file * filp, poll_table * wait)
{
struct tty_struct * tty;
@@ -2243,6 +2713,21 @@ static int tty_fasync(int fd, struct file * filp, int on)
return 0;
}
+/**
+ * tiocsti - fake input character
+ * @tty: tty to fake input into
+ * @p: pointer to character
+ *
+ * Fake input to a tty device. Does the necessary locking and
+ * input management.
+ *
+ * FIXME: does not honour flow control ??
+ *
+ * Locking:
+ * Called functions take tty_ldisc_lock
+ * current->signal->tty check is safe without locks
+ */
+
static int tiocsti(struct tty_struct *tty, char __user *p)
{
char ch, mbz = 0;
@@ -2258,6 +2743,18 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
return 0;
}
+/**
+ * tiocgwinsz - implement window query ioctl
+ * @tty: tty
+ * @arg: user buffer for result
+ *
+ * Copies the kernel idea of the window size into the user buffer. No
+ * locking is done.
+ *
+ * FIXME: Returning random values racing a window size set is wrong
+ * should lock here against that
+ */
+
static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg)
{
if (copy_to_user(arg, &tty->winsize, sizeof(*arg)))
@@ -2265,6 +2762,24 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg)
return 0;
}
+/**
+ * tiocswinsz - implement window size set ioctl
+ * @tty: tty
+ * @arg: user buffer for result
+ *
+ * Copies the user idea of the window size to the kernel. Traditionally
+ * this is just advisory information but for the Linux console it
+ * actually has driver level meaning and triggers a VC resize.
+ *
+ * Locking:
+ * The console_sem is used to ensure we do not try and resize
+ * the console twice at once.
+ * FIXME: Two racing size sets may leave the console and kernel
+ * parameters disagreeing. Is this exploitable ?
+ * FIXME: Random values racing a window size get is wrong
+ * should lock here against that
+ */
+
static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
struct winsize __user * arg)
{
@@ -2294,6 +2809,15 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
return 0;
}
+/**
+ * tioccons - allow admin to move logical console
+ * @file: the file to become console
+ *
+ * Allow the administrator to move the redirected console device
+ *
+ * Locking: uses redirect_lock to guard the redirect information
+ */
+
static int tioccons(struct file *file)
{
if (!capable(CAP_SYS_ADMIN))
@@ -2319,6 +2843,17 @@ static int tioccons(struct file *file)
return 0;
}
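
For illustration, the console redirection handled by tioccons() is requested from userspace with the TIOCCONS ioctl on the terminal that should receive console output. A hedged sketch, assuming /dev/tty is the desired target:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/tty", O_WRONLY);	/* terminal to redirect to */

	/* Needs CAP_SYS_ADMIN, exactly as tioccons() checks. */
	if (fd < 0 || ioctl(fd, TIOCCONS, 0) < 0) {
		perror("TIOCCONS");
		return 1;
	}
	return 0;
}
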
+/**
+ * fionbio - non blocking ioctl
+ * @file: file to set blocking value
+ * @p: user parameter
+ *
+ * Historical tty interfaces had a blocking control ioctl before
+ * the generic functionality existed. This piece of history is preserved
+ * in the expected tty API of POSIX OSes.
+ *
+ * Locking: none; the open file handle ensures it won't go away.
+ */
static int fionbio(struct file *file, int __user *p)
{
@@ -2334,6 +2869,23 @@ static int fionbio(struct file *file, int __user *p)
return 0;
}
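
For reference, FIONBIO is simply the historical way of toggling O_NONBLOCK on an open file handle. A minimal userspace sketch, illustrative only:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int on = 1;

	/* Same effect as fcntl(fd, F_SETFL, flags | O_NONBLOCK). */
	if (ioctl(STDIN_FILENO, FIONBIO, &on) < 0) {
		perror("FIONBIO");
		return 1;
	}
	return 0;
}
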
+/**
+ * tiocsctty - set controlling tty
+ * @tty: tty structure
+ * @arg: user argument
+ *
+ * This ioctl is used to manage job control. It permits a session
+ * leader to set this tty as the controlling tty for the session.
+ *
+ * Locking:
+ * Takes tasklist lock internally to walk sessions
+ * Takes task_lock() when updating signal->tty
+ *
+ * FIXME: tty_mutex is needed to protect signal->tty references.
+ * FIXME: why task_lock on the signal->tty reference ??
+ *
+ */
+
static int tiocsctty(struct tty_struct *tty, int arg)
{
struct task_struct *p;
@@ -2374,6 +2926,18 @@ static int tiocsctty(struct tty_struct *tty, int arg)
return 0;
}
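
The usual caller of this path is a getty or terminal manager that first becomes a session leader and then claims a tty as its controlling terminal. A hedged userspace sketch (the device path is only a placeholder):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd;

	if (setsid() < 0)	/* must be a session leader first */
		perror("setsid");

	fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* placeholder device */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* a non-zero arg lets a privileged caller steal a tty in use elsewhere */
	if (ioctl(fd, TIOCSCTTY, 0) < 0) {
		perror("TIOCSCTTY");
		return 1;
	}
	return 0;
}
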
+/**
+ * tiocgpgrp - get process group
+ * @tty: tty passed by user
+ * @real_tty: tty side of the tty passed by the user if a pty, else the tty
+ * @p: returned pid
+ *
+ * Obtain the process group of the tty. If there is no process group
+ * return an error.
+ *
+ * Locking: none. Reference to ->signal->tty is safe.
+ */
+
static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
{
/*
@@ -2385,6 +2949,20 @@ static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
return put_user(real_tty->pgrp, p);
}
+/**
+ * tiocspgrp - attempt to set process group
+ * @tty: tty passed by user
+ * @real_tty: tty side device matching tty passed by user
+ * @p: pid pointer
+ *
+ * Set the foreground process group of the tty to the group passed.
+ * Only permitted where the tty session is our session.
+ *
+ * Locking: None
+ *
+ * FIXME: current->signal->tty referencing is unsafe.
+ */
+
static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
{
pid_t pgrp;
@@ -2408,6 +2986,18 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
return 0;
}
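
These two ioctls are what tcgetpgrp() and tcsetpgrp() resolve to; a shell's job control is built on them. A minimal userspace sketch, not part of the patch:

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	pid_t pgrp;

	if (ioctl(STDIN_FILENO, TIOCGPGRP, &pgrp) < 0) {	/* tcgetpgrp() */
		perror("TIOCGPGRP");
		return 1;
	}
	printf("foreground pgrp: %d\n", (int)pgrp);

	/* Hand the terminal to our own process group via tiocspgrp(). */
	pgrp = getpgrp();
	if (ioctl(STDIN_FILENO, TIOCSPGRP, &pgrp) < 0)		/* tcsetpgrp() */
		perror("TIOCSPGRP");
	return 0;
}
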
+/**
+ * tiocgsid - get session id
+ * @tty: tty passed by user
+ * @real_tty: tty side of the tty passed by the user if a pty, else the tty
+ * @p: pointer to returned session id
+ *
+ * Obtain the session id of the tty. If there is no session
+ * return an error.
+ *
+ * Locking: none. Reference to ->signal->tty is safe.
+ */
+
static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
{
/*
@@ -2421,6 +3011,16 @@ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t _
return put_user(real_tty->session, p);
}
+/**
+ * tiocsetd - set line discipline
+ * @tty: tty device
+ * @p: pointer to user data
+ *
+ * Set the line discipline according to user request.
+ *
+ * Locking: see tty_set_ldisc(); this function is just a helper
+ */
+
static int tiocsetd(struct tty_struct *tty, int __user *p)
{
int ldisc;
@@ -2430,6 +3030,21 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
return tty_set_ldisc(tty, ldisc);
}
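
This is the path tools such as slattach or pppd use with their own discipline numbers (N_SLIP, N_PPP). A hedged sketch that merely selects the default discipline; the fallback define is only for headers that lack N_TTY:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef N_TTY
#define N_TTY 0		/* default line discipline number */
#endif

int main(void)
{
	int ldisc = N_TTY;

	/* Ends up in tiocsetd() -> tty_set_ldisc(). */
	if (ioctl(STDIN_FILENO, TIOCSETD, &ldisc) < 0) {
		perror("TIOCSETD");
		return 1;
	}
	return 0;
}
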
+/**
+ * send_break - perform a timed break
+ * @tty: device to break on
+ * @duration: timeout in ms
+ *
+ * Perform a timed break on hardware that lacks its own driver level
+ * timed break functionality.
+ *
+ * Locking:
+ * None
+ *
+ * FIXME:
+ * What if two calls overlap?
+ */
+
static int send_break(struct tty_struct *tty, unsigned int duration)
{
tty->driver->break_ctl(tty, -1);
@@ -2442,8 +3057,19 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
return 0;
}
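
Userspace normally reaches this through tcsendbreak(), which issues TCSBRK; the tty core then performs the timed break described above when the driver does not time it itself. A minimal sketch, illustrative only:

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	/* A zero duration requests the default (roughly 0.25 s) break. */
	if (tcsendbreak(STDOUT_FILENO, 0) < 0) {
		perror("tcsendbreak");
		return 1;
	}
	return 0;
}
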
-static int
-tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p)
+/**
+ * tty_tiocmget - get modem status
+ * @tty: tty device
+ * @file: user file pointer
+ * @p: pointer to result
+ *
+ * Obtain the modem status bits from the tty driver if the feature
+ * is supported. Return -EINVAL if it is not available.
+ *
+ * Locking: none (up to the driver)
+ */
+
+static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p)
{
int retval = -EINVAL;
@@ -2456,8 +3082,20 @@ tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p)
return retval;
}
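
For reference, the modem-control bits are read with TIOCMGET and changed with TIOCMBIS/TIOCMBIC/TIOCMSET, which funnel into the set helper documented just below. A hedged sketch; in practice it would be run against a serial port fd rather than stdin:

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int bits, dtr = TIOCM_DTR;

	if (ioctl(STDIN_FILENO, TIOCMGET, &bits) < 0) {		/* tty_tiocmget() */
		perror("TIOCMGET");
		return 1;
	}
	printf("DTR=%d RTS=%d CD=%d\n",
	       !!(bits & TIOCM_DTR), !!(bits & TIOCM_RTS), !!(bits & TIOCM_CD));

	if (ioctl(STDIN_FILENO, TIOCMBIC, &dtr) < 0)		/* drop DTR */
		perror("TIOCMBIC");
	return 0;
}
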
-static int
-tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd,
+/**
+ * tty_tiocmset - set modem status
+ * @tty: tty device
+ * @file: user file pointer
+ * @cmd: command - clear bits, set bits or set all
+ * @p: pointer to desired bits
+ *
+ * Set the modem status bits from the tty driver if the feature
+ * is supported. Return -EINVAL if it is not available.
+ *
+ * Locking: none (up to the driver)
+ */
+
+static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int cmd,
unsigned __user *p)
{
int retval = -EINVAL;
@@ -2573,6 +3211,7 @@ int tty_ioctl(struct inode * inode, struct file * file,
clear_bit(TTY_EXCLUSIVE, &tty->flags);
return 0;
case TIOCNOTTY:
+ /* FIXME: task lock or tty_mutex? */
if (current->signal->tty != tty)
return -ENOTTY;
if (current->signal->leader)
@@ -2753,9 +3392,16 @@ void do_SAK(struct tty_struct *tty)
EXPORT_SYMBOL(do_SAK);
-/*
- * This routine is called out of the software interrupt to flush data
- * from the buffer chain to the line discipline.
+/**
+ * flush_to_ldisc - flush data to the line discipline
+ * @private_: tty structure passed from work queue.
+ *
+ * This routine is called out of the software interrupt to flush data
+ * from the buffer chain to the line discipline.
+ *
+ * Locking: holds tty->buf.lock to guard buffer list. Drops the lock
+ * while invoking the line discipline receive_buf method. The
+ * receive_buf method is single threaded for each tty instance.
*/
static void flush_to_ldisc(void *private_)
@@ -2831,6 +3477,8 @@ static int n_baud_table = ARRAY_SIZE(baud_table);
* Convert termios baud rate data into a speed. This should be called
* with the termios lock held if this termios is a terminal termios
* structure. May change the termios data.
+ *
+ * Locking: none
*/
int tty_termios_baud_rate(struct termios *termios)
@@ -2859,6 +3507,8 @@ EXPORT_SYMBOL(tty_termios_baud_rate);
* Returns the baud rate as an integer for this terminal. The
* termios lock must be held by the caller and the terminal bit
* flags may be updated.
+ *
+ * Locking: none
*/
int tty_get_baud_rate(struct tty_struct *tty)
@@ -2888,6 +3538,8 @@ EXPORT_SYMBOL(tty_get_baud_rate);
*
* In the event of the queue being busy for flipping the work will be
* held off and retried later.
+ *
+ * Locking: tty buffer lock. Driver locks in low latency mode.
*/
void tty_flip_buffer_push(struct tty_struct *tty)
@@ -2907,9 +3559,16 @@ void tty_flip_buffer_push(struct tty_struct *tty)
EXPORT_SYMBOL(tty_flip_buffer_push);
-/*
- * This subroutine initializes a tty structure.
+/**
+ * initialize_tty_struct - initialize a tty structure
+ * @tty: tty to initialize
+ *
+ * This subroutine initializes a tty structure that has been newly
+ * allocated.
+ *
+ * Locking: none - tty in question must not be exposed at this point
*/
+
static void initialize_tty_struct(struct tty_struct *tty)
{
memset(tty, 0, sizeof(struct tty_struct));
@@ -2935,6 +3594,7 @@ static void initialize_tty_struct(struct tty_struct *tty)
/*
* The default put_char routine if the driver did not define one.
*/
+
static void tty_default_put_char(struct tty_struct *tty, unsigned char ch)
{
tty->driver->write(tty, &ch, 1);
@@ -2943,19 +3603,23 @@ static void tty_default_put_char(struct tty_struct *tty, unsigned char ch)
static struct class *tty_class;
/**
- * tty_register_device - register a tty device
- * @driver: the tty driver that describes the tty device
- * @index: the index in the tty driver for this tty device
- * @device: a struct device that is associated with this tty device.
- * This field is optional, if there is no known struct device for this
- * tty device it can be set to NULL safely.
+ * tty_register_device - register a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
+ * @device: a struct device that is associated with this tty device.
+ * This field is optional, if there is no known struct device
+ * for this tty device it can be set to NULL safely.
*
- * Returns a pointer to the class device (or ERR_PTR(-EFOO) on error).
+ * Returns a pointer to the class device (or ERR_PTR(-EFOO) on error).
*
- * This call is required to be made to register an individual tty device if
- * the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If that
- * bit is not set, this function should not be called by a tty driver.
+ * This call is required to be made to register an individual tty device
+ * if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If
+ * that bit is not set, this function should not be called by a tty
+ * driver.
+ *
+ * Locking: ??
*/
+
struct class_device *tty_register_device(struct tty_driver *driver,
unsigned index, struct device *device)
{
@@ -2977,13 +3641,16 @@ struct class_device *tty_register_device(struct tty_driver *driver,
}
/**
- * tty_unregister_device - unregister a tty device
- * @driver: the tty driver that describes the tty device
- * @index: the index in the tty driver for this tty device
+ * tty_unregister_device - unregister a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
*
- * If a tty device is registered with a call to tty_register_device() then
- * this function must be made when the tty device is gone.
+ * If a tty device is registered with a call to tty_register_device() then
+ * this function must be called when the tty device is gone.
+ *
+ * Locking: ??
*/
+
void tty_unregister_device(struct tty_driver *driver, unsigned index)
{
class_device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index);
@@ -3094,7 +3761,6 @@ int tty_register_driver(struct tty_driver *driver)
driver->cdev.owner = driver->owner;
error = cdev_add(&driver->cdev, dev, driver->num);
if (error) {
- cdev_del(&driver->cdev);
unregister_chrdev_region(dev, driver->num);
driver->ttys = NULL;
driver->termios = driver->termios_locked = NULL;
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index f19cf9d..4ad47d3 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -36,6 +36,18 @@
#define TERMIOS_WAIT 2
#define TERMIOS_TERMIO 4
+
+/**
+ * tty_wait_until_sent - wait for I/O to finish
+ * @tty: tty we are waiting for
+ * @timeout: how long we will wait
+ *
+ * Wait for characters pending in a tty driver to hit the wire, or
+ * for a timeout to occur (e.g. due to flow control).
+ *
+ * Locking: none
+ */
+
void tty_wait_until_sent(struct tty_struct * tty, long timeout)
{
DECLARE_WAITQUEUE(wait, current);
@@ -94,6 +106,18 @@ static void unset_locked_termios(struct termios *termios,
old->c_cc[i] : termios->c_cc[i];
}
+/**
+ * change_termios - update termios values
+ * @tty: tty to update
+ * @new_termios: desired new value
+ *
+ * Perform updates to the termios values set on this terminal. There
+ * is a bit of layering violation here with n_tty in terms of the
+ * internal knowledge of this function.
+ *
+ * Locking: termios_sem
+ */
+
static void change_termios(struct tty_struct * tty, struct termios * new_termios)
{
int canon_change;
@@ -155,6 +179,19 @@ static void change_termios(struct tty_struct * tty, struct termios * new_termios
up(&tty->termios_sem);
}
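
change_termios() is the kernel end of tcsetattr(); the classic raw-mode toggle below exercises exactly this path. A minimal userspace sketch (cfmakeraw() is a common libc extension, used here only for brevity):

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios tio, saved;

	if (tcgetattr(STDIN_FILENO, &tio) < 0) {
		perror("tcgetattr");
		return 1;
	}
	saved = tio;
	cfmakeraw(&tio);				/* clear ICANON, ECHO, ... */
	tcsetattr(STDIN_FILENO, TCSANOW, &tio);		/* -> set_termios()/change_termios() */

	/* ... terminal is in raw mode here ... */

	tcsetattr(STDIN_FILENO, TCSANOW, &saved);	/* restore */
	return 0;
}
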
+/**
+ * set_termios - set termios values for a tty
+ * @tty: terminal device
+ * @arg: user data
+ * @opt: option information
+ *
+ * Helper function to prepare termios data and run necessary other
+ * functions before using change_termios to do the actual changes.
+ *
+ * Locking:
+ * Called functions take ldisc and termios_sem locks
+ */
+
static int set_termios(struct tty_struct * tty, void __user *arg, int opt)
{
struct termios tmp_termios;
@@ -284,6 +321,17 @@ static void set_sgflags(struct termios * termios, int flags)
}
}
+/**
+ * set_sgttyb - set legacy terminal values
+ * @tty: tty structure
+ * @sgttyb: pointer to old style terminal structure
+ *
+ * Updates a terminal from the legacy BSD style terminal information
+ * structure.
+ *
+ * Locking: termios_sem
+ */
+
static int set_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb)
{
int retval;
@@ -369,9 +417,16 @@ static int set_ltchars(struct tty_struct * tty, struct ltchars __user * ltchars)
}
#endif
-/*
- * Send a high priority character to the tty.
+/**
+ * send_prio_char - send priority character
+ * @tty: tty to send on
+ * @ch: character to send
+ *
+ * Send a high priority character to the tty even if stopped.
+ *
+ * Locking: none
+ *
+ * FIXME: overlapping calls with start/stop tty lose state of tty
*/
+
static void send_prio_char(struct tty_struct *tty, char ch)
{
int was_stopped = tty->stopped;
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index eccffaf..a5628a8 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -1011,6 +1011,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
return -EPERM;
vt_dont_switch = 0;
return 0;
+ case VT_GETHIFONTMASK:
+ return put_user(vc->vc_hi_font_mask, (unsigned short __user *)arg);
default:
return -ENOIOCTLCMD;
}
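
The newly added VT_GETHIFONTMASK ioctl returns the attribute bit the console uses to mark 512-glyph ("high font") characters. A hedged userspace sketch; the fallback define mirrors the value in linux/vt.h for older headers:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>

#ifndef VT_GETHIFONTMASK
#define VT_GETHIFONTMASK 0x560D		/* value from linux/vt.h */
#endif

int main(void)
{
	unsigned short mask;
	int fd = open("/dev/tty0", O_RDONLY);

	if (fd < 0 || ioctl(fd, VT_GETHIFONTMASK, &mask) < 0) {
		perror("VT_GETHIFONTMASK");
		return 1;
	}
	printf("hi-font mask: 0x%04x\n", mask);		/* typically 0x0100 */
	return 0;
}
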
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index cc15c4f..35ad1b0 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -26,6 +26,7 @@
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/err.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@@ -64,17 +65,17 @@
#define ABIT_UGURU_IN_SENSOR 0
#define ABIT_UGURU_TEMP_SENSOR 1
#define ABIT_UGURU_NC 2
-/* Timeouts / Retries, if these turn out to need a lot of fiddling we could
- convert them to params. */
-/* 250 was determined by trial and error, 200 works most of the time, but not
- always. I assume this is cpu-speed independent, since the ISA-bus and not
- the CPU should be the bottleneck. Note that 250 sometimes is still not
- enough (only reported on AN7 mb) this is handled by a higher layer. */
-#define ABIT_UGURU_WAIT_TIMEOUT 250
+/* In many cases we need to wait for the uGuru to reach a certain status. Most
+ of the time it will reach this status within 30 - 90 ISA reads, and thus we
+ can best busy wait. This define gives the total number of reads to try. */
+#define ABIT_UGURU_WAIT_TIMEOUT 125
+/* However sometimes older versions of the uGuru seem to be distracted and they
+ do not respond for a long time. To handle this we sleep before each of the
+ last ABIT_UGURU_WAIT_TIMEOUT_SLEEP tries. */
+#define ABIT_UGURU_WAIT_TIMEOUT_SLEEP 5
/* Normally all expected status in abituguru_ready, are reported after the
- first read, but sometimes not and we need to poll, 5 polls was not enough
- 50 sofar is. */
-#define ABIT_UGURU_READY_TIMEOUT 50
+ first read, but sometimes not and we need to poll. */
+#define ABIT_UGURU_READY_TIMEOUT 5
/* Maximum 3 retries on timedout reads/writes, delay 200 ms before retrying */
#define ABIT_UGURU_MAX_RETRIES 3
#define ABIT_UGURU_RETRY_DELAY (HZ/5)
@@ -226,6 +227,10 @@ static int abituguru_wait(struct abituguru_data *data, u8 state)
timeout--;
if (timeout == 0)
return -EBUSY;
+ /* sleep a bit before our last few tries, see the comment where
+ ABIT_UGURU_WAIT_TIMEOUT_SLEEP is defined. */
+ if (timeout <= ABIT_UGURU_WAIT_TIMEOUT_SLEEP)
+ msleep(0);
}
return 0;
}
@@ -256,6 +261,7 @@ static int abituguru_ready(struct abituguru_data *data)
"CMD reg does not hold 0xAC after ready command\n");
return -EIO;
}
+ msleep(0);
}
/* After this the ABIT_UGURU_DATA port should contain
@@ -268,6 +274,7 @@ static int abituguru_ready(struct abituguru_data *data)
"state != more input after ready command\n");
return -EIO;
}
+ msleep(0);
}
data->uguru_ready = 1;
@@ -331,7 +338,8 @@ static int abituguru_read(struct abituguru_data *data,
/* And read the data */
for (i = 0; i < count; i++) {
if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) {
- ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for "
+ ABIT_UGURU_DEBUG(retries ? 1 : 3,
+ "timeout exceeded waiting for "
"read state (bank: %d, sensor: %d)\n",
(int)bank_addr, (int)sensor_addr);
break;
@@ -350,7 +358,9 @@ static int abituguru_read(struct abituguru_data *data,
static int abituguru_write(struct abituguru_data *data,
u8 bank_addr, u8 sensor_addr, u8 *buf, int count)
{
- int i;
+ /* We use the ready timeout as we have to wait for 0xAC just like the
+ ready function */
+ int i, timeout = ABIT_UGURU_READY_TIMEOUT;
/* Send the address */
i = abituguru_send_address(data, bank_addr, sensor_addr,
@@ -370,7 +380,8 @@ static int abituguru_write(struct abituguru_data *data,
}
/* Now we need to wait till the chip is ready to be read again,
- don't ask why */
+ so that we can read 0xAC as confirmation that our write has
+ succeeded. */
if (abituguru_wait(data, ABIT_UGURU_STATUS_READ)) {
ABIT_UGURU_DEBUG(1, "timeout exceeded waiting for read state "
"after write (bank: %d, sensor: %d)\n", (int)bank_addr,
@@ -379,11 +390,15 @@ static int abituguru_write(struct abituguru_data *data,
}
/* Cmd port MUST be read now and should contain 0xAC */
- if (inb_p(data->addr + ABIT_UGURU_CMD) != 0xAC) {
- ABIT_UGURU_DEBUG(1, "CMD reg does not hold 0xAC after write "
- "(bank: %d, sensor: %d)\n", (int)bank_addr,
- (int)sensor_addr);
- return -EIO;
+ while (inb_p(data->addr + ABIT_UGURU_CMD) != 0xAC) {
+ timeout--;
+ if (timeout == 0) {
+ ABIT_UGURU_DEBUG(1, "CMD reg does not hold 0xAC after "
+ "write (bank: %d, sensor: %d)\n",
+ (int)bank_addr, (int)sensor_addr);
+ return -EIO;
+ }
+ msleep(0);
}
/* Last put the chip back in ready state */
@@ -403,7 +418,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
u8 sensor_addr)
{
u8 val, buf[3];
- int ret = ABIT_UGURU_NC;
+ int i, ret = -ENODEV; /* error is the most commonly used retval :| */
/* If overriden by the user return the user selected type */
if (bank1_types[sensor_addr] >= ABIT_UGURU_IN_SENSOR &&
@@ -439,7 +454,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
buf[2] = 250;
if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr,
buf, 3) != 3)
- return -ENODEV;
+ goto abituguru_detect_bank1_sensor_type_exit;
/* Now we need 20 ms to give the uguru time to read the sensors
and raise a voltage alarm */
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -447,21 +462,16 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
/* Check for alarm and check the alarm is a volt low alarm. */
if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3,
ABIT_UGURU_MAX_RETRIES) != 3)
- return -ENODEV;
+ goto abituguru_detect_bank1_sensor_type_exit;
if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) {
if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1,
sensor_addr, buf, 3,
ABIT_UGURU_MAX_RETRIES) != 3)
- return -ENODEV;
+ goto abituguru_detect_bank1_sensor_type_exit;
if (buf[0] & ABIT_UGURU_VOLT_LOW_ALARM_FLAG) {
- /* Restore original settings */
- if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2,
- sensor_addr,
- data->bank1_settings[sensor_addr],
- 3) != 3)
- return -ENODEV;
ABIT_UGURU_DEBUG(2, " found volt sensor\n");
- return ABIT_UGURU_IN_SENSOR;
+ ret = ABIT_UGURU_IN_SENSOR;
+ goto abituguru_detect_bank1_sensor_type_exit;
} else
ABIT_UGURU_DEBUG(2, " alarm raised during volt "
"sensor test, but volt low flag not set\n");
@@ -477,7 +487,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
buf[2] = 10;
if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr,
buf, 3) != 3)
- return -ENODEV;
+ goto abituguru_detect_bank1_sensor_type_exit;
/* Now we need 50 ms to give the uguru time to read the sensors
and raise a temp alarm */
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -485,15 +495,16 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
/* Check for alarm and check the alarm is a temp high alarm. */
if (abituguru_read(data, ABIT_UGURU_ALARM_BANK, 0, buf, 3,
ABIT_UGURU_MAX_RETRIES) != 3)
- return -ENODEV;
+ goto abituguru_detect_bank1_sensor_type_exit;
if (buf[sensor_addr/8] & (0x01 << (sensor_addr % 8))) {
if (abituguru_read(data, ABIT_UGURU_SENSOR_BANK1 + 1,
sensor_addr, buf, 3,
ABIT_UGURU_MAX_RETRIES) != 3)
- return -ENODEV;
+ goto abituguru_detect_bank1_sensor_type_exit;
if (buf[0] & ABIT_UGURU_TEMP_HIGH_ALARM_FLAG) {
- ret = ABIT_UGURU_TEMP_SENSOR;
ABIT_UGURU_DEBUG(2, " found temp sensor\n");
+ ret = ABIT_UGURU_TEMP_SENSOR;
+ goto abituguru_detect_bank1_sensor_type_exit;
} else
ABIT_UGURU_DEBUG(2, " alarm raised during temp "
"sensor test, but temp high flag not set\n");
@@ -501,11 +512,23 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
ABIT_UGURU_DEBUG(2, " alarm not raised during temp sensor "
"test\n");
- /* Restore original settings */
- if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2, sensor_addr,
- data->bank1_settings[sensor_addr], 3) != 3)
+ ret = ABIT_UGURU_NC;
+abituguru_detect_bank1_sensor_type_exit:
+ /* Restore original settings; failing here is really BAD, as it has been
+ reported that some BIOSes hang when entering the uGuru menu with
+ invalid settings present in the uGuru, so we try this 3 times. */
+ for (i = 0; i < 3; i++)
+ if (abituguru_write(data, ABIT_UGURU_SENSOR_BANK1 + 2,
+ sensor_addr, data->bank1_settings[sensor_addr],
+ 3) == 3)
+ break;
+ if (i == 3) {
+ printk(KERN_ERR ABIT_UGURU_NAME
+ ": Fatal error could not restore original settings. "
+ "This should never happen please report this to the "
+ "abituguru maintainer (see MAINTAINERS)\n");
return -ENODEV;
-
+ }
return ret;
}
@@ -1305,7 +1328,7 @@ static struct abituguru_data *abituguru_update_device(struct device *dev)
data->update_timeouts = 0;
LEAVE_UPDATE:
/* handle timeout condition */
- if (err == -EBUSY) {
+ if (!success && (err == -EBUSY || err >= 0)) {
/* No overflow please */
if (data->update_timeouts < 255u)
data->update_timeouts++;
diff --git a/drivers/i2c/chips/tps65010.c b/drivers/i2c/chips/tps65010.c
index e7e2704..0be6fd6 100644
--- a/drivers/i2c/chips/tps65010.c
+++ b/drivers/i2c/chips/tps65010.c
@@ -43,13 +43,12 @@
/*-------------------------------------------------------------------------*/
#define DRIVER_VERSION "2 May 2005"
-#define DRIVER_NAME (tps65010_driver.name)
+#define DRIVER_NAME (tps65010_driver.driver.name)
MODULE_DESCRIPTION("TPS6501x Power Management Driver");
MODULE_LICENSE("GPL");
static unsigned short normal_i2c[] = { 0x48, /* 0x49, */ I2C_CLIENT_END };
-static unsigned short normal_i2c_range[] = { I2C_CLIENT_END };
I2C_CLIENT_INSMOD;
@@ -100,7 +99,7 @@ struct tps65010 {
/* not currently tracking GPIO state */
};
-#define POWER_POLL_DELAY msecs_to_jiffies(800)
+#define POWER_POLL_DELAY msecs_to_jiffies(5000)
/*-------------------------------------------------------------------------*/
@@ -520,8 +519,11 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
goto fail1;
}
+ /* the IRQ is active low, but many gpio lines can't support that
+ * so this driver can use falling-edge triggers instead.
+ */
+ irqflags = IRQF_SAMPLE_RANDOM;
#ifdef CONFIG_ARM
- irqflags = IRQF_SAMPLE_RANDOM | IRQF_TRIGGER_LOW;
if (machine_is_omap_h2()) {
tps->model = TPS65010;
omap_cfg_reg(W4_GPIO58);
@@ -543,8 +545,6 @@ tps65010_probe(struct i2c_adapter *bus, int address, int kind)
// FIXME set up this board's IRQ ...
}
-#else
- irqflags = IRQF_SAMPLE_RANDOM;
#endif
if (tps->irq > 0) {
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index d4bad67..448df27 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -3552,6 +3552,8 @@ static int ohci1394_pci_resume (struct pci_dev *pdev)
static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
{
+ pci_save_state(pdev);
+
#ifdef CONFIG_PPC_PMAC
if (machine_is(powermac)) {
struct device_node *of_node;
@@ -3563,8 +3565,6 @@ static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
}
#endif
- pci_save_state(pdev);
-
return 0;
}
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index e05ca2c..75313ad 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -301,7 +301,8 @@ static void ib_cache_event(struct ib_event_handler *handler,
event->event == IB_EVENT_PORT_ACTIVE ||
event->event == IB_EVENT_LID_CHANGE ||
event->event == IB_EVENT_PKEY_CHANGE ||
- event->event == IB_EVENT_SM_CHANGE) {
+ event->event == IB_EVENT_SM_CHANGE ||
+ event->event == IB_EVENT_CLIENT_REREGISTER) {
work = kmalloc(sizeof *work, GFP_ATOMIC);
if (work) {
INIT_WORK(&work->work, ib_cache_task, work);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index aeda484..d6b8422 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -405,7 +405,8 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
event->event == IB_EVENT_PORT_ACTIVE ||
event->event == IB_EVENT_LID_CHANGE ||
event->event == IB_EVENT_PKEY_CHANGE ||
- event->event == IB_EVENT_SM_CHANGE) {
+ event->event == IB_EVENT_SM_CHANGE ||
+ event->event == IB_EVENT_CLIENT_REREGISTER) {
struct ib_sa_device *sa_dev;
sa_dev = container_of(handler, typeof(*sa_dev), event_handler);
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 557cde3..7b82c19 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -967,12 +967,12 @@ static struct {
} mthca_hca_table[] = {
[TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 4, 0),
.flags = 0 },
- [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 400),
+ [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 600),
.flags = MTHCA_FLAG_PCIE },
- [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 0),
+ [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 400),
.flags = MTHCA_FLAG_MEMFREE |
MTHCA_FLAG_PCIE },
- [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 0, 800),
+ [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 1, 0),
.flags = MTHCA_FLAG_MEMFREE |
MTHCA_FLAG_PCIE |
MTHCA_FLAG_SINAI_OPT }
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 230ae21..265b1d1 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1287,11 +1287,7 @@ int mthca_register_device(struct mthca_dev *dev)
(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
- (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
- (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
- (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
+ (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
dev->ib_dev.node_type = IB_NODE_CA;
dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
dev->ib_dev.dma_device = &dev->pdev->dev;
@@ -1316,6 +1312,11 @@ int mthca_register_device(struct mthca_dev *dev)
dev->ib_dev.modify_srq = mthca_modify_srq;
dev->ib_dev.query_srq = mthca_query_srq;
dev->ib_dev.destroy_srq = mthca_destroy_srq;
+ dev->ib_dev.uverbs_cmd_mask |=
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
if (mthca_is_memfree(dev))
dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 8de2887..9a5bece 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -136,8 +136,8 @@ struct mthca_ah {
* We have one global lock that protects dev->cq/qp_table. Each
* struct mthca_cq/qp also has its own lock. An individual qp lock
* may be taken inside of an individual cq lock. Both cqs attached to
- * a qp may be locked, with the send cq locked first. No other
- * nesting should be done.
+ * a qp may be locked, with the cq with the lower cqn locked first.
+ * No other nesting should be done.
*
* Each struct mthca_cq/qp also has an ref count, protected by the
* corresponding table lock. The pointer from the cq/qp_table to the
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index cd8b672..2e8f6f3 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -99,6 +99,10 @@ enum {
MTHCA_QP_BIT_RSC = 1 << 3
};
+enum {
+ MTHCA_SEND_DOORBELL_FENCE = 1 << 5
+};
+
struct mthca_qp_path {
__be32 port_pkey;
u8 rnr_retry;
@@ -1259,6 +1263,32 @@ int mthca_alloc_qp(struct mthca_dev *dev,
return 0;
}
+static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+{
+ if (send_cq == recv_cq)
+ spin_lock_irq(&send_cq->lock);
+ else if (send_cq->cqn < recv_cq->cqn) {
+ spin_lock_irq(&send_cq->lock);
+ spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ spin_lock_irq(&recv_cq->lock);
+ spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+ }
+}
+
+static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+{
+ if (send_cq == recv_cq)
+ spin_unlock_irq(&send_cq->lock);
+ else if (send_cq->cqn < recv_cq->cqn) {
+ spin_unlock(&recv_cq->lock);
+ spin_unlock_irq(&send_cq->lock);
+ } else {
+ spin_unlock(&send_cq->lock);
+ spin_unlock_irq(&recv_cq->lock);
+ }
+}
+
int mthca_alloc_sqp(struct mthca_dev *dev,
struct mthca_pd *pd,
struct mthca_cq *send_cq,
@@ -1311,17 +1341,13 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
* Lock CQs here, so that CQ polling code can do QP lookup
* without taking a lock.
*/
- spin_lock_irq(&send_cq->lock);
- if (send_cq != recv_cq)
- spin_lock(&recv_cq->lock);
+ mthca_lock_cqs(send_cq, recv_cq);
spin_lock(&dev->qp_table.lock);
mthca_array_clear(&dev->qp_table.qp, mqpn);
spin_unlock(&dev->qp_table.lock);
- if (send_cq != recv_cq)
- spin_unlock(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ mthca_unlock_cqs(send_cq, recv_cq);
err_out:
dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
@@ -1355,9 +1381,7 @@ void mthca_free_qp(struct mthca_dev *dev,
* Lock CQs here, so that CQ polling code can do QP lookup
* without taking a lock.
*/
- spin_lock_irq(&send_cq->lock);
- if (send_cq != recv_cq)
- spin_lock(&recv_cq->lock);
+ mthca_lock_cqs(send_cq, recv_cq);
spin_lock(&dev->qp_table.lock);
mthca_array_clear(&dev->qp_table.qp,
@@ -1365,9 +1389,7 @@ void mthca_free_qp(struct mthca_dev *dev,
--qp->refcount;
spin_unlock(&dev->qp_table.lock);
- if (send_cq != recv_cq)
- spin_unlock(&recv_cq->lock);
- spin_unlock_irq(&send_cq->lock);
+ mthca_unlock_cqs(send_cq, recv_cq);
wait_event(qp->wait, !get_qp_refcount(dev, qp));
@@ -1502,7 +1524,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int i;
int size;
int size0 = 0;
- u32 f0 = 0;
+ u32 f0;
int ind;
u8 op0 = 0;
@@ -1686,6 +1708,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
if (!size0) {
size0 = size;
op0 = mthca_opcode[wr->opcode];
+ f0 = wr->send_flags & IB_SEND_FENCE ?
+ MTHCA_SEND_DOORBELL_FENCE : 0;
}
++ind;
@@ -1843,7 +1867,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int i;
int size;
int size0 = 0;
- u32 f0 = 0;
+ u32 f0;
int ind;
u8 op0 = 0;
@@ -2051,6 +2075,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
if (!size0) {
size0 = size;
op0 = mthca_opcode[wr->opcode];
+ f0 = wr->send_flags & IB_SEND_FENCE ?
+ MTHCA_SEND_DOORBELL_FENCE : 0;
}
++ind;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 34b0da5..1437d7e 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -378,21 +378,6 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
return iser_conn_set_full_featured_mode(conn);
}
-static void
-iscsi_iser_conn_terminate(struct iscsi_conn *conn)
-{
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
- struct iser_conn *ib_conn = iser_conn->ib_conn;
-
- BUG_ON(!ib_conn);
- /* starts conn teardown process, waits until all previously *
- * posted buffers get flushed, deallocates all conn resources */
- iser_conn_terminate(ib_conn);
- iser_conn->ib_conn = NULL;
- conn->recv_lock = NULL;
-}
-
-
static struct iscsi_transport iscsi_iser_transport;
static struct iscsi_cls_session *
@@ -555,13 +540,13 @@ iscsi_iser_ep_poll(__u64 ep_handle, int timeout_ms)
static void
iscsi_iser_ep_disconnect(__u64 ep_handle)
{
- struct iser_conn *ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
+ struct iser_conn *ib_conn;
+ ib_conn = iscsi_iser_ib_conn_lookup(ep_handle);
if (!ib_conn)
return;
iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state);
-
iser_conn_terminate(ib_conn);
}
@@ -614,9 +599,6 @@ static struct iscsi_transport iscsi_iser_transport = {
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_iser_conn_start,
.stop_conn = iscsi_conn_stop,
- /* these are called as part of conn recovery */
- .suspend_conn_recv = NULL, /* FIXME is/how this relvant to iser? */
- .terminate_conn = iscsi_iser_conn_terminate,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
.get_stats = iscsi_iser_conn_get_stats,
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 6bfa0cf..a86afd0 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -498,7 +498,7 @@ static int atkbd_set_repeat_rate(struct atkbd *atkbd)
i++;
dev->rep[REP_PERIOD] = period[i];
- while (j < ARRAY_SIZE(period) - 1 && delay[j] < dev->rep[REP_DELAY])
+ while (j < ARRAY_SIZE(delay) - 1 && delay[j] < dev->rep[REP_DELAY])
j++;
dev->rep[REP_DELAY] = delay[j];
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index a8efc1a..de0f46d 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -259,11 +259,11 @@ static int __init dmi_matched(struct dmi_system_id *dmi)
return 1;
}
-static struct key_entry keymap_empty[] __initdata = {
+static struct key_entry keymap_empty[] = {
{ KE_END, 0 }
};
-static struct key_entry keymap_fs_amilo_pro_v2000[] __initdata = {
+static struct key_entry keymap_fs_amilo_pro_v2000[] = {
{ KE_KEY, 0x01, KEY_HELP },
{ KE_KEY, 0x11, KEY_PROG1 },
{ KE_KEY, 0x12, KEY_PROG2 },
@@ -273,7 +273,7 @@ static struct key_entry keymap_fs_amilo_pro_v2000[] __initdata = {
{ KE_END, 0 }
};
-static struct key_entry keymap_fujitsu_n3510[] __initdata = {
+static struct key_entry keymap_fujitsu_n3510[] = {
{ KE_KEY, 0x11, KEY_PROG1 },
{ KE_KEY, 0x12, KEY_PROG2 },
{ KE_KEY, 0x36, KEY_WWW },
@@ -285,7 +285,7 @@ static struct key_entry keymap_fujitsu_n3510[] __initdata = {
{ KE_END, 0 }
};
-static struct key_entry keymap_wistron_ms2111[] __initdata = {
+static struct key_entry keymap_wistron_ms2111[] = {
{ KE_KEY, 0x11, KEY_PROG1 },
{ KE_KEY, 0x12, KEY_PROG2 },
{ KE_KEY, 0x13, KEY_PROG3 },
@@ -294,7 +294,7 @@ static struct key_entry keymap_wistron_ms2111[] __initdata = {
{ KE_END, 0 }
};
-static struct key_entry keymap_wistron_ms2141[] __initdata = {
+static struct key_entry keymap_wistron_ms2141[] = {
{ KE_KEY, 0x11, KEY_PROG1 },
{ KE_KEY, 0x12, KEY_PROG2 },
{ KE_WIFI, 0x30, 0 },
@@ -307,7 +307,7 @@ static struct key_entry keymap_wistron_ms2141[] __initdata = {
{ KE_END, 0 }
};
-static struct key_entry keymap_acer_aspire_1500[] __initdata = {
+static struct key_entry keymap_acer_aspire_1500[] = {
{ KE_KEY, 0x11, KEY_PROG1 },
{ KE_KEY, 0x12, KEY_PROG2 },
{ KE_WIFI, 0x30, 0 },
@@ -317,7 +317,7 @@ static struct key_entry keymap_acer_aspire_1500[] __initdata = {
{ KE_END, 0 }
};
-static struct key_entry keymap_acer_travelmate_240[] __initdata = {
+static struct key_entry keymap_acer_travelmate_240[] = {
{ KE_KEY, 0x31, KEY_MAIL },
{ KE_KEY, 0x36, KEY_WWW },
{ KE_KEY, 0x11, KEY_PROG1 },
@@ -327,7 +327,7 @@ static struct key_entry keymap_acer_travelmate_240[] __initdata = {
{ KE_END, 0 }
};
-static struct key_entry keymap_aopen_1559as[] __initdata = {
+static struct key_entry keymap_aopen_1559as[] = {
{ KE_KEY, 0x01, KEY_HELP },
{ KE_KEY, 0x06, KEY_PROG3 },
{ KE_KEY, 0x11, KEY_PROG1 },
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 8bc9f51..343afa3 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -485,13 +485,6 @@ static int im_explorer_detect(struct psmouse *psmouse, int set_properties)
param[0] = 40;
ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
- param[0] = 200;
- ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
- param[0] = 200;
- ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
- param[0] = 60;
- ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
-
if (set_properties) {
set_bit(BTN_MIDDLE, psmouse->dev->keybit);
set_bit(REL_WHEEL, psmouse->dev->relbit);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index be48ced..c54de98 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -255,7 +255,9 @@ static struct region *__rh_alloc(struct region_hash *rh, region_t region)
struct region *reg, *nreg;
read_unlock(&rh->hash_lock);
- nreg = mempool_alloc(rh->region_pool, GFP_NOIO);
+ nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
+ if (unlikely(!nreg))
+ nreg = kmalloc(sizeof(struct region), GFP_NOIO);
nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
RH_CLEAN : RH_NOSYNC;
nreg->rh = rh;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b6d1602..8dbab2e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1597,6 +1597,19 @@ void md_update_sb(mddev_t * mddev)
repeat:
spin_lock_irq(&mddev->write_lock);
+
+ if (mddev->degraded && mddev->sb_dirty == 3)
+ /* If the array is degraded, then skipping spares is both
+ * dangerous and fairly pointless.
+ * Dangerous because a device that was removed from the array
+ * might have an event_count that still looks up-to-date,
+ * so it can be re-added without a resync.
+ * Pointless because if there are any spares to skip,
+ * then a recovery will happen and soon that array won't
+ * be degraded any more and the spare can go back to sleep then.
+ */
+ mddev->sb_dirty = 1;
+
sync_req = mddev->in_sync;
mddev->utime = get_seconds();
if (mddev->sb_dirty == 3)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1efe22a..87bfe9e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1625,15 +1625,16 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
return 0;
}
- /* before building a request, check if we can skip these blocks..
- * This call the bitmap_start_sync doesn't actually record anything
- */
if (mddev->bitmap == NULL &&
mddev->recovery_cp == MaxSector &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
conf->fullsync == 0) {
*skipped = 1;
return max_sector - sector_nr;
}
+ /* before building a request, check if we can skip these blocks..
+ * This call to bitmap_start_sync doesn't actually record anything
+ */
if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* We can skip this block, and probably several more */
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index d4cb144..c537d71 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -640,7 +640,6 @@ typedef struct _MPT_ADAPTER
struct work_struct fc_setup_reset_work;
struct list_head fc_rports;
spinlock_t fc_rescan_work_lock;
- int fc_rescan_work_count;
struct work_struct fc_rescan_work;
char fc_rescan_work_q_name[KOBJ_NAME_LEN];
struct workqueue_struct *fc_rescan_work_q;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 90da7d6..85696f3 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -669,7 +669,10 @@ mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
* if still doing discovery,
* hang loose a while until finished
*/
- if (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) {
+ if ((pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) ||
+ (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE &&
+ (pp0dest->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK)
+ == MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT)) {
if (count-- > 0) {
msleep(100);
goto try_again;
@@ -895,59 +898,45 @@ mptfc_rescan_devices(void *arg)
{
MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
int ii;
- int work_to_do;
u64 pn;
- unsigned long flags;
struct mptfc_rport_info *ri;
- do {
- /* start by tagging all ports as missing */
- list_for_each_entry(ri, &ioc->fc_rports, list) {
- if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
- ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
- }
+ /* start by tagging all ports as missing */
+ list_for_each_entry(ri, &ioc->fc_rports, list) {
+ if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
+ ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
}
+ }
- /*
- * now rescan devices known to adapter,
- * will reregister existing rports
- */
- for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
- (void) mptfc_GetFcPortPage0(ioc, ii);
- mptfc_init_host_attr(ioc,ii); /* refresh */
- mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev);
- }
+ /*
+ * now rescan devices known to adapter,
+ * will reregister existing rports
+ */
+ for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
+ (void) mptfc_GetFcPortPage0(ioc, ii);
+ mptfc_init_host_attr(ioc, ii); /* refresh */
+ mptfc_GetFcDevPage0(ioc, ii, mptfc_register_dev);
+ }
- /* delete devices still missing */
- list_for_each_entry(ri, &ioc->fc_rports, list) {
- /* if newly missing, delete it */
- if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) {
+ /* delete devices still missing */
+ list_for_each_entry(ri, &ioc->fc_rports, list) {
+ /* if newly missing, delete it */
+ if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) {
- ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
- MPT_RPORT_INFO_FLAGS_MISSING);
- fc_remote_port_delete(ri->rport); /* won't sleep */
- ri->rport = NULL;
+ ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
+ MPT_RPORT_INFO_FLAGS_MISSING);
+ fc_remote_port_delete(ri->rport); /* won't sleep */
+ ri->rport = NULL;
- pn = (u64)ri->pg0.WWPN.High << 32 |
- (u64)ri->pg0.WWPN.Low;
- dfcprintk ((MYIOC_s_INFO_FMT
- "mptfc_rescan.%d: %llx deleted\n",
- ioc->name,
- ioc->sh->host_no,
- (unsigned long long)pn));
- }
+ pn = (u64)ri->pg0.WWPN.High << 32 |
+ (u64)ri->pg0.WWPN.Low;
+ dfcprintk ((MYIOC_s_INFO_FMT
+ "mptfc_rescan.%d: %llx deleted\n",
+ ioc->name,
+ ioc->sh->host_no,
+ (unsigned long long)pn));
}
-
- /*
- * allow multiple passes as target state
- * might have changed during scan
- */
- spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
- if (ioc->fc_rescan_work_count > 2) /* only need one more */
- ioc->fc_rescan_work_count = 2;
- work_to_do = --ioc->fc_rescan_work_count;
- spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
- } while (work_to_do);
+ }
}
static int
@@ -1159,7 +1148,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* by doing it via the workqueue, some locking is eliminated
*/
- ioc->fc_rescan_work_count = 1;
queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work);
flush_workqueue(ioc->fc_rescan_work_q);
@@ -1202,10 +1190,8 @@ mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
case MPI_EVENT_RESCAN:
spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
if (ioc->fc_rescan_work_q) {
- if (ioc->fc_rescan_work_count++ == 0) {
- queue_work(ioc->fc_rescan_work_q,
- &ioc->fc_rescan_work);
- }
+ queue_work(ioc->fc_rescan_work_q,
+ &ioc->fc_rescan_work);
}
spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
break;
@@ -1248,10 +1234,8 @@ mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
mptfc_SetFcPortPage1_defaults(ioc);
spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
if (ioc->fc_rescan_work_q) {
- if (ioc->fc_rescan_work_count++ == 0) {
- queue_work(ioc->fc_rescan_work_q,
- &ioc->fc_rescan_work);
- }
+ queue_work(ioc->fc_rescan_work_q,
+ &ioc->fc_rescan_work);
}
spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
}
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index d7897dc..a0ba07c 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -130,11 +130,13 @@ static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd,
if (ctrl & NAND_CTRL_CHANGE) {
unsigned long bits;
- bits = (~ctrl & NAND_NCE) << 2;
- bits |= (ctrl & NAND_CLE) << 7;
- bits |= (ctrl & NAND_ALE) << 6;
+ bits = (~ctrl & NAND_NCE) ? AMS_DELTA_LATCH2_NAND_NCE : 0;
+ bits |= (ctrl & NAND_CLE) ? AMS_DELTA_LATCH2_NAND_CLE : 0;
+ bits |= (ctrl & NAND_ALE) ? AMS_DELTA_LATCH2_NAND_ALE : 0;
- ams_delta_latch2_write(0xC2, bits);
+ ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_CLE |
+ AMS_DELTA_LATCH2_NAND_ALE |
+ AMS_DELTA_LATCH2_NAND_NCE, bits);
}
if (cmd != NAND_CMD_NONE)
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 62b8613..c8cbc00 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1093,9 +1093,10 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
ret = nand_do_read_ops(mtd, from, &chip->ops);
+ *retlen = chip->ops.retlen;
+
nand_release_device(mtd);
- *retlen = chip->ops.retlen;
return ret;
}
@@ -1691,9 +1692,10 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
ret = nand_do_write_ops(mtd, to, &chip->ops);
+ *retlen = chip->ops.retlen;
+
nand_release_device(mtd);
- *retlen = chip->ops.retlen;
return ret;
}
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 4532b17..aedfddf 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -1003,7 +1003,8 @@ static int corkscrew_start_xmit(struct sk_buff *skb,
/* Calculate the next Tx descriptor entry. */
int entry = vp->cur_tx % TX_RING_SIZE;
struct boom_tx_desc *prev_entry;
- unsigned long flags, i;
+ unsigned long flags;
+ int i;
if (vp->tx_full) /* No room to transmit with */
return 1;
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 7e2ca95..257d3bc 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -899,7 +899,7 @@ memory_squeeze:
}
-static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
+static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
struct i596_cmd *ptr;
@@ -932,7 +932,8 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private
lp->scb.cmd = I596_NULL;
}
-static inline void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr)
+static void i596_reset(struct net_device *dev, struct i596_private *lp,
+ int ioaddr)
{
unsigned long flags;
@@ -1578,7 +1579,7 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "i82596 debug mask");
-int init_module(void)
+int __init init_module(void)
{
if (debug >= 0)
i596_debug = debug;
@@ -1588,7 +1589,7 @@ int init_module(void)
return 0;
}
-void cleanup_module(void)
+void __exit cleanup_module(void)
{
unregister_netdev(dev_82596);
#ifdef __mc68000__
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3918990..30b3671 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1724,6 +1724,20 @@ config VIA_RHINE_MMIO
If unsure, say Y.
+config VIA_RHINE_NAPI
+ bool "Use Rx Polling (NAPI)"
+ depends on VIA_RHINE
+ help
+ NAPI is a new driver API designed to reduce CPU and interrupt load
+ when the driver is receiving lots of packets from the card.
+
+ If your estimated Rx load is 10kpps or more, or if the card will be
+ deployed on potentially unfriendly networks (e.g. in a firewall),
+ then say Y here.
+
+ See <file:Documentation/networking/NAPI_HOWTO.txt> for more
+ information.
+
config LAN_SAA9730
bool "Philips SAA9730 Ethernet support (EXPERIMENTAL)"
depends on NET_PCI && EXPERIMENTAL && MIPS
@@ -2219,6 +2233,33 @@ config GFAR_NAPI
bool "NAPI Support"
depends on GIANFAR
+config UCC_GETH
+ tristate "Freescale QE UCC GETH"
+ depends on QUICC_ENGINE && UCC_FAST
+ help
+ This driver supports the Gigabit Ethernet mode of QE UCC.
+ QE can be found on MPC836x CPUs.
+
+config UGETH_NAPI
+ bool "NAPI Support"
+ depends on UCC_GETH
+
+config UGETH_MAGIC_PACKET
+ bool "Magic Packet detection support"
+ depends on UCC_GETH
+
+config UGETH_FILTERING
+ bool "MAC address filtering support"
+ depends on UCC_GETH
+
+config UGETH_TX_ON_DEMOND
+ bool "Transmit on Demand support"
+ depends on UCC_GETH
+
+config UGETH_HAS_GIGA
+ bool
+ depends on UCC_GETH && MPC836x
+
config MV643XX_ETH
tristate "MV-643XX Ethernet support"
depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index c91e951..8427bf9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -18,6 +18,9 @@ gianfar_driver-objs := gianfar.o \
gianfar_mii.o \
gianfar_sysfs.o
+obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
+ucc_geth_driver-objs := ucc_geth.o ucc_geth_phy.o
+
#
# link order important here
#
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 7952dc6..0fbbcb7 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -370,8 +370,7 @@ MODULE_PARM_DESC(mem, "Memory base address(es)");
MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver");
MODULE_LICENSE("GPL");
-int
-init_module(void)
+int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 1d01ac0..ae7f828 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -1030,7 +1030,7 @@ module_param(io, int, 0);
module_param(irq, int, 0);
module_param(board_type, int, 0);
-int init_module(void)
+int __init init_module(void)
{
if (io == 0)
printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 5d7929c7..4ca061c 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -901,7 +901,7 @@ MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address");
MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number");
MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)");
-int init_module(void)
+int __init init_module(void)
{
if (io == 0)
printk("at1700: You should not use auto-probing with insmod!\n");
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 47eecce..2dcca79b 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1905,8 +1905,7 @@ MODULE_LICENSE("GPL");
*/
-int
-init_module(void)
+int __init init_module(void)
{
struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
struct net_local *lp;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 1b758b7..3d76fa1 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -339,6 +339,17 @@ static void dm9000_timeout(struct net_device *dev)
spin_unlock_irqrestore(&db->lock,flags);
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Used by netconsole
+ */
+static void dm9000_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ dm9000_interrupt(dev->irq,dev,NULL);
+ enable_irq(dev->irq);
+}
+#endif
/* dm9000_release_board
*
@@ -538,6 +549,9 @@ dm9000_probe(struct platform_device *pdev)
ndev->stop = &dm9000_stop;
ndev->get_stats = &dm9000_get_stats;
ndev->set_multicast_list = &dm9000_hash_table;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ ndev->poll_controller = &dm9000_poll_controller;
+#endif
#ifdef DM9000_PROGRAM_EEPROM
program_eeprom(db);
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 583518a..b3b9191 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -105,6 +105,33 @@ static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
uint16_t duplex);
static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
+static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw,
+ uint32_t segment);
+static int32_t e1000_get_software_flag(struct e1000_hw *hw);
+static int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
+static int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
+static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
+static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
+ uint16_t words, uint16_t *data);
+static int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index,
+ uint8_t* data);
+static int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index,
+ uint16_t *data);
+static int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr,
+ uint16_t *data);
+static void e1000_release_software_flag(struct e1000_hw *hw);
+static void e1000_release_software_semaphore(struct e1000_hw *hw);
+static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw,
+ uint32_t no_snoop);
+static int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw,
+ uint32_t index, uint8_t byte);
+static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
+ uint16_t words, uint16_t *data);
+static int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
+ uint8_t data);
+static int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr,
+ uint16_t data);
+
/* IGP cable length table */
static const
uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
@@ -3233,7 +3260,7 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw)
return data;
}
-int32_t
+static int32_t
e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
{
uint32_t swfw_sync = 0;
@@ -3277,7 +3304,7 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
return E1000_SUCCESS;
}
-void
+static void
e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask)
{
uint32_t swfw_sync;
@@ -3575,7 +3602,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw,
return E1000_SUCCESS;
}
-int32_t
+static int32_t
e1000_read_kmrn_reg(struct e1000_hw *hw,
uint32_t reg_addr,
uint16_t *data)
@@ -3608,7 +3635,7 @@ e1000_read_kmrn_reg(struct e1000_hw *hw,
return E1000_SUCCESS;
}
-int32_t
+static int32_t
e1000_write_kmrn_reg(struct e1000_hw *hw,
uint32_t reg_addr,
uint16_t data)
@@ -3839,7 +3866,7 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw)
*
* hw - struct containing variables accessed by shared code
******************************************************************************/
-int32_t
+static int32_t
e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
{
int32_t ret_val;
@@ -4086,7 +4113,7 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
* hw - Struct containing variables accessed by shared code
* phy_info - PHY information structure
******************************************************************************/
-int32_t
+static int32_t
e1000_phy_ife_get_info(struct e1000_hw *hw,
struct e1000_phy_info *phy_info)
{
@@ -5643,6 +5670,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
* for the first 15 multicast addresses, and hashes the rest into the
* multicast table.
*****************************************************************************/
+#if 0
void
e1000_mc_addr_list_update(struct e1000_hw *hw,
uint8_t *mc_addr_list,
@@ -5719,6 +5747,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
}
DEBUGOUT("MC Update Complete\n");
}
+#endif /* 0 */
/******************************************************************************
* Hashes an address to determine its location in the multicast table
@@ -6587,6 +6616,7 @@ e1000_get_bus_info(struct e1000_hw *hw)
* hw - Struct containing variables accessed by shared code
* offset - offset to read from
*****************************************************************************/
+#if 0
uint32_t
e1000_read_reg_io(struct e1000_hw *hw,
uint32_t offset)
@@ -6597,6 +6627,7 @@ e1000_read_reg_io(struct e1000_hw *hw,
e1000_io_write(hw, io_addr, offset);
return e1000_io_read(hw, io_data);
}
+#endif /* 0 */
/******************************************************************************
* Writes a value to one of the devices registers using port I/O (as opposed to
@@ -7909,6 +7940,7 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw)
* returns: - none.
*
***************************************************************************/
+#if 0
void
e1000_enable_pciex_master(struct e1000_hw *hw)
{
@@ -7923,6 +7955,7 @@ e1000_enable_pciex_master(struct e1000_hw *hw)
ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE;
E1000_WRITE_REG(hw, CTRL, ctrl);
}
+#endif /* 0 */
/*******************************************************************************
*
@@ -8148,7 +8181,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
* E1000_SUCCESS at any other case.
*
***************************************************************************/
-int32_t
+static int32_t
e1000_get_software_semaphore(struct e1000_hw *hw)
{
int32_t timeout = hw->eeprom.word_size + 1;
@@ -8183,7 +8216,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw)
* hw: Struct containing variables accessed by shared code
*
***************************************************************************/
-void
+static void
e1000_release_software_semaphore(struct e1000_hw *hw)
{
uint32_t swsm;
@@ -8265,7 +8298,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
* returns: E1000_SUCCESS
*
*****************************************************************************/
-int32_t
+static int32_t
e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
{
uint32_t gcr_reg = 0;
@@ -8306,7 +8339,7 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
* hw: Struct containing variables accessed by shared code
*
***************************************************************************/
-int32_t
+static int32_t
e1000_get_software_flag(struct e1000_hw *hw)
{
int32_t timeout = PHY_CFG_TIMEOUT;
@@ -8345,7 +8378,7 @@ e1000_get_software_flag(struct e1000_hw *hw)
* hw: Struct containing variables accessed by shared code
*
***************************************************************************/
-void
+static void
e1000_release_software_flag(struct e1000_hw *hw)
{
uint32_t extcnf_ctrl;
@@ -8369,6 +8402,7 @@ e1000_release_software_flag(struct e1000_hw *hw)
* hw: Struct containing variables accessed by shared code
*
***************************************************************************/
+#if 0
int32_t
e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
{
@@ -8388,6 +8422,7 @@ e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
return ret_val;
}
+#endif /* 0 */
/***************************************************************************
*
@@ -8397,6 +8432,7 @@ e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
* hw: Struct containing variables accessed by shared code
*
***************************************************************************/
+#if 0
int32_t
e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
{
@@ -8416,6 +8452,7 @@ e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
return ret_val;
}
+#endif /* 0 */
/******************************************************************************
* Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
@@ -8426,7 +8463,7 @@ e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
* data - word read from the EEPROM
* words - number of words to read
*****************************************************************************/
-int32_t
+static int32_t
e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
uint16_t *data)
{
@@ -8482,7 +8519,7 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
* words - number of words to write
* data - words to write to the EEPROM
*****************************************************************************/
-int32_t
+static int32_t
e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
uint16_t *data)
{
@@ -8529,7 +8566,7 @@ e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
*
* hw - The pointer to the hw structure
****************************************************************************/
-int32_t
+static int32_t
e1000_ich8_cycle_init(struct e1000_hw *hw)
{
union ich8_hws_flash_status hsfsts;
@@ -8596,7 +8633,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
*
* hw - The pointer to the hw structure
****************************************************************************/
-int32_t
+static int32_t
e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
{
union ich8_hws_flash_ctrl hsflctl;
@@ -8631,7 +8668,7 @@ e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
* size - Size of data to read, 1=byte 2=word
* data - Pointer to the word to store the value read.
*****************************************************************************/
-int32_t
+static int32_t
e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
uint32_t size, uint16_t* data)
{
@@ -8710,7 +8747,7 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
* size - Size of data to read, 1=byte 2=word
* data - The byte(s) to write to the NVM.
*****************************************************************************/
-int32_t
+static int32_t
e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
uint16_t data)
{
@@ -8785,7 +8822,7 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
* index - The index of the byte to read.
* data - Pointer to a byte to store the value read.
*****************************************************************************/
-int32_t
+static int32_t
e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
{
int32_t status = E1000_SUCCESS;
@@ -8808,7 +8845,7 @@ e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
* index - The index of the byte to write.
* byte - The byte to write to the NVM.
*****************************************************************************/
-int32_t
+static int32_t
e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
{
int32_t error = E1000_SUCCESS;
@@ -8839,7 +8876,7 @@ e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
* index - The index of the byte to read.
* data - The byte to write to the NVM.
*****************************************************************************/
-int32_t
+static int32_t
e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
{
int32_t status = E1000_SUCCESS;
@@ -8857,7 +8894,7 @@ e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
* index - The starting byte index of the word to read.
* data - Pointer to a word to store the value read.
*****************************************************************************/
-int32_t
+static int32_t
e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
{
int32_t status = E1000_SUCCESS;
@@ -8872,6 +8909,7 @@ e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
* index - The starting byte index of the word to read.
* data - The word to write to the NVM.
*****************************************************************************/
+#if 0
int32_t
e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
{
@@ -8879,6 +8917,7 @@ e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
status = e1000_write_ich8_data(hw, index, 2, data);
return status;
}
+#endif /* 0 */
/******************************************************************************
* Erases the bank specified. Each bank is a 4k block. Segments are 0 based.
@@ -8887,7 +8926,7 @@ e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
* hw - pointer to e1000_hw structure
* segment - 0 for first segment, 1 for second segment, etc.
*****************************************************************************/
-int32_t
+static int32_t
e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment)
{
union ich8_hws_flash_status hsfsts;
@@ -8984,6 +9023,7 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment)
* hw: Struct containing variables accessed by shared code
*
*****************************************************************************/
+#if 0
int32_t
e1000_duplex_reversal(struct e1000_hw *hw)
{
@@ -9012,8 +9052,9 @@ e1000_duplex_reversal(struct e1000_hw *hw)
return ret_val;
}
+#endif /* 0 */
-int32_t
+static int32_t
e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
uint32_t cnf_base_addr, uint32_t cnf_size)
{
@@ -9047,7 +9088,7 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
}
-int32_t
+static int32_t
e1000_init_lcd_from_nvm(struct e1000_hw *hw)
{
uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop;
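
Everything in the e1000_hw.c hunks above follows two patterns: helpers that are only called from inside this file lose their global linkage and become static, and functions with no remaining callers are compiled out with #if 0 ... #endif rather than deleted. The header hunk further down removes the matching extern prototypes. The fragment below is only an illustration of that pattern on a made-up helper, not code from the driver.

/* Illustration only -- hypothetical helper, not e1000 code. */
#include <stdint.h>

/* file-internal: prototype removed from the header, linkage narrowed */
static int32_t foo_sum_words(const uint16_t *buf, int count)
{
	int32_t sum = 0;

	while (count--)
		sum += *buf++;
	return sum;
}

int32_t foo_checksum_valid(const uint16_t *buf, int count)
{
	return foo_sum_words(buf, count) == 0;
}

#if 0	/* no in-tree callers; compiled out instead of deleted */
int32_t foo_dump_words(const uint16_t *buf, int count)
{
	return foo_sum_words(buf, count);
}
#endif /* 0 */
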
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index f9341e3..375b955 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -323,13 +323,8 @@ int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t dat
int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
int32_t e1000_phy_reset(struct e1000_hw *hw);
void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
-int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
-int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size);
-int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
-int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data);
-int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
/* EEPROM Functions */
int32_t e1000_init_eeprom_params(struct e1000_hw *hw);
@@ -400,13 +395,8 @@ int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num);
int32_t e1000_read_mac_addr(struct e1000_hw * hw);
-int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
-void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
-void e1000_release_software_flag(struct e1000_hw *hw);
-int32_t e1000_get_software_flag(struct e1000_hw *hw);
/* Filters (multicast, vlan, receive) */
-void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
@@ -431,31 +421,9 @@ void e1000_pci_clear_mwi(struct e1000_hw *hw);
void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
/* Port I/O is only supported on 82544 and newer */
-uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
-uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
-void e1000_enable_pciex_master(struct e1000_hw *hw);
int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
-int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
-void e1000_release_software_semaphore(struct e1000_hw *hw);
int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
-int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop);
-
-int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index,
- uint8_t *data);
-int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
- uint8_t byte);
-int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
- uint8_t byte);
-int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index,
- uint16_t *data);
-int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
- uint32_t size, uint16_t *data);
-int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
- uint16_t words, uint16_t *data);
-int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
- uint16_t words, uint16_t *data);
-int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment);
#define E1000_READ_REG_IO(a, reg) \
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 627f224..726f43d 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -4386,11 +4386,13 @@ e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
pci_write_config_word(adapter->pdev, reg, *value);
}
+#if 0
uint32_t
e1000_io_read(struct e1000_hw *hw, unsigned long port)
{
return inl(port);
}
+#endif /* 0 */
void
e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index e5c5cd2..e4e733a 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -425,8 +425,8 @@ MODULE_LICENSE("GPL");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
-int
-init_module(void)
+
+int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
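
The e2100 change above -- repeated for eepro, eexpress, es3210 and eth16i below -- only tags the module entry point with __init so that probe-time code can be discarded once loading finishes. A minimal sketch of the annotation with made-up names; it is not taken from any of these drivers.

/* Illustration only -- not taken from these ISA drivers. */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>

static int cards_found;

int __init init_module(void)
{
	/* probe-time work only; this function may be freed after load */
	cards_found = 1;
	return 0;
}

void __exit cleanup_module(void)
{
	cards_found = 0;
}

MODULE_LICENSE("GPL");
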
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 20d3143..8dc61d6 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1807,8 +1807,7 @@ MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)");
MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
-int
-init_module(void)
+int __init init_module(void)
{
struct net_device *dev;
int i;
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 33291bc..0701c1d 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1698,7 +1698,7 @@ MODULE_LICENSE("GPL");
* are specified, we verify and then use them. If no parameters are given, we
* autoprobe for one card only.
*/
-int init_module(void)
+int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index 6b0ab1e..fd7b32a 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -421,8 +421,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)");
MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver");
MODULE_LICENSE("GPL");
-int
-init_module(void)
+int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 4bf76f8..ca42efa 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1434,7 +1434,7 @@ MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto,
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "eth16i debug level (0-6)");
-int init_module(void)
+int __init init_module(void)
{
int this_dev, found = 0;
struct net_device *dev;
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 97d34fe..567e274 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -92,7 +92,7 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
#include <asm/uaccess.h>
/* These identify the driver base version and may not be removed. */
-static char version[] __devinitdata =
+static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/fs_enet/Makefile
index d6dd3f2..02d4dc1 100644
--- a/drivers/net/fs_enet/Makefile
+++ b/drivers/net/fs_enet/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_FS_ENET) += fs_enet.o
-obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o
-obj-$(CONFIG_8260) += mac-fcc.o
+obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o mii-fec.o
+obj-$(CONFIG_CPM2) += mac-fcc.o mii-bitbang.o
-fs_enet-objs := fs_enet-main.o fs_enet-mii.o mii-bitbang.o mii-fixed.o
+fs_enet-objs := fs_enet-main.o
diff --git a/drivers/net/fs_enet/fec.h b/drivers/net/fs_enet/fec.h
new file mode 100644
index 0000000..e980527
--- /dev/null
+++ b/drivers/net/fs_enet/fec.h
@@ -0,0 +1,42 @@
+#ifndef FS_ENET_FEC_H
+#define FS_ENET_FEC_H
+
+/* CRC polynomial used by the FEC for the multicast group filtering */
+#define FEC_CRC_POLY 0x04C11DB7
+
+#define FEC_MAX_MULTICAST_ADDRS 64
+
+/* Interrupt events/masks.
+*/
+#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
+#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
+#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
+#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
+#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
+#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
+#define FEC_ENET_RXF 0x02000000U /* Full frame received */
+#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
+#define FEC_ENET_MII 0x00800000U /* MII interrupt */
+#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
+
+#define FEC_ECNTRL_PINMUX 0x00000004
+#define FEC_ECNTRL_ETHER_EN 0x00000002
+#define FEC_ECNTRL_RESET 0x00000001
+
+#define FEC_RCNTRL_BC_REJ 0x00000010
+#define FEC_RCNTRL_PROM 0x00000008
+#define FEC_RCNTRL_MII_MODE 0x00000004
+#define FEC_RCNTRL_DRT 0x00000002
+#define FEC_RCNTRL_LOOP 0x00000001
+
+#define FEC_TCNTRL_FDEN 0x00000004
+#define FEC_TCNTRL_HBC 0x00000002
+#define FEC_TCNTRL_GTS 0x00000001
+
+
+
+/*
+ * Delay to wait for FEC reset command to complete (in us)
+ */
+#define FEC_RESET_DELAY 50
+#endif
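
fec.h above is split out of mac-fec.c (the matching removal appears in the mac-fec.c hunk below) so the new, separate mii-fec driver can share the register bit definitions. As a rough illustration of how the FEC_ENET_* event bits are consumed, here is a hypothetical classifier; the real drivers read and acknowledge the ievent register through their own FR()/FW() macros.

/* Sketch only -- hypothetical helper, not part of fs_enet. */
#include <linux/types.h>
#include "fec.h"

static void sketch_classify_events(u32 events, int *rx, int *tx, int *err)
{
	*rx  = (events & (FEC_ENET_RXF | FEC_ENET_RXB)) != 0;
	*tx  = (events & (FEC_ENET_TXF | FEC_ENET_TXB)) != 0;
	*err = (events & (FEC_ENET_HBERR | FEC_ENET_BABR |
			  FEC_ENET_BABT | FEC_ENET_EBERR)) != 0;
}
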
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index f6abff5..df62506 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -37,6 +37,7 @@
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
+#include <linux/phy.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>
@@ -682,35 +683,6 @@ static void fs_free_irq(struct net_device *dev, int irq)
(*fep->ops->post_free_irq)(dev, irq);
}
-/**********************************************************************************/
-
-/* This interrupt occurs when the PHY detects a link change. */
-static irqreturn_t
-fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
- struct net_device *dev = dev_id;
- struct fs_enet_private *fep;
- const struct fs_platform_info *fpi;
-
- fep = netdev_priv(dev);
- fpi = fep->fpi;
-
- /*
- * Acknowledge the interrupt if possible. If we have not
- * found the PHY yet we can't process or acknowledge the
- * interrupt now. Instead we ignore this interrupt for now,
- * which we can do since it is edge triggered. It will be
- * acknowledged later by fs_enet_open().
- */
- if (!fep->phy)
- return IRQ_NONE;
-
- fs_mii_ack_int(dev);
- fs_mii_link_status_change_check(dev, 0);
-
- return IRQ_HANDLED;
-}
-
static void fs_timeout(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
@@ -722,10 +694,13 @@ static void fs_timeout(struct net_device *dev)
spin_lock_irqsave(&fep->lock, flags);
if (dev->flags & IFF_UP) {
+ phy_stop(fep->phydev);
(*fep->ops->stop)(dev);
(*fep->ops->restart)(dev);
+ phy_start(fep->phydev);
}
+ phy_start(fep->phydev);
wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
spin_unlock_irqrestore(&fep->lock, flags);
@@ -733,35 +708,112 @@ static void fs_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+/*-----------------------------------------------------------------------------
+ * generic link-change handler - should be sufficient for most cases
+ *-----------------------------------------------------------------------------*/
+static void generic_adjust_link(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev = fep->phydev;
+ int new_state = 0;
+
+ if (phydev->link) {
+
+ /* adjust to duplex mode */
+ if (phydev->duplex != fep->oldduplex){
+ new_state = 1;
+ fep->oldduplex = phydev->duplex;
+ }
+
+ if (phydev->speed != fep->oldspeed) {
+ new_state = 1;
+ fep->oldspeed = phydev->speed;
+ }
+
+ if (!fep->oldlink) {
+ new_state = 1;
+ fep->oldlink = 1;
+ netif_schedule(dev);
+ netif_carrier_on(dev);
+ netif_start_queue(dev);
+ }
+
+ if (new_state)
+ fep->ops->restart(dev);
+
+ } else if (fep->oldlink) {
+ new_state = 1;
+ fep->oldlink = 0;
+ fep->oldspeed = 0;
+ fep->oldduplex = -1;
+ netif_carrier_off(dev);
+ netif_stop_queue(dev);
+ }
+
+ if (new_state && netif_msg_link(fep))
+ phy_print_status(phydev);
+}
+
+
+static void fs_adjust_link(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->lock, flags);
+
+ if(fep->ops->adjust_link)
+ fep->ops->adjust_link(dev);
+ else
+ generic_adjust_link(dev);
+
+ spin_unlock_irqrestore(&fep->lock, flags);
+}
+
+static int fs_init_phy(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct phy_device *phydev;
+
+ fep->oldlink = 0;
+ fep->oldspeed = 0;
+ fep->oldduplex = -1;
+ if(fep->fpi->bus_id)
+ phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0);
+ else {
+ printk("No phy bus ID specified in BSP code\n");
+ return -EINVAL;
+ }
+ if (IS_ERR(phydev)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phydev);
+ }
+
+ fep->phydev = phydev;
+
+ return 0;
+}
+
+
static int fs_enet_open(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
int r;
+ int err;
/* Install our interrupt handler. */
r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
if (r != 0) {
printk(KERN_ERR DRV_MODULE_NAME
- ": %s Could not allocate FEC IRQ!", dev->name);
+ ": %s Could not allocate FS_ENET IRQ!", dev->name);
return -EINVAL;
}
- /* Install our phy interrupt handler */
- if (fpi->phy_irq != -1) {
-
- r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt);
- if (r != 0) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s Could not allocate PHY IRQ!", dev->name);
- fs_free_irq(dev, fep->interrupt);
- return -EINVAL;
- }
- }
+ err = fs_init_phy(dev);
+ if(err)
+ return err;
- fs_mii_startup(dev);
- netif_carrier_off(dev);
- fs_mii_link_status_change_check(dev, 1);
+ phy_start(fep->phydev);
return 0;
}
@@ -769,20 +821,19 @@ static int fs_enet_open(struct net_device *dev)
static int fs_enet_close(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
unsigned long flags;
netif_stop_queue(dev);
netif_carrier_off(dev);
- fs_mii_shutdown(dev);
+ phy_stop(fep->phydev);
spin_lock_irqsave(&fep->lock, flags);
(*fep->ops->stop)(dev);
spin_unlock_irqrestore(&fep->lock, flags);
/* release any irqs */
- if (fpi->phy_irq != -1)
- fs_free_irq(dev, fpi->phy_irq);
+ phy_disconnect(fep->phydev);
+ fep->phydev = NULL;
fs_free_irq(dev, fep->interrupt);
return 0;
@@ -830,33 +881,19 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct fs_enet_private *fep = netdev_priv(dev);
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave(&fep->lock, flags);
- rc = mii_ethtool_gset(&fep->mii_if, cmd);
- spin_unlock_irqrestore(&fep->lock, flags);
-
- return rc;
+ return phy_ethtool_gset(fep->phydev, cmd);
}
static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct fs_enet_private *fep = netdev_priv(dev);
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave(&fep->lock, flags);
- rc = mii_ethtool_sset(&fep->mii_if, cmd);
- spin_unlock_irqrestore(&fep->lock, flags);
-
- return rc;
+ phy_ethtool_sset(fep->phydev, cmd);
+ return 0;
}
static int fs_nway_reset(struct net_device *dev)
{
- struct fs_enet_private *fep = netdev_priv(dev);
- return mii_nway_restart(&fep->mii_if);
+ return 0;
}
static u32 fs_get_msglevel(struct net_device *dev)
@@ -898,7 +935,7 @@ static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -EINVAL;
spin_lock_irqsave(&fep->lock, flags);
- rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL);
+ rc = phy_mii_ioctl(fep->phydev, mii, cmd);
spin_unlock_irqrestore(&fep->lock, flags);
return rc;
}
@@ -1030,12 +1067,6 @@ static struct net_device *fs_init_instance(struct device *dev,
}
registered = 1;
- err = fs_mii_connect(ndev);
- if (err != 0) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s fs_mii_connect failed.\n", ndev->name);
- goto err;
- }
return ndev;
@@ -1073,8 +1104,6 @@ static int fs_cleanup_instance(struct net_device *ndev)
fpi = fep->fpi;
- fs_mii_disconnect(ndev);
-
unregister_netdev(ndev);
dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
@@ -1196,17 +1225,39 @@ static int __init fs_init(void)
r = setup_immap();
if (r != 0)
return r;
- r = driver_register(&fs_enet_fec_driver);
+
+#ifdef CONFIG_FS_ENET_HAS_FCC
+ /* let's insert mii stuff */
+ r = fs_enet_mdio_bb_init();
+
+ if (r != 0) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ "BB PHY init failed.\n");
+ return r;
+ }
+ r = driver_register(&fs_enet_fcc_driver);
if (r != 0)
goto err;
+#endif
- r = driver_register(&fs_enet_fcc_driver);
+#ifdef CONFIG_FS_ENET_HAS_FEC
+ r = fs_enet_mdio_fec_init();
+ if (r != 0) {
+ printk(KERN_ERR DRV_MODULE_NAME
+ "FEC PHY init failed.\n");
+ return r;
+ }
+
+ r = driver_register(&fs_enet_fec_driver);
if (r != 0)
goto err;
+#endif
+#ifdef CONFIG_FS_ENET_HAS_SCC
r = driver_register(&fs_enet_scc_driver);
if (r != 0)
goto err;
+#endif
return 0;
err:
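
The fs_enet-main.c hunks above swap the driver's home-grown MII layer for the generic phylib: fs_init_phy() attaches through phy_connect() with fs_adjust_link() as the link-change callback, open/close/timeout bracket the PHY with phy_start()/phy_stop(), and the ethtool/ioctl paths are forwarded to phy_ethtool_gset()/phy_ethtool_sset() and phy_mii_ioctl(). The sketch below shows the same attach pattern in a stripped-down, hypothetical client; the "0:01" bus id and all names are placeholders, and the phy_connect() call simply mirrors the four-argument form used in the hunk above.

/* Sketch of the phylib attach pattern; not fs_enet code. */
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct sketch_priv {
	struct phy_device *phydev;
	int oldlink, oldspeed, oldduplex;
};

static void sketch_adjust_link(struct net_device *dev)
{
	struct sketch_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;

	/* phylib calls this whenever link/speed/duplex changes;
	 * reprogram the MAC here, then remember the new state */
	priv->oldlink   = phydev->link;
	priv->oldspeed  = phydev->speed;
	priv->oldduplex = phydev->duplex;
}

static int sketch_open(struct net_device *dev)
{
	struct sketch_priv *priv = netdev_priv(dev);

	priv->phydev = phy_connect(dev, "0:01", &sketch_adjust_link, 0);
	if (IS_ERR(priv->phydev))
		return PTR_ERR(priv->phydev);

	phy_start(priv->phydev);	/* start the phylib state machine */
	return 0;
}

static int sketch_stop(struct net_device *dev)
{
	struct sketch_priv *priv = netdev_priv(dev);

	phy_stop(priv->phydev);
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;
	return 0;
}
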
diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c
deleted file mode 100644
index b7e6e21..0000000
--- a/drivers/net/fs_enet/fs_enet-mii.c
+++ /dev/null
@@ -1,505 +0,0 @@
-/*
- * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
- *
- * Copyright (c) 2003 Intracom S.A.
- * by Pantelis Antoniou <panto@intracom.gr>
- *
- * 2005 (c) MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
- * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/bitops.h>
-
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-
-#include "fs_enet.h"
-
-/*************************************************/
-
-/*
- * Generic PHY support.
- * Should work for all PHYs, but link change is detected by polling
- */
-
-static void generic_timer_callback(unsigned long data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct fs_enet_private *fep = netdev_priv(dev);
-
- fep->phy_timer_list.expires = jiffies + HZ / 2;
-
- add_timer(&fep->phy_timer_list);
-
- fs_mii_link_status_change_check(dev, 0);
-}
-
-static void generic_startup(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */
- fep->phy_timer_list.data = (unsigned long)dev;
- fep->phy_timer_list.function = generic_timer_callback;
- add_timer(&fep->phy_timer_list);
-}
-
-static void generic_shutdown(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- del_timer_sync(&fep->phy_timer_list);
-}
-
-/* ------------------------------------------------------------------------- */
-/* The Davicom DM9161 is used on the NETTA board */
-
-/* register definitions */
-
-#define MII_DM9161_ANAR 4 /* Aux. Config Register */
-#define MII_DM9161_ACR 16 /* Aux. Config Register */
-#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */
-#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */
-#define MII_DM9161_INTR 21 /* Interrupt Register */
-#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. */
-#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */
-
-static void dm9161_startup(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000);
- /* Start autonegotiation */
- fs_mii_write(dev, fep->mii_if.phy_id, MII_BMCR, 0x1200);
-
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ*8);
-}
-
-static void dm9161_ack_int(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- fs_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR);
-}
-
-static void dm9161_shutdown(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00);
-}
-
-/**********************************************************************************/
-
-static const struct phy_info phy_info[] = {
- {
- .id = 0x00181b88,
- .name = "DM9161",
- .startup = dm9161_startup,
- .ack_int = dm9161_ack_int,
- .shutdown = dm9161_shutdown,
- }, {
- .id = 0,
- .name = "GENERIC",
- .startup = generic_startup,
- .shutdown = generic_shutdown,
- },
-};
-
-/**********************************************************************************/
-
-static int phy_id_detect(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
- struct fs_enet_mii_bus *bus = fep->mii_bus;
- int i, r, start, end, phytype, physubtype;
- const struct phy_info *phy;
- int phy_hwid, phy_id;
-
- phy_hwid = -1;
- fep->phy = NULL;
-
- /* auto-detect? */
- if (fpi->phy_addr == -1) {
- start = 1;
- end = 32;
- } else { /* direct */
- start = fpi->phy_addr;
- end = start + 1;
- }
-
- for (phy_id = start; phy_id < end; phy_id++) {
- /* skip already used phy addresses on this bus */
- if (bus->usage_map & (1 << phy_id))
- continue;
- r = fs_mii_read(dev, phy_id, MII_PHYSID1);
- if (r == -1 || (phytype = (r & 0xffff)) == 0xffff)
- continue;
- r = fs_mii_read(dev, phy_id, MII_PHYSID2);
- if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff)
- continue;
- phy_hwid = (phytype << 16) | physubtype;
- if (phy_hwid != -1)
- break;
- }
-
- if (phy_hwid == -1) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s No PHY detected! range=0x%02x-0x%02x\n",
- dev->name, start, end);
- return -1;
- }
-
- for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++)
- if (phy->id == (phy_hwid >> 4) || phy->id == 0)
- break;
-
- if (i >= ARRAY_SIZE(phy_info)) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s PHY id 0x%08x is not supported!\n",
- dev->name, phy_hwid);
- return -1;
- }
-
- fep->phy = phy;
-
- /* mark this address as used */
- bus->usage_map |= (1 << phy_id);
-
- printk(KERN_INFO DRV_MODULE_NAME
- ": %s Phy @ 0x%x, type %s (0x%08x)%s\n",
- dev->name, phy_id, fep->phy->name, phy_hwid,
- fpi->phy_addr == -1 ? " (auto-detected)" : "");
-
- return phy_id;
-}
-
-void fs_mii_startup(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- if (fep->phy->startup)
- (*fep->phy->startup) (dev);
-}
-
-void fs_mii_shutdown(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- if (fep->phy->shutdown)
- (*fep->phy->shutdown) (dev);
-}
-
-void fs_mii_ack_int(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- if (fep->phy->ack_int)
- (*fep->phy->ack_int) (dev);
-}
-
-#define MII_LINK 0x0001
-#define MII_HALF 0x0002
-#define MII_FULL 0x0004
-#define MII_BASE4 0x0008
-#define MII_10M 0x0010
-#define MII_100M 0x0020
-#define MII_1G 0x0040
-#define MII_10G 0x0080
-
-/* return full mii info at one gulp, with a usable form */
-static unsigned int mii_full_status(struct mii_if_info *mii)
-{
- unsigned int status;
- int bmsr, adv, lpa, neg;
- struct fs_enet_private* fep = netdev_priv(mii->dev);
-
- /* first, a dummy read, needed to latch some MII phys */
- (void)mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
- bmsr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
-
- /* no link */
- if ((bmsr & BMSR_LSTATUS) == 0)
- return 0;
-
- status = MII_LINK;
-
- /* Lets look what ANEG says if it's supported - otherwize we shall
- take the right values from the platform info*/
- if(!mii->force_media) {
- /* autoneg not completed; don't bother */
- if ((bmsr & BMSR_ANEGCOMPLETE) == 0)
- return 0;
-
- adv = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_ADVERTISE);
- lpa = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_LPA);
-
- neg = lpa & adv;
- } else {
- neg = fep->fpi->bus_info->lpa;
- }
-
- if (neg & LPA_100FULL)
- status |= MII_FULL | MII_100M;
- else if (neg & LPA_100BASE4)
- status |= MII_FULL | MII_BASE4 | MII_100M;
- else if (neg & LPA_100HALF)
- status |= MII_HALF | MII_100M;
- else if (neg & LPA_10FULL)
- status |= MII_FULL | MII_10M;
- else
- status |= MII_HALF | MII_10M;
-
- return status;
-}
-
-void fs_mii_link_status_change_check(struct net_device *dev, int init_media)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct mii_if_info *mii = &fep->mii_if;
- unsigned int mii_status;
- int ok_to_print, link, duplex, speed;
- unsigned long flags;
-
- ok_to_print = netif_msg_link(fep);
-
- mii_status = mii_full_status(mii);
-
- if (!init_media && mii_status == fep->last_mii_status)
- return;
-
- fep->last_mii_status = mii_status;
-
- link = !!(mii_status & MII_LINK);
- duplex = !!(mii_status & MII_FULL);
- speed = (mii_status & MII_100M) ? 100 : 10;
-
- if (link == 0) {
- netif_carrier_off(mii->dev);
- netif_stop_queue(dev);
- if (!init_media) {
- spin_lock_irqsave(&fep->lock, flags);
- (*fep->ops->stop)(dev);
- spin_unlock_irqrestore(&fep->lock, flags);
- }
-
- if (ok_to_print)
- printk(KERN_INFO "%s: link down\n", mii->dev->name);
-
- } else {
-
- mii->full_duplex = duplex;
-
- netif_carrier_on(mii->dev);
-
- spin_lock_irqsave(&fep->lock, flags);
- fep->duplex = duplex;
- fep->speed = speed;
- (*fep->ops->restart)(dev);
- spin_unlock_irqrestore(&fep->lock, flags);
-
- netif_start_queue(dev);
-
- if (ok_to_print)
- printk(KERN_INFO "%s: link up, %dMbps, %s-duplex\n",
- dev->name, speed, duplex ? "full" : "half");
- }
-}
-
-/**********************************************************************************/
-
-int fs_mii_read(struct net_device *dev, int phy_id, int location)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fs_enet_mii_bus *bus = fep->mii_bus;
-
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&bus->mii_lock, flags);
- ret = (*bus->mii_read)(bus, phy_id, location);
- spin_unlock_irqrestore(&bus->mii_lock, flags);
-
- return ret;
-}
-
-void fs_mii_write(struct net_device *dev, int phy_id, int location, int value)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fs_enet_mii_bus *bus = fep->mii_bus;
- unsigned long flags;
-
- spin_lock_irqsave(&bus->mii_lock, flags);
- (*bus->mii_write)(bus, phy_id, location, value);
- spin_unlock_irqrestore(&bus->mii_lock, flags);
-}
-
-/*****************************************************************************/
-
-/* list of all registered mii buses */
-static LIST_HEAD(fs_mii_bus_list);
-
-static struct fs_enet_mii_bus *lookup_bus(int method, int id)
-{
- struct list_head *ptr;
- struct fs_enet_mii_bus *bus;
-
- list_for_each(ptr, &fs_mii_bus_list) {
- bus = list_entry(ptr, struct fs_enet_mii_bus, list);
- if (bus->bus_info->method == method &&
- bus->bus_info->id == id)
- return bus;
- }
- return NULL;
-}
-
-static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi)
-{
- struct fs_enet_mii_bus *bus;
- int ret = 0;
-
- bus = kmalloc(sizeof(*bus), GFP_KERNEL);
- if (bus == NULL) {
- ret = -ENOMEM;
- goto err;
- }
- memset(bus, 0, sizeof(*bus));
- spin_lock_init(&bus->mii_lock);
- bus->bus_info = bi;
- bus->refs = 0;
- bus->usage_map = 0;
-
- /* perform initialization */
- switch (bi->method) {
-
- case fsmii_fixed:
- ret = fs_mii_fixed_init(bus);
- if (ret != 0)
- goto err;
- break;
-
- case fsmii_bitbang:
- ret = fs_mii_bitbang_init(bus);
- if (ret != 0)
- goto err;
- break;
-#ifdef CONFIG_FS_ENET_HAS_FEC
- case fsmii_fec:
- ret = fs_mii_fec_init(bus);
- if (ret != 0)
- goto err;
- break;
-#endif
- default:
- ret = -EINVAL;
- goto err;
- }
-
- list_add(&bus->list, &fs_mii_bus_list);
-
- return bus;
-
-err:
- kfree(bus);
- return ERR_PTR(ret);
-}
-
-static void destroy_bus(struct fs_enet_mii_bus *bus)
-{
- /* remove from bus list */
- list_del(&bus->list);
-
- /* nothing more needed */
- kfree(bus);
-}
-
-int fs_mii_connect(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
- struct fs_enet_mii_bus *bus = NULL;
-
- /* check method validity */
- switch (fpi->bus_info->method) {
- case fsmii_fixed:
- case fsmii_bitbang:
- break;
-#ifdef CONFIG_FS_ENET_HAS_FEC
- case fsmii_fec:
- break;
-#endif
- default:
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s Unknown MII bus method (%d)!\n",
- dev->name, fpi->bus_info->method);
- return -EINVAL;
- }
-
- bus = lookup_bus(fpi->bus_info->method, fpi->bus_info->id);
-
- /* if not found create new bus */
- if (bus == NULL) {
- bus = create_bus(fpi->bus_info);
- if (IS_ERR(bus)) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s MII bus creation failure!\n", dev->name);
- return PTR_ERR(bus);
- }
- }
-
- bus->refs++;
-
- fep->mii_bus = bus;
-
- fep->mii_if.dev = dev;
- fep->mii_if.phy_id_mask = 0x1f;
- fep->mii_if.reg_num_mask = 0x1f;
- fep->mii_if.mdio_read = fs_mii_read;
- fep->mii_if.mdio_write = fs_mii_write;
- fep->mii_if.force_media = fpi->bus_info->disable_aneg;
- fep->mii_if.phy_id = phy_id_detect(dev);
-
- return 0;
-}
-
-void fs_mii_disconnect(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fs_enet_mii_bus *bus = NULL;
-
- bus = fep->mii_bus;
- fep->mii_bus = NULL;
-
- if (--bus->refs <= 0)
- destroy_bus(bus);
-}
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
index e7ec96c..95022c005 100644
--- a/drivers/net/fs_enet/fs_enet.h
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -5,6 +5,7 @@
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/list.h>
+#include <linux/phy.h>
#include <linux/fs_enet_pd.h>
@@ -12,12 +13,30 @@
#ifdef CONFIG_CPM1
#include <asm/commproc.h>
+
+struct fec_info {
+ fec_t* fecp;
+ u32 mii_speed;
+};
#endif
#ifdef CONFIG_CPM2
#include <asm/cpm2.h>
#endif
+/* Used to operate on I/O pins.
+   Note that the actual port size may
+   differ; the CPM(s) handle it correctly. */
+struct bb_info {
+ u8 mdio_dat_msk;
+ u8 mdio_dir_msk;
+ u8 *mdio_dir;
+ u8 *mdio_dat;
+ u8 mdc_msk;
+ u8 *mdc_dat;
+ int delay;
+};
+
/* hw driver ops */
struct fs_ops {
int (*setup_data)(struct net_device *dev);
@@ -25,6 +44,7 @@ struct fs_ops {
void (*free_bd)(struct net_device *dev);
void (*cleanup_data)(struct net_device *dev);
void (*set_multicast_list)(struct net_device *dev);
+ void (*adjust_link)(struct net_device *dev);
void (*restart)(struct net_device *dev);
void (*stop)(struct net_device *dev);
void (*pre_request_irq)(struct net_device *dev, int irq);
@@ -100,10 +120,6 @@ struct fs_enet_mii_bus {
};
};
-int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus);
-int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
-int fs_mii_fec_init(struct fs_enet_mii_bus *bus);
-
struct fs_enet_private {
struct device *dev; /* pointer back to the device (must be initialized first) */
spinlock_t lock; /* during all ops except TX pckt processing */
@@ -130,7 +146,8 @@ struct fs_enet_private {
struct fs_enet_mii_bus *mii_bus;
int interrupt;
- int duplex, speed; /* current settings */
+ struct phy_device *phydev;
+ int oldduplex, oldspeed, oldlink; /* current settings */
/* event masks */
u32 ev_napi_rx; /* mask of NAPI rx events */
@@ -168,15 +185,9 @@ struct fs_enet_private {
};
/***************************************************************************/
-
-int fs_mii_read(struct net_device *dev, int phy_id, int location);
-void fs_mii_write(struct net_device *dev, int phy_id, int location, int value);
-
-void fs_mii_startup(struct net_device *dev);
-void fs_mii_shutdown(struct net_device *dev);
-void fs_mii_ack_int(struct net_device *dev);
-
-void fs_mii_link_status_change_check(struct net_device *dev, int init_media);
+int fs_enet_mdio_bb_init(void);
+int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
+int fs_enet_mdio_fec_init(void);
void fs_init_bds(struct net_device *dev);
void fs_cleanup_bds(struct net_device *dev);
@@ -194,7 +205,6 @@ int fs_enet_platform_init(void);
void fs_enet_platform_cleanup(void);
/***************************************************************************/
-
/* buffer descriptor access macros */
/* access macros */
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 64e2098..1ff2597 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -34,6 +34,7 @@
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
+#include <linux/phy.h>
#include <asm/immap_cpm2.h>
#include <asm/mpc8260.h>
@@ -122,22 +123,32 @@ static int do_pd_setup(struct fs_enet_private *fep)
/* Attach the memory for the FCC Parameter RAM */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram");
- fep->fcc.ep = (void *)r->start;
-
+ fep->fcc.ep = (void *)ioremap(r->start, r->end - r->start + 1);
if (fep->fcc.ep == NULL)
return -EINVAL;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs");
- fep->fcc.fccp = (void *)r->start;
-
+ fep->fcc.fccp = (void *)ioremap(r->start, r->end - r->start + 1);
if (fep->fcc.fccp == NULL)
return -EINVAL;
- fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c;
+ if (fep->fpi->fcc_regs_c) {
+
+ fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c;
+ } else {
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "fcc_regs_c");
+ fep->fcc.fcccp = (void *)ioremap(r->start,
+ r->end - r->start + 1);
+ }
if (fep->fcc.fcccp == NULL)
return -EINVAL;
+ fep->fcc.mem = (void *)fep->fpi->mem_offset;
+ if (fep->fcc.mem == NULL)
+ return -EINVAL;
+
return 0;
}
@@ -155,8 +166,6 @@ static int setup_data(struct net_device *dev)
if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */
return -EINVAL;
- fep->fcc.mem = (void *)fpi->mem_offset;
-
if (do_pd_setup(fep) != 0)
return -EINVAL;
@@ -394,7 +403,7 @@ static void restart(struct net_device *dev)
/* adjust to speed (for RMII mode) */
if (fpi->use_rmii) {
- if (fep->speed == 100)
+ if (fep->phydev->speed == 100)
C8(fcccp, fcc_gfemr, 0x20);
else
S8(fcccp, fcc_gfemr, 0x20);
@@ -420,7 +429,7 @@ static void restart(struct net_device *dev)
S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
/* adjust to duplex mode */
- if (fep->duplex)
+ if (fep->phydev->duplex)
S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
else
C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
@@ -486,7 +495,10 @@ static void rx_bd_done(struct net_device *dev)
static void tx_kickstart(struct net_device *dev)
{
- /* nothing */
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t *fccp = fep->fcc.fccp;
+
+ S32(fccp, fcc_ftodr, 0x80);
}
static u32 get_int_events(struct net_device *dev)
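
The mac-fcc.c changes above stop treating the platform resources as directly usable addresses: the FCC parameter RAM, the register block, and (when the board code does not pass fcc_regs_c) the companion register block are now ioremap()ed before use, and tx_kickstart() pokes the FTODR "transmit on demand" bit instead of doing nothing. Below is a small sketch of the resource-lookup-plus-ioremap step with the usual error check folded in; sketch_map_named() is a hypothetical helper, and only the resource-name convention comes from the hunk.

/* Sketch only -- hypothetical helper, not mac-fcc.c code. */
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <asm/io.h>

static void __iomem *sketch_map_named(struct platform_device *pdev,
				      const char *name)
{
	struct resource *r;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (r == NULL)
		return NULL;

	/* map the physical range rather than casting r->start directly */
	return ioremap(r->start, r->end - r->start + 1);
}

Something like sketch_map_named(pdev, "fcc_regs") would then stand in for the open-coded lookup-and-map pairs in do_pd_setup().
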
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index e095470..c2c5fd4 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -46,6 +46,7 @@
#endif
#include "fs_enet.h"
+#include "fec.h"
/*************************************************/
@@ -75,50 +76,8 @@
/* clear bits */
#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
-
-/* CRC polynomium used by the FEC for the multicast group filtering */
-#define FEC_CRC_POLY 0x04C11DB7
-
-#define FEC_MAX_MULTICAST_ADDRS 64
-
-/* Interrupt events/masks.
-*/
-#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
-#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
-#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
-#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
-#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
-#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
-#define FEC_ENET_RXF 0x02000000U /* Full frame received */
-#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
-#define FEC_ENET_MII 0x00800000U /* MII interrupt */
-#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
-
-#define FEC_ECNTRL_PINMUX 0x00000004
-#define FEC_ECNTRL_ETHER_EN 0x00000002
-#define FEC_ECNTRL_RESET 0x00000001
-
-#define FEC_RCNTRL_BC_REJ 0x00000010
-#define FEC_RCNTRL_PROM 0x00000008
-#define FEC_RCNTRL_MII_MODE 0x00000004
-#define FEC_RCNTRL_DRT 0x00000002
-#define FEC_RCNTRL_LOOP 0x00000001
-
-#define FEC_TCNTRL_FDEN 0x00000004
-#define FEC_TCNTRL_HBC 0x00000002
-#define FEC_TCNTRL_GTS 0x00000001
-
-
-/* Make MII read/write commands for the FEC.
-*/
-#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
-#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
-#define mk_mii_end 0
-
-#define FEC_MII_LOOPS 10000
-
/*
- * Delay to wait for FEC reset command to complete (in us)
+ * Delay to wait for FEC reset command to complete (in us)
*/
#define FEC_RESET_DELAY 50
@@ -303,13 +262,15 @@ static void restart(struct net_device *dev)
int r;
u32 addrhi, addrlo;
+ struct mii_bus* mii = fep->phydev->bus;
+ struct fec_info* fec_inf = mii->priv;
+
r = whack_reset(fep->fec.fecp);
if (r != 0)
printk(KERN_ERR DRV_MODULE_NAME
": %s FEC Reset FAILED!\n", dev->name);
-
/*
- * Set station address.
+ * Set station address.
*/
addrhi = ((u32) dev->dev_addr[0] << 24) |
((u32) dev->dev_addr[1] << 16) |
@@ -350,12 +311,12 @@ static void restart(struct net_device *dev)
FW(fecp, fun_code, 0x78000000);
/*
- * Set MII speed.
+ * Set MII speed.
*/
- FW(fecp, mii_speed, fep->mii_bus->fec.mii_speed);
+ FW(fecp, mii_speed, fec_inf->mii_speed);
/*
- * Clear any outstanding interrupt.
+ * Clear any outstanding interrupt.
*/
FW(fecp, ievent, 0xffc0);
FW(fecp, ivec, (fep->interrupt / 2) << 29);
@@ -390,11 +351,12 @@ static void restart(struct net_device *dev)
}
#endif
+
FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
/*
- * adjust to duplex mode
+ * adjust to duplex mode
*/
- if (fep->duplex) {
+ if (fep->phydev->duplex) {
FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
} else {
@@ -418,9 +380,11 @@ static void restart(struct net_device *dev)
static void stop(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
+ const struct fs_platform_info *fpi = fep->fpi;
fec_t *fecp = fep->fec.fecp;
- struct fs_enet_mii_bus *bus = fep->mii_bus;
- const struct fs_mii_bus_info *bi = bus->bus_info;
+
+ struct fec_info* feci= fep->phydev->bus->priv;
+
int i;
if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
@@ -444,11 +408,11 @@ static void stop(struct net_device *dev)
fs_cleanup_bds(dev);
/* shut down FEC1? that's where the mii bus is */
- if (fep->fec.idx == 0 && bus->refs > 1 && bi->method == fsmii_fec) {
+ if (fpi->has_phy) {
FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
FW(fecp, ievent, FEC_ENET_MII);
- FW(fecp, mii_speed, bus->fec.mii_speed);
+ FW(fecp, mii_speed, feci->mii_speed);
}
}
@@ -583,73 +547,3 @@ const struct fs_ops fs_fec_ops = {
.free_bd = free_bd,
};
-/***********************************************************************/
-
-static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
-{
- fec_t *fecp = bus->fec.fecp;
- int i, ret = -1;
-
- if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
- BUG();
-
- /* Add PHY address to register command. */
- FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location));
-
- for (i = 0; i < FEC_MII_LOOPS; i++)
- if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
- break;
-
- if (i < FEC_MII_LOOPS) {
- FW(fecp, ievent, FEC_ENET_MII);
- ret = FR(fecp, mii_data) & 0xffff;
- }
-
- return ret;
-}
-
-static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int value)
-{
- fec_t *fecp = bus->fec.fecp;
- int i;
-
- /* this must never happen */
- if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
- BUG();
-
- /* Add PHY address to register command. */
- FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value));
-
- for (i = 0; i < FEC_MII_LOOPS; i++)
- if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
- break;
-
- if (i < FEC_MII_LOOPS)
- FW(fecp, ievent, FEC_ENET_MII);
-}
-
-int fs_mii_fec_init(struct fs_enet_mii_bus *bus)
-{
- bd_t *bd = (bd_t *)__res;
- const struct fs_mii_bus_info *bi = bus->bus_info;
- fec_t *fecp;
-
- if (bi->id != 0)
- return -1;
-
- bus->fec.fecp = &((immap_t *)fs_enet_immap)->im_cpm.cp_fec;
- bus->fec.mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2)
- & 0x3F) << 1;
-
- fecp = bus->fec.fecp;
-
- FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
- FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
- FW(fecp, ievent, FEC_ENET_MII);
- FW(fecp, mii_speed, bus->fec.mii_speed);
-
- bus->mii_read = mii_read;
- bus->mii_write = mii_write;
-
- return 0;
-}
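
With the MII code gone from mac-fec.c, the MII_SPEED divider now comes from the struct fec_info hung off the phylib bus (mii->priv) and is filled in by the new mii-fec driver; the removed fs_mii_fec_init() above shows the original arithmetic. As a reminder of what that value is, here is the same calculation as a standalone hypothetical helper: it picks the divider that keeps MDC at or below 2.5 MHz for a given internal bus frequency.

#include <linux/types.h>

/* Sketch only -- same arithmetic as the removed fs_mii_fec_init(). */
static u32 sketch_fec_mii_speed(u32 bus_freq_hz)
{
	/* round the bus clock up to a 2.5 MHz multiple, halve it,
	 * clamp to the 6-bit field and shift into MII_SPEED position */
	return ((((bus_freq_hz + 4999999) / 2500000) / 2) & 0x3F) << 1;
}
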
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index eaa24fa..95ec587 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -369,7 +369,7 @@ static void restart(struct net_device *dev)
W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
/* Set full duplex mode if needed */
- if (fep->duplex)
+ if (fep->phydev->duplex)
S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
@@ -500,6 +500,8 @@ static void tx_restart(struct net_device *dev)
scc_cr_cmd(fep, CPM_CR_RESTART_TX);
}
+
+
/*************************************************************************/
const struct fs_ops fs_scc_ops = {
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 48f9cf8..0b9b8b5 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -33,6 +33,7 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
+#include <linux/platform_device.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
@@ -40,129 +41,25 @@
#include "fs_enet.h"
-#ifdef CONFIG_8xx
-static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
+static int bitbang_prep_bit(u8 **datp, u8 *mskp,
+ struct fs_mii_bit *mii_bit)
{
- immap_t *im = (immap_t *)fs_enet_immap;
- void *dir, *dat, *ppar;
+ void *dat;
int adv;
u8 msk;
- switch (port) {
- case fsiop_porta:
- dir = &im->im_ioport.iop_padir;
- dat = &im->im_ioport.iop_padat;
- ppar = &im->im_ioport.iop_papar;
- break;
-
- case fsiop_portb:
- dir = &im->im_cpm.cp_pbdir;
- dat = &im->im_cpm.cp_pbdat;
- ppar = &im->im_cpm.cp_pbpar;
- break;
-
- case fsiop_portc:
- dir = &im->im_ioport.iop_pcdir;
- dat = &im->im_ioport.iop_pcdat;
- ppar = &im->im_ioport.iop_pcpar;
- break;
-
- case fsiop_portd:
- dir = &im->im_ioport.iop_pddir;
- dat = &im->im_ioport.iop_pddat;
- ppar = &im->im_ioport.iop_pdpar;
- break;
-
- case fsiop_porte:
- dir = &im->im_cpm.cp_pedir;
- dat = &im->im_cpm.cp_pedat;
- ppar = &im->im_cpm.cp_pepar;
- break;
-
- default:
- printk(KERN_ERR DRV_MODULE_NAME
- "Illegal port value %d!\n", port);
- return -EINVAL;
- }
-
- adv = bit >> 3;
- dir = (char *)dir + adv;
- dat = (char *)dat + adv;
- ppar = (char *)ppar + adv;
-
- msk = 1 << (7 - (bit & 7));
- if ((in_8(ppar) & msk) != 0) {
- printk(KERN_ERR DRV_MODULE_NAME
- "pin %d on port %d is not general purpose!\n", bit, port);
- return -EINVAL;
- }
-
- *dirp = dir;
- *datp = dat;
- *mskp = msk;
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_8260
-static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
-{
- iop_cpm2_t *io = &((cpm2_map_t *)fs_enet_immap)->im_ioport;
- void *dir, *dat, *ppar;
- int adv;
- u8 msk;
-
- switch (port) {
- case fsiop_porta:
- dir = &io->iop_pdira;
- dat = &io->iop_pdata;
- ppar = &io->iop_ppara;
- break;
-
- case fsiop_portb:
- dir = &io->iop_pdirb;
- dat = &io->iop_pdatb;
- ppar = &io->iop_pparb;
- break;
-
- case fsiop_portc:
- dir = &io->iop_pdirc;
- dat = &io->iop_pdatc;
- ppar = &io->iop_pparc;
- break;
-
- case fsiop_portd:
- dir = &io->iop_pdird;
- dat = &io->iop_pdatd;
- ppar = &io->iop_ppard;
- break;
-
- default:
- printk(KERN_ERR DRV_MODULE_NAME
- "Illegal port value %d!\n", port);
- return -EINVAL;
- }
+ dat = (void*) mii_bit->offset;
- adv = bit >> 3;
- dir = (char *)dir + adv;
+ adv = mii_bit->bit >> 3;
dat = (char *)dat + adv;
- ppar = (char *)ppar + adv;
- msk = 1 << (7 - (bit & 7));
- if ((in_8(ppar) & msk) != 0) {
- printk(KERN_ERR DRV_MODULE_NAME
- "pin %d on port %d is not general purpose!\n", bit, port);
- return -EINVAL;
- }
+ msk = 1 << (7 - (mii_bit->bit & 7));
- *dirp = dir;
*datp = dat;
*mskp = msk;
return 0;
}
-#endif
static inline void bb_set(u8 *p, u8 m)
{
@@ -179,44 +76,44 @@ static inline int bb_read(u8 *p, u8 m)
return (in_8(p) & m) != 0;
}
-static inline void mdio_active(struct fs_enet_mii_bus *bus)
+static inline void mdio_active(struct bb_info *bitbang)
{
- bb_set(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk);
+ bb_set(bitbang->mdio_dir, bitbang->mdio_dir_msk);
}
-static inline void mdio_tristate(struct fs_enet_mii_bus *bus)
+static inline void mdio_tristate(struct bb_info *bitbang )
{
- bb_clr(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk);
+ bb_clr(bitbang->mdio_dir, bitbang->mdio_dir_msk);
}
-static inline int mdio_read(struct fs_enet_mii_bus *bus)
+static inline int mdio_read(struct bb_info *bitbang )
{
- return bb_read(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
+ return bb_read(bitbang->mdio_dat, bitbang->mdio_dat_msk);
}
-static inline void mdio(struct fs_enet_mii_bus *bus, int what)
+static inline void mdio(struct bb_info *bitbang , int what)
{
if (what)
- bb_set(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
+ bb_set(bitbang->mdio_dat, bitbang->mdio_dat_msk);
else
- bb_clr(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
+ bb_clr(bitbang->mdio_dat, bitbang->mdio_dat_msk);
}
-static inline void mdc(struct fs_enet_mii_bus *bus, int what)
+static inline void mdc(struct bb_info *bitbang , int what)
{
if (what)
- bb_set(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk);
+ bb_set(bitbang->mdc_dat, bitbang->mdc_msk);
else
- bb_clr(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk);
+ bb_clr(bitbang->mdc_dat, bitbang->mdc_msk);
}
-static inline void mii_delay(struct fs_enet_mii_bus *bus)
+static inline void mii_delay(struct bb_info *bitbang )
{
- udelay(bus->bus_info->i.bitbang.delay);
+ udelay(bitbang->delay);
}
/* Utility to send the preamble, address, and register (common to read and write). */
-static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg)
+static void bitbang_pre(struct bb_info *bitbang , int read, u8 addr, u8 reg)
{
int j;
@@ -228,177 +125,284 @@ static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg)
* but it is safer and will be much more robust.
*/
- mdio_active(bus);
- mdio(bus, 1);
+ mdio_active(bitbang);
+ mdio(bitbang, 1);
for (j = 0; j < 32; j++) {
- mdc(bus, 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
}
/* send the start bit (01) and the read opcode (10) or write (10) */
- mdc(bus, 0);
- mdio(bus, 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
- mdc(bus, 0);
- mdio(bus, 1);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
- mdc(bus, 0);
- mdio(bus, read);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
- mdc(bus, 0);
- mdio(bus, !read);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mdio(bitbang, 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
+ mdc(bitbang, 0);
+ mdio(bitbang, 1);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
+ mdc(bitbang, 0);
+ mdio(bitbang, read);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
+ mdc(bitbang, 0);
+ mdio(bitbang, !read);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
/* send the PHY address */
for (j = 0; j < 5; j++) {
- mdc(bus, 0);
- mdio(bus, (addr & 0x10) != 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mdio(bitbang, (addr & 0x10) != 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
addr <<= 1;
}
/* send the register address */
for (j = 0; j < 5; j++) {
- mdc(bus, 0);
- mdio(bus, (reg & 0x10) != 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mdio(bitbang, (reg & 0x10) != 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
reg <<= 1;
}
}
-static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
+static int fs_enet_mii_bb_read(struct mii_bus *bus , int phy_id, int location)
{
u16 rdreg;
int ret, j;
u8 addr = phy_id & 0xff;
u8 reg = location & 0xff;
+ struct bb_info* bitbang = bus->priv;
- bitbang_pre(bus, 1, addr, reg);
+ bitbang_pre(bitbang, 1, addr, reg);
/* tri-state our MDIO I/O pin so we can read */
- mdc(bus, 0);
- mdio_tristate(bus);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mdio_tristate(bitbang);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
/* check the turnaround bit: the PHY should be driving it to zero */
- if (mdio_read(bus) != 0) {
+ if (mdio_read(bitbang) != 0) {
/* PHY didn't drive TA low */
for (j = 0; j < 32; j++) {
- mdc(bus, 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
}
ret = -1;
goto out;
}
- mdc(bus, 0);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mii_delay(bitbang);
/* read 16 bits of register data, MSB first */
rdreg = 0;
for (j = 0; j < 16; j++) {
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
rdreg <<= 1;
- rdreg |= mdio_read(bus);
- mdc(bus, 0);
- mii_delay(bus);
+ rdreg |= mdio_read(bitbang);
+ mdc(bitbang, 0);
+ mii_delay(bitbang);
}
- mdc(bus, 1);
- mii_delay(bus);
- mdc(bus, 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
+ mdc(bitbang, 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
ret = rdreg;
out:
return ret;
}
-static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
+static int fs_enet_mii_bb_write(struct mii_bus *bus, int phy_id, int location, u16 val)
{
int j;
+ struct bb_info* bitbang = bus->priv;
+
u8 addr = phy_id & 0xff;
u8 reg = location & 0xff;
u16 value = val & 0xffff;
- bitbang_pre(bus, 0, addr, reg);
+ bitbang_pre(bitbang, 0, addr, reg);
/* send the turnaround (10) */
- mdc(bus, 0);
- mdio(bus, 1);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
- mdc(bus, 0);
- mdio(bus, 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mdio(bitbang, 1);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
+ mdc(bitbang, 0);
+ mdio(bitbang, 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
/* write 16 bits of register data, MSB first */
for (j = 0; j < 16; j++) {
- mdc(bus, 0);
- mdio(bus, (value & 0x8000) != 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdc(bitbang, 0);
+ mdio(bitbang, (value & 0x8000) != 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
value <<= 1;
}
/*
* Tri-state the MDIO line.
*/
- mdio_tristate(bus);
- mdc(bus, 0);
- mii_delay(bus);
- mdc(bus, 1);
- mii_delay(bus);
+ mdio_tristate(bitbang);
+ mdc(bitbang, 0);
+ mii_delay(bitbang);
+ mdc(bitbang, 1);
+ mii_delay(bitbang);
+ return 0;
}
-int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus)
+static int fs_enet_mii_bb_reset(struct mii_bus *bus)
+{
+	/* nothing here - no way to reset this bus */
+ return 0;
+}
+
+static int fs_mii_bitbang_init(struct bb_info *bitbang, struct fs_mii_bb_platform_info* fmpi)
{
- const struct fs_mii_bus_info *bi = bus->bus_info;
int r;
- r = bitbang_prep_bit(&bus->bitbang.mdio_dir,
- &bus->bitbang.mdio_dat,
- &bus->bitbang.mdio_msk,
- bi->i.bitbang.mdio_port,
- bi->i.bitbang.mdio_bit);
+ bitbang->delay = fmpi->delay;
+
+ r = bitbang_prep_bit(&bitbang->mdio_dir,
+ &bitbang->mdio_dir_msk,
+ &fmpi->mdio_dir);
if (r != 0)
return r;
- r = bitbang_prep_bit(&bus->bitbang.mdc_dir,
- &bus->bitbang.mdc_dat,
- &bus->bitbang.mdc_msk,
- bi->i.bitbang.mdc_port,
- bi->i.bitbang.mdc_bit);
+ r = bitbang_prep_bit(&bitbang->mdio_dat,
+ &bitbang->mdio_dat_msk,
+ &fmpi->mdio_dat);
if (r != 0)
return r;
- bus->mii_read = mii_read;
- bus->mii_write = mii_write;
+ r = bitbang_prep_bit(&bitbang->mdc_dat,
+ &bitbang->mdc_msk,
+ &fmpi->mdc_dat);
+ if (r != 0)
+ return r;
return 0;
}
+
+
+static int __devinit fs_enet_mdio_probe(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fs_mii_bb_platform_info *pdata;
+ struct mii_bus *new_bus;
+ struct bb_info *bitbang;
+ int err = 0;
+
+ if (NULL == dev)
+ return -EINVAL;
+
+ new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
+
+ if (NULL == new_bus)
+ return -ENOMEM;
+
+ bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
+
+	if (NULL == bitbang) {
+		kfree(new_bus);
+		return -ENOMEM;
+	}
+
+	new_bus->name = "BB MII Bus";
+	new_bus->read = &fs_enet_mii_bb_read;
+	new_bus->write = &fs_enet_mii_bb_write;
+	new_bus->reset = &fs_enet_mii_bb_reset;
+ new_bus->id = pdev->id;
+
+ new_bus->phy_mask = ~0x9;
+ pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data;
+
+	if (NULL == pdata) {
+		printk(KERN_ERR "fs_enet BB mdio %d: Missing platform data!\n", pdev->id);
+		kfree(bitbang);
+		kfree(new_bus);
+		return -ENODEV;
+	}
+
+ /*set up workspace*/
+ fs_mii_bitbang_init(bitbang, pdata);
+
+ new_bus->priv = bitbang;
+
+ new_bus->irq = pdata->irq;
+
+ new_bus->dev = dev;
+ dev_set_drvdata(dev, new_bus);
+
+ err = mdiobus_register(new_bus);
+
+ if (0 != err) {
+ printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
+ new_bus->name);
+ goto bus_register_fail;
+ }
+
+ return 0;
+
+bus_register_fail:
+ kfree(bitbang);
+ kfree(new_bus);
+
+ return err;
+}
+
+
+static int fs_enet_mdio_remove(struct device *dev)
+{
+ struct mii_bus *bus = dev_get_drvdata(dev);
+
+ mdiobus_unregister(bus);
+
+ dev_set_drvdata(dev, NULL);
+
+	kfree(bus->priv);
+	bus->priv = NULL;
+ kfree(bus);
+
+ return 0;
+}
+
+static struct device_driver fs_enet_bb_mdio_driver = {
+ .name = "fsl-bb-mdio",
+ .bus = &platform_bus_type,
+ .probe = fs_enet_mdio_probe,
+ .remove = fs_enet_mdio_remove,
+};
+
+int fs_enet_mdio_bb_init(void)
+{
+ return driver_register(&fs_enet_bb_mdio_driver);
+}
+
+void fs_enet_mdio_bb_exit(void)
+{
+ driver_unregister(&fs_enet_bb_mdio_driver);
+}
+
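
For reference, the waveform that bitbang_pre() and the read/write paths above clock out is the standard clause-22 MDIO management frame: a 32-bit preamble of ones, start bits 01, a 2-bit opcode (10 for read, 01 for write), the 5-bit PHY address, the 5-bit register address, a turnaround, then 16 data bits. The stand-alone sketch below is illustrative only and not part of this driver (the helper name is made up); it packs the post-preamble portion into one 32-bit word, which is the same layout the FEC-based MDIO code further down writes to hardware in a single register access.

/*
 * Illustration only: the clause-22 MDIO frame that the bit-bang code
 * shifts out one bit at a time, packed into a single 32-bit word:
 * ST(01) OP(10=read, 01=write) PHYAD[4:0] REGAD[4:0] TA DATA[15:0].
 * The 32 preamble '1' bits are sent separately before this word.
 */
#include <stdint.h>

static uint32_t mdio_c22_frame(int read, uint8_t phy, uint8_t reg, uint16_t data)
{
	uint32_t frame;

	frame  = 0x1u << 30;			/* start bits 01 */
	frame |= (read ? 0x2u : 0x1u) << 28;	/* opcode: 10 read, 01 write */
	frame |= (uint32_t)(phy & 0x1f) << 23;	/* PHY address */
	frame |= (uint32_t)(reg & 0x1f) << 18;	/* register address */
	frame |= 0x2u << 16;			/* turnaround (driven only on write) */
	frame |= data;				/* 16 data bits (write only) */
	return frame;
}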
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
new file mode 100644
index 0000000..1328e10
--- /dev/null
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -0,0 +1,243 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "fs_enet.h"
+#include "fec.h"
+
+/* Make MII read/write commands for the FEC.
+*/
+#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
+#define mk_mii_end 0
+
+#define FEC_MII_LOOPS 10000
+
+static int match_has_phy(struct device *dev, void *data)
+{
+	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+	struct fs_platform_info *fpi;
+
+	if (strcmp(pdev->name, (char *)data))
+		return 0;
+
+	fpi = pdev->dev.platform_data;
+	if (fpi && fpi->has_phy)
+		return 1;
+	return 0;
+}
+
+static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info *fmpi)
+{
+ struct resource *r;
+ fec_t *fecp;
+ char* name = "fsl-cpm-fec";
+
+ /* we need fec in order to be useful */
+ struct platform_device *fec_pdev =
+ container_of(bus_find_device(&platform_bus_type, NULL, name, match_has_phy),
+ struct platform_device, dev);
+
+	if (fec_pdev == NULL) {
+		printk(KERN_ERR "Unable to find PHY for %s\n", name);
+		return -ENODEV;
+	}
+
+ r = platform_get_resource_byname(fec_pdev, IORESOURCE_MEM, "regs");
+
+ fec->fecp = fecp = (fec_t*)ioremap(r->start,sizeof(fec_t));
+ fec->mii_speed = fmpi->mii_speed;
+
+ setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+ setbits32(&fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
+ out_be32(&fecp->fec_ievent, FEC_ENET_MII);
+ out_be32(&fecp->fec_mii_speed, fec->mii_speed);
+
+ return 0;
+}
+
+static int fs_enet_fec_mii_read(struct mii_bus *bus, int phy_id, int location)
+{
+ struct fec_info* fec = bus->priv;
+ fec_t *fecp = fec->fecp;
+ int i, ret = -1;
+
+ if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
+ BUG();
+
+ /* Add PHY address to register command. */
+ out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location));
+
+ for (i = 0; i < FEC_MII_LOOPS; i++)
+ if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
+ break;
+
+ if (i < FEC_MII_LOOPS) {
+ out_be32(&fecp->fec_ievent, FEC_ENET_MII);
+ ret = in_be32(&fecp->fec_mii_data) & 0xffff;
+ }
+
+ return ret;
+
+}
+
+static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
+{
+ struct fec_info* fec = bus->priv;
+ fec_t *fecp = fec->fecp;
+ int i;
+
+ /* this must never happen */
+ if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
+ BUG();
+
+ /* Add PHY address to register command. */
+ out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val));
+
+ for (i = 0; i < FEC_MII_LOOPS; i++)
+ if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
+ break;
+
+ if (i < FEC_MII_LOOPS)
+ out_be32(&fecp->fec_ievent, FEC_ENET_MII);
+
+ return 0;
+
+}
+
+static int fs_enet_fec_mii_reset(struct mii_bus *bus)
+{
+ /* nothing here - for now */
+ return 0;
+}
+
+static int __devinit fs_enet_fec_mdio_probe(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fs_mii_fec_platform_info *pdata;
+ struct mii_bus *new_bus;
+ struct fec_info *fec;
+ int err = 0;
+ if (NULL == dev)
+ return -EINVAL;
+ new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
+
+ if (NULL == new_bus)
+ return -ENOMEM;
+
+ fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL);
+
+	if (NULL == fec) {
+		kfree(new_bus);
+		return -ENOMEM;
+	}
+
+	new_bus->name = "FEC MII Bus";
+	new_bus->read = &fs_enet_fec_mii_read;
+	new_bus->write = &fs_enet_fec_mii_write;
+	new_bus->reset = &fs_enet_fec_mii_reset;
+ new_bus->id = pdev->id;
+
+ pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data;
+
+	if (NULL == pdata) {
+		printk(KERN_ERR "fs_enet FEC mdio %d: Missing platform data!\n", pdev->id);
+		kfree(fec);
+		kfree(new_bus);
+		return -ENODEV;
+	}
+
+ /*set up workspace*/
+
+ fs_mii_fec_init(fec, pdata);
+ new_bus->priv = fec;
+
+ new_bus->irq = pdata->irq;
+
+ new_bus->dev = dev;
+ dev_set_drvdata(dev, new_bus);
+
+ err = mdiobus_register(new_bus);
+
+ if (0 != err) {
+ printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
+ new_bus->name);
+ goto bus_register_fail;
+ }
+
+ return 0;
+
+bus_register_fail:
+ kfree(new_bus);
+
+ return err;
+}
+
+
+static int fs_enet_fec_mdio_remove(struct device *dev)
+{
+ struct mii_bus *bus = dev_get_drvdata(dev);
+
+ mdiobus_unregister(bus);
+
+ dev_set_drvdata(dev, NULL);
+ kfree(bus->priv);
+
+ bus->priv = NULL;
+ kfree(bus);
+
+ return 0;
+}
+
+static struct device_driver fs_enet_fec_mdio_driver = {
+ .name = "fsl-cpm-fec-mdio",
+ .bus = &platform_bus_type,
+ .probe = fs_enet_fec_mdio_probe,
+ .remove = fs_enet_fec_mdio_remove,
+};
+
+int fs_enet_mdio_fec_init(void)
+{
+ return driver_register(&fs_enet_fec_mdio_driver);
+}
+
+void fs_enet_mdio_fec_exit(void)
+{
+ driver_unregister(&fs_enet_fec_mdio_driver);
+}
+
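
The mk_mii_read()/mk_mii_write() macros above encode that same clause-22 frame directly: 0x60020000 is start 01, opcode 10 and turnaround 10, the register number lands in bits 22:18, and fs_enet_fec_mii_read() ORs the PHY address into bits 27:23 before writing the word to fec_mii_data. The small user-space decoder below is purely illustrative (the function names are invented) and just makes the field layout explicit.

/*
 * Illustration only: decode the 32-bit command word that the FEC's MII
 * data register accepts, as built by mk_mii_read()/mk_mii_write().
 */
#include <stdint.h>
#include <stdio.h>

static void fec_mii_decode(uint32_t cmd)
{
	printf("st=%u op=%u phy=%u reg=%u ta=%u data=0x%04x\n",
	       (cmd >> 30) & 0x3,	/* start bits, always 01 */
	       (cmd >> 28) & 0x3,	/* 10 = read, 01 = write */
	       (cmd >> 23) & 0x1f,	/* PHY address */
	       (cmd >> 18) & 0x1f,	/* register address */
	       (cmd >> 16) & 0x3,	/* turnaround, 10 */
	       cmd & 0xffff);		/* data (write) / don't care (read) */
}

int main(void)
{
	/* e.g. phy_id 0, BMSR (register 1): (0 << 23) | mk_mii_read(1) */
	fec_mii_decode((0u << 23) | 0x60020000u | (1u << 18));
	return 0;
}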
diff --git a/drivers/net/fs_enet/mii-fixed.c b/drivers/net/fs_enet/mii-fixed.c
deleted file mode 100644
index ae4a9c3..0000000
--- a/drivers/net/fs_enet/mii-fixed.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
- *
- * Copyright (c) 2003 Intracom S.A.
- * by Pantelis Antoniou <panto@intracom.gr>
- *
- * 2005 (c) MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/bitops.h>
-
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-
-#include "fs_enet.h"
-
-static const u16 mii_regs[7] = {
- 0x3100,
- 0x786d,
- 0x0fff,
- 0x0fff,
- 0x01e1,
- 0x45e1,
- 0x0003,
-};
-
-static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
-{
- int ret = 0;
-
- if ((unsigned int)location >= ARRAY_SIZE(mii_regs))
- return -1;
-
- if (location != 5)
- ret = mii_regs[location];
- else
- ret = bus->fixed.lpa;
-
- return ret;
-}
-
-static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
-{
- /* do nothing */
-}
-
-int fs_mii_fixed_init(struct fs_enet_mii_bus *bus)
-{
- const struct fs_mii_bus_info *bi = bus->bus_info;
-
- bus->fixed.lpa = 0x45e1; /* default 100Mb, full duplex */
-
- /* if speed is fixed at 10Mb, remove 100Mb modes */
- if (bi->i.fixed.speed == 10)
- bus->fixed.lpa &= ~LPA_100;
-
- /* if duplex is half, remove full duplex modes */
- if (bi->i.fixed.duplex == 0)
- bus->fixed.lpa &= ~LPA_DUPLEX;
-
- bus->mii_read = mii_read;
- bus->mii_write = mii_write;
-
- return 0;
-}
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index c1c3452..5b4dbfe5 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -326,7 +326,7 @@ MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
-int init_module(void)
+int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 646e89f..c0ec7f6 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -406,7 +406,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)");
MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver");
MODULE_LICENSE("GPL");
-int init_module(void)
+int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index fa854c8..4d52ecf 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -1323,7 +1323,7 @@ MODULE_PARM_DESC(irq, "NI5210 IRQ number,required");
MODULE_PARM_DESC(memstart, "NI5210 memory base address,required");
MODULE_PARM_DESC(memend, "NI5210 memory end address,required");
-int init_module(void)
+int __init init_module(void)
{
if(io <= 0x0 || !memend || !memstart || irq < 2) {
printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n");
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index bb42ff2..810cc57 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -1253,7 +1253,7 @@ MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
MODULE_PARM_DESC(io, "ni6510 I/O base address");
MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
-int init_module(void)
+int __init init_module(void)
{
dev_ni65 = ni65_probe(-1);
return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 9bae77c..4122bb4 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -345,6 +345,7 @@ typedef struct local_info_t {
void __iomem *dingo_ccr; /* only used for CEM56 cards */
unsigned last_ptr_value; /* last packets transmitted value */
const char *manf_str;
+ struct work_struct tx_timeout_task;
} local_info_t;
/****************
@@ -352,6 +353,7 @@ typedef struct local_info_t {
*/
static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void do_tx_timeout(struct net_device *dev);
+static void xirc2ps_tx_timeout_task(void *data);
static struct net_device_stats *do_get_stats(struct net_device *dev);
static void set_addresses(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
@@ -589,6 +591,7 @@ xirc2ps_probe(struct pcmcia_device *link)
#ifdef HAVE_TX_TIMEOUT
dev->tx_timeout = do_tx_timeout;
dev->watchdog_timeo = TX_TIMEOUT;
+ INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev);
#endif
return xirc2ps_config(link);
@@ -1341,17 +1344,24 @@ xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/*====================================================================*/
static void
-do_tx_timeout(struct net_device *dev)
+xirc2ps_tx_timeout_task(void *data)
{
- local_info_t *lp = netdev_priv(dev);
- printk(KERN_NOTICE "%s: transmit timed out\n", dev->name);
- lp->stats.tx_errors++;
+ struct net_device *dev = data;
/* reset the card */
do_reset(dev,1);
dev->trans_start = jiffies;
netif_wake_queue(dev);
}
+static void
+do_tx_timeout(struct net_device *dev)
+{
+ local_info_t *lp = netdev_priv(dev);
+ lp->stats.tx_errors++;
+ printk(KERN_NOTICE "%s: transmit timed out\n", dev->name);
+ schedule_work(&lp->tx_timeout_task);
+}
+
static int
do_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 4daafe3..d50bcb8 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -202,6 +202,8 @@ static int homepna[MAX_UNITS];
#define CSR15 15
#define PCNET32_MC_FILTER 8
+#define PCNET32_79C970A 0x2621
+
/* The PCNET32 Rx and Tx ring descriptors. */
struct pcnet32_rx_head {
u32 base;
@@ -289,6 +291,7 @@ struct pcnet32_private {
/* each bit indicates an available PHY */
u32 phymask;
+ unsigned short chip_version; /* which variant this is */
};
static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
@@ -724,9 +727,11 @@ static u32 pcnet32_get_link(struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
if (lp->mii) {
r = mii_link_ok(&lp->mii_if);
- } else {
+ } else if (lp->chip_version >= PCNET32_79C970A) {
ulong ioaddr = dev->base_addr; /* card base I/O address */
r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
+ } else { /* can not detect link on really old chips */
+ r = 1;
}
spin_unlock_irqrestore(&lp->lock, flags);
@@ -1091,6 +1096,10 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
ulong ioaddr = dev->base_addr;
int ticks;
+ /* really old chips have to be stopped. */
+ if (lp->chip_version < PCNET32_79C970A)
+ return 0;
+
/* set SUSPEND (SPND) - CSR5 bit 0 */
csr5 = a->read_csr(ioaddr, CSR5);
a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
@@ -1529,6 +1538,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
lp->mii_if.reg_num_mask = 0x1f;
lp->dxsuflo = dxsuflo;
lp->mii = mii;
+ lp->chip_version = chip_version;
lp->msg_enable = pcnet32_debug;
if ((cards_found >= MAX_UNITS)
|| (options[cards_found] > sizeof(options_mapping)))
@@ -1839,10 +1849,7 @@ static int pcnet32_open(struct net_device *dev)
val |= 2;
} else if (lp->options & PCNET32_PORT_ASEL) {
/* workaround of xSeries250, turn on for 79C975 only */
- i = ((lp->a.read_csr(ioaddr, 88) |
- (lp->a.
- read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff;
- if (i == 0x2627)
+ if (lp->chip_version == 0x2627)
val |= 3;
}
lp->a.write_bcr(ioaddr, 9, val);
@@ -1986,9 +1993,11 @@ static int pcnet32_open(struct net_device *dev)
netif_start_queue(dev);
- /* Print the link status and start the watchdog */
- pcnet32_check_media(dev, 1);
- mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
+ if (lp->chip_version >= PCNET32_79C970A) {
+ /* Print the link status and start the watchdog */
+ pcnet32_check_media(dev, 1);
+ mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
+ }
i = 0;
while (i++ < 100)
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 2ba6d3a..b79ec0d 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -56,5 +56,22 @@ config SMSC_PHY
---help---
Currently supports the LAN83C185 PHY
+config FIXED_PHY
+ tristate "Drivers for PHY emulation on fixed speed/link"
+ depends on PHYLIB
+ ---help---
+	  Adds a driver to the PHY layer to cover boards that do not have any
+	  real PHY attached but can still control speed/link in software. The
+	  relevant MII speed/duplex parameters can be handled by a
+	  user-specified function. Currently tested with mpc866ads.
+
+config FIXED_MII_10_FDX
+ bool "Emulation for 10M Fdx fixed PHY behavior"
+ depends on FIXED_PHY
+
+config FIXED_MII_100_FDX
+ bool "Emulation for 100M Fdx fixed PHY behavior"
+ depends on FIXED_PHY
+
endmenu
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index a00e619..320f832 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_LXT_PHY) += lxt.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
+obj-$(CONFIG_FIXED_PHY) += fixed.o
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
new file mode 100644
index 0000000..341036d
--- /dev/null
+++ b/drivers/net/phy/fixed.c
@@ -0,0 +1,358 @@
+/*
+ * drivers/net/phy/fixed.c
+ *
+ * Driver for fixed PHYs, i.e. transceivers that operate in a single fixed mode.
+ *
+ * Author: Vitaly Bordug
+ *
+ * Copyright (c) 2006 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#define MII_REGS_NUM 7
+
+/*
+ The idea is to emulate normal PHY behavior by responding to MII
+ BMCR/BMSR reads with pre-defined values, so that the read_status hook
+ can obtain all the needed info.
+*/
+
+struct fixed_phy_status {
+ u8 link;
+ u16 speed;
+ u8 duplex;
+};
+
+/*-----------------------------------------------------------------------------
+ * Private information holder for mii_bus
+ *-----------------------------------------------------------------------------*/
+struct fixed_info {
+ u16 *regs;
+ u8 regs_num;
+ struct fixed_phy_status phy_status;
+ struct phy_device *phydev; /* pointer to the container */
+ /* link & speed cb */
+ int(*link_update)(struct net_device*, struct fixed_phy_status*);
+
+};
+
+/*-----------------------------------------------------------------------------
+ * If something unusual needs to be done with link/speed, the network
+ * driver can assign a function to implement it. May be useful for PHYs
+ * that need to be software-driven.
+ *-----------------------------------------------------------------------------*/
+int fixed_mdio_set_link_update(struct phy_device* phydev,
+ int(*link_update)(struct net_device*, struct fixed_phy_status*))
+{
+ struct fixed_info *fixed;
+
+ if(link_update == NULL)
+ return -EINVAL;
+
+ if(phydev) {
+ if(phydev->bus) {
+ fixed = phydev->bus->priv;
+ fixed->link_update = link_update;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL(fixed_mdio_set_link_update);
+
+/*-----------------------------------------------------------------------------
+ * This is used for updating internal mii regs from the status
+ *-----------------------------------------------------------------------------*/
+static int fixed_mdio_update_regs(struct fixed_info *fixed)
+{
+ u16 *regs = fixed->regs;
+ u16 bmsr = 0;
+ u16 bmcr = 0;
+
+ if(!regs) {
+		printk(KERN_ERR "%s: regs not set up\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ if(fixed->phy_status.link)
+ bmsr |= BMSR_LSTATUS;
+
+ if(fixed->phy_status.duplex) {
+ bmcr |= BMCR_FULLDPLX;
+
+ switch ( fixed->phy_status.speed ) {
+ case 100:
+ bmsr |= BMSR_100FULL;
+ bmcr |= BMCR_SPEED100;
+ break;
+
+ case 10:
+ bmsr |= BMSR_10FULL;
+ break;
+ }
+ } else {
+ switch ( fixed->phy_status.speed ) {
+ case 100:
+ bmsr |= BMSR_100HALF;
+ bmcr |= BMCR_SPEED100;
+ break;
+
+ case 10:
+			bmsr |= BMSR_10HALF;
+ break;
+ }
+ }
+
+ regs[MII_BMCR] = bmcr;
+	regs[MII_BMSR] = bmsr | BMSR_10HALF;	/* we are always capable of 10 HDX */
+
+ return 0;
+}
+
+static int fixed_mii_read(struct mii_bus *bus, int phy_id, int location)
+{
+ struct fixed_info *fixed = bus->priv;
+
+ /* if user has registered link update callback, use it */
+ if(fixed->phydev)
+ if(fixed->phydev->attached_dev) {
+ if(fixed->link_update) {
+ fixed->link_update(fixed->phydev->attached_dev,
+ &fixed->phy_status);
+ fixed_mdio_update_regs(fixed);
+ }
+ }
+
+ if ((unsigned int)location >= fixed->regs_num)
+ return -1;
+ return fixed->regs[location];
+}
+
+static int fixed_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
+{
+ /* do nothing for now*/
+ return 0;
+}
+
+static int fixed_mii_reset(struct mii_bus *bus)
+{
+ /*nothing here - no way/need to reset it*/
+ return 0;
+}
+
+static int fixed_config_aneg(struct phy_device *phydev)
+{
+ /* :TODO:03/13/2006 09:45:37 PM::
+	 The full autoneg functionality can be emulated,
+	 but there is no need to have anything here for now
+ */
+ return 0;
+}
+
+/*-----------------------------------------------------------------------------
+ * the manual bind will do the magic - with phy_id_mask == 0
+ * match will never return true...
+ *-----------------------------------------------------------------------------*/
+static struct phy_driver fixed_mdio_driver = {
+ .name = "Fixed PHY",
+ .features = PHY_BASIC_FEATURES,
+ .config_aneg = fixed_config_aneg,
+ .read_status = genphy_read_status,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+/*-----------------------------------------------------------------------------
+ * This function creates everything necessary, binds the fixed PHY
+ * driver and registers it all on the mdio_bus_type.
+ * speed is either 10 or 100, duplex is boolean.
+ * number is used to create multiple fixed PHYs, so that several devices
+ * can utilize them simultaneously.
+ *-----------------------------------------------------------------------------*/
+static int fixed_mdio_register_device(int number, int speed, int duplex)
+{
+ struct mii_bus *new_bus;
+ struct fixed_info *fixed;
+ struct phy_device *phydev;
+ int err = 0;
+
+ struct device* dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+
+ if (NULL == dev)
+ return -ENOMEM;
+
+ new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
+
+ if (NULL == new_bus) {
+ kfree(dev);
+ return -ENOMEM;
+ }
+ fixed = kzalloc(sizeof(struct fixed_info), GFP_KERNEL);
+
+ if (NULL == fixed) {
+ kfree(dev);
+ kfree(new_bus);
+ return -ENOMEM;
+ }
+
+ fixed->regs = kzalloc(MII_REGS_NUM*sizeof(int), GFP_KERNEL);
+ fixed->regs_num = MII_REGS_NUM;
+ fixed->phy_status.speed = speed;
+ fixed->phy_status.duplex = duplex;
+ fixed->phy_status.link = 1;
+
+	new_bus->name = "Fixed MII Bus";
+	new_bus->read = &fixed_mii_read;
+	new_bus->write = &fixed_mii_write;
+	new_bus->reset = &fixed_mii_reset;
+
+ /*set up workspace*/
+ fixed_mdio_update_regs(fixed);
+ new_bus->priv = fixed;
+
+ new_bus->dev = dev;
+ dev_set_drvdata(dev, new_bus);
+
+ /* create phy_device and register it on the mdio bus */
+ phydev = phy_device_create(new_bus, 0, 0);
+
+ /*
+	 Store the phydev pointer in the fixed_info so that the bus
+	 read/write code can access, for instance, the attached netdev.
+	 This is only needed when a user-specified link_update callback
+	 is used.
+ */
+ fixed->phydev = phydev;
+
+ if(NULL == phydev) {
+ err = -ENOMEM;
+ goto device_create_fail;
+ }
+
+ phydev->irq = -1;
+ phydev->dev.bus = &mdio_bus_type;
+
+ if(number)
+ snprintf(phydev->dev.bus_id, BUS_ID_SIZE,
+ "fixed_%d@%d:%d", number, speed, duplex);
+ else
+ snprintf(phydev->dev.bus_id, BUS_ID_SIZE,
+ "fixed@%d:%d", speed, duplex);
+ phydev->bus = new_bus;
+
+ err = device_register(&phydev->dev);
+ if(err) {
+ printk(KERN_ERR "Phy %s failed to register\n",
+ phydev->dev.bus_id);
+ goto bus_register_fail;
+ }
+
+ /*
+	 The mdio bus matches drivers by phy_id. Rather than faking an ID,
+	 bind the driver by hand here; it is the same for all fixed PHYs
+	 anyway.
+ */
+ down_write(&phydev->dev.bus->subsys.rwsem);
+
+ phydev->dev.driver = &fixed_mdio_driver.driver;
+
+ err = phydev->dev.driver->probe(&phydev->dev);
+ if(err < 0) {
+ printk(KERN_ERR "Phy %s: problems with fixed driver\n",phydev->dev.bus_id);
+ up_write(&phydev->dev.bus->subsys.rwsem);
+ goto probe_fail;
+ }
+
+ device_bind_driver(&phydev->dev);
+ up_write(&phydev->dev.bus->subsys.rwsem);
+
+ return 0;
+
+probe_fail:
+ device_unregister(&phydev->dev);
+bus_register_fail:
+ kfree(phydev);
+device_create_fail:
+ kfree(dev);
+ kfree(new_bus);
+ kfree(fixed);
+
+ return err;
+}
+
+
+MODULE_DESCRIPTION("Fixed PHY device & driver for PAL");
+MODULE_AUTHOR("Vitaly Bordug");
+MODULE_LICENSE("GPL");
+
+static int __init fixed_init(void)
+{
+ int ret;
+ int duplex = 0;
+
+ /* register on the bus... Not expected to be matched with anything there... */
+ phy_driver_register(&fixed_mdio_driver);
+
+	/* We will create several mdio devices here and bind the upper
+	   driver to them.
+
+	   External software can then look up the PHY bus by searching for
+	   fixed@speed:duplex, e.g. fixed@100:1, to connect to the virtual
+	   100M full-duplex PHY.
+
+	   If several virtual PHYs are required, the bus_id takes the form
+	   fixed_<num>@<speed>:<duplex>, which also makes it possible to
+	   define a driver-specific link control callback, e.g. when the
+	   PHY is completely software-driven.
+
+ */
+
+#ifdef CONFIG_FIXED_MII_DUPLEX
+ duplex = 1;
+#endif
+
+#ifdef CONFIG_FIXED_MII_100_FDX
+ fixed_mdio_register_device(0, 100, 1);
+#endif
+
+#ifdef CONFIG_FIXED_MII_10_FDX
+ fixed_mdio_register_device(0, 10, 1);
+#endif
+ return 0;
+}
+
+static void __exit fixed_exit(void)
+{
+ phy_driver_unregister(&fixed_mdio_driver);
+ /* :WARNING:02/18/2006 04:32:40 AM:: Cleanup all the created stuff */
+}
+
+module_init(fixed_init);
+module_exit(fixed_exit);
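
A MAC driver with no physical transceiver would attach to one of these emulated PHYs by its bus_id, e.g. "fixed@100:1" for the 100M full-duplex instance, and could optionally install a callback via fixed_mdio_set_link_update(). The sketch below is illustrative only: the helper name is hypothetical, and it assumes the four-argument phy_connect() prototype of this kernel generation, which differs in later trees.

#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

/* Hypothetical helper: attach a netdev to the emulated 100M FDX PHY. */
static int example_attach_fixed_phy(struct net_device *dev,
				    void (*adjust_link)(struct net_device *))
{
	struct phy_device *phydev;

	phydev = phy_connect(dev, "fixed@100:1", adjust_link, 0);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	phy_start(phydev);		/* kick off the PHY state machine */
	return 0;
}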
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 1dde390..cf6660c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -159,6 +159,7 @@ struct bus_type mdio_bus_type = {
.suspend = mdio_bus_suspend,
.resume = mdio_bus_resume,
};
+EXPORT_SYMBOL(mdio_bus_type);
int __init mdio_bus_init(void)
{
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 1bc1e03..2d1ecfd 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -45,6 +45,35 @@ static struct phy_driver genphy_driver;
extern int mdio_bus_init(void);
extern void mdio_bus_exit(void);
+struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
+{
+ struct phy_device *dev;
+ /* We allocate the device, and initialize the
+ * default values */
+ dev = kcalloc(1, sizeof(*dev), GFP_KERNEL);
+
+ if (NULL == dev)
+		return ERR_PTR(-ENOMEM);
+
+ dev->speed = 0;
+ dev->duplex = -1;
+ dev->pause = dev->asym_pause = 0;
+ dev->link = 1;
+
+ dev->autoneg = AUTONEG_ENABLE;
+
+ dev->addr = addr;
+ dev->phy_id = phy_id;
+ dev->bus = bus;
+
+ dev->state = PHY_DOWN;
+
+ spin_lock_init(&dev->lock);
+
+ return dev;
+}
+EXPORT_SYMBOL(phy_device_create);
+
/* get_phy_device
*
* description: Reads the ID registers of the PHY at addr on the
@@ -78,27 +107,7 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
if (0xffffffff == phy_id)
return NULL;
- /* Otherwise, we allocate the device, and initialize the
- * default values */
- dev = kcalloc(1, sizeof(*dev), GFP_KERNEL);
-
- if (NULL == dev)
- return ERR_PTR(-ENOMEM);
-
- dev->speed = 0;
- dev->duplex = -1;
- dev->pause = dev->asym_pause = 0;
- dev->link = 1;
-
- dev->autoneg = AUTONEG_ENABLE;
-
- dev->addr = addr;
- dev->phy_id = phy_id;
- dev->bus = bus;
-
- dev->state = PHY_DOWN;
-
- spin_lock_init(&dev->lock);
+ dev = phy_device_create(bus, addr, phy_id);
return dev;
}
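
The newly exported phy_device_create() lets a bus driver (such as fixed.c above) build a phy_device for an address and ID it already knows, without get_phy_device() probing the ID registers over MDIO. A minimal usage sketch follows; it is illustrative only (the helper name, bus_id string and example PHY ID are made up), and note that on allocation failure the function returns an ERR_PTR-encoded value rather than NULL.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy.h>
#include <linux/slab.h>

/* Hypothetical helper: register a PHY whose address and ID are known. */
static int example_register_known_phy(struct mii_bus *bus)
{
	struct phy_device *phydev;
	int err;

	phydev = phy_device_create(bus, 0 /* addr */, 0x01410cc2 /* example id */);
	if (phydev == NULL || IS_ERR(phydev))
		return -ENOMEM;

	phydev->irq = -1;			/* polled, no interrupt line */
	phydev->dev.bus = &mdio_bus_type;
	snprintf(phydev->dev.bus_id, BUS_ID_SIZE, "example_phy:%02x", 0);

	err = device_register(&phydev->dev);
	if (err)
		kfree(phydev);
	return err;
}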
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 132ed32..e72e0e0 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -71,6 +71,7 @@
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
+#include <asm/irq.h>
/* local include */
#include "s2io.h"
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index efd0f23..01392bc 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -742,7 +742,7 @@ module_param(irq, int, 0);
MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address");
MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number");
-int init_module(void)
+int __init init_module(void)
{
dev_seeq = seeq8005_probe(-1);
if (IS_ERR(dev_seeq))
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 7de9a07..ad878df 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2211,6 +2211,7 @@ static int skge_up(struct net_device *dev)
skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
skge_led(skge, LED_MODE_ON);
+ netif_poll_enable(dev);
return 0;
free_rx_ring:
@@ -2279,6 +2280,7 @@ static int skge_down(struct net_device *dev)
skge_led(skge, LED_MODE_OFF);
+ netif_poll_disable(dev);
skge_tx_clean(skge);
skge_rx_clean(skge);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index de91609..933e87f 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -233,6 +233,8 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
if (hw->ports > 1)
reg1 |= PCI_Y2_PHY2_COMA;
}
+ sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ udelay(100);
if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
sky2_pci_write32(hw, PCI_DEV_REG3, 0);
@@ -242,9 +244,6 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
sky2_pci_write32(hw, PCI_DEV_REG5, 0);
}
- sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
- udelay(100);
-
break;
case PCI_D3hot:
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index d37bd86..0b15290 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1092,6 +1092,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id, struct pt_regs *regs
/* Spurious interrupt check */
if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
(INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
+ spin_unlock_irqrestore(&lp->lock, flags);
return IRQ_NONE;
}
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 3d8dcb6..cf62373 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -321,12 +321,12 @@ static void smc_reset(struct net_device *dev)
DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
/* Disable all interrupts, block TX tasklet */
- spin_lock(&lp->lock);
+ spin_lock_irq(&lp->lock);
SMC_SELECT_BANK(2);
SMC_SET_INT_MASK(0);
pending_skb = lp->pending_tx_skb;
lp->pending_tx_skb = NULL;
- spin_unlock(&lp->lock);
+ spin_unlock_irq(&lp->lock);
/* free any pending tx skb */
if (pending_skb) {
@@ -448,12 +448,12 @@ static void smc_shutdown(struct net_device *dev)
DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__);
/* no more interrupts for me */
- spin_lock(&lp->lock);
+ spin_lock_irq(&lp->lock);
SMC_SELECT_BANK(2);
SMC_SET_INT_MASK(0);
pending_skb = lp->pending_tx_skb;
lp->pending_tx_skb = NULL;
- spin_unlock(&lp->lock);
+ spin_unlock_irq(&lp->lock);
if (pending_skb)
dev_kfree_skb(pending_skb);
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 4ec4b4d..7aa7fba 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -136,14 +136,9 @@
#define SMC_CAN_USE_32BIT 0
#define SMC_IO_SHIFT 0
#define SMC_NOWAIT 1
-#define SMC_USE_PXA_DMA 1
-#define SMC_inb(a, r) readb((a) + (r))
#define SMC_inw(a, r) readw((a) + (r))
-#define SMC_inl(a, r) readl((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
#define SMC_outw(v, a, r) writew(v, (a) + (r))
-#define SMC_outl(v, a, r) writel(v, (a) + (r))
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
@@ -189,16 +184,10 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
#define SMC_IO_SHIFT 0
#define SMC_NOWAIT 1
-#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_outb(v, a, r) writeb(v, (a) + (r))
#define SMC_inw(a, r) readw((a) + (r))
#define SMC_outw(v, a, r) writew(v, (a) + (r))
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-#define SMC_inl(a, r) readl((a) + (r))
-#define SMC_outl(v, a, r) writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
#include <asm/mach-types.h>
#include <asm/arch/cpu.h>
@@ -372,6 +361,24 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
#define SMC_IRQ_FLAGS (0)
+#elif defined(CONFIG_ARCH_VERSATILE)
+
+#define SMC_CAN_USE_8BIT 1
+#define SMC_CAN_USE_16BIT 1
+#define SMC_CAN_USE_32BIT 1
+#define SMC_NOWAIT 1
+
+#define SMC_inb(a, r) readb((a) + (r))
+#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inl(a, r) readl((a) + (r))
+#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outw(v, a, r) writew(v, (a) + (r))
+#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS (0)
+
#else
#define SMC_CAN_USE_8BIT 1
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 647f62e..8890721 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1611,13 +1611,12 @@ spider_net_open(struct net_device *netdev)
int result;
result = -ENOMEM;
- if (spider_net_init_chain(card, &card->tx_chain,
- card->descr,
- PCI_DMA_TODEVICE, tx_descriptors))
+ if (spider_net_init_chain(card, &card->tx_chain, card->descr,
+ PCI_DMA_TODEVICE, card->tx_desc))
goto alloc_tx_failed;
if (spider_net_init_chain(card, &card->rx_chain,
- card->descr + tx_descriptors,
- PCI_DMA_FROMDEVICE, rx_descriptors))
+ card->descr + card->rx_desc,
+ PCI_DMA_FROMDEVICE, card->rx_desc))
goto alloc_rx_failed;
/* allocate rx skbs */
@@ -2005,6 +2004,9 @@ spider_net_setup_netdev(struct spider_net_card *card)
card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
+ card->tx_desc = tx_descriptors;
+ card->rx_desc = rx_descriptors;
+
spider_net_setup_netdev_ops(netdev);
netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX;
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index f6dcf18..30407cd 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -440,6 +440,9 @@ struct spider_net_card {
/* for ethtool */
int msg_enable;
+ int rx_desc;
+ int tx_desc;
+
struct spider_net_descr descr[0];
};
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index a5bb0b76..0220922 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -130,6 +130,18 @@ spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data)
return 0;
}
+static void
+spider_net_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering)
+{
+ struct spider_net_card *card = netdev->priv;
+
+ ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
+ ering->tx_pending = card->tx_desc;
+ ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
+ ering->rx_pending = card->rx_desc;
+}
+
struct ethtool_ops spider_net_ethtool_ops = {
.get_settings = spider_net_ethtool_get_settings,
.get_drvinfo = spider_net_ethtool_get_drvinfo,
@@ -141,5 +153,6 @@ struct ethtool_ops spider_net_ethtool_ops = {
.set_rx_csum = spider_net_ethtool_set_rx_csum,
.get_tx_csum = spider_net_ethtool_get_tx_csum,
.set_tx_csum = spider_net_ethtool_set_tx_csum,
+ .get_ringparam = spider_net_ethtool_get_ringparam,
};
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index ac17377..698568e 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -107,7 +107,7 @@ static char *media[MAX_UNITS];
#endif
/* These identify the driver base version and may not be removed. */
-static char version[] __devinitdata =
+static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
KERN_INFO " http://www.scyld.com/network/sundance.html\n";
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 9f49156..4470025 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -140,7 +140,7 @@ in the event that chatty debug messages are desired - jjs 12/30/98 */
/* version and credits */
#ifndef PCMCIA
-static char version[] __initdata =
+static char version[] __devinitdata =
"\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n"
" v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n"
" v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n"
@@ -216,7 +216,7 @@ static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0};
static int __devinitdata turbo_searched = 0;
#ifndef PCMCIA
-static __u32 ibmtr_mem_base __initdata = 0xd0000;
+static __u32 ibmtr_mem_base __devinitdata = 0xd0000;
#endif
static void __devinit PrtChanID(char *pcid, short stride)
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index cd2e025..85a7f79 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -5666,7 +5666,7 @@ module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param(ringspeed, int, 0);
-static struct net_device *setup_card(int n)
+static struct net_device * __init setup_card(int n)
{
struct net_device *dev = alloc_trdev(sizeof(struct net_local));
int err;
@@ -5696,9 +5696,8 @@ out:
free_netdev(dev);
return ERR_PTR(err);
}
-
-int init_module(void)
+int __init init_module(void)
{
int i, found = 0;
struct net_device *dev;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 7f41481..eba9083 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -138,7 +138,7 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
#include <asm/irq.h>
/* These identify the driver base version and may not be removed. */
-static char version[] __devinitdata =
+static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
KERN_INFO " http://www.scyld.com/network/drivers.html\n";
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index f874e4f..cf43390 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1264,8 +1264,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
static int __init xircom_init(void)
{
- pci_register_driver(&xircom_ops);
- return 0;
+ return pci_register_driver(&xircom_ops);
}
static void __exit xircom_exit(void)
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
new file mode 100644
index 0000000..47f49ef
--- /dev/null
+++ b/drivers/net/ucc_geth.c
@@ -0,0 +1,4278 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ *
+ * Description:
+ * QE UCC Gigabit Ethernet Driver
+ *
+ * Changelog:
+ * Jul 6, 2006 Li Yang <LeoLi@freescale.com>
+ * - Rearrange code and style fixes
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/fsl_devices.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/mii.h>
+
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/immap_qe.h>
+#include <asm/qe.h>
+#include <asm/ucc.h>
+#include <asm/ucc_fast.h>
+
+#include "ucc_geth.h"
+#include "ucc_geth_phy.h"
+
+#undef DEBUG
+
+#define DRV_DESC "QE UCC Gigabit Ethernet Controller version:June 20, 2006"
+#define DRV_NAME "ucc_geth"
+
+#define ugeth_printk(level, format, arg...) \
+ printk(level format "\n", ## arg)
+
+#define ugeth_dbg(format, arg...) \
+ ugeth_printk(KERN_DEBUG , format , ## arg)
+#define ugeth_err(format, arg...) \
+ ugeth_printk(KERN_ERR , format , ## arg)
+#define ugeth_info(format, arg...) \
+ ugeth_printk(KERN_INFO , format , ## arg)
+#define ugeth_warn(format, arg...) \
+ ugeth_printk(KERN_WARNING , format , ## arg)
+
+#ifdef UGETH_VERBOSE_DEBUG
+#define ugeth_vdbg ugeth_dbg
+#else
+#define ugeth_vdbg(fmt, args...) do { } while (0)
+#endif /* UGETH_VERBOSE_DEBUG */
+
+static DEFINE_SPINLOCK(ugeth_lock);
+
+static ucc_geth_info_t ugeth_primary_info = {
+ .uf_info = {
+ .bd_mem_part = MEM_PART_SYSTEM,
+ .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
+ .max_rx_buf_length = 1536,
+/* FIXME: should be changed in run time for 1G and 100M */
+#ifdef CONFIG_UGETH_HAS_GIGA
+ .urfs = UCC_GETH_URFS_GIGA_INIT,
+ .urfet = UCC_GETH_URFET_GIGA_INIT,
+ .urfset = UCC_GETH_URFSET_GIGA_INIT,
+ .utfs = UCC_GETH_UTFS_GIGA_INIT,
+ .utfet = UCC_GETH_UTFET_GIGA_INIT,
+ .utftt = UCC_GETH_UTFTT_GIGA_INIT,
+#else
+ .urfs = UCC_GETH_URFS_INIT,
+ .urfet = UCC_GETH_URFET_INIT,
+ .urfset = UCC_GETH_URFSET_INIT,
+ .utfs = UCC_GETH_UTFS_INIT,
+ .utfet = UCC_GETH_UTFET_INIT,
+ .utftt = UCC_GETH_UTFTT_INIT,
+#endif
+ .ufpt = 256,
+ .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
+ .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
+ .tenc = UCC_FAST_TX_ENCODING_NRZ,
+ .renc = UCC_FAST_RX_ENCODING_NRZ,
+ .tcrc = UCC_FAST_16_BIT_CRC,
+ .synl = UCC_FAST_SYNC_LEN_NOT_USED,
+ },
+ .numQueuesTx = 1,
+ .numQueuesRx = 1,
+ .extendedFilteringChainPointer = ((uint32_t) NULL),
+ .typeorlen = 3072 /*1536 */ ,
+ .nonBackToBackIfgPart1 = 0x40,
+ .nonBackToBackIfgPart2 = 0x60,
+ .miminumInterFrameGapEnforcement = 0x50,
+ .backToBackInterFrameGap = 0x60,
+ .mblinterval = 128,
+ .nortsrbytetime = 5,
+ .fracsiz = 1,
+ .strictpriorityq = 0xff,
+ .altBebTruncation = 0xa,
+ .excessDefer = 1,
+ .maxRetransmission = 0xf,
+ .collisionWindow = 0x37,
+ .receiveFlowControl = 1,
+ .maxGroupAddrInHash = 4,
+ .maxIndAddrInHash = 4,
+ .prel = 7,
+ .maxFrameLength = 1518,
+ .minFrameLength = 64,
+ .maxD1Length = 1520,
+ .maxD2Length = 1520,
+ .vlantype = 0x8100,
+ .ecamptr = ((uint32_t) NULL),
+ .eventRegMask = UCCE_OTHER,
+ .pausePeriod = 0xf000,
+ .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
+ .bdRingLenTx = {
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN,
+ TX_BD_RING_LEN},
+
+ .bdRingLenRx = {
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN,
+ RX_BD_RING_LEN},
+
+ .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
+ .largestexternallookupkeysize =
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
+ .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE,
+ .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
+ .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
+ .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
+ .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
+ .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
+ .numThreadsTx = UCC_GETH_NUM_OF_THREADS_4,
+ .numThreadsRx = UCC_GETH_NUM_OF_THREADS_4,
+ .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+ .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
+};
+
+static ucc_geth_info_t ugeth_info[8];
+
+#ifdef DEBUG
+static void mem_disp(u8 *addr, int size)
+{
+ u8 *i;
+ int size16Aling = (size >> 4) << 4;
+ int size4Aling = (size >> 2) << 2;
+ int notAlign = 0;
+ if (size % 16)
+ notAlign = 1;
+
+ for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
+ printk("0x%08x: %08x %08x %08x %08x\r\n",
+ (u32) i,
+ *((u32 *) (i)),
+ *((u32 *) (i + 4)),
+ *((u32 *) (i + 8)), *((u32 *) (i + 12)));
+ if (notAlign == 1)
+ printk("0x%08x: ", (u32) i);
+ for (; (u32) i < (u32) addr + size4Aling; i += 4)
+ printk("%08x ", *((u32 *) (i)));
+ for (; (u32) i < (u32) addr + size; i++)
+ printk("%02x", *((u8 *) (i)));
+ if (notAlign == 1)
+ printk("\r\n");
+}
+#endif /* DEBUG */
+
+#ifdef CONFIG_UGETH_FILTERING
+static void enqueue(struct list_head *node, struct list_head *lh)
+{
+ unsigned long flags;
+
+	spin_lock_irqsave(&ugeth_lock, flags);
+	list_add_tail(node, lh);
+	spin_unlock_irqrestore(&ugeth_lock, flags);
+}
+#endif /* CONFIG_UGETH_FILTERING */
+
+static struct list_head *dequeue(struct list_head *lh)
+{
+ unsigned long flags;
+
+	spin_lock_irqsave(&ugeth_lock, flags);
+	if (!list_empty(lh)) {
+		struct list_head *node = lh->next;
+		list_del(node);
+		spin_unlock_irqrestore(&ugeth_lock, flags);
+		return node;
+	} else {
+		spin_unlock_irqrestore(&ugeth_lock, flags);
+ return NULL;
+ }
+}
+
+static int get_interface_details(enet_interface_e enet_interface,
+ enet_speed_e *speed,
+ int *r10m,
+ int *rmm,
+ int *rpm,
+ int *tbi, int *limited_to_full_duplex)
+{
+ /* Analyze enet_interface according to Interface Mode
+ Configuration table */
+ switch (enet_interface) {
+ case ENET_10_MII:
+ *speed = ENET_SPEED_10BT;
+ break;
+ case ENET_10_RMII:
+ *speed = ENET_SPEED_10BT;
+ *r10m = 1;
+ *rmm = 1;
+ break;
+ case ENET_10_RGMII:
+ *speed = ENET_SPEED_10BT;
+ *rpm = 1;
+ *r10m = 1;
+ *limited_to_full_duplex = 1;
+ break;
+ case ENET_100_MII:
+ *speed = ENET_SPEED_100BT;
+ break;
+ case ENET_100_RMII:
+ *speed = ENET_SPEED_100BT;
+ *rmm = 1;
+ break;
+ case ENET_100_RGMII:
+ *speed = ENET_SPEED_100BT;
+ *rpm = 1;
+ *limited_to_full_duplex = 1;
+ break;
+ case ENET_1000_GMII:
+ *speed = ENET_SPEED_1000BT;
+ *limited_to_full_duplex = 1;
+ break;
+ case ENET_1000_RGMII:
+ *speed = ENET_SPEED_1000BT;
+ *rpm = 1;
+ *limited_to_full_duplex = 1;
+ break;
+ case ENET_1000_TBI:
+ *speed = ENET_SPEED_1000BT;
+ *tbi = 1;
+ *limited_to_full_duplex = 1;
+ break;
+ case ENET_1000_RTBI:
+ *speed = ENET_SPEED_1000BT;
+ *rpm = 1;
+ *tbi = 1;
+ *limited_to_full_duplex = 1;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *get_new_skb(ucc_geth_private_t *ugeth, u8 *bd)
+{
+ struct sk_buff *skb = NULL;
+
+ skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT);
+
+ if (skb == NULL)
+ return NULL;
+
+ /* We need the data buffer to be aligned properly. We will reserve
+ * as many bytes as needed to align the data properly
+ */
+ skb_reserve(skb,
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT -
+ (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
+ 1)));
+
+ skb->dev = ugeth->dev;
+
+ BD_BUFFER_SET(bd,
+ dma_map_single(NULL,
+ skb->data,
+ ugeth->ug_info->uf_info.max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT,
+ DMA_FROM_DEVICE));
+
+ BD_STATUS_AND_LENGTH_SET(bd,
+ (R_E | R_I |
+ (BD_STATUS_AND_LENGTH(bd) & R_W)));
+
+ return skb;
+}
+
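
The skb_reserve() arithmetic in get_new_skb() above shifts skb->data forward by align - (data & (align - 1)) bytes so the Rx buffer handed to the UCC starts on an UCC_GETH_RX_DATA_BUF_ALIGNMENT boundary; that is also why the allocation is padded by one full alignment unit. The stand-alone sketch below repeats the same computation for illustration only (names invented; align must be a power of two).

#include <assert.h>
#include <stdint.h>

/* Bytes to reserve so that (data + reserve) is align-aligned; reserves a
 * full 'align' when data is already aligned, which is why the driver
 * over-allocates by 'align' bytes. */
static unsigned int align_reserve(uintptr_t data, unsigned int align)
{
	return align - (data & (align - 1));
}

int main(void)
{
	/* e.g. data at ...0x1028 with 64-byte alignment: reserve 24 bytes,
	 * moving data to ...0x1040. */
	assert(align_reserve(0x1028, 64) == 24);
	assert((0x1028 + align_reserve(0x1028, 64)) % 64 == 0);
	return 0;
}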
+static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ)
+{
+ u8 *bd;
+ u32 bd_status;
+ struct sk_buff *skb;
+ int i;
+
+ bd = ugeth->p_rx_bd_ring[rxQ];
+ i = 0;
+
+ do {
+ bd_status = BD_STATUS_AND_LENGTH(bd);
+ skb = get_new_skb(ugeth, bd);
+
+ if (!skb) /* If can not allocate data buffer,
+ abort. Cleanup will be elsewhere */
+ return -ENOMEM;
+
+ ugeth->rx_skbuff[rxQ][i] = skb;
+
+ /* advance the BD pointer */
+ bd += UCC_GETH_SIZE_OF_BD;
+ i++;
+ } while (!(bd_status & R_W));
+
+ return 0;
+}
+
+static int fill_init_enet_entries(ucc_geth_private_t *ugeth,
+ volatile u32 *p_start,
+ u8 num_entries,
+ u32 thread_size,
+ u32 thread_alignment,
+ qe_risc_allocation_e risc,
+ int skip_page_for_first_entry)
+{
+ u32 init_enet_offset;
+ u8 i;
+ int snum;
+
+ for (i = 0; i < num_entries; i++) {
+ if ((snum = qe_get_snum()) < 0) {
+ ugeth_err("fill_init_enet_entries: Can not get SNUM.");
+ return snum;
+ }
+ if ((i == 0) && skip_page_for_first_entry)
+ /* First entry of Rx does not have page */
+ init_enet_offset = 0;
+ else {
+ init_enet_offset =
+ qe_muram_alloc(thread_size, thread_alignment);
+ if (IS_MURAM_ERR(init_enet_offset)) {
+ ugeth_err
+ ("fill_init_enet_entries: Can not allocate DPRAM memory.");
+ qe_put_snum((u8) snum);
+ return -ENOMEM;
+ }
+ }
+ *(p_start++) =
+ ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
+ | risc;
+ }
+
+ return 0;
+}
+
+static int return_init_enet_entries(ucc_geth_private_t *ugeth,
+ volatile u32 *p_start,
+ u8 num_entries,
+ qe_risc_allocation_e risc,
+ int skip_page_for_first_entry)
+{
+ u32 init_enet_offset;
+ u8 i;
+ int snum;
+
+ for (i = 0; i < num_entries; i++) {
+ /* Check that this entry was actually valid --
+ needed in case failed in allocations */
+ if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
+ snum =
+ (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
+ ENET_INIT_PARAM_SNUM_SHIFT;
+ qe_put_snum((u8) snum);
+ if (!((i == 0) && skip_page_for_first_entry)) {
+ /* First entry of Rx does not have page */
+ init_enet_offset =
+ (in_be32(p_start) &
+ ENET_INIT_PARAM_PTR_MASK);
+ qe_muram_free(init_enet_offset);
+ }
+ *(p_start++) = 0; /* Just for cosmetics */
+ }
+ }
+
+ return 0;
+}
+
+#ifdef DEBUG
+static int dump_init_enet_entries(ucc_geth_private_t *ugeth,
+ volatile u32 *p_start,
+ u8 num_entries,
+ u32 thread_size,
+ qe_risc_allocation_e risc,
+ int skip_page_for_first_entry)
+{
+ u32 init_enet_offset;
+ u8 i;
+ int snum;
+
+ for (i = 0; i < num_entries; i++) {
+ /* Check that this entry was actually valid --
+ needed in case failed in allocations */
+ if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
+ snum =
+ (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
+ ENET_INIT_PARAM_SNUM_SHIFT;
+ qe_put_snum((u8) snum);
+ if (!((i == 0) && skip_page_for_first_entry)) {
+ /* First entry of Rx does not have page */
+ init_enet_offset =
+ (in_be32(p_start) &
+ ENET_INIT_PARAM_PTR_MASK);
+ ugeth_info("Init enet entry %d:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32)
+ qe_muram_addr(init_enet_offset));
+ mem_disp(qe_muram_addr(init_enet_offset),
+ thread_size);
+ }
+ p_start++;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_UGETH_FILTERING
+static enet_addr_container_t *get_enet_addr_container(void)
+{
+ enet_addr_container_t *enet_addr_cont;
+
+ /* allocate memory */
+ enet_addr_cont = kmalloc(sizeof(enet_addr_container_t), GFP_KERNEL);
+ if (!enet_addr_cont) {
+ ugeth_err("%s: No memory for enet_addr_container_t object.",
+ __FUNCTION__);
+ return NULL;
+ }
+
+ return enet_addr_cont;
+}
+#endif /* CONFIG_UGETH_FILTERING */
+
+static void put_enet_addr_container(enet_addr_container_t *enet_addr_cont)
+{
+ kfree(enet_addr_cont);
+}
+
+#ifdef CONFIG_UGETH_FILTERING
+static int hw_add_addr_in_paddr(ucc_geth_private_t *ugeth,
+ enet_addr_t *p_enet_addr, u8 paddr_num)
+{
+ ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+
+ if (!(paddr_num < NUM_OF_PADDRS)) {
+		ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ p_82xx_addr_filt =
+ (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+ /* Ethernet frames are defined in Little Endian mode, */
+ /* therefore to insert the address we reverse the bytes. */
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].h,
+ (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
+ (u16) (*p_enet_addr)[4]));
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].m,
+ (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
+ (u16) (*p_enet_addr)[2]));
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].l,
+ (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
+ (u16) (*p_enet_addr)[0]));
+
+ return 0;
+}
+#endif /* CONFIG_UGETH_FILTERING */
+
+static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num)
+{
+ ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+
+ if (!(paddr_num < NUM_OF_PADDRS)) {
+		ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ p_82xx_addr_filt =
+ (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+ /* Writing address ff.ff.ff.ff.ff.ff disables address
+ recognition for this register */
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
+ out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
+
+ return 0;
+}
+
+static void hw_add_addr_in_hash(ucc_geth_private_t *ugeth,
+ enet_addr_t *p_enet_addr)
+{
+ ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+ u32 cecr_subblock;
+
+ p_82xx_addr_filt =
+ (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+
+	/* Ethernet frames are defined in Little Endian mode; therefore,
+	   to insert the address into the hash (Big Endian mode), we
+	   reverse the bytes. */
+ out_be16(&p_82xx_addr_filt->taddr.h,
+ (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
+ (u16) (*p_enet_addr)[4]));
+ out_be16(&p_82xx_addr_filt->taddr.m,
+ (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
+ (u16) (*p_enet_addr)[2]));
+ out_be16(&p_82xx_addr_filt->taddr.l,
+ (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
+ (u16) (*p_enet_addr)[0]));
+
+ qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
+ (u8) QE_CR_PROTOCOL_ETHERNET, 0);
+}
+
+#ifdef CONFIG_UGETH_MAGIC_PACKET
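+/* Magic-packet (wake-on-LAN) support: unmask the MPD event in UCCM and
+ * set MACCFG2[MPE] so the MAC reports received magic packets; the
+ * disable routine below reverses both steps.
+ */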
+static void magic_packet_detection_enable(ucc_geth_private_t *ugeth)
+{
+ ucc_fast_private_t *uccf;
+ ucc_geth_t *ug_regs;
+ u32 maccfg2, uccm;
+
+ uccf = ugeth->uccf;
+ ug_regs = ugeth->ug_regs;
+
+ /* Enable interrupts for magic packet detection */
+ uccm = in_be32(uccf->p_uccm);
+ uccm |= UCCE_MPD;
+ out_be32(uccf->p_uccm, uccm);
+
+ /* Enable magic packet detection */
+ maccfg2 = in_be32(&ug_regs->maccfg2);
+ maccfg2 |= MACCFG2_MPE;
+ out_be32(&ug_regs->maccfg2, maccfg2);
+}
+
+static void magic_packet_detection_disable(ucc_geth_private_t *ugeth)
+{
+ ucc_fast_private_t *uccf;
+ ucc_geth_t *ug_regs;
+ u32 maccfg2, uccm;
+
+ uccf = ugeth->uccf;
+ ug_regs = ugeth->ug_regs;
+
+ /* Disable interrupts for magic packet detection */
+ uccm = in_be32(uccf->p_uccm);
+ uccm &= ~UCCE_MPD;
+ out_be32(uccf->p_uccm, uccm);
+
+ /* Disable magic packet detection */
+ maccfg2 = in_be32(&ug_regs->maccfg2);
+ maccfg2 &= ~MACCFG2_MPE;
+ out_be32(&ug_regs->maccfg2, maccfg2);
+}
+#endif /* CONFIG_UGETH_MAGIC_PACKET */
+
+static inline int compare_addr(enet_addr_t *addr1, enet_addr_t *addr2)
+{
+ return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
+}
+
+#ifdef DEBUG
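+/* Copy Tx/Rx firmware statistics from MURAM parameter RAM and the
+ * hardware MIB counters into the caller-supplied structures.  Each block
+ * is copied only if the caller passed a destination and the corresponding
+ * gathering mode is active.
+ */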
+static void get_statistics(ucc_geth_private_t *ugeth,
+ ucc_geth_tx_firmware_statistics_t *
+ tx_firmware_statistics,
+ ucc_geth_rx_firmware_statistics_t *
+ rx_firmware_statistics,
+ ucc_geth_hardware_statistics_t *hardware_statistics)
+{
+ ucc_fast_t *uf_regs;
+ ucc_geth_t *ug_regs;
+ ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram;
+ ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram;
+
+ ug_regs = ugeth->ug_regs;
+ uf_regs = (ucc_fast_t *) ug_regs;
+ p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
+ p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
+
+ /* Tx firmware only if user handed pointer and driver actually
+ gathers Tx firmware statistics */
+ if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
+ tx_firmware_statistics->sicoltx =
+ in_be32(&p_tx_fw_statistics_pram->sicoltx);
+ tx_firmware_statistics->mulcoltx =
+ in_be32(&p_tx_fw_statistics_pram->mulcoltx);
+ tx_firmware_statistics->latecoltxfr =
+ in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
+ tx_firmware_statistics->frabortduecol =
+ in_be32(&p_tx_fw_statistics_pram->frabortduecol);
+ tx_firmware_statistics->frlostinmactxer =
+ in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
+ tx_firmware_statistics->carriersenseertx =
+ in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
+ tx_firmware_statistics->frtxok =
+ in_be32(&p_tx_fw_statistics_pram->frtxok);
+ tx_firmware_statistics->txfrexcessivedefer =
+ in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
+ tx_firmware_statistics->txpkts256 =
+ in_be32(&p_tx_fw_statistics_pram->txpkts256);
+ tx_firmware_statistics->txpkts512 =
+ in_be32(&p_tx_fw_statistics_pram->txpkts512);
+ tx_firmware_statistics->txpkts1024 =
+ in_be32(&p_tx_fw_statistics_pram->txpkts1024);
+ tx_firmware_statistics->txpktsjumbo =
+ in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
+ }
+
+ /* Rx firmware only if user handed pointer and driver actually
+ * gathers Rx firmware statistics */
+ if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
+ int i;
+ rx_firmware_statistics->frrxfcser =
+ in_be32(&p_rx_fw_statistics_pram->frrxfcser);
+ rx_firmware_statistics->fraligner =
+ in_be32(&p_rx_fw_statistics_pram->fraligner);
+ rx_firmware_statistics->inrangelenrxer =
+ in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
+ rx_firmware_statistics->outrangelenrxer =
+ in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
+ rx_firmware_statistics->frtoolong =
+ in_be32(&p_rx_fw_statistics_pram->frtoolong);
+ rx_firmware_statistics->runt =
+ in_be32(&p_rx_fw_statistics_pram->runt);
+ rx_firmware_statistics->verylongevent =
+ in_be32(&p_rx_fw_statistics_pram->verylongevent);
+ rx_firmware_statistics->symbolerror =
+ in_be32(&p_rx_fw_statistics_pram->symbolerror);
+ rx_firmware_statistics->dropbsy =
+ in_be32(&p_rx_fw_statistics_pram->dropbsy);
+ for (i = 0; i < 0x8; i++)
+ rx_firmware_statistics->res0[i] =
+ p_rx_fw_statistics_pram->res0[i];
+ rx_firmware_statistics->mismatchdrop =
+ in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
+ rx_firmware_statistics->underpkts =
+ in_be32(&p_rx_fw_statistics_pram->underpkts);
+ rx_firmware_statistics->pkts256 =
+ in_be32(&p_rx_fw_statistics_pram->pkts256);
+ rx_firmware_statistics->pkts512 =
+ in_be32(&p_rx_fw_statistics_pram->pkts512);
+ rx_firmware_statistics->pkts1024 =
+ in_be32(&p_rx_fw_statistics_pram->pkts1024);
+ rx_firmware_statistics->pktsjumbo =
+ in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
+ rx_firmware_statistics->frlossinmacer =
+ in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
+ rx_firmware_statistics->pausefr =
+ in_be32(&p_rx_fw_statistics_pram->pausefr);
+ for (i = 0; i < 0x4; i++)
+ rx_firmware_statistics->res1[i] =
+ p_rx_fw_statistics_pram->res1[i];
+ rx_firmware_statistics->removevlan =
+ in_be32(&p_rx_fw_statistics_pram->removevlan);
+ rx_firmware_statistics->replacevlan =
+ in_be32(&p_rx_fw_statistics_pram->replacevlan);
+ rx_firmware_statistics->insertvlan =
+ in_be32(&p_rx_fw_statistics_pram->insertvlan);
+ }
+
+ /* Hardware only if user handed pointer and driver actually
+ gathers hardware statistics */
+ if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
+ hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
+ hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
+ hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
+ hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
+ hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
+ hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
+ hardware_statistics->txok = in_be32(&ug_regs->txok);
+ hardware_statistics->txcf = in_be16(&ug_regs->txcf);
+ hardware_statistics->tmca = in_be32(&ug_regs->tmca);
+ hardware_statistics->tbca = in_be32(&ug_regs->tbca);
+ hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
+ hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
+ hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
+ hardware_statistics->rmca = in_be32(&ug_regs->rmca);
+ hardware_statistics->rbca = in_be32(&ug_regs->rbca);
+ }
+}
+
+static void dump_bds(ucc_geth_private_t *ugeth)
+{
+ int i;
+ int length;
+
+ for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ if (ugeth->p_tx_bd_ring[i]) {
+ length =
+ (ugeth->ug_info->bdRingLenTx[i] *
+ UCC_GETH_SIZE_OF_BD);
+ ugeth_info("TX BDs[%d]", i);
+ mem_disp(ugeth->p_tx_bd_ring[i], length);
+ }
+ }
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ if (ugeth->p_rx_bd_ring[i]) {
+ length =
+ (ugeth->ug_info->bdRingLenRx[i] *
+ UCC_GETH_SIZE_OF_BD);
+ ugeth_info("RX BDs[%d]", i);
+ mem_disp(ugeth->p_rx_bd_ring[i], length);
+ }
+ }
+}
+
+static void dump_regs(ucc_geth_private_t *ugeth)
+{
+ int i;
+
+ ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
+ ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
+
+ ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->maccfg1,
+ in_be32(&ugeth->ug_regs->maccfg1));
+ ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->maccfg2,
+ in_be32(&ugeth->ug_regs->maccfg2));
+ ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->ipgifg,
+ in_be32(&ugeth->ug_regs->ipgifg));
+ ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->hafdup,
+ in_be32(&ugeth->ug_regs->hafdup));
+ ugeth_info("miimcfg : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->miimng.miimcfg,
+ in_be32(&ugeth->ug_regs->miimng.miimcfg));
+ ugeth_info("miimcom : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->miimng.miimcom,
+ in_be32(&ugeth->ug_regs->miimng.miimcom));
+ ugeth_info("miimadd : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->miimng.miimadd,
+ in_be32(&ugeth->ug_regs->miimng.miimadd));
+ ugeth_info("miimcon : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->miimng.miimcon,
+ in_be32(&ugeth->ug_regs->miimng.miimcon));
+ ugeth_info("miimstat : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->miimng.miimstat,
+ in_be32(&ugeth->ug_regs->miimng.miimstat));
+ ugeth_info("miimmind : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->miimng.miimind,
+ in_be32(&ugeth->ug_regs->miimng.miimind));
+ ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->ifctl,
+ in_be32(&ugeth->ug_regs->ifctl));
+ ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->ifstat,
+ in_be32(&ugeth->ug_regs->ifstat));
+ ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->macstnaddr1,
+ in_be32(&ugeth->ug_regs->macstnaddr1));
+ ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->macstnaddr2,
+ in_be32(&ugeth->ug_regs->macstnaddr2));
+ ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->uempr,
+ in_be32(&ugeth->ug_regs->uempr));
+ ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->utbipar,
+ in_be32(&ugeth->ug_regs->utbipar));
+ ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->ug_regs->uescr,
+ in_be16(&ugeth->ug_regs->uescr));
+ ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->tx64,
+ in_be32(&ugeth->ug_regs->tx64));
+ ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->tx127,
+ in_be32(&ugeth->ug_regs->tx127));
+ ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->tx255,
+ in_be32(&ugeth->ug_regs->tx255));
+ ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rx64,
+ in_be32(&ugeth->ug_regs->rx64));
+ ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rx127,
+ in_be32(&ugeth->ug_regs->rx127));
+ ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rx255,
+ in_be32(&ugeth->ug_regs->rx255));
+ ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->txok,
+ in_be32(&ugeth->ug_regs->txok));
+ ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->ug_regs->txcf,
+ in_be16(&ugeth->ug_regs->txcf));
+ ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->tmca,
+ in_be32(&ugeth->ug_regs->tmca));
+ ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->tbca,
+ in_be32(&ugeth->ug_regs->tbca));
+ ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rxfok,
+ in_be32(&ugeth->ug_regs->rxfok));
+ ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rxbok,
+ in_be32(&ugeth->ug_regs->rxbok));
+ ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rbyt,
+ in_be32(&ugeth->ug_regs->rbyt));
+ ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rmca,
+ in_be32(&ugeth->ug_regs->rmca));
+ ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->rbca,
+ in_be32(&ugeth->ug_regs->rbca));
+ ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->scar,
+ in_be32(&ugeth->ug_regs->scar));
+ ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->ug_regs->scam,
+ in_be32(&ugeth->ug_regs->scam));
+
+ if (ugeth->p_thread_data_tx) {
+ int numThreadsTxNumerical;
+ switch (ugeth->ug_info->numThreadsTx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsTxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsTxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsTxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsTxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsTxNumerical = 8;
+ break;
+ default:
+ numThreadsTxNumerical = 0;
+ break;
+ }
+
+ ugeth_info("Thread data TXs:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_thread_data_tx);
+ for (i = 0; i < numThreadsTxNumerical; i++) {
+ ugeth_info("Thread data TX[%d]:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32) & ugeth->p_thread_data_tx[i]);
+ mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
+ sizeof(ucc_geth_thread_data_tx_t));
+ }
+ }
+ if (ugeth->p_thread_data_rx) {
+ int numThreadsRxNumerical;
+ switch (ugeth->ug_info->numThreadsRx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsRxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsRxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsRxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsRxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsRxNumerical = 8;
+ break;
+ default:
+ numThreadsRxNumerical = 0;
+ break;
+ }
+
+ ugeth_info("Thread data RX:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_thread_data_rx);
+ for (i = 0; i < numThreadsRxNumerical; i++) {
+ ugeth_info("Thread data RX[%d]:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32) & ugeth->p_thread_data_rx[i]);
+ mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
+ sizeof(ucc_geth_thread_data_rx_t));
+ }
+ }
+ if (ugeth->p_exf_glbl_param) {
+ ugeth_info("EXF global param:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_exf_glbl_param);
+ mem_disp((u8 *) ugeth->p_exf_glbl_param,
+ sizeof(*ugeth->p_exf_glbl_param));
+ }
+ if (ugeth->p_tx_glbl_pram) {
+ ugeth_info("TX global param:");
+ ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
+ ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_tx_glbl_pram->temoder,
+ in_be16(&ugeth->p_tx_glbl_pram->temoder));
+ ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->sqptr,
+ in_be32(&ugeth->p_tx_glbl_pram->sqptr));
+ ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
+ in_be32(&ugeth->p_tx_glbl_pram->
+ schedulerbasepointer));
+ ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
+ in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
+ ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->tstate,
+ in_be32(&ugeth->p_tx_glbl_pram->tstate));
+ ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
+ ugeth->p_tx_glbl_pram->iphoffset[0]);
+ ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
+ ugeth->p_tx_glbl_pram->iphoffset[1]);
+ ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
+ ugeth->p_tx_glbl_pram->iphoffset[2]);
+ ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
+ ugeth->p_tx_glbl_pram->iphoffset[3]);
+ ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
+ ugeth->p_tx_glbl_pram->iphoffset[4]);
+ ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
+ ugeth->p_tx_glbl_pram->iphoffset[5]);
+ ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
+ ugeth->p_tx_glbl_pram->iphoffset[6]);
+ ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
+ ugeth->p_tx_glbl_pram->iphoffset[7]);
+ ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
+ ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
+ ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
+ ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
+ ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
+ ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
+ ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
+ ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
+ in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
+ ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_tx_glbl_pram->tqptr,
+ in_be32(&ugeth->p_tx_glbl_pram->tqptr));
+ }
+ if (ugeth->p_rx_glbl_pram) {
+ ugeth_info("RX global param:");
+ ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
+ ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->remoder,
+ in_be32(&ugeth->p_rx_glbl_pram->remoder));
+ ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->rqptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rqptr));
+ ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->typeorlen,
+ in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
+ ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
+ ugeth->p_rx_glbl_pram->rxgstpack);
+ ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
+ ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
+ in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
+ ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
+ (u32) & ugeth->p_rx_glbl_pram->rstate,
+ ugeth->p_rx_glbl_pram->rstate);
+ ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->mrblr,
+ in_be16(&ugeth->p_rx_glbl_pram->mrblr));
+ ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
+ in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
+ ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->mflr,
+ in_be16(&ugeth->p_rx_glbl_pram->mflr));
+ ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->minflr,
+ in_be16(&ugeth->p_rx_glbl_pram->minflr));
+ ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->maxd1,
+ in_be16(&ugeth->p_rx_glbl_pram->maxd1));
+ ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->maxd2,
+ in_be16(&ugeth->p_rx_glbl_pram->maxd2));
+ ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->ecamptr,
+ in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
+ ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l2qt,
+ in_be32(&ugeth->p_rx_glbl_pram->l2qt));
+ ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
+ ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
+ ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
+ ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
+ ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
+ ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
+ ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
+ ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
+ in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
+ ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->vlantype,
+ in_be16(&ugeth->p_rx_glbl_pram->vlantype));
+ ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
+ (u32) & ugeth->p_rx_glbl_pram->vlantci,
+ in_be16(&ugeth->p_rx_glbl_pram->vlantci));
+ for (i = 0; i < 64; i++)
+ ugeth_info
+ ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
+ i,
+ (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
+ ugeth->p_rx_glbl_pram->addressfiltering[i]);
+ ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
+ in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
+ }
+ if (ugeth->p_send_q_mem_reg) {
+ ugeth_info("Send Q memory registers:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_send_q_mem_reg);
+ for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ ugeth_info("SQQD[%d]:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
+ mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
+ sizeof(ucc_geth_send_queue_qd_t));
+ }
+ }
+ if (ugeth->p_scheduler) {
+ ugeth_info("Scheduler:");
+ ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
+ mem_disp((u8 *) ugeth->p_scheduler,
+ sizeof(*ugeth->p_scheduler));
+ }
+ if (ugeth->p_tx_fw_statistics_pram) {
+ ugeth_info("TX FW statistics pram:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_tx_fw_statistics_pram);
+ mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
+ sizeof(*ugeth->p_tx_fw_statistics_pram));
+ }
+ if (ugeth->p_rx_fw_statistics_pram) {
+ ugeth_info("RX FW statistics pram:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_rx_fw_statistics_pram);
+ mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
+ sizeof(*ugeth->p_rx_fw_statistics_pram));
+ }
+ if (ugeth->p_rx_irq_coalescing_tbl) {
+ ugeth_info("RX IRQ coalescing tables:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_rx_irq_coalescing_tbl);
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ ugeth_info("RX IRQ coalescing table entry[%d]:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32) & ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i]);
+ ugeth_info
+ ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].interruptcoalescingmaxvalue,
+ in_be32(&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].
+ interruptcoalescingmaxvalue));
+ ugeth_info
+ ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].interruptcoalescingcounter,
+ in_be32(&ugeth->p_rx_irq_coalescing_tbl->
+ coalescingentry[i].
+ interruptcoalescingcounter));
+ }
+ }
+ if (ugeth->p_rx_bd_qs_tbl) {
+ ugeth_info("RX BD QS tables:");
+ ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ ugeth_info("RX BD QS table[%d]:", i);
+ ugeth_info("Base address: 0x%08x",
+ (u32) & ugeth->p_rx_bd_qs_tbl[i]);
+ ugeth_info
+ ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
+ ugeth_info
+ ("bdptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
+ ugeth_info
+ ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].
+ externalbdbaseptr));
+ ugeth_info
+ ("externalbdptr : addr - 0x%08x, val - 0x%08x",
+ (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
+ in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
+ ugeth_info("ucode RX Prefetched BDs:");
+ ugeth_info("Base address: 0x%08x",
+ (u32)
+ qe_muram_addr(in_be32
+ (&ugeth->p_rx_bd_qs_tbl[i].
+ bdbaseptr)));
+ mem_disp((u8 *)
+ qe_muram_addr(in_be32
+ (&ugeth->p_rx_bd_qs_tbl[i].
+ bdbaseptr)),
+ sizeof(ucc_geth_rx_prefetched_bds_t));
+ }
+ }
+ if (ugeth->p_init_enet_param_shadow) {
+ int size;
+ ugeth_info("Init enet param shadow:");
+ ugeth_info("Base address: 0x%08x",
+ (u32) ugeth->p_init_enet_param_shadow);
+ mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
+ sizeof(*ugeth->p_init_enet_param_shadow));
+
+ size = sizeof(ucc_geth_thread_rx_pram_t);
+ if (ugeth->ug_info->rxExtendedFiltering) {
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
+ if (ugeth->ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
+ if (ugeth->ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
+ }
+
+ dump_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ txthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_TX,
+ sizeof(ucc_geth_thread_tx_pram_t),
+ ugeth->ug_info->riscTx, 0);
+ dump_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ rxthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
+ ugeth->ug_info->riscRx, 1);
+ }
+}
+#endif /* DEBUG */
+
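+/* Program UPSMR, MACCFG1 and MACCFG2 with their compile-time defaults;
+ * the init_* helpers that follow refine individual fields using
+ * read-modify-write accesses.
+ */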
+static void init_default_reg_vals(volatile u32 *upsmr_register,
+ volatile u32 *maccfg1_register,
+ volatile u32 *maccfg2_register)
+{
+ out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
+ out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
+ out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
+}
+
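+/* Pack the half-duplex parameters into a single HAFDUP register value:
+ * alternate-BEB truncation, maximum retransmissions and the collision
+ * window each occupy their own field, and the remaining options are
+ * OR'ed in as flag bits.  Out-of-range fields are rejected with -EINVAL.
+ */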
+static int init_half_duplex_params(int alt_beb,
+ int back_pressure_no_backoff,
+ int no_backoff,
+ int excess_defer,
+ u8 alt_beb_truncation,
+ u8 max_retransmissions,
+ u8 collision_window,
+ volatile u32 *hafdup_register)
+{
+ u32 value = 0;
+
+ if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
+ (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
+ (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
+ return -EINVAL;
+
+ value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
+
+ if (alt_beb)
+ value |= HALFDUP_ALT_BEB;
+ if (back_pressure_no_backoff)
+ value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
+ if (no_backoff)
+ value |= HALFDUP_NO_BACKOFF;
+ if (excess_defer)
+ value |= HALFDUP_EXCESSIVE_DEFER;
+
+ value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
+
+ value |= collision_window;
+
+ out_be32(hafdup_register, value);
+ return 0;
+}
+
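+/* Range-check the four inter-frame gap values and pack them into the
+ * IPGIFG register using the per-field shift/mask pairs below.
+ */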
+static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
+ u8 non_btb_ipg,
+ u8 min_ifg,
+ u8 btb_ipg,
+ volatile u32 *ipgifg_register)
+{
+ u32 value = 0;
+
+ /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
+ IPG part 2 */
+ if (non_btb_cs_ipg > non_btb_ipg)
+ return -EINVAL;
+
+ if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
+ (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
+ /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
+ (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
+ return -EINVAL;
+
+ value |=
+ ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
+ IPGIFG_NBTB_CS_IPG_MASK);
+ value |=
+ ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
+ IPGIFG_NBTB_IPG_MASK);
+ value |=
+ ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
+ IPGIFG_MIN_IFG_MASK);
+ value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
+
+ out_be32(ipgifg_register, value);
+ return 0;
+}
+
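+/* Flow control setup: the pause period and extension field go into
+ * UEMPR, the automatic flow-control mode bits are OR'ed into UPSMR, and
+ * the Rx/Tx pause-frame enables are set in MACCFG1.
+ */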
+static int init_flow_control_params(u32 automatic_flow_control_mode,
+ int rx_flow_control_enable,
+ int tx_flow_control_enable,
+ u16 pause_period,
+ u16 extension_field,
+ volatile u32 *upsmr_register,
+ volatile u32 *uempr_register,
+ volatile u32 *maccfg1_register)
+{
+ u32 value = 0;
+
+ /* Set UEMPR register */
+ value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
+ value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
+ out_be32(uempr_register, value);
+
+ /* Set UPSMR register */
+ value = in_be32(upsmr_register);
+ value |= automatic_flow_control_mode;
+ out_be32(upsmr_register, value);
+
+ value = in_be32(maccfg1_register);
+ if (rx_flow_control_enable)
+ value |= MACCFG1_FLOW_RX;
+ if (tx_flow_control_enable)
+ value |= MACCFG1_FLOW_TX;
+ out_be32(maccfg1_register, value);
+
+ return 0;
+}
+
+static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
+ int auto_zero_hardware_statistics,
+ volatile u32 *upsmr_register,
+ volatile u16 *uescr_register)
+{
+ u32 upsmr_value = 0;
+ u16 uescr_value = 0;
+ /* Enable hardware statistics gathering if requested */
+ if (enable_hardware_statistics) {
+ upsmr_value = in_be32(upsmr_register);
+ upsmr_value |= UPSMR_HSE;
+ out_be32(upsmr_register, upsmr_value);
+ }
+
+ /* Clear hardware statistics counters */
+ uescr_value = in_be16(uescr_register);
+ uescr_value |= UESCR_CLRCNT;
+ /* Automatically zero hardware statistics counters on read,
+ if requested */
+ if (auto_zero_hardware_statistics)
+ uescr_value |= UESCR_AUTOZ;
+ out_be16(uescr_register, uescr_value);
+
+ return 0;
+}
+
+static int init_firmware_statistics_gathering_mode(int
+ enable_tx_firmware_statistics,
+ int enable_rx_firmware_statistics,
+ volatile u32 *tx_rmon_base_ptr,
+ u32 tx_firmware_statistics_structure_address,
+ volatile u32 *rx_rmon_base_ptr,
+ u32 rx_firmware_statistics_structure_address,
+ volatile u16 *temoder_register,
+ volatile u32 *remoder_register)
+{
+ /* Note: this function does not check if */
+ /* the parameters it receives are NULL */
+ u16 temoder_value;
+ u32 remoder_value;
+
+ if (enable_tx_firmware_statistics) {
+ out_be32(tx_rmon_base_ptr,
+ tx_firmware_statistics_structure_address);
+ temoder_value = in_be16(temoder_register);
+ temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
+ out_be16(temoder_register, temoder_value);
+ }
+
+ if (enable_rx_firmware_statistics) {
+ out_be32(rx_rmon_base_ptr,
+ rx_firmware_statistics_structure_address);
+ remoder_value = in_be32(remoder_register);
+ remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
+ out_be32(remoder_register, remoder_value);
+ }
+
+ return 0;
+}
+
+static int init_mac_station_addr_regs(u8 address_byte_0,
+ u8 address_byte_1,
+ u8 address_byte_2,
+ u8 address_byte_3,
+ u8 address_byte_4,
+ u8 address_byte_5,
+ volatile u32 *macstnaddr1_register,
+ volatile u32 *macstnaddr2_register)
+{
+ u32 value = 0;
+
+ /* Example: for a station address of 0x12345678ABCD, */
+ /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
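+	/* With this example address, the code below yields */
+	/* MACSTNADDR1 = 0xCDAB7856 and MACSTNADDR2 = 0x34120000. */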
+
+ /* MACSTNADDR1 Register: */
+
+ /* 0 7 8 15 */
+ /* station address byte 5 station address byte 4 */
+ /* 16 23 24 31 */
+ /* station address byte 3 station address byte 2 */
+ value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
+ value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
+ value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
+ value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
+
+ out_be32(macstnaddr1_register, value);
+
+ /* MACSTNADDR2 Register: */
+
+ /* 0 7 8 15 */
+ /* station address byte 1 station address byte 0 */
+ /* 16 23 24 31 */
+ /* reserved reserved */
+ value = 0;
+ value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
+ value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
+
+ out_be32(macstnaddr2_register, value);
+
+ return 0;
+}
+
+static int init_mac_duplex_mode(int full_duplex,
+ int limited_to_full_duplex,
+ volatile u32 *maccfg2_register)
+{
+ u32 value = 0;
+
+ /* some interfaces must work in full duplex mode */
+ if ((full_duplex == 0) && (limited_to_full_duplex == 1))
+ return -EINVAL;
+
+ value = in_be32(maccfg2_register);
+
+ if (full_duplex)
+ value |= MACCFG2_FDX;
+ else
+ value &= ~MACCFG2_FDX;
+
+ out_be32(maccfg2_register, value);
+ return 0;
+}
+
+static int init_check_frame_length_mode(int length_check,
+ volatile u32 *maccfg2_register)
+{
+ u32 value = 0;
+
+ value = in_be32(maccfg2_register);
+
+ if (length_check)
+ value |= MACCFG2_LC;
+ else
+ value &= ~MACCFG2_LC;
+
+ out_be32(maccfg2_register, value);
+ return 0;
+}
+
+static int init_preamble_length(u8 preamble_length,
+ volatile u32 *maccfg2_register)
+{
+ u32 value = 0;
+
+ if ((preamble_length < 3) || (preamble_length > 7))
+ return -EINVAL;
+
+ value = in_be32(maccfg2_register);
+ value &= ~MACCFG2_PREL_MASK;
+ value |= (preamble_length << MACCFG2_PREL_SHIFT);
+ out_be32(maccfg2_register, value);
+ return 0;
+}
+
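+/* Optionally reset the MII management block, program the management
+ * clock divider and preamble suppression into MIIMCFG, then poll MIIMIND
+ * until the bus goes idle (bounded by PHY_INIT_TIMEOUT).
+ */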
+static int init_mii_management_configuration(int reset_mgmt,
+ int preamble_supress,
+ volatile u32 *miimcfg_register,
+ volatile u32 *miimind_register)
+{
+ unsigned int timeout = PHY_INIT_TIMEOUT;
+ u32 value = 0;
+
+ value = in_be32(miimcfg_register);
+ if (reset_mgmt) {
+ value |= MIIMCFG_RESET_MANAGEMENT;
+ out_be32(miimcfg_register, value);
+ }
+
+ value = 0;
+
+ if (preamble_supress)
+ value |= MIIMCFG_NO_PREAMBLE;
+
+ value |= UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT;
+ out_be32(miimcfg_register, value);
+
+ /* Wait until the bus is free */
+ while ((in_be32(miimind_register) & MIIMIND_BUSY) && timeout--)
+ cpu_relax();
+
+	if (in_be32(miimind_register) & MIIMIND_BUSY) {
+ ugeth_err("%s: The MII Bus is stuck!", __FUNCTION__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int init_rx_parameters(int reject_broadcast,
+ int receive_short_frames,
+ int promiscuous, volatile u32 *upsmr_register)
+{
+ u32 value = 0;
+
+ value = in_be32(upsmr_register);
+
+ if (reject_broadcast)
+ value |= UPSMR_BRO;
+ else
+ value &= ~UPSMR_BRO;
+
+ if (receive_short_frames)
+ value |= UPSMR_RSH;
+ else
+ value &= ~UPSMR_RSH;
+
+ if (promiscuous)
+ value |= UPSMR_PRO;
+ else
+ value &= ~UPSMR_PRO;
+
+ out_be32(upsmr_register, value);
+
+ return 0;
+}
+
+static int init_max_rx_buff_len(u16 max_rx_buf_len,
+ volatile u16 *mrblr_register)
+{
+ /* max_rx_buf_len value must be a multiple of 128 */
+ if ((max_rx_buf_len == 0)
+ || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
+ return -EINVAL;
+
+ out_be16(mrblr_register, max_rx_buf_len);
+ return 0;
+}
+
+static int init_min_frame_len(u16 min_frame_length,
+ volatile u16 *minflr_register,
+ volatile u16 *mrblr_register)
+{
+ u16 mrblr_value = 0;
+
+ mrblr_value = in_be16(mrblr_register);
+ if (min_frame_length >= (mrblr_value - 4))
+ return -EINVAL;
+
+ out_be16(minflr_register, min_frame_length);
+ return 0;
+}
+
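+/* Translate the configured enet_interface into register settings: the
+ * MACCFG2 interface mode (nibble vs. byte), the UPSMR mode bits
+ * (RPM/R10M/TBIM/RMM), the TBI PHY address in UTBIPAR, plus duplex,
+ * frame-length-check and preamble-length configuration in MACCFG2.
+ * In TBI mode, autonegotiation is also switched off in the TBI control
+ * register.
+ */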
+static int adjust_enet_interface(ucc_geth_private_t *ugeth)
+{
+ ucc_geth_info_t *ug_info;
+ ucc_geth_t *ug_regs;
+ ucc_fast_t *uf_regs;
+ enet_speed_e speed;
+	int ret_val, rpm = 0, tbi = 0, r10m = 0, rmm = 0;
+	int limited_to_full_duplex = 0;
+ u32 upsmr, maccfg2, utbipar, tbiBaseAddress;
+ u16 value;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ ug_info = ugeth->ug_info;
+ ug_regs = ugeth->ug_regs;
+ uf_regs = ugeth->uccf->uf_regs;
+
+ /* Analyze enet_interface according to Interface Mode Configuration
+ table */
+ ret_val =
+ get_interface_details(ug_info->enet_interface, &speed, &r10m, &rmm,
+ &rpm, &tbi, &limited_to_full_duplex);
+ if (ret_val != 0) {
+ ugeth_err
+ ("%s: half duplex not supported in requested configuration.",
+ __FUNCTION__);
+ return ret_val;
+ }
+
+ /* Set MACCFG2 */
+ maccfg2 = in_be32(&ug_regs->maccfg2);
+ maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
+ if ((speed == ENET_SPEED_10BT) || (speed == ENET_SPEED_100BT))
+ maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
+ else if (speed == ENET_SPEED_1000BT)
+ maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
+ maccfg2 |= ug_info->padAndCrc;
+ out_be32(&ug_regs->maccfg2, maccfg2);
+
+ /* Set UPSMR */
+ upsmr = in_be32(&uf_regs->upsmr);
+ upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
+ if (rpm)
+ upsmr |= UPSMR_RPM;
+ if (r10m)
+ upsmr |= UPSMR_R10M;
+ if (tbi)
+ upsmr |= UPSMR_TBIM;
+ if (rmm)
+ upsmr |= UPSMR_RMM;
+ out_be32(&uf_regs->upsmr, upsmr);
+
+ /* Set UTBIPAR */
+ utbipar = in_be32(&ug_regs->utbipar);
+ utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
+ if (tbi)
+ utbipar |=
+ (ug_info->phy_address +
+ ugeth->ug_info->uf_info.
+ ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
+ else
+ utbipar |=
+ (0x10 +
+ ugeth->ug_info->uf_info.
+ ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
+ out_be32(&ug_regs->utbipar, utbipar);
+
+ /* Disable autonegotiation in tbi mode, because by default it
+ comes up in autonegotiation mode. */
+ /* Note that this depends on proper setting in utbipar register. */
+ if (tbi) {
+ tbiBaseAddress = in_be32(&ug_regs->utbipar);
+ tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
+ tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
+ value =
+ ugeth->mii_info->mdio_read(ugeth->dev, (u8) tbiBaseAddress,
+ ENET_TBI_MII_CR);
+ value &= ~0x1000; /* Turn off autonegotiation */
+ ugeth->mii_info->mdio_write(ugeth->dev, (u8) tbiBaseAddress,
+ ENET_TBI_MII_CR, value);
+ }
+
+ ret_val = init_mac_duplex_mode(1,
+ limited_to_full_duplex,
+ &ug_regs->maccfg2);
+ if (ret_val != 0) {
+ ugeth_err
+ ("%s: half duplex not supported in requested configuration.",
+ __FUNCTION__);
+ return ret_val;
+ }
+
+ init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
+
+ ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
+ if (ret_val != 0) {
+ ugeth_err
+ ("%s: Preamble length must be between 3 and 7 inclusive.",
+ __FUNCTION__);
+ return ret_val;
+ }
+
+ return 0;
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state. The PHY code conveys this
+ * information through variables in the ugeth structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ */
+static void adjust_link(struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ ucc_geth_t *ug_regs;
+ u32 tempval;
+ struct ugeth_mii_info *mii_info = ugeth->mii_info;
+
+ ug_regs = ugeth->ug_regs;
+
+ if (mii_info->link) {
+ /* Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode. */
+ if (mii_info->duplex != ugeth->oldduplex) {
+ if (!(mii_info->duplex)) {
+ tempval = in_be32(&ug_regs->maccfg2);
+ tempval &= ~(MACCFG2_FDX);
+ out_be32(&ug_regs->maccfg2, tempval);
+
+ ugeth_info("%s: Half Duplex", dev->name);
+ } else {
+ tempval = in_be32(&ug_regs->maccfg2);
+ tempval |= MACCFG2_FDX;
+ out_be32(&ug_regs->maccfg2, tempval);
+
+ ugeth_info("%s: Full Duplex", dev->name);
+ }
+
+ ugeth->oldduplex = mii_info->duplex;
+ }
+
+ if (mii_info->speed != ugeth->oldspeed) {
+ switch (mii_info->speed) {
+ case 1000:
+#ifdef CONFIG_MPC836x
+/* FIXME: This code is a workaround for a 100Mbps bug;
+   remove it when the bug is fixed!!! */
+ if (ugeth->ug_info->enet_interface ==
+ ENET_1000_GMII)
+ /* Run the commands which initialize the PHY */
+ {
+ tempval =
+ (u32) mii_info->mdio_read(ugeth->
+ dev, mii_info->mii_id, 0x1b);
+ tempval |= 0x000f;
+ mii_info->mdio_write(ugeth->dev,
+ mii_info->mii_id, 0x1b,
+ (u16) tempval);
+ tempval =
+ (u32) mii_info->mdio_read(ugeth->
+ dev, mii_info->mii_id,
+ MII_BMCR);
+ mii_info->mdio_write(ugeth->dev,
+ mii_info->mii_id, MII_BMCR,
+ (u16) (tempval | BMCR_RESET));
+ } else if (ugeth->ug_info->enet_interface ==
+ ENET_1000_RGMII)
+ /* Run the commands which initialize the PHY */
+ {
+ tempval =
+ (u32) mii_info->mdio_read(ugeth->
+ dev, mii_info->mii_id, 0x1b);
+ tempval = (tempval & ~0x000f) | 0x000b;
+ mii_info->mdio_write(ugeth->dev,
+ mii_info->mii_id, 0x1b,
+ (u16) tempval);
+ tempval =
+ (u32) mii_info->mdio_read(ugeth->
+ dev, mii_info->mii_id,
+ MII_BMCR);
+ mii_info->mdio_write(ugeth->dev,
+ mii_info->mii_id, MII_BMCR,
+ (u16) (tempval | BMCR_RESET));
+ }
+ msleep(4000);
+#endif /* CONFIG_MPC836x */
+ adjust_enet_interface(ugeth);
+ break;
+ case 100:
+ case 10:
+#ifdef CONFIG_MPC836x
+/* FIXME: This code is a workaround for a 100Mbps bug;
+   remove these lines when the bug is fixed!!! */
+ ugeth->ug_info->enet_interface = ENET_100_RGMII;
+ tempval =
+ (u32) mii_info->mdio_read(ugeth->dev,
+ mii_info->mii_id,
+ 0x1b);
+ tempval = (tempval & ~0x000f) | 0x000b;
+ mii_info->mdio_write(ugeth->dev,
+ mii_info->mii_id, 0x1b,
+ (u16) tempval);
+ tempval =
+ (u32) mii_info->mdio_read(ugeth->dev,
+ mii_info->mii_id,
+ MII_BMCR);
+ mii_info->mdio_write(ugeth->dev,
+ mii_info->mii_id, MII_BMCR,
+ (u16) (tempval |
+ BMCR_RESET));
+ msleep(4000);
+#endif /* CONFIG_MPC836x */
+ adjust_enet_interface(ugeth);
+ break;
+ default:
+ ugeth_warn
+ ("%s: Ack! Speed (%d) is not 10/100/1000!",
+ dev->name, mii_info->speed);
+ break;
+ }
+
+ ugeth_info("%s: Speed %dBT", dev->name,
+ mii_info->speed);
+
+ ugeth->oldspeed = mii_info->speed;
+ }
+
+ if (!ugeth->oldlink) {
+ ugeth_info("%s: Link is up", dev->name);
+ ugeth->oldlink = 1;
+ netif_carrier_on(dev);
+ netif_schedule(dev);
+ }
+ } else {
+ if (ugeth->oldlink) {
+ ugeth_info("%s: Link is down", dev->name);
+ ugeth->oldlink = 0;
+ ugeth->oldspeed = 0;
+ ugeth->oldduplex = -1;
+ netif_carrier_off(dev);
+ }
+ }
+}
+
+/* Configure the PHY for dev.
+ * returns 0 if success. -1 if failure
+ */
+static int init_phy(struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ struct phy_info *curphy;
+ ucc_mii_mng_t *mii_regs;
+ struct ugeth_mii_info *mii_info;
+ int err;
+
+ mii_regs = &ugeth->ug_regs->miimng;
+
+ ugeth->oldlink = 0;
+ ugeth->oldspeed = 0;
+ ugeth->oldduplex = -1;
+
+ mii_info = kmalloc(sizeof(struct ugeth_mii_info), GFP_KERNEL);
+
+ if (NULL == mii_info) {
+ ugeth_err("%s: Could not allocate mii_info", dev->name);
+ return -ENOMEM;
+ }
+
+ mii_info->mii_regs = mii_regs;
+ mii_info->speed = SPEED_1000;
+ mii_info->duplex = DUPLEX_FULL;
+ mii_info->pause = 0;
+ mii_info->link = 0;
+
+ mii_info->advertising = (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Full);
+ mii_info->autoneg = 1;
+
+ mii_info->mii_id = ugeth->ug_info->phy_address;
+
+ mii_info->dev = dev;
+
+ mii_info->mdio_read = &read_phy_reg;
+ mii_info->mdio_write = &write_phy_reg;
+
+ ugeth->mii_info = mii_info;
+
+ spin_lock_irq(&ugeth->lock);
+
+	/* Set this UCC to be the master of the MII management */
+ ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num);
+
+ if (init_mii_management_configuration(1,
+ ugeth->ug_info->
+ miiPreambleSupress,
+ &mii_regs->miimcfg,
+ &mii_regs->miimind)) {
+ ugeth_err("%s: The MII Bus is stuck!", dev->name);
+ err = -1;
+ goto bus_fail;
+ }
+
+ spin_unlock_irq(&ugeth->lock);
+
+ /* get info for this PHY */
+ curphy = get_phy_info(ugeth->mii_info);
+
+ if (curphy == NULL) {
+ ugeth_err("%s: No PHY found", dev->name);
+ err = -1;
+ goto no_phy;
+ }
+
+ mii_info->phyinfo = curphy;
+
+ /* Run the commands which initialize the PHY */
+ if (curphy->init) {
+ err = curphy->init(ugeth->mii_info);
+ if (err)
+ goto phy_init_fail;
+ }
+
+ return 0;
+
+ phy_init_fail:
+ no_phy:
+ bus_fail:
+ kfree(mii_info);
+
+ return err;
+}
+
+#ifdef CONFIG_UGETH_TX_ON_DEMOND
+static int ugeth_transmit_on_demand(ucc_geth_private_t *ugeth)
+{
+ ucc_fast_transmit_on_demand(ugeth->uccf);
+
+ return 0;
+}
+#endif
+
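+/* Gracefully stop the transmitter: mask and clear the GRA event, issue
+ * the GRACEFUL_STOP_TX host command and busy-wait until the QE signals
+ * completion through UCCE[GRA].  ugeth_graceful_stop_rx() below follows
+ * the same pattern but polls the acknowledge bit in rxgstpack instead.
+ */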
+static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth)
+{
+ ucc_fast_private_t *uccf;
+ u32 cecr_subblock;
+ u32 temp;
+
+ uccf = ugeth->uccf;
+
+ /* Mask GRACEFUL STOP TX interrupt bit and clear it */
+ temp = in_be32(uccf->p_uccm);
+ temp &= ~UCCE_GRA;
+ out_be32(uccf->p_uccm, temp);
+ out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */
+
+ /* Issue host command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
+ (u8) QE_CR_PROTOCOL_ETHERNET, 0);
+
+ /* Wait for command to complete */
+ do {
+ temp = in_be32(uccf->p_ucce);
+ } while (!(temp & UCCE_GRA));
+
+ uccf->stopped_tx = 1;
+
+ return 0;
+}
+
+static int ugeth_graceful_stop_rx(ucc_geth_private_t *ugeth)
+{
+ ucc_fast_private_t *uccf;
+ u32 cecr_subblock;
+ u8 temp;
+
+ uccf = ugeth->uccf;
+
+ /* Clear acknowledge bit */
+ temp = ugeth->p_rx_glbl_pram->rxgstpack;
+ temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
+ ugeth->p_rx_glbl_pram->rxgstpack = temp;
+
+ /* Keep issuing command and checking acknowledge bit until
+ it is asserted, according to spec */
+ do {
+ /* Issue host command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
+ ucc_num);
+ qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
+ (u8) QE_CR_PROTOCOL_ETHERNET, 0);
+
+ temp = ugeth->p_rx_glbl_pram->rxgstpack;
+ } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
+
+ uccf->stopped_rx = 1;
+
+ return 0;
+}
+
+static int ugeth_restart_tx(ucc_geth_private_t *ugeth)
+{
+ ucc_fast_private_t *uccf;
+ u32 cecr_subblock;
+
+ uccf = ugeth->uccf;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_RESTART_TX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
+ 0);
+ uccf->stopped_tx = 0;
+
+ return 0;
+}
+
+static int ugeth_restart_rx(ucc_geth_private_t *ugeth)
+{
+ ucc_fast_private_t *uccf;
+ u32 cecr_subblock;
+
+ uccf = ugeth->uccf;
+
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(QE_RESTART_RX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
+ 0);
+ uccf->stopped_rx = 0;
+
+ return 0;
+}
+
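+/* Enable one or both directions of the UCC, restarting Tx/Rx first if a
+ * previous graceful stop left them halted; ugeth_disable() below is the
+ * mirror image and gracefully stops any active direction first.
+ */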
+static int ugeth_enable(ucc_geth_private_t *ugeth, comm_dir_e mode)
+{
+ ucc_fast_private_t *uccf;
+ int enabled_tx, enabled_rx;
+
+ uccf = ugeth->uccf;
+
+ /* check if the UCC number is in range. */
+ if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
+ ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ enabled_tx = uccf->enabled_tx;
+ enabled_rx = uccf->enabled_rx;
+
+ /* Get Tx and Rx going again, in case this channel was actively
+ disabled. */
+ if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
+ ugeth_restart_tx(ugeth);
+ if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
+ ugeth_restart_rx(ugeth);
+
+ ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
+
+ return 0;
+}
+
+static int ugeth_disable(ucc_geth_private_t *ugeth, comm_dir_e mode)
+{
+ ucc_fast_private_t *uccf;
+
+ uccf = ugeth->uccf;
+
+ /* check if the UCC number is in range. */
+ if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
+ ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* Stop any transmissions */
+ if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
+ ugeth_graceful_stop_tx(ugeth);
+
+ /* Stop any receptions */
+ if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
+ ugeth_graceful_stop_rx(ugeth);
+
+ ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
+
+ return 0;
+}
+
+static void ugeth_dump_regs(ucc_geth_private_t *ugeth)
+{
+#ifdef DEBUG
+ ucc_fast_dump_regs(ugeth->uccf);
+ dump_regs(ugeth);
+ dump_bds(ugeth);
+#endif
+}
+
+#ifdef CONFIG_UGETH_FILTERING
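+/* Serialize the extended-filtering TAD parameters into the byte layout
+ * expected by the QE filter: the valid bit, the extended-features and
+ * reject flags, the VLAN tag operations, RQoS, VLAN priority and VID.
+ */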
+static int ugeth_ext_filtering_serialize_tad(ucc_geth_tad_params_t *
+ p_UccGethTadParams,
+ qe_fltr_tad_t *qe_fltr_tad)
+{
+ u16 temp;
+
+ /* Zero serialized TAD */
+ memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE);
+
+ qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */
+ if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode ||
+ (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
+ || (p_UccGethTadParams->vnontag_op !=
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP)
+ )
+ qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF;
+ if (p_UccGethTadParams->reject_frame)
+ qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ;
+ temp =
+ (u16) (((u16) p_UccGethTadParams->
+ vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT);
+ qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */
+
+ qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */
+ if (p_UccGethTadParams->vnontag_op ==
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT)
+ qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP;
+ qe_fltr_tad->serialized[1] |=
+ p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT;
+
+ qe_fltr_tad->serialized[2] |=
+ p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT;
+ /* upper bits */
+ qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8);
+ /* lower bits */
+ qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff);
+
+ return 0;
+}
+
+static enet_addr_container_t
+ *ugeth_82xx_filtering_get_match_addr_in_hash(ucc_geth_private_t *ugeth,
+ enet_addr_t *p_enet_addr)
+{
+ enet_addr_container_t *enet_addr_cont;
+ struct list_head *p_lh;
+ u16 i, num;
+ int32_t j;
+ u8 *p_counter;
+
+ if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
+ p_lh = &ugeth->group_hash_q;
+ p_counter = &(ugeth->numGroupAddrInHash);
+ } else {
+ p_lh = &ugeth->ind_hash_q;
+ p_counter = &(ugeth->numIndAddrInHash);
+ }
+
+ if (!p_lh)
+ return NULL;
+
+ num = *p_counter;
+
+ for (i = 0; i < num; i++) {
+ enet_addr_cont =
+ (enet_addr_container_t *)
+ ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
+ for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) {
+ if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j])
+ break;
+ if (j == 0)
+ return enet_addr_cont; /* Found */
+ }
+ enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
+ }
+ return NULL;
+}
+
+static int ugeth_82xx_filtering_add_addr_in_hash(ucc_geth_private_t *ugeth,
+ enet_addr_t *p_enet_addr)
+{
+ ucc_geth_enet_address_recognition_location_e location;
+ enet_addr_container_t *enet_addr_cont;
+ struct list_head *p_lh;
+ u8 i;
+ u32 limit;
+ u8 *p_counter;
+
+ if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
+ p_lh = &ugeth->group_hash_q;
+ limit = ugeth->ug_info->maxGroupAddrInHash;
+ location =
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH;
+ p_counter = &(ugeth->numGroupAddrInHash);
+ } else {
+ p_lh = &ugeth->ind_hash_q;
+ limit = ugeth->ug_info->maxIndAddrInHash;
+ location =
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH;
+ p_counter = &(ugeth->numIndAddrInHash);
+ }
+
+ if ((enet_addr_cont =
+ ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) {
+		enqueue(p_lh, &enet_addr_cont->node);	/* Put it back */
+ return 0;
+ }
+ if ((!p_lh) || (!(*p_counter < limit)))
+ return -EBUSY;
+ if (!(enet_addr_cont = get_enet_addr_container()))
+ return -ENOMEM;
+ for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
+ (enet_addr_cont->address)[i] = (*p_enet_addr)[i];
+ enet_addr_cont->location = location;
+ enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
+ ++(*p_counter);
+
+ hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
+
+ return 0;
+}
+
+static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth,
+ enet_addr_t *p_enet_addr)
+{
+ ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+ enet_addr_container_t *enet_addr_cont;
+ ucc_fast_private_t *uccf;
+ comm_dir_e comm_dir;
+ u16 i, num;
+ struct list_head *p_lh;
+ u32 *addr_h, *addr_l;
+ u8 *p_counter;
+
+ uccf = ugeth->uccf;
+
+ p_82xx_addr_filt =
+ (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+	enet_addr_cont =
+	    ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr);
+	if (!enet_addr_cont)
+ return -ENOENT;
+
+ /* It's been found and removed from the CQ. */
+ /* Now destroy its container */
+ put_enet_addr_container(enet_addr_cont);
+
+ if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
+ addr_h = &(p_82xx_addr_filt->gaddr_h);
+ addr_l = &(p_82xx_addr_filt->gaddr_l);
+ p_lh = &ugeth->group_hash_q;
+ p_counter = &(ugeth->numGroupAddrInHash);
+ } else {
+ addr_h = &(p_82xx_addr_filt->iaddr_h);
+ addr_l = &(p_82xx_addr_filt->iaddr_l);
+ p_lh = &ugeth->ind_hash_q;
+ p_counter = &(ugeth->numIndAddrInHash);
+ }
+
+ comm_dir = 0;
+ if (uccf->enabled_tx)
+ comm_dir |= COMM_DIR_TX;
+ if (uccf->enabled_rx)
+ comm_dir |= COMM_DIR_RX;
+ if (comm_dir)
+ ugeth_disable(ugeth, comm_dir);
+
+ /* Clear the hash table. */
+ out_be32(addr_h, 0x00000000);
+ out_be32(addr_l, 0x00000000);
+
+ /* Add all remaining CQ elements back into hash */
+ num = --(*p_counter);
+ for (i = 0; i < num; i++) {
+ enet_addr_cont =
+ (enet_addr_container_t *)
+ ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
+ hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
+ enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
+ }
+
+ if (comm_dir)
+ ugeth_enable(ugeth, comm_dir);
+
+ return 0;
+}
+#endif /* CONFIG_UGETH_FILTERING */
+
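+/* Flush either the group or the individual hash filter: temporarily
+ * disable the affected directions, zero the gaddr/iaddr registers, free
+ * every queued address container and re-enable the UCC.
+ */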
+static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t *
+ ugeth,
+ enet_addr_type_e
+ enet_addr_type)
+{
+ ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+ ucc_fast_private_t *uccf;
+ comm_dir_e comm_dir;
+ struct list_head *p_lh;
+ u16 i, num;
+ u32 *addr_h, *addr_l;
+ u8 *p_counter;
+
+ uccf = ugeth->uccf;
+
+ p_82xx_addr_filt =
+ (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
+ addressfiltering;
+
+ if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
+ addr_h = &(p_82xx_addr_filt->gaddr_h);
+ addr_l = &(p_82xx_addr_filt->gaddr_l);
+ p_lh = &ugeth->group_hash_q;
+ p_counter = &(ugeth->numGroupAddrInHash);
+ } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
+ addr_h = &(p_82xx_addr_filt->iaddr_h);
+ addr_l = &(p_82xx_addr_filt->iaddr_l);
+ p_lh = &ugeth->ind_hash_q;
+ p_counter = &(ugeth->numIndAddrInHash);
+ } else
+ return -EINVAL;
+
+ comm_dir = 0;
+ if (uccf->enabled_tx)
+ comm_dir |= COMM_DIR_TX;
+ if (uccf->enabled_rx)
+ comm_dir |= COMM_DIR_RX;
+ if (comm_dir)
+ ugeth_disable(ugeth, comm_dir);
+
+ /* Clear the hash table. */
+ out_be32(addr_h, 0x00000000);
+ out_be32(addr_l, 0x00000000);
+
+ if (!p_lh)
+ return 0;
+
+ num = *p_counter;
+
+ /* Delete all remaining CQ elements */
+ for (i = 0; i < num; i++)
+ put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
+
+ *p_counter = 0;
+
+ if (comm_dir)
+ ugeth_enable(ugeth, comm_dir);
+
+ return 0;
+}
+
+#ifdef CONFIG_UGETH_FILTERING
+static int ugeth_82xx_filtering_add_addr_in_paddr(ucc_geth_private_t *ugeth,
+ enet_addr_t *p_enet_addr,
+ u8 paddr_num)
+{
+ int i;
+
+ if ((*p_enet_addr)[0] & ENET_GROUP_ADDR)
+ ugeth_warn
+ ("%s: multicast address added to paddr will have no "
+ "effect - is this what you wanted?",
+ __FUNCTION__);
+
+ ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */
+ /* store address in our database */
+ for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
+ ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i];
+ /* put in hardware */
+ return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num);
+}
+#endif /* CONFIG_UGETH_FILTERING */
+
+static int ugeth_82xx_filtering_clear_addr_in_paddr(ucc_geth_private_t *ugeth,
+ u8 paddr_num)
+{
+ ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
+ return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
+}
+
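+/* Undo everything allocated during initialization: the fast UCC context,
+ * the MURAM parameter RAM regions, the Tx/Rx BD rings (whether placed in
+ * system memory or MURAM), any queued socket buffers, and the hash-filter
+ * address containers.
+ */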
+static void ucc_geth_memclean(ucc_geth_private_t *ugeth)
+{
+ u16 i, j;
+ u8 *bd;
+
+ if (!ugeth)
+ return;
+
+ if (ugeth->uccf)
+ ucc_fast_free(ugeth->uccf);
+
+ if (ugeth->p_thread_data_tx) {
+ qe_muram_free(ugeth->thread_dat_tx_offset);
+ ugeth->p_thread_data_tx = NULL;
+ }
+ if (ugeth->p_thread_data_rx) {
+ qe_muram_free(ugeth->thread_dat_rx_offset);
+ ugeth->p_thread_data_rx = NULL;
+ }
+ if (ugeth->p_exf_glbl_param) {
+ qe_muram_free(ugeth->exf_glbl_param_offset);
+ ugeth->p_exf_glbl_param = NULL;
+ }
+ if (ugeth->p_rx_glbl_pram) {
+ qe_muram_free(ugeth->rx_glbl_pram_offset);
+ ugeth->p_rx_glbl_pram = NULL;
+ }
+ if (ugeth->p_tx_glbl_pram) {
+ qe_muram_free(ugeth->tx_glbl_pram_offset);
+ ugeth->p_tx_glbl_pram = NULL;
+ }
+ if (ugeth->p_send_q_mem_reg) {
+ qe_muram_free(ugeth->send_q_mem_reg_offset);
+ ugeth->p_send_q_mem_reg = NULL;
+ }
+ if (ugeth->p_scheduler) {
+ qe_muram_free(ugeth->scheduler_offset);
+ ugeth->p_scheduler = NULL;
+ }
+ if (ugeth->p_tx_fw_statistics_pram) {
+ qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
+ ugeth->p_tx_fw_statistics_pram = NULL;
+ }
+ if (ugeth->p_rx_fw_statistics_pram) {
+ qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
+ ugeth->p_rx_fw_statistics_pram = NULL;
+ }
+ if (ugeth->p_rx_irq_coalescing_tbl) {
+ qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
+ ugeth->p_rx_irq_coalescing_tbl = NULL;
+ }
+ if (ugeth->p_rx_bd_qs_tbl) {
+ qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
+ ugeth->p_rx_bd_qs_tbl = NULL;
+ }
+ if (ugeth->p_init_enet_param_shadow) {
+ return_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ rxthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_RX,
+ ugeth->ug_info->riscRx, 1);
+ return_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ txthread[0]),
+ ENET_INIT_PARAM_MAX_ENTRIES_TX,
+ ugeth->ug_info->riscTx, 0);
+ kfree(ugeth->p_init_enet_param_shadow);
+ ugeth->p_init_enet_param_shadow = NULL;
+ }
+ for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ bd = ugeth->p_tx_bd_ring[i];
+ for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
+ if (ugeth->tx_skbuff[i][j]) {
+ dma_unmap_single(NULL,
+ BD_BUFFER_ARG(bd),
+ (BD_STATUS_AND_LENGTH(bd) &
+ BD_LENGTH_MASK),
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
+ ugeth->tx_skbuff[i][j] = NULL;
+ }
+ }
+
+ kfree(ugeth->tx_skbuff[i]);
+
+ if (ugeth->p_tx_bd_ring[i]) {
+ if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_SYSTEM)
+ kfree((void *)ugeth->tx_bd_ring_offset[i]);
+ else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM)
+ qe_muram_free(ugeth->tx_bd_ring_offset[i]);
+ ugeth->p_tx_bd_ring[i] = NULL;
+ }
+ }
+ for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ if (ugeth->p_rx_bd_ring[i]) {
+ /* Return existing data buffers in ring */
+ bd = ugeth->p_rx_bd_ring[i];
+ for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
+ if (ugeth->rx_skbuff[i][j]) {
+ dma_unmap_single(NULL, BD_BUFFER(bd),
+ ugeth->ug_info->
+ uf_info.
+ max_rx_buf_length +
+ UCC_GETH_RX_DATA_BUF_ALIGNMENT,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(ugeth->
+ rx_skbuff[i][j]);
+ ugeth->rx_skbuff[i][j] = NULL;
+ }
+ bd += UCC_GETH_SIZE_OF_BD;
+ }
+
+ kfree(ugeth->rx_skbuff[i]);
+
+ if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_SYSTEM)
+ kfree((void *)ugeth->rx_bd_ring_offset[i]);
+ else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM)
+ qe_muram_free(ugeth->rx_bd_ring_offset[i]);
+ ugeth->p_rx_bd_ring[i] = NULL;
+ }
+ }
+ while (!list_empty(&ugeth->group_hash_q))
+ put_enet_addr_container(ENET_ADDR_CONT_ENTRY
+ (dequeue(&ugeth->group_hash_q)));
+ while (!list_empty(&ugeth->ind_hash_q))
+ put_enet_addr_container(ENET_ADDR_CONT_ENTRY
+ (dequeue(&ugeth->ind_hash_q)));
+
+}
+
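+/* Rebuild the receive address filter: enable promiscuous mode via UPSMR when
+ * requested, otherwise program the group hash registers from dev->mc_list
+ * (all ones for IFF_ALLMULTI). */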
+static void ucc_geth_set_multi(struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth;
+ struct dev_mc_list *dmi;
+ ucc_fast_t *uf_regs;
+ ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+ enet_addr_t tempaddr;
+ u8 *mcptr, *tdptr;
+ int i, j;
+
+ ugeth = netdev_priv(dev);
+
+ uf_regs = ugeth->uccf->uf_regs;
+
+ if (dev->flags & IFF_PROMISC) {
+
+ /* Log any net taps. */
+ printk("%s: Promiscuous mode enabled.\n", dev->name);
+ uf_regs->upsmr |= UPSMR_PRO;
+
+ } else {
+
+ uf_regs->upsmr &= ~UPSMR_PRO;
+
+ p_82xx_addr_filt =
+ (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
+ p_rx_glbl_pram->addressfiltering;
+
+ if (dev->flags & IFF_ALLMULTI) {
+ /* Catch all multicast addresses, so set the
+ * filter to all 1's.
+ */
+ out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
+ out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
+ } else {
+ /* Clear filter and add the addresses in the list.
+ */
+ out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
+ out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
+
+ dmi = dev->mc_list;
+
+ for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
+
+ /* Only support group multicast for now.
+ */
+ if (!(dmi->dmi_addr[0] & 1))
+ continue;
+
+ /* The address in dmi_addr is LSB first,
+ * and taddr is MSB first. We have to
+ * copy bytes MSB first from dmi_addr.
+ */
+ mcptr = (u8 *) dmi->dmi_addr + 5;
+ tdptr = (u8 *) & tempaddr;
+ for (j = 0; j < 6; j++)
+ *tdptr++ = *mcptr--;
+
+ /* Ask CPM to run CRC and set bit in
+ * filter mask.
+ */
+ hw_add_addr_in_hash(ugeth, &tempaddr);
+
+ }
+ }
+ }
+}
+
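+/* Bring the controller down: disable Rx/Tx at the UCC and the MAC, mask and
+ * clear pending events, release the IRQs (or PHY polling timer) and free all
+ * resources through ucc_geth_memclean(). */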
+static void ucc_geth_stop(ucc_geth_private_t *ugeth)
+{
+ ucc_geth_t *ug_regs = ugeth->ug_regs;
+ u32 tempval;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ /* Disable the controller */
+ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+ /* Tell the kernel the link is down */
+ ugeth->mii_info->link = 0;
+ adjust_link(ugeth->dev);
+
+ /* Mask all interrupts */
+ out_be32(ugeth->uccf->p_ucce, 0x00000000);
+
+ /* Clear all interrupts */
+ out_be32(ugeth->uccf->p_ucce, 0xffffffff);
+
+ /* Disable Rx and Tx */
+ tempval = in_be32(&ug_regs->maccfg1);
+ tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
+ out_be32(&ug_regs->maccfg1, tempval);
+
+ if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
+ /* Clear any pending interrupts */
+ mii_clear_phy_interrupt(ugeth->mii_info);
+
+ /* Disable PHY Interrupts */
+ mii_configure_phy_interrupt(ugeth->mii_info,
+ MII_INTERRUPT_DISABLED);
+ }
+
+ free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
+
+ if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
+ free_irq(ugeth->ug_info->phy_interrupt, ugeth->dev);
+ } else {
+ del_timer_sync(&ugeth->phy_info_timer);
+ }
+
+ ucc_geth_memclean(ugeth);
+}
+
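+/* Allocate and program everything the controller needs: validate ug_info,
+ * set up the MAC registers, allocate the Tx/Rx BD rings and all global and
+ * per-thread parameter RAM, fill the shadow InitEnet structure and finally
+ * issue the QE INIT_TX_RX command. */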
+static int ucc_geth_startup(ucc_geth_private_t *ugeth)
+{
+ ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
+ ucc_geth_init_pram_t *p_init_enet_pram;
+ ucc_fast_private_t *uccf;
+ ucc_geth_info_t *ug_info;
+ ucc_fast_info_t *uf_info;
+ ucc_fast_t *uf_regs;
+ ucc_geth_t *ug_regs;
+ int ret_val = -EINVAL;
+ u32 remoder = UCC_GETH_REMODER_INIT;
+ u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
+ u32 ifstat, i, j, size, l2qt, l3qt, length;
+ u16 temoder = UCC_GETH_TEMODER_INIT;
+ u16 test;
+ u8 function_code = 0;
+ u8 *bd, *endOfRing;
+ u8 numThreadsRxNumerical, numThreadsTxNumerical;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ ug_info = ugeth->ug_info;
+ uf_info = &ug_info->uf_info;
+
+ if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
+ (uf_info->bd_mem_part == MEM_PART_MURAM))) {
+ ugeth_err("%s: Bad memory partition value.", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* Rx BD lengths */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
+ (ug_info->bdRingLenRx[i] %
+ UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
+ ugeth_err
+			    ("%s: Rx BD ring length must be a multiple of 4"
+			     " and no smaller than 8.", __FUNCTION__);
+ return -EINVAL;
+ }
+ }
+
+ /* Tx BD lengths */
+ for (i = 0; i < ug_info->numQueuesTx; i++) {
+ if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
+ ugeth_err
+ ("%s: Tx BD ring length must be no smaller than 2.",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+ }
+
+ /* mrblr */
+ if ((uf_info->max_rx_buf_length == 0) ||
+ (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
+ ugeth_err
+		    ("%s: max_rx_buf_length must be a non-zero multiple of 128.",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* num Tx queues */
+ if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
+ ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* num Rx queues */
+ if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
+ ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* l2qt */
+ for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
+ if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
+ ugeth_err
+ ("%s: VLAN priority table entry must not be"
+ " larger than number of Rx queues.",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+ }
+
+ /* l3qt */
+ for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
+ if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
+ ugeth_err
+ ("%s: IP priority table entry must not be"
+ " larger than number of Rx queues.",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+ }
+
+ if (ug_info->cam && !ug_info->ecamptr) {
+ ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+
+ if ((ug_info->numStationAddresses !=
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
+ && ug_info->rxExtendedFiltering) {
+ ugeth_err("%s: Number of station addresses greater than 1 "
+ "not allowed in extended parsing mode.",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* Generate uccm_mask for receive */
+ uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
+ for (i = 0; i < ug_info->numQueuesRx; i++)
+ uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i);
+
+ for (i = 0; i < ug_info->numQueuesTx; i++)
+ uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
+ /* Initialize the general fast UCC block. */
+ if (ucc_fast_init(uf_info, &uccf)) {
+ ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ ugeth->uccf = uccf;
+
+ switch (ug_info->numThreadsRx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsRxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsRxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsRxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsRxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsRxNumerical = 8;
+ break;
+ default:
+ ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -EINVAL;
+ break;
+ }
+
+ switch (ug_info->numThreadsTx) {
+ case UCC_GETH_NUM_OF_THREADS_1:
+ numThreadsTxNumerical = 1;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_2:
+ numThreadsTxNumerical = 2;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_4:
+ numThreadsTxNumerical = 4;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_6:
+ numThreadsTxNumerical = 6;
+ break;
+ case UCC_GETH_NUM_OF_THREADS_8:
+ numThreadsTxNumerical = 8;
+ break;
+ default:
+ ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -EINVAL;
+ break;
+ }
+
+ /* Calculate rx_extended_features */
+ ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
+ ug_info->ipAddressAlignment ||
+ (ug_info->numStationAddresses !=
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
+
+ ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
+ (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
+ || (ug_info->vlanOperationNonTagged !=
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
+
+ uf_regs = uccf->uf_regs;
+ ug_regs = (ucc_geth_t *) (uccf->uf_regs);
+ ugeth->ug_regs = ug_regs;
+
+ init_default_reg_vals(&uf_regs->upsmr,
+ &ug_regs->maccfg1, &ug_regs->maccfg2);
+
+ /* Set UPSMR */
+ /* For more details see the hardware spec. */
+ init_rx_parameters(ug_info->bro,
+ ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
+
+ /* We're going to ignore other registers for now, */
+ /* except as needed to get up and running */
+
+ /* Set MACCFG1 */
+ /* For more details see the hardware spec. */
+ init_flow_control_params(ug_info->aufc,
+ ug_info->receiveFlowControl,
+ 1,
+ ug_info->pausePeriod,
+ ug_info->extensionField,
+ &uf_regs->upsmr,
+ &ug_regs->uempr, &ug_regs->maccfg1);
+
+ maccfg1 = in_be32(&ug_regs->maccfg1);
+ maccfg1 |= MACCFG1_ENABLE_RX;
+ maccfg1 |= MACCFG1_ENABLE_TX;
+ out_be32(&ug_regs->maccfg1, maccfg1);
+
+ /* Set IPGIFG */
+ /* For more details see the hardware spec. */
+ ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
+ ug_info->nonBackToBackIfgPart2,
+ ug_info->
+ miminumInterFrameGapEnforcement,
+ ug_info->backToBackInterFrameGap,
+ &ug_regs->ipgifg);
+ if (ret_val != 0) {
+ ugeth_err("%s: IPGIFG initialization parameter too large.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return ret_val;
+ }
+
+ /* Set HAFDUP */
+ /* For more details see the hardware spec. */
+ ret_val = init_half_duplex_params(ug_info->altBeb,
+ ug_info->backPressureNoBackoff,
+ ug_info->noBackoff,
+ ug_info->excessDefer,
+ ug_info->altBebTruncation,
+ ug_info->maxRetransmission,
+ ug_info->collisionWindow,
+ &ug_regs->hafdup);
+ if (ret_val != 0) {
+ ugeth_err("%s: Half Duplex initialization parameter too large.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return ret_val;
+ }
+
+ /* Set IFSTAT */
+ /* For more details see the hardware spec. */
+ /* Read only - resets upon read */
+ ifstat = in_be32(&ug_regs->ifstat);
+
+ /* Clear UEMPR */
+ /* For more details see the hardware spec. */
+ out_be32(&ug_regs->uempr, 0);
+
+ /* Set UESCR */
+ /* For more details see the hardware spec. */
+ init_hw_statistics_gathering_mode((ug_info->statisticsMode &
+ UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
+ 0, &uf_regs->upsmr, &ug_regs->uescr);
+
+ /* Allocate Tx bds */
+ for (j = 0; j < ug_info->numQueuesTx; j++) {
+ /* Allocate in multiple of
+ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
+ according to spec */
+ length = ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD)
+ / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
+ * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+ if ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) %
+ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
+ length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
+ if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
+ u32 align = 4;
+ if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
+ align = UCC_GETH_TX_BD_RING_ALIGNMENT;
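+			/* Over-allocate by 'align' bytes and round the
+			 * pointer up; the raw kmalloc() value stays in
+			 * tx_bd_ring_offset[j] so it can be kfree'd later. */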
+ ugeth->tx_bd_ring_offset[j] =
+ (u32) (kmalloc((u32) (length + align),
+ GFP_KERNEL));
+ if (ugeth->tx_bd_ring_offset[j] != 0)
+ ugeth->p_tx_bd_ring[j] =
+ (void*)((ugeth->tx_bd_ring_offset[j] +
+ align) & ~(align - 1));
+ } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
+ ugeth->tx_bd_ring_offset[j] =
+ qe_muram_alloc(length,
+ UCC_GETH_TX_BD_RING_ALIGNMENT);
+ if (!IS_MURAM_ERR(ugeth->tx_bd_ring_offset[j]))
+ ugeth->p_tx_bd_ring[j] =
+ (u8 *) qe_muram_addr(ugeth->
+ tx_bd_ring_offset[j]);
+ }
+ if (!ugeth->p_tx_bd_ring[j]) {
+ ugeth_err
+ ("%s: Can not allocate memory for Tx bd rings.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ /* Zero unused end of bd ring, according to spec */
+ memset(ugeth->p_tx_bd_ring[j] +
+ ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD, 0,
+ length - ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD);
+ }
+
+ /* Allocate Rx bds */
+ for (j = 0; j < ug_info->numQueuesRx; j++) {
+ length = ug_info->bdRingLenRx[j] * UCC_GETH_SIZE_OF_BD;
+ if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
+ u32 align = 4;
+ if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
+ align = UCC_GETH_RX_BD_RING_ALIGNMENT;
+ ugeth->rx_bd_ring_offset[j] =
+ (u32) (kmalloc((u32) (length + align), GFP_KERNEL));
+ if (ugeth->rx_bd_ring_offset[j] != 0)
+ ugeth->p_rx_bd_ring[j] =
+ (void*)((ugeth->rx_bd_ring_offset[j] +
+ align) & ~(align - 1));
+ } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
+ ugeth->rx_bd_ring_offset[j] =
+ qe_muram_alloc(length,
+ UCC_GETH_RX_BD_RING_ALIGNMENT);
+ if (!IS_MURAM_ERR(ugeth->rx_bd_ring_offset[j]))
+ ugeth->p_rx_bd_ring[j] =
+ (u8 *) qe_muram_addr(ugeth->
+ rx_bd_ring_offset[j]);
+ }
+ if (!ugeth->p_rx_bd_ring[j]) {
+ ugeth_err
+ ("%s: Can not allocate memory for Rx bd rings.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ }
+
+ /* Init Tx bds */
+ for (j = 0; j < ug_info->numQueuesTx; j++) {
+ /* Setup the skbuff rings */
+ ugeth->tx_skbuff[j] =
+ (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
+ ugeth->ug_info->bdRingLenTx[j],
+ GFP_KERNEL);
+
+ if (ugeth->tx_skbuff[j] == NULL) {
+ ugeth_err("%s: Could not allocate tx_skbuff",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
+ ugeth->tx_skbuff[j][i] = NULL;
+
+ ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
+ bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
+ for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
+ BD_BUFFER_CLEAR(bd);
+ BD_STATUS_AND_LENGTH_SET(bd, 0);
+ bd += UCC_GETH_SIZE_OF_BD;
+ }
+ bd -= UCC_GETH_SIZE_OF_BD;
+ BD_STATUS_AND_LENGTH_SET(bd, T_W);/* for last BD set Wrap bit */
+ }
+
+ /* Init Rx bds */
+ for (j = 0; j < ug_info->numQueuesRx; j++) {
+ /* Setup the skbuff rings */
+ ugeth->rx_skbuff[j] =
+ (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
+ ugeth->ug_info->bdRingLenRx[j],
+ GFP_KERNEL);
+
+ if (ugeth->rx_skbuff[j] == NULL) {
+ ugeth_err("%s: Could not allocate rx_skbuff",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
+ ugeth->rx_skbuff[j][i] = NULL;
+
+ ugeth->skb_currx[j] = 0;
+ bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
+ for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
+ BD_STATUS_AND_LENGTH_SET(bd, R_I);
+ BD_BUFFER_CLEAR(bd);
+ bd += UCC_GETH_SIZE_OF_BD;
+ }
+ bd -= UCC_GETH_SIZE_OF_BD;
+ BD_STATUS_AND_LENGTH_SET(bd, R_W);/* for last BD set Wrap bit */
+ }
+
+ /*
+ * Global PRAM
+ */
+ /* Tx global PRAM */
+ /* Allocate global tx parameter RAM page */
+ ugeth->tx_glbl_pram_offset =
+ qe_muram_alloc(sizeof(ucc_geth_tx_global_pram_t),
+ UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ ugeth->p_tx_glbl_pram =
+ (ucc_geth_tx_global_pram_t *) qe_muram_addr(ugeth->
+ tx_glbl_pram_offset);
+ /* Zero out p_tx_glbl_pram */
+ memset(ugeth->p_tx_glbl_pram, 0, sizeof(ucc_geth_tx_global_pram_t));
+
+ /* Fill global PRAM */
+
+ /* TQPTR */
+ /* Size varies with number of Tx threads */
+ ugeth->thread_dat_tx_offset =
+ qe_muram_alloc(numThreadsTxNumerical *
+ sizeof(ucc_geth_thread_data_tx_t) +
+ 32 * (numThreadsTxNumerical == 1),
+ UCC_GETH_THREAD_DATA_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ ugeth->p_thread_data_tx =
+ (ucc_geth_thread_data_tx_t *) qe_muram_addr(ugeth->
+ thread_dat_tx_offset);
+ out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
+
+ /* vtagtable */
+ for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
+ out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
+ ug_info->vtagtable[i]);
+
+ /* iphoffset */
+ for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
+ ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i];
+
+ /* SQPTR */
+ /* Size varies with number of Tx queues */
+ ugeth->send_q_mem_reg_offset =
+ qe_muram_alloc(ug_info->numQueuesTx *
+ sizeof(ucc_geth_send_queue_qd_t),
+ UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ ugeth->p_send_q_mem_reg =
+ (ucc_geth_send_queue_mem_region_t *) qe_muram_addr(ugeth->
+ send_q_mem_reg_offset);
+ out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
+
+ /* Setup the table */
+ /* Assume BD rings are already established */
+ for (i = 0; i < ug_info->numQueuesTx; i++) {
+ endOfRing =
+ ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
+ 1) * UCC_GETH_SIZE_OF_BD;
+ if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
+ (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
+ last_bd_completed_address,
+ (u32) virt_to_phys(endOfRing));
+ } else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM) {
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
+ (u32) immrbar_virt_to_phys(ugeth->
+ p_tx_bd_ring[i]));
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
+ last_bd_completed_address,
+ (u32) immrbar_virt_to_phys(endOfRing));
+ }
+ }
+
+ /* schedulerbasepointer */
+
+ if (ug_info->numQueuesTx > 1) {
+ /* scheduler exists only if more than 1 tx queue */
+ ugeth->scheduler_offset =
+ qe_muram_alloc(sizeof(ucc_geth_scheduler_t),
+ UCC_GETH_SCHEDULER_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->scheduler_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_scheduler.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ ugeth->p_scheduler =
+ (ucc_geth_scheduler_t *) qe_muram_addr(ugeth->
+ scheduler_offset);
+ out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
+ ugeth->scheduler_offset);
+ /* Zero out p_scheduler */
+ memset(ugeth->p_scheduler, 0, sizeof(ucc_geth_scheduler_t));
+
+ /* Set values in scheduler */
+ out_be32(&ugeth->p_scheduler->mblinterval,
+ ug_info->mblinterval);
+ out_be16(&ugeth->p_scheduler->nortsrbytetime,
+ ug_info->nortsrbytetime);
+ ugeth->p_scheduler->fracsiz = ug_info->fracsiz;
+ ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq;
+ ugeth->p_scheduler->txasap = ug_info->txasap;
+ ugeth->p_scheduler->extrabw = ug_info->extrabw;
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ ugeth->p_scheduler->weightfactor[i] =
+ ug_info->weightfactor[i];
+
+ /* Set pointers to cpucount registers in scheduler */
+ ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
+ ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
+ ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
+ ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
+ ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
+ ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
+ ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
+ ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
+ }
+
+ /* schedulerbasepointer */
+ /* TxRMON_PTR (statistics) */
+ if (ug_info->
+ statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
+ ugeth->tx_fw_statistics_pram_offset =
+ qe_muram_alloc(sizeof
+ (ucc_geth_tx_firmware_statistics_pram_t),
+ UCC_GETH_TX_STATISTICS_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_tx_fw_statistics_pram.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ ugeth->p_tx_fw_statistics_pram =
+ (ucc_geth_tx_firmware_statistics_pram_t *)
+ qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
+ /* Zero out p_tx_fw_statistics_pram */
+ memset(ugeth->p_tx_fw_statistics_pram,
+ 0, sizeof(ucc_geth_tx_firmware_statistics_pram_t));
+ }
+
+ /* temoder */
+ /* Already has speed set */
+
+ if (ug_info->numQueuesTx > 1)
+ temoder |= TEMODER_SCHEDULER_ENABLE;
+ if (ug_info->ipCheckSumGenerate)
+ temoder |= TEMODER_IP_CHECKSUM_GENERATE;
+ temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
+ out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
+
+ test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
+
+ /* Function code register value to be used later */
+ function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL;
+ /* Required for QE */
+
+ /* function code register */
+ out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
+
+ /* Rx global PRAM */
+ /* Allocate global rx parameter RAM page */
+ ugeth->rx_glbl_pram_offset =
+ qe_muram_alloc(sizeof(ucc_geth_rx_global_pram_t),
+ UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ ugeth->p_rx_glbl_pram =
+ (ucc_geth_rx_global_pram_t *) qe_muram_addr(ugeth->
+ rx_glbl_pram_offset);
+ /* Zero out p_rx_glbl_pram */
+ memset(ugeth->p_rx_glbl_pram, 0, sizeof(ucc_geth_rx_global_pram_t));
+
+ /* Fill global PRAM */
+
+ /* RQPTR */
+ /* Size varies with number of Rx threads */
+ ugeth->thread_dat_rx_offset =
+ qe_muram_alloc(numThreadsRxNumerical *
+ sizeof(ucc_geth_thread_data_rx_t),
+ UCC_GETH_THREAD_DATA_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ ugeth->p_thread_data_rx =
+ (ucc_geth_thread_data_rx_t *) qe_muram_addr(ugeth->
+ thread_dat_rx_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
+
+ /* typeorlen */
+ out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
+
+ /* rxrmonbaseptr (statistics) */
+ if (ug_info->
+ statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
+ ugeth->rx_fw_statistics_pram_offset =
+ qe_muram_alloc(sizeof
+ (ucc_geth_rx_firmware_statistics_pram_t),
+ UCC_GETH_RX_STATISTICS_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_rx_fw_statistics_pram.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ ugeth->p_rx_fw_statistics_pram =
+ (ucc_geth_rx_firmware_statistics_pram_t *)
+ qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
+ /* Zero out p_rx_fw_statistics_pram */
+ memset(ugeth->p_rx_fw_statistics_pram, 0,
+ sizeof(ucc_geth_rx_firmware_statistics_pram_t));
+ }
+
+ /* intCoalescingPtr */
+
+ /* Size varies with number of Rx queues */
+ ugeth->rx_irq_coalescing_tbl_offset =
+ qe_muram_alloc(ug_info->numQueuesRx *
+ sizeof(ucc_geth_rx_interrupt_coalescing_entry_t),
+ UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_rx_irq_coalescing_tbl.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ ugeth->p_rx_irq_coalescing_tbl =
+ (ucc_geth_rx_interrupt_coalescing_table_t *)
+ qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
+ ugeth->rx_irq_coalescing_tbl_offset);
+
+ /* Fill interrupt coalescing table */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
+ interruptcoalescingmaxvalue,
+ ug_info->interruptcoalescingmaxvalue[i]);
+ out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
+ interruptcoalescingcounter,
+ ug_info->interruptcoalescingmaxvalue[i]);
+ }
+
+ /* MRBLR */
+ init_max_rx_buff_len(uf_info->max_rx_buf_length,
+ &ugeth->p_rx_glbl_pram->mrblr);
+ /* MFLR */
+ out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
+ /* MINFLR */
+ init_min_frame_len(ug_info->minFrameLength,
+ &ugeth->p_rx_glbl_pram->minflr,
+ &ugeth->p_rx_glbl_pram->mrblr);
+ /* MAXD1 */
+ out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
+ /* MAXD2 */
+ out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
+
+ /* l2qt */
+ l2qt = 0;
+ for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
+ l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
+ out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
+
+ /* l3qt */
+ for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
+ l3qt = 0;
+ for (i = 0; i < 8; i++)
+ l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
+ out_be32(&ugeth->p_rx_glbl_pram->l3qt[j], l3qt);
+ }
+
+ /* vlantype */
+ out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
+
+ /* vlantci */
+ out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
+
+ /* ecamptr */
+ out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
+
+ /* RBDQPTR */
+ /* Size varies with number of Rx queues */
+ ugeth->rx_bd_qs_tbl_offset =
+ qe_muram_alloc(ug_info->numQueuesRx *
+ (sizeof(ucc_geth_rx_bd_queues_entry_t) +
+ sizeof(ucc_geth_rx_prefetched_bds_t)),
+ UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ ugeth->p_rx_bd_qs_tbl =
+ (ucc_geth_rx_bd_queues_entry_t *) qe_muram_addr(ugeth->
+ rx_bd_qs_tbl_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
+ /* Zero out p_rx_bd_qs_tbl */
+ memset(ugeth->p_rx_bd_qs_tbl,
+ 0,
+ ug_info->numQueuesRx * (sizeof(ucc_geth_rx_bd_queues_entry_t) +
+ sizeof(ucc_geth_rx_prefetched_bds_t)));
+
+ /* Setup the table */
+ /* Assume BD rings are already established */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
+ out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
+ } else if (ugeth->ug_info->uf_info.bd_mem_part ==
+ MEM_PART_MURAM) {
+ out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ (u32) immrbar_virt_to_phys(ugeth->
+ p_rx_bd_ring[i]));
+ }
+ /* rest of fields handled by QE */
+ }
+
+ /* remoder */
+ /* Already has speed set */
+
+ if (ugeth->rx_extended_features)
+ remoder |= REMODER_RX_EXTENDED_FEATURES;
+ if (ug_info->rxExtendedFiltering)
+ remoder |= REMODER_RX_EXTENDED_FILTERING;
+ if (ug_info->dynamicMaxFrameLength)
+ remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
+ if (ug_info->dynamicMinFrameLength)
+ remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
+ remoder |=
+ ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
+ remoder |=
+ ug_info->
+ vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
+ remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
+ remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
+ if (ug_info->ipCheckSumCheck)
+ remoder |= REMODER_IP_CHECKSUM_CHECK;
+ if (ug_info->ipAddressAlignment)
+ remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
+ out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
+
+	/* Note that this function must be called ONLY AFTER */
+	/* p_tx_fw_statistics_pram and p_rx_fw_statistics_pram */
+	/* are allocated! */
+ init_firmware_statistics_gathering_mode((ug_info->
+ statisticsMode &
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
+ (ug_info->statisticsMode &
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
+ &ugeth->p_tx_glbl_pram->txrmonbaseptr,
+ ugeth->tx_fw_statistics_pram_offset,
+ &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
+ ugeth->rx_fw_statistics_pram_offset,
+ &ugeth->p_tx_glbl_pram->temoder,
+ &ugeth->p_rx_glbl_pram->remoder);
+
+ /* function code register */
+ ugeth->p_rx_glbl_pram->rstate = function_code;
+
+ /* initialize extended filtering */
+ if (ug_info->rxExtendedFiltering) {
+ if (!ug_info->extendedFilteringChainPointer) {
+ ugeth_err("%s: Null Extended Filtering Chain Pointer.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -EINVAL;
+ }
+
+ /* Allocate memory for extended filtering Mode Global
+ Parameters */
+ ugeth->exf_glbl_param_offset =
+ qe_muram_alloc(sizeof(ucc_geth_exf_global_pram_t),
+ UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
+ if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for"
+ " p_exf_glbl_param.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+
+ ugeth->p_exf_glbl_param =
+ (ucc_geth_exf_global_pram_t *) qe_muram_addr(ugeth->
+ exf_glbl_param_offset);
+ out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
+ ugeth->exf_glbl_param_offset);
+ out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
+ (u32) ug_info->extendedFilteringChainPointer);
+
+ } else { /* initialize 82xx style address filtering */
+
+ /* Init individual address recognition registers to disabled */
+
+ for (j = 0; j < NUM_OF_PADDRS; j++)
+ ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
+
+ /* Create CQs for hash tables */
+ if (ug_info->maxGroupAddrInHash > 0) {
+ INIT_LIST_HEAD(&ugeth->group_hash_q);
+ }
+ if (ug_info->maxIndAddrInHash > 0) {
+ INIT_LIST_HEAD(&ugeth->ind_hash_q);
+ }
+ p_82xx_addr_filt =
+ (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
+ p_rx_glbl_pram->addressfiltering;
+
+ ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
+ ENET_ADDR_TYPE_GROUP);
+ ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
+ ENET_ADDR_TYPE_INDIVIDUAL);
+ }
+
+ /*
+ * Initialize UCC at QE level
+ */
+
+ command = QE_INIT_TX_RX;
+
+ /* Allocate shadow InitEnet command parameter structure.
+ * This is needed because after the InitEnet command is executed,
+ * the structure in DPRAM is released, because DPRAM is a premium
+ * resource.
+ * This shadow structure keeps a copy of what was done so that the
+ * allocated resources can be released when the channel is freed.
+ */
+ if (!(ugeth->p_init_enet_param_shadow =
+ (ucc_geth_init_pram_t *) kmalloc(sizeof(ucc_geth_init_pram_t),
+ GFP_KERNEL))) {
+ ugeth_err
+ ("%s: Can not allocate memory for"
+		     " p_init_enet_param_shadow.", __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ /* Zero out *p_init_enet_param_shadow */
+ memset((char *)ugeth->p_init_enet_param_shadow,
+ 0, sizeof(ucc_geth_init_pram_t));
+
+ /* Fill shadow InitEnet command parameter structure */
+
+ ugeth->p_init_enet_param_shadow->resinit1 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT1;
+ ugeth->p_init_enet_param_shadow->resinit2 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT2;
+ ugeth->p_init_enet_param_shadow->resinit3 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT3;
+ ugeth->p_init_enet_param_shadow->resinit4 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT4;
+ ugeth->p_init_enet_param_shadow->resinit5 =
+ ENET_INIT_PARAM_MAGIC_RES_INIT5;
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
+ ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
+ ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
+
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
+ ugeth->rx_glbl_pram_offset | ug_info->riscRx;
+ if ((ug_info->largestexternallookupkeysize !=
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
+ && (ug_info->largestexternallookupkeysize !=
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+ && (ug_info->largestexternallookupkeysize !=
+ QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
+ ugeth_err("%s: Invalid largest External Lookup Key Size.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -EINVAL;
+ }
+ ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
+ ug_info->largestexternallookupkeysize;
+ size = sizeof(ucc_geth_thread_rx_pram_t);
+ if (ug_info->rxExtendedFiltering) {
+ size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
+ if (ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
+ if (ug_info->largestexternallookupkeysize ==
+ QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+ size +=
+ THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
+ }
+
+ if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
+ p_init_enet_param_shadow->rxthread[0]),
+ (u8) (numThreadsRxNumerical + 1)
+ /* Rx needs one extra for terminator */
+ , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
+ ug_info->riscRx, 1)) != 0) {
+ ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return ret_val;
+ }
+
+ ugeth->p_init_enet_param_shadow->txglobal =
+ ugeth->tx_glbl_pram_offset | ug_info->riscTx;
+ if ((ret_val =
+ fill_init_enet_entries(ugeth,
+ &(ugeth->p_init_enet_param_shadow->
+ txthread[0]), numThreadsTxNumerical,
+ sizeof(ucc_geth_thread_tx_pram_t),
+ UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
+ ug_info->riscTx, 0)) != 0) {
+ ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return ret_val;
+ }
+
+ /* Load Rx bds with buffers */
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
+ ugeth_err("%s: Can not fill Rx bds with buffers.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return ret_val;
+ }
+ }
+
+ /* Allocate InitEnet command parameter structure */
+ init_enet_pram_offset = qe_muram_alloc(sizeof(ucc_geth_init_pram_t), 4);
+ if (IS_MURAM_ERR(init_enet_pram_offset)) {
+ ugeth_err
+ ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
+ __FUNCTION__);
+ ucc_geth_memclean(ugeth);
+ return -ENOMEM;
+ }
+ p_init_enet_pram =
+ (ucc_geth_init_pram_t *) qe_muram_addr(init_enet_pram_offset);
+
+ /* Copy shadow InitEnet command parameter structure into PRAM */
+ p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1;
+ p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2;
+ p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3;
+ p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4;
+ out_be16(&p_init_enet_pram->resinit5,
+ ugeth->p_init_enet_param_shadow->resinit5);
+ p_init_enet_pram->largestexternallookupkeysize =
+ ugeth->p_init_enet_param_shadow->largestexternallookupkeysize;
+ out_be32(&p_init_enet_pram->rgftgfrxglobal,
+ ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
+ for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
+ out_be32(&p_init_enet_pram->rxthread[i],
+ ugeth->p_init_enet_param_shadow->rxthread[i]);
+ out_be32(&p_init_enet_pram->txglobal,
+ ugeth->p_init_enet_param_shadow->txglobal);
+ for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
+ out_be32(&p_init_enet_pram->txthread[i],
+ ugeth->p_init_enet_param_shadow->txthread[i]);
+
+ /* Issue QE command */
+ cecr_subblock =
+ ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
+ qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
+ init_enet_pram_offset);
+
+ /* Free InitEnet command parameter */
+ qe_muram_free(init_enet_pram_offset);
+
+ return 0;
+}
+
+/* returns a net_device_stats structure pointer */
+static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+
+ return &(ugeth->stats);
+}
+
+/* ucc_geth_timeout gets called when a packet has not been
+ * transmitted after a set amount of time.
+ * For now, assume that clearing out all the structures and
+ * starting over will fix the problem. */
+static void ucc_geth_timeout(struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ ugeth->stats.tx_errors++;
+
+ ugeth_dump_regs(ugeth);
+
+ if (dev->flags & IFF_UP) {
+ ucc_geth_stop(ugeth);
+ ucc_geth_startup(ugeth);
+ }
+
+ netif_schedule(dev);
+}
+
+/* This is called by the kernel when a frame is ready for transmission. */
+/* It is pointed to by the dev->hard_start_xmit function pointer */
+static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ u8 *bd; /* BD pointer */
+ u32 bd_status;
+ u8 txQ = 0;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ spin_lock_irq(&ugeth->lock);
+
+ ugeth->stats.tx_bytes += skb->len;
+
+ /* Start from the next BD that should be filled */
+ bd = ugeth->txBd[txQ];
+ bd_status = BD_STATUS_AND_LENGTH(bd);
+ /* Save the skb pointer so we can free it later */
+ ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
+
+ /* Update the current skb pointer (wrapping if this was the last) */
+ ugeth->skb_curtx[txQ] =
+ (ugeth->skb_curtx[txQ] +
+ 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
+
+ /* set up the buffer descriptor */
+ BD_BUFFER_SET(bd,
+ dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
+
+ //printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data);
+
+ bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
+
+ BD_STATUS_AND_LENGTH_SET(bd, bd_status);
+
+ dev->trans_start = jiffies;
+
+ /* Move to next BD in the ring */
+ if (!(bd_status & T_W))
+ ugeth->txBd[txQ] = bd + UCC_GETH_SIZE_OF_BD;
+ else
+ ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ];
+
+ /* If the next BD still needs to be cleaned up, then the bds
+ are full. We need to tell the kernel to stop sending us stuff. */
+ if (bd == ugeth->confBd[txQ]) {
+ if (!netif_queue_stopped(dev))
+ netif_stop_queue(dev);
+ }
+
+ if (ugeth->p_scheduler) {
+ ugeth->cpucount[txQ]++;
+ /* Indicate to QE that there are more Tx bds ready for
+ transmission */
+ /* This is done by writing a running counter of the bd
+ count to the scheduler PRAM. */
+ out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
+ }
+
+ spin_unlock_irq(&ugeth->lock);
+
+ return 0;
+}
+
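+/* Walk the Rx BD ring of one queue, passing completed frames up the stack
+ * and replacing each consumed buffer with a fresh skb, until an empty BD or
+ * the work limit is reached. Returns the number of frames handled. */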
+static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit)
+{
+ struct sk_buff *skb;
+ u8 *bd;
+ u16 length, howmany = 0;
+ u32 bd_status;
+ u8 *bdBuffer;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ spin_lock(&ugeth->lock);
+ /* collect received buffers */
+ bd = ugeth->rxBd[rxQ];
+
+ bd_status = BD_STATUS_AND_LENGTH(bd);
+
+ /* while there are received buffers and BD is full (~R_E) */
+ while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
+ bdBuffer = (u8 *) BD_BUFFER(bd);
+ length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
+ skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
+
+ /* determine whether buffer is first, last, first and last
+ (single buffer frame) or middle (not first and not last) */
+ if (!skb ||
+ (!(bd_status & (R_F | R_L))) ||
+ (bd_status & R_ERRORS_FATAL)) {
+ ugeth_vdbg("%s, %d: ERROR!!! skb - 0x%08x",
+ __FUNCTION__, __LINE__, (u32) skb);
+ if (skb)
+ dev_kfree_skb_any(skb);
+
+ ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
+ ugeth->stats.rx_dropped++;
+ } else {
+ ugeth->stats.rx_packets++;
+ howmany++;
+
+ /* Prep the skb for the packet */
+ skb_put(skb, length);
+
+ /* Tell the skb what kind of packet this is */
+ skb->protocol = eth_type_trans(skb, ugeth->dev);
+
+ ugeth->stats.rx_bytes += length;
+ /* Send the packet up the stack */
+#ifdef CONFIG_UGETH_NAPI
+ netif_receive_skb(skb);
+#else
+ netif_rx(skb);
+#endif /* CONFIG_UGETH_NAPI */
+ }
+
+ ugeth->dev->last_rx = jiffies;
+
+ skb = get_new_skb(ugeth, bd);
+ if (!skb) {
+ ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
+ spin_unlock(&ugeth->lock);
+ ugeth->stats.rx_dropped++;
+ break;
+ }
+
+ ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
+
+ /* update to point at the next skb */
+ ugeth->skb_currx[rxQ] =
+ (ugeth->skb_currx[rxQ] +
+ 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
+
+ if (bd_status & R_W)
+ bd = ugeth->p_rx_bd_ring[rxQ];
+ else
+ bd += UCC_GETH_SIZE_OF_BD;
+
+ bd_status = BD_STATUS_AND_LENGTH(bd);
+ }
+
+ ugeth->rxBd[rxQ] = bd;
+ spin_unlock(&ugeth->lock);
+ return howmany;
+}
+
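+/* Reclaim buffers the hardware has finished transmitting on one queue, free
+ * their skbs and restart the netif queue if it was stopped. */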
+static int ucc_geth_tx(struct net_device *dev, u8 txQ)
+{
+ /* Start from the next BD that should be filled */
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ u8 *bd; /* BD pointer */
+ u32 bd_status;
+
+ bd = ugeth->confBd[txQ];
+ bd_status = BD_STATUS_AND_LENGTH(bd);
+
+ /* Normal processing. */
+ while ((bd_status & T_R) == 0) {
+ /* BD contains already transmitted buffer. */
+ /* Handle the transmitted buffer and release */
+ /* the BD to be used with the current frame */
+
+ if ((bd = ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
+ break;
+
+ ugeth->stats.tx_packets++;
+
+ /* Free the sk buffer associated with this TxBD */
+ dev_kfree_skb_irq(ugeth->
+ tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
+ ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
+ ugeth->skb_dirtytx[txQ] =
+ (ugeth->skb_dirtytx[txQ] +
+ 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
+
+ /* We freed a buffer, so now we can restart transmission */
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+
+ /* Advance the confirmation BD pointer */
+ if (!(bd_status & T_W))
+ ugeth->confBd[txQ] += UCC_GETH_SIZE_OF_BD;
+ else
+ ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ];
+ }
+ return 0;
+}
+
+#ifdef CONFIG_UGETH_NAPI
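+/* NAPI poll: service Rx queue 0 within the allotted budget and complete the
+ * poll when the budget was not exhausted. */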
+static int ucc_geth_poll(struct net_device *dev, int *budget)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ int howmany;
+ int rx_work_limit = *budget;
+ u8 rxQ = 0;
+
+ if (rx_work_limit > dev->quota)
+ rx_work_limit = dev->quota;
+
+ howmany = ucc_geth_rx(ugeth, rxQ, rx_work_limit);
+
+ dev->quota -= howmany;
+ rx_work_limit -= howmany;
+ *budget -= howmany;
+
+ if (rx_work_limit >= 0)
+ netif_rx_complete(dev);
+
+ return (rx_work_limit < 0) ? 1 : 0;
+}
+#endif /* CONFIG_UGETH_NAPI */
+
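+/* Main UCC interrupt handler: read and acknowledge the event register, then
+ * service the Rx queues, confirm Tx completions and count error events until
+ * no enabled events remain pending. */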
+static irqreturn_t ucc_geth_irq_handler(int irq, void *info,
+ struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)info;
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ ucc_fast_private_t *uccf;
+ ucc_geth_info_t *ug_info;
+ register u32 ucce = 0;
+ register u32 bit_mask = UCCE_RXBF_SINGLE_MASK;
+ register u32 tx_mask = UCCE_TXBF_SINGLE_MASK;
+ register u8 i;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ if (!ugeth)
+ return IRQ_NONE;
+
+ uccf = ugeth->uccf;
+ ug_info = ugeth->ug_info;
+
+ do {
+ ucce |= (u32) (in_be32(uccf->p_ucce) & in_be32(uccf->p_uccm));
+
+ /* clear event bits for next time */
+ /* Side effect here is to mask ucce variable
+ for future processing below. */
+ out_be32(uccf->p_ucce, ucce); /* Clear with ones,
+ but only bits in UCCM */
+
+ /* We ignore Tx interrupts because Tx confirmation is
+ done inside Tx routine */
+
+ for (i = 0; i < ug_info->numQueuesRx; i++) {
+ if (ucce & bit_mask)
+ ucc_geth_rx(ugeth, i,
+ (int)ugeth->ug_info->
+ bdRingLenRx[i]);
+ ucce &= ~bit_mask;
+ bit_mask <<= 1;
+ }
+
+ for (i = 0; i < ug_info->numQueuesTx; i++) {
+ if (ucce & tx_mask)
+ ucc_geth_tx(dev, i);
+ ucce &= ~tx_mask;
+ tx_mask <<= 1;
+ }
+
+ /* Exceptions */
+ if (ucce & UCCE_BSY) {
+ ugeth_vdbg("Got BUSY irq!!!!");
+ ugeth->stats.rx_errors++;
+ ucce &= ~UCCE_BSY;
+ }
+ if (ucce & UCCE_OTHER) {
+ ugeth_vdbg("Got frame with error (ucce - 0x%08x)!!!!",
+ ucce);
+ ugeth->stats.rx_errors++;
+ ucce &= ~ucce;
+ }
+ }
+ while (ucce);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ /* Clear the interrupt */
+ mii_clear_phy_interrupt(ugeth->mii_info);
+
+ /* Disable PHY interrupts */
+ mii_configure_phy_interrupt(ugeth->mii_info, MII_INTERRUPT_DISABLED);
+
+ /* Schedule the phy change */
+ schedule_work(&ugeth->tq);
+
+ return IRQ_HANDLED;
+}
+
+/* Scheduled by the phy_interrupt/timer to handle PHY changes */
+static void ugeth_phy_change(void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ ucc_geth_t *ug_regs;
+ int result = 0;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ ug_regs = ugeth->ug_regs;
+
+ /* Delay to give the PHY a chance to change the
+ * register state */
+ msleep(1);
+
+ /* Update the link, speed, duplex */
+ result = ugeth->mii_info->phyinfo->read_status(ugeth->mii_info);
+
+ /* Adjust the known status as long as the link
+ * isn't still coming up */
+ if ((0 == result) || (ugeth->mii_info->link == 0))
+ adjust_link(dev);
+
+ /* Reenable interrupts, if needed */
+ if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR)
+ mii_configure_phy_interrupt(ugeth->mii_info,
+ MII_INTERRUPT_ENABLED);
+}
+
+/* Called every so often on systems that don't interrupt
+ * the core for PHY changes */
+static void ugeth_phy_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+
+ schedule_work(&ugeth->tq);
+
+ mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
+}
+
+/* Keep trying aneg for some time
+ * If, after UGETH_AN_TIMEOUT seconds, it has not
+ * finished, we switch to forced.
+ * Either way, once the process has completed, we either
+ * request the interrupt, or switch the timer over to
+ * using ugeth_phy_timer to check status */
+static void ugeth_phy_startup_timer(unsigned long data)
+{
+ struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data;
+ ucc_geth_private_t *ugeth = netdev_priv(mii_info->dev);
+ static int secondary = UGETH_AN_TIMEOUT;
+ int result;
+
+ /* Configure the Auto-negotiation */
+ result = mii_info->phyinfo->config_aneg(mii_info);
+
+ /* If autonegotiation failed to start, and
+ * we haven't timed out, reset the timer, and return */
+ if (result && secondary--) {
+ mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
+ return;
+ } else if (result) {
+ /* Couldn't start autonegotiation.
+ * Try switching to forced */
+ mii_info->autoneg = 0;
+ result = mii_info->phyinfo->config_aneg(mii_info);
+
+ /* Forcing failed! Give up */
+ if (result) {
+ ugeth_err("%s: Forcing failed!", mii_info->dev->name);
+ return;
+ }
+ }
+
+ /* Kill the timer so it can be restarted */
+ del_timer_sync(&ugeth->phy_info_timer);
+
+ /* Grab the PHY interrupt, if necessary/possible */
+ if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
+ if (request_irq(ugeth->ug_info->phy_interrupt,
+ phy_interrupt,
+ SA_SHIRQ, "phy_interrupt", mii_info->dev) < 0) {
+ ugeth_err("%s: Can't get IRQ %d (PHY)",
+ mii_info->dev->name,
+ ugeth->ug_info->phy_interrupt);
+ } else {
+ mii_configure_phy_interrupt(ugeth->mii_info,
+ MII_INTERRUPT_ENABLED);
+ return;
+ }
+ }
+
+ /* Start the timer again, this time in order to
+ * handle a change in status */
+ init_timer(&ugeth->phy_info_timer);
+ ugeth->phy_info_timer.function = &ugeth_phy_timer;
+ ugeth->phy_info_timer.data = (unsigned long)mii_info->dev;
+ mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
+}
+
+/* Called when something needs to use the ethernet device */
+/* Returns 0 for success. */
+static int ucc_geth_open(struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ int err;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ /* Test station address */
+ if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
+ ugeth_err("%s: Multicast address used for station address"
+ " - is this what you wanted?", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ err = ucc_geth_startup(ugeth);
+ if (err) {
+ ugeth_err("%s: Cannot configure net device, aborting.",
+ dev->name);
+ return err;
+ }
+
+ err = adjust_enet_interface(ugeth);
+ if (err) {
+ ugeth_err("%s: Cannot configure net device, aborting.",
+ dev->name);
+ return err;
+ }
+
+ /* Set MACSTNADDR1, MACSTNADDR2 */
+ /* For more details see the hardware spec. */
+ init_mac_station_addr_regs(dev->dev_addr[0],
+ dev->dev_addr[1],
+ dev->dev_addr[2],
+ dev->dev_addr[3],
+ dev->dev_addr[4],
+ dev->dev_addr[5],
+ &ugeth->ug_regs->macstnaddr1,
+ &ugeth->ug_regs->macstnaddr2);
+
+ err = init_phy(dev);
+ if (err) {
+		ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
+ return err;
+ }
+#ifndef CONFIG_UGETH_NAPI
+ err =
+ request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
+ "UCC Geth", dev);
+ if (err) {
+ ugeth_err("%s: Cannot get IRQ for net device, aborting.",
+ dev->name);
+ ucc_geth_stop(ugeth);
+ return err;
+ }
+#endif /* CONFIG_UGETH_NAPI */
+
+ /* Set up the PHY change work queue */
+ INIT_WORK(&ugeth->tq, ugeth_phy_change, dev);
+
+ init_timer(&ugeth->phy_info_timer);
+ ugeth->phy_info_timer.function = &ugeth_phy_startup_timer;
+ ugeth->phy_info_timer.data = (unsigned long)ugeth->mii_info;
+ mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
+
+ err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+ if (err) {
+ ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
+ ucc_geth_stop(ugeth);
+ return err;
+ }
+
+ netif_start_queue(dev);
+
+ return err;
+}
+
+/* Stops the kernel queue, and halts the controller */
+static int ucc_geth_close(struct net_device *dev)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ ucc_geth_stop(ugeth);
+
+ /* Shutdown the PHY */
+ if (ugeth->mii_info->phyinfo->close)
+ ugeth->mii_info->phyinfo->close(ugeth->mii_info);
+
+ kfree(ugeth->mii_info);
+
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+struct ethtool_ops ucc_geth_ethtool_ops = {
+ .get_settings = NULL,
+ .get_drvinfo = NULL,
+ .get_regs_len = NULL,
+ .get_regs = NULL,
+ .get_link = NULL,
+ .get_coalesce = NULL,
+ .set_coalesce = NULL,
+ .get_ringparam = NULL,
+ .set_ringparam = NULL,
+ .get_strings = NULL,
+ .get_stats_count = NULL,
+ .get_ethtool_stats = NULL,
+};
+
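+/* Platform bus probe: copy clocks, register base, IRQ and PHY parameters
+ * from the platform data into ugeth_info, allocate the net_device, wire up
+ * its methods and register it with the network stack. */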
+static int ucc_geth_probe(struct device *device)
+{
+ struct platform_device *pdev = to_platform_device(device);
+ struct ucc_geth_platform_data *ugeth_pdata;
+ struct net_device *dev = NULL;
+ struct ucc_geth_private *ugeth = NULL;
+ struct ucc_geth_info *ug_info;
+ int err;
+ static int mii_mng_configured = 0;
+
+ ugeth_vdbg("%s: IN", __FUNCTION__);
+
+ ugeth_pdata = (struct ucc_geth_platform_data *)pdev->dev.platform_data;
+
+ ug_info = &ugeth_info[pdev->id];
+ ug_info->uf_info.ucc_num = pdev->id;
+ ug_info->uf_info.rx_clock = ugeth_pdata->rx_clock;
+ ug_info->uf_info.tx_clock = ugeth_pdata->tx_clock;
+ ug_info->uf_info.regs = ugeth_pdata->phy_reg_addr;
+ ug_info->uf_info.irq = platform_get_irq(pdev, 0);
+ ug_info->phy_address = ugeth_pdata->phy_id;
+ ug_info->enet_interface = ugeth_pdata->phy_interface;
+ ug_info->board_flags = ugeth_pdata->board_flags;
+ ug_info->phy_interrupt = ugeth_pdata->phy_interrupt;
+
+	printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
+ ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
+ ug_info->uf_info.irq);
+
+ if (ug_info == NULL) {
+ ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__,
+ pdev->id);
+ return -ENODEV;
+ }
+
+ if (!mii_mng_configured) {
+ ucc_set_qe_mux_mii_mng(ug_info->uf_info.ucc_num);
+ mii_mng_configured = 1;
+ }
+
+ /* Create an ethernet device instance */
+ dev = alloc_etherdev(sizeof(*ugeth));
+
+ if (dev == NULL)
+ return -ENOMEM;
+
+ ugeth = netdev_priv(dev);
+ spin_lock_init(&ugeth->lock);
+
+ dev_set_drvdata(device, dev);
+
+ /* Set the dev->base_addr to the gfar reg region */
+ dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
+
+ SET_MODULE_OWNER(dev);
+ SET_NETDEV_DEV(dev, device);
+
+ /* Fill in the dev structure */
+ dev->open = ucc_geth_open;
+ dev->hard_start_xmit = ucc_geth_start_xmit;
+ dev->tx_timeout = ucc_geth_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+#ifdef CONFIG_UGETH_NAPI
+ dev->poll = ucc_geth_poll;
+ dev->weight = UCC_GETH_DEV_WEIGHT;
+#endif /* CONFIG_UGETH_NAPI */
+ dev->stop = ucc_geth_close;
+ dev->get_stats = ucc_geth_get_stats;
+// dev->change_mtu = ucc_geth_change_mtu;
+ dev->mtu = 1500;
+ dev->set_multicast_list = ucc_geth_set_multi;
+ dev->ethtool_ops = &ucc_geth_ethtool_ops;
+
+ err = register_netdev(dev);
+ if (err) {
+ ugeth_err("%s: Cannot register net device, aborting.",
+ dev->name);
+ free_netdev(dev);
+ return err;
+ }
+
+ ugeth->ug_info = ug_info;
+ ugeth->dev = dev;
+ memcpy(dev->dev_addr, ugeth_pdata->mac_addr, 6);
+
+ return 0;
+}
+
+static int ucc_geth_remove(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+
+ dev_set_drvdata(device, NULL);
+ ucc_geth_memclean(ugeth);
+ free_netdev(dev);
+
+ return 0;
+}
+
+/* Structure for a device driver */
+static struct device_driver ucc_geth_driver = {
+ .name = DRV_NAME,
+ .bus = &platform_bus_type,
+ .probe = ucc_geth_probe,
+ .remove = ucc_geth_remove,
+};
+
+static int __init ucc_geth_init(void)
+{
+ int i;
+ printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
+ for (i = 0; i < 8; i++)
+ memcpy(&(ugeth_info[i]), &ugeth_primary_info,
+ sizeof(ugeth_primary_info));
+
+ return driver_register(&ucc_geth_driver);
+}
+
+static void __exit ucc_geth_exit(void)
+{
+ driver_unregister(&ucc_geth_driver);
+}
+
+module_init(ucc_geth_init);
+module_exit(ucc_geth_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
new file mode 100644
index 0000000..005965f
--- /dev/null
+++ b/drivers/net/ucc_geth.h
@@ -0,0 +1,1339 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ *
+ * Description:
+ * Internal header file for UCC Gigabit Ethernet unit routines.
+ *
+ * Changelog:
+ * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
+ * - Rearrange code and style fixes
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef __UCC_GETH_H__
+#define __UCC_GETH_H__
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/fsl_devices.h>
+
+#include <asm/immap_qe.h>
+#include <asm/qe.h>
+
+#include <asm/ucc.h>
+#include <asm/ucc_fast.h>
+
+#define NUM_TX_QUEUES 8
+#define NUM_RX_QUEUES 8
+#define NUM_BDS_IN_PREFETCHED_BDS 4
+#define TX_IP_OFFSET_ENTRY_MAX 8
+#define NUM_OF_PADDRS 4
+#define ENET_INIT_PARAM_MAX_ENTRIES_RX 9
+#define ENET_INIT_PARAM_MAX_ENTRIES_TX 8
+
+typedef struct ucc_mii_mng {
+ u32 miimcfg; /* MII management configuration reg */
+ u32 miimcom; /* MII management command reg */
+ u32 miimadd; /* MII management address reg */
+ u32 miimcon; /* MII management control reg */
+ u32 miimstat; /* MII management status reg */
+ u32 miimind; /* MII management indication reg */
+} __attribute__ ((packed)) ucc_mii_mng_t;
+
+typedef struct ucc_geth {
+ ucc_fast_t uccf;
+
+ u32 maccfg1; /* mac configuration reg. 1 */
+ u32 maccfg2; /* mac configuration reg. 2 */
+ u32 ipgifg; /* interframe gap reg. */
+ u32 hafdup; /* half-duplex reg. */
+ u8 res1[0x10];
+ ucc_mii_mng_t miimng; /* MII management structure */
+ u32 ifctl; /* interface control reg */
+	u32 ifstat;		/* interface status reg */
+ u32 macstnaddr1; /* mac station address part 1 reg */
+ u32 macstnaddr2; /* mac station address part 2 reg */
+ u8 res2[0x8];
+ u32 uempr; /* UCC Ethernet Mac parameter reg */
+ u32 utbipar; /* UCC tbi address reg */
+ u16 uescr; /* UCC Ethernet statistics control reg */
+ u8 res3[0x180 - 0x15A];
+ u32 tx64; /* Total number of frames (including bad
+ frames) transmitted that were exactly of the
+				   minimal length (64 for untagged, 68 for
+				   tagged, or with length exactly equal to the
+				   parameter MINLength) */
+ u32 tx127; /* Total number of frames (including bad
+ frames) transmitted that were between
+ MINLength (Including FCS length==4) and 127
+ octets */
+ u32 tx255; /* Total number of frames (including bad
+ frames) transmitted that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 rx64; /* Total number of frames received including
+				   bad frames that were exactly of the minimal
+ length (64 bytes) */
+ u32 rx127; /* Total number of frames (including bad
+ frames) received that were between MINLength
+ (Including FCS length==4) and 127 octets */
+ u32 rx255; /* Total number of frames (including bad
+ frames) received that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 txok; /* Total number of octets residing in frames
+				   that were involved in successful
+ transmission */
+ u16 txcf; /* Total number of PAUSE control frames
+ transmitted by this MAC */
+ u8 res4[0x2];
+ u32 tmca; /* Total number of frames that were transmitted
+				   successfully with the group address bit set
+ that are not broadcast frames */
+ u32 tbca; /* Total number of frames transmitted
+				   successfully that had destination address
+ field equal to the broadcast address */
+ u32 rxfok; /* Total number of frames received OK */
+ u32 rxbok; /* Total number of octets received OK */
+ u32 rbyt; /* Total number of octets received including
+ octets in bad frames. Must be implemented in
+ HW because it includes octets in frames that
+ never even reach the UCC */
+ u32 rmca; /* Total number of frames that were received
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 rbca; /* Total number of frames received successfully
+ that had destination address equal to the
+ broadcast address */
+ u32 scar; /* Statistics carry register */
+ u32 scam; /* Statistics carry mask register */
+ u8 res5[0x200 - 0x1c4];
+} __attribute__ ((packed)) ucc_geth_t;
+
+/* UCC GETH TEMODER Register */
+#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics
+ */
+#define TEMODER_SCHEDULER_ENABLE 0x2000 /* enable scheduler */
+#define TEMODER_IP_CHECKSUM_GENERATE 0x0400 /* generate IPv4
+ checksums */
+#define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200 /* enable performance
+ optimization
+ enhancement (mode1) */
+#define TEMODER_RMON_STATISTICS 0x0100 /* enable tx statistics
+ */
+#define TEMODER_NUM_OF_QUEUES_SHIFT (15-15) /* Number of queues <<
+ shift */
+
+/* UCC GETH REMODER Register */
+#define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000 /* enable Rx
+ statistics */
+#define REMODER_RX_EXTENDED_FEATURES 0x80000000 /* enable
+ extended
+ features */
+#define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31-9 ) /* vlan operation
+ tagged << shift */
+#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31-10) /* vlan operation non
+ tagged << shift */
+#define REMODER_RX_QOS_MODE_SHIFT (31-15) /* rx QoS mode << shift
+ */
+#define REMODER_RMON_STATISTICS 0x00001000 /* enable rx
+ statistics */
+#define REMODER_RX_EXTENDED_FILTERING 0x00000800 /* extended
+ filtering
+ vs.
+ mpc82xx-like
+ filtering */
+#define REMODER_NUM_OF_QUEUES_SHIFT (31-23) /* Number of queues <<
+ shift */
+#define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008 /* enable
+ dynamic max
+ frame length
+ */
+#define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004 /* enable
+ dynamic min
+ frame length
+ */
+#define REMODER_IP_CHECKSUM_CHECK 0x00000002 /* check IPv4
+ checksums */
+#define REMODER_IP_ADDRESS_ALIGNMENT 0x00000001 /* align ip
+ address to
+ 4-byte
+ boundary */
+
+/* UCC GETH Event Register */
+#define UCCE_MPD 0x80000000 /* Magic packet
+ detection */
+#define UCCE_SCAR 0x40000000
+#define UCCE_GRA 0x20000000 /* Tx graceful
+ stop
+ complete */
+#define UCCE_CBPR 0x10000000
+#define UCCE_BSY 0x08000000
+#define UCCE_RXC 0x04000000
+#define UCCE_TXC 0x02000000
+#define UCCE_TXE 0x01000000
+#define UCCE_TXB7 0x00800000
+#define UCCE_TXB6 0x00400000
+#define UCCE_TXB5 0x00200000
+#define UCCE_TXB4 0x00100000
+#define UCCE_TXB3 0x00080000
+#define UCCE_TXB2 0x00040000
+#define UCCE_TXB1 0x00020000
+#define UCCE_TXB0 0x00010000
+#define UCCE_RXB7 0x00008000
+#define UCCE_RXB6 0x00004000
+#define UCCE_RXB5 0x00002000
+#define UCCE_RXB4 0x00001000
+#define UCCE_RXB3 0x00000800
+#define UCCE_RXB2 0x00000400
+#define UCCE_RXB1 0x00000200
+#define UCCE_RXB0 0x00000100
+#define UCCE_RXF7 0x00000080
+#define UCCE_RXF6 0x00000040
+#define UCCE_RXF5 0x00000020
+#define UCCE_RXF4 0x00000010
+#define UCCE_RXF3 0x00000008
+#define UCCE_RXF2 0x00000004
+#define UCCE_RXF1 0x00000002
+#define UCCE_RXF0 0x00000001
+
+#define UCCE_RXBF_SINGLE_MASK (UCCE_RXF0)
+#define UCCE_TXBF_SINGLE_MASK (UCCE_TXB0)
+
+#define UCCE_TXB (UCCE_TXB7 | UCCE_TXB6 | UCCE_TXB5 | UCCE_TXB4 |\
+ UCCE_TXB3 | UCCE_TXB2 | UCCE_TXB1 | UCCE_TXB0)
+#define UCCE_RXB (UCCE_RXB7 | UCCE_RXB6 | UCCE_RXB5 | UCCE_RXB4 |\
+ UCCE_RXB3 | UCCE_RXB2 | UCCE_RXB1 | UCCE_RXB0)
+#define UCCE_RXF (UCCE_RXF7 | UCCE_RXF6 | UCCE_RXF5 | UCCE_RXF4 |\
+ UCCE_RXF3 | UCCE_RXF2 | UCCE_RXF1 | UCCE_RXF0)
+#define UCCE_OTHER (UCCE_SCAR | UCCE_GRA | UCCE_CBPR | UCCE_BSY |\
+ UCCE_RXC | UCCE_TXC | UCCE_TXE)
+
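A minimal sketch (not part of the patch; the helper names are hypothetical) of how the grouped event masks above can be used: a handler tests an entire class of events with a single AND instead of checking the per-queue bits one by one.

static inline int ucce_rx_frame_pending(u32 ucce)
{
	/* UCCE_RXF ORs the per-queue "frame received" bits RXF0..RXF7 */
	return (ucce & UCCE_RXF) != 0;
}

static inline int ucce_tx_buffer_done(u32 ucce)
{
	/* UCCE_TXB ORs the per-queue "buffer transmitted" bits TXB0..TXB7 */
	return (ucce & UCCE_TXB) != 0;
}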
+/* UCC GETH UPSMR (Protocol Specific Mode Register) */
+#define UPSMR_ECM 0x04000000 /* Enable CAM
+ Miss or
+ Enable
+ Filtering
+ Miss */
+#define UPSMR_HSE 0x02000000 /* Hardware
+ Statistics
+ Enable */
+#define UPSMR_PRO 0x00400000 /* Promiscuous*/
+#define UPSMR_CAP 0x00200000 /* CAM polarity
+ */
+#define UPSMR_RSH 0x00100000 /* Receive
+ Short Frames
+ */
+#define UPSMR_RPM 0x00080000 /* Reduced Pin
+ Mode
+ interfaces */
+#define UPSMR_R10M 0x00040000 /* RGMII/RMII
+ 10 Mode */
+#define UPSMR_RLPB 0x00020000 /* RMII
+ Loopback
+ Mode */
+#define UPSMR_TBIM 0x00010000 /* Ten-bit
+ Interface
+ Mode */
+#define UPSMR_RMM 0x00001000 /* RMII/RGMII
+ Mode */
+#define UPSMR_CAM 0x00000400 /* CAM Address
+ Matching */
+#define UPSMR_BRO 0x00000200 /* Broadcast
+ Address */
+#define UPSMR_RES1 0x00002000 /* Reserved
+ field - must
+ be 1 */
+
+/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */
+#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control
+ Rx */
+#define MACCFG1_FLOW_TX 0x00000010 /* Flow Control
+ Tx */
+#define MACCFG1_ENABLE_SYNCHED_RX 0x00000008 /* Rx Enable
+ synchronized
+ to Rx stream
+ */
+#define MACCFG1_ENABLE_RX 0x00000004 /* Enable Rx */
+#define MACCFG1_ENABLE_SYNCHED_TX 0x00000002 /* Tx Enable
+ synchronized
+ to Tx stream
+ */
+#define MACCFG1_ENABLE_TX 0x00000001 /* Enable Tx */
+
+/* UCC GETH MACCFG2 (MAC Configuration 2 Register) */
+#define MACCFG2_PREL_SHIFT (31 - 19) /* Preamble
+ Length <<
+ shift */
+#define MACCFG2_PREL_MASK 0x0000f000 /* Preamble
+ Length mask */
+#define MACCFG2_SRP 0x00000080 /* Soft Receive
+ Preamble */
+#define MACCFG2_STP 0x00000040 /* Soft
+ Transmit
+ Preamble */
+#define MACCFG2_RESERVED_1 0x00000020 /* Reserved -
+ must be set
+ to 1 */
+#define MACCFG2_LC 0x00000010 /* Length Check
+ */
+#define MACCFG2_MPE 0x00000008 /* Magic packet
+ detect */
+#define MACCFG2_FDX 0x00000001 /* Full Duplex */
+#define MACCFG2_FDX_MASK 0x00000001 /* Full Duplex
+ mask */
+#define MACCFG2_PAD_CRC 0x00000004
+#define MACCFG2_CRC_EN 0x00000002
+#define MACCFG2_PAD_AND_CRC_MODE_NONE 0x00000000 /* Neither
+ Padding
+ short frames
+ nor CRC */
+#define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY 0x00000002 /* Append CRC
+ only */
+#define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 0x00000004
+#define MACCFG2_INTERFACE_MODE_NIBBLE 0x00000100 /* nibble mode
+ (MII/RMII/RGMII
+ 10/100bps) */
+#define MACCFG2_INTERFACE_MODE_BYTE 0x00000200 /* byte mode
+ (GMII/TBI/RTB/RGMII
+ 1000bps ) */
+#define MACCFG2_INTERFACE_MODE_MASK 0x00000300 /* mask
+ covering all
+ relevant
+ bits */
+
+/* UCC GETH IPGIFG (Inter-frame Gap / Inter-Frame Gap Register) */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT (31 - 7) /* Non
+ back-to-back
+ inter frame
+ gap part 1.
+ << shift */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT (31 - 15) /* Non
+ back-to-back
+ inter frame
+ gap part 2.
+ << shift */
+#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT (31 - 23) /* Minimum IFG
+ Enforcement
+ << shift */
+#define IPGIFG_BACK_TO_BACK_IFG_SHIFT (31 - 31) /* back-to-back
+ inter frame
+ gap << shift
+ */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX 127 /* Non back-to-back
+ inter frame gap part
+ 1. max val */
+#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX 127 /* Non back-to-back
+ inter frame gap part
+ 2. max val */
+#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX 255 /* Minimum IFG
+ Enforcement max val */
+#define IPGIFG_BACK_TO_BACK_IFG_MAX 127 /* back-to-back inter
+ frame gap max val */
+#define IPGIFG_NBTB_CS_IPG_MASK 0x7F000000
+#define IPGIFG_NBTB_IPG_MASK 0x007F0000
+#define IPGIFG_MIN_IFG_MASK 0x0000FF00
+#define IPGIFG_BTB_IPG_MASK 0x0000007F
+
+/* UCC GETH HAFDUP (Half Duplex Register) */
+#define HALFDUP_ALT_BEB_TRUNCATION_SHIFT (31 - 11) /* Alternate
+ Binary
+ Exponential
+ Backoff
+ Truncation
+ << shift */
+#define HALFDUP_ALT_BEB_TRUNCATION_MAX 0xf /* Alternate Binary
+ Exponential Backoff
+ Truncation max val */
+#define HALFDUP_ALT_BEB 0x00080000 /* Alternate
+ Binary
+ Exponential
+ Backoff */
+#define HALFDUP_BACK_PRESSURE_NO_BACKOFF 0x00040000 /* Back
+ pressure no
+ backoff */
+#define HALFDUP_NO_BACKOFF 0x00020000 /* No Backoff */
+#define HALFDUP_EXCESSIVE_DEFER 0x00010000 /* Excessive
+ Defer */
+#define HALFDUP_MAX_RETRANSMISSION_SHIFT (31 - 19) /* Maximum
+ Retransmission
+ << shift */
+#define HALFDUP_MAX_RETRANSMISSION_MAX 0xf /* Maximum
+ Retransmission max
+ val */
+#define HALFDUP_COLLISION_WINDOW_SHIFT (31 - 31) /* Collision
+ Window <<
+ shift */
+#define HALFDUP_COLLISION_WINDOW_MAX 0x3f /* Collision Window max
+ val */
+#define HALFDUP_ALT_BEB_TR_MASK 0x00F00000
+#define HALFDUP_RETRANS_MASK 0x0000F000
+#define HALFDUP_COL_WINDOW_MASK 0x0000003F
+
+/* UCC GETH UCCS (Ethernet Status Register) */
+#define UCCS_BPR 0x02 /* Back pressure (in
+ half duplex mode) */
+#define UCCS_PAU 0x02 /* Pause state (in full
+ duplex mode) */
+#define UCCS_MPD 0x01 /* Magic Packet
+ Detected */
+
+/* UCC GETH MIIMCFG (MII Management Configuration Register) */
+#define MIIMCFG_RESET_MANAGEMENT 0x80000000 /* Reset
+ management */
+#define MIIMCFG_NO_PREAMBLE 0x00000010 /* Preamble
+ suppress */
+#define MIIMCFG_CLOCK_DIVIDE_SHIFT (31 - 31) /* clock divide
+ << shift */
+#define MIIMCFG_CLOCK_DIVIDE_MAX 0xf /* clock divide max val
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_2 0x00000000 /* divide by 2 */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_4 0x00000001 /* divide by 4 */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_6 0x00000002 /* divide by 6 */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_8 0x00000003 /* divide by 8 */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_10 0x00000004 /* divide by 10
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_14 0x00000005 /* divide by 14
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_16 0x00000008 /* divide by 16
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_20 0x00000006 /* divide by 20
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_28 0x00000007 /* divide by 28
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_32 0x00000009 /* divide by 32
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_48 0x0000000a /* divide by 48
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_64 0x0000000b /* divide by 64
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_80 0x0000000c /* divide by 80
+ */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112 0x0000000d /* divide by
+ 112 */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_160 0x0000000e /* divide by
+ 160 */
+#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_224 0x0000000f /* divide by
+ 224 */
+
+/* UCC GETH MIIMCOM (MII Management Command Register) */
+#define MIIMCOM_SCAN_CYCLE 0x00000002 /* Scan cycle */
+#define MIIMCOM_READ_CYCLE 0x00000001 /* Read cycle */
+
+/* UCC GETH MIIMADD (MII Management Address Register) */
+#define MIIMADD_PHY_ADDRESS_SHIFT (31 - 23) /* PHY Address
+ << shift */
+#define MIIMADD_PHY_REGISTER_SHIFT (31 - 31) /* PHY Register
+ << shift */
+
+/* UCC GETH MIIMCON (MII Management Control Register) */
+#define MIIMCON_PHY_CONTROL_SHIFT (31 - 31) /* PHY Control
+ << shift */
+#define MIIMCON_PHY_STATUS_SHIFT (31 - 31) /* PHY Status
+ << shift */
+
+/* UCC GETH MIIMIND (MII Management Indicator Register) */
+#define MIIMIND_NOT_VALID 0x00000004 /* Not valid */
+#define MIIMIND_SCAN 0x00000002 /* Scan in
+ progress */
+#define MIIMIND_BUSY 0x00000001
+
+/* UCC GETH IFSTAT (Interface Status Register) */
+#define IFSTAT_EXCESS_DEFER 0x00000200 /* Excessive
+ transmission
+ defer */
+
+/* UCC GETH MACSTNADDR1 (Station Address Part 1 Register) */
+#define MACSTNADDR1_OCTET_6_SHIFT (31 - 7) /* Station
+ address 6th
+ octet <<
+ shift */
+#define MACSTNADDR1_OCTET_5_SHIFT (31 - 15) /* Station
+ address 5th
+ octet <<
+ shift */
+#define MACSTNADDR1_OCTET_4_SHIFT (31 - 23) /* Station
+ address 4th
+ octet <<
+ shift */
+#define MACSTNADDR1_OCTET_3_SHIFT (31 - 31) /* Station
+ address 3rd
+ octet <<
+ shift */
+
+/* UCC GETH MACSTNADDR2 (Station Address Part 2 Register) */
+#define MACSTNADDR2_OCTET_2_SHIFT (31 - 7) /* Station
+ address 2nd
+ octet <<
+ shift */
+#define MACSTNADDR2_OCTET_1_SHIFT (31 - 15) /* Station
+ address 1st
+ octet <<
+ shift */
+
+/* UCC GETH UEMPR (Ethernet Mac Parameter Register) */
+#define UEMPR_PAUSE_TIME_VALUE_SHIFT (31 - 15) /* Pause time
+ value <<
+ shift */
+#define UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT (31 - 31) /* Extended
+ pause time
+ value <<
+ shift */
+
+/* UCC GETH UTBIPAR (Ten Bit Interface Physical Address Register) */
+#define UTBIPAR_PHY_ADDRESS_SHIFT (31 - 31) /* Phy address
+ << shift */
+#define UTBIPAR_PHY_ADDRESS_MASK 0x0000001f /* Phy address
+ mask */
+
+/* UCC GETH UESCR (Ethernet Statistics Control Register) */
+#define UESCR_AUTOZ 0x8000 /* Automatically zero
+ addressed
+ statistical counter
+ values */
+#define UESCR_CLRCNT 0x4000 /* Clear all statistics
+ counters */
+#define UESCR_MAXCOV_SHIFT (15 - 7) /* Max
+ Coalescing
+ Value <<
+ shift */
+#define UESCR_SCOV_SHIFT (15 - 15) /* Status
+ Coalescing
+ Value <<
+ shift */
+
+/* UCC GETH UDSR (Data Synchronization Register) */
+#define UDSR_MAGIC 0x067E
+
+typedef struct ucc_geth_thread_data_tx {
+ u8 res0[104];
+} __attribute__ ((packed)) ucc_geth_thread_data_tx_t;
+
+typedef struct ucc_geth_thread_data_rx {
+ u8 res0[40];
+} __attribute__ ((packed)) ucc_geth_thread_data_rx_t;
+
+/* Send Queue Queue-Descriptor */
+typedef struct ucc_geth_send_queue_qd {
+ u32 bd_ring_base; /* pointer to BD ring base address */
+ u8 res0[0x8];
+ u32 last_bd_completed_address;/* initialize to last entry in BD ring */
+ u8 res1[0x30];
+} __attribute__ ((packed)) ucc_geth_send_queue_qd_t;
+
+typedef struct ucc_geth_send_queue_mem_region {
+ ucc_geth_send_queue_qd_t sqqd[NUM_TX_QUEUES];
+} __attribute__ ((packed)) ucc_geth_send_queue_mem_region_t;
+
+typedef struct ucc_geth_thread_tx_pram {
+ u8 res0[64];
+} __attribute__ ((packed)) ucc_geth_thread_tx_pram_t;
+
+typedef struct ucc_geth_thread_rx_pram {
+ u8 res0[128];
+} __attribute__ ((packed)) ucc_geth_thread_rx_pram_t;
+
+#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64
+#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64
+#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96
+
+typedef struct ucc_geth_scheduler {
+ u16 cpucount0; /* CPU packet counter */
+ u16 cpucount1; /* CPU packet counter */
+ u16 cecount0; /* QE packet counter */
+ u16 cecount1; /* QE packet counter */
+ u16 cpucount2; /* CPU packet counter */
+ u16 cpucount3; /* CPU packet counter */
+ u16 cecount2; /* QE packet counter */
+ u16 cecount3; /* QE packet counter */
+ u16 cpucount4; /* CPU packet counter */
+ u16 cpucount5; /* CPU packet counter */
+ u16 cecount4; /* QE packet counter */
+ u16 cecount5; /* QE packet counter */
+ u16 cpucount6; /* CPU packet counter */
+ u16 cpucount7; /* CPU packet counter */
+ u16 cecount6; /* QE packet counter */
+ u16 cecount7; /* QE packet counter */
+ u32 weightstatus[NUM_TX_QUEUES]; /* accumulated weight factor */
+ u32 rtsrshadow; /* temporary variable handled by QE */
+ u32 time; /* temporary variable handled by QE */
+ u32 ttl; /* temporary variable handled by QE */
+ u32 mblinterval; /* max burst length interval */
+ u16 nortsrbytetime; /* normalized value of byte time in tsr units */
+ u8 fracsiz; /* radix 2 log value of denom. of
+ NorTSRByteTime */
+ u8 res0[1];
+ u8 strictpriorityq; /* Strict Priority Mask register */
+ u8 txasap; /* Transmit ASAP register */
+ u8 extrabw; /* Extra BandWidth register */
+ u8 oldwfqmask; /* temporary variable handled by QE */
+ u8 weightfactor[NUM_TX_QUEUES];
+ /**< weight factor for queues */
+ u32 minw; /* temporary variable handled by QE */
+ u8 res1[0x70 - 0x64];
+} __attribute__ ((packed)) ucc_geth_scheduler_t;
+
+typedef struct ucc_geth_tx_firmware_statistics_pram {
+ u32 sicoltx; /* single collision */
+ u32 mulcoltx; /* multiple collision */
+ u32 latecoltxfr; /* late collision */
+ u32 frabortduecol; /* frames aborted due to transmit collision */
+ u32 frlostinmactxer; /* frames lost due to internal MAC error
+ transmission that are not counted on any
+ other counter */
+ u32 carriersenseertx; /* carrier sense error */
+ u32 frtxok; /* frames transmitted OK */
+ u32 txfrexcessivedefer; /* frames with deferral time greater than
+ specified threshold */
+ u32 txpkts256; /* total packets (including bad) between 256
+ and 511 octets */
+ u32 txpkts512; /* total packets (including bad) between 512
+ and 1023 octets */
+ u32 txpkts1024; /* total packets (including bad) between 1024
+ and 1518 octets */
+ u32 txpktsjumbo; /* total packets (including bad) between 1024
+ and MAXLength octets */
+} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_pram_t;
+
+typedef struct ucc_geth_rx_firmware_statistics_pram {
+ u32 frrxfcser; /* frames with crc error */
+ u32 fraligner; /* frames with alignment error */
+ u32 inrangelenrxer; /* in range length error */
+ u32 outrangelenrxer; /* out of range length error */
+ u32 frtoolong; /* frame too long */
+ u32 runt; /* runt */
+ u32 verylongevent; /* very long event */
+ u32 symbolerror; /* symbol error */
+ u32 dropbsy; /* drop because of BD not ready */
+ u8 res0[0x8];
+ u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
+ or type mismatch) */
+ u32 underpkts; /* total frames less than 64 octets */
+ u32 pkts256; /* total frames (including bad) between 256 and
+ 511 octets */
+ u32 pkts512; /* total frames (including bad) between 512 and
+ 1023 octets */
+ u32 pkts1024; /* total frames (including bad) between 1024
+ and 1518 octets */
+ u32 pktsjumbo; /* total frames (including bad) between 1024
+ and MAXLength octets */
+ u32 frlossinmacer; /* frames lost because of internal MAC error
+ that is not counted in any other counter */
+ u32 pausefr; /* pause frames */
+ u8 res1[0x4];
+ u32 removevlan; /* total frames that had their VLAN tag removed
+ */
+ u32 replacevlan; /* total frames that had their VLAN tag
+ replaced */
+ u32 insertvlan; /* total frames that had their VLAN tag
+ inserted */
+} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_pram_t;
+
+typedef struct ucc_geth_rx_interrupt_coalescing_entry {
+ u32 interruptcoalescingmaxvalue; /* interrupt coalescing max
+ value */
+ u32 interruptcoalescingcounter; /* interrupt coalescing counter,
+ initialize to
+ interruptcoalescingmaxvalue */
+} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_entry_t;
+
+typedef struct ucc_geth_rx_interrupt_coalescing_table {
+ ucc_geth_rx_interrupt_coalescing_entry_t coalescingentry[NUM_RX_QUEUES];
+ /**< interrupt coalescing entry */
+} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_table_t;
+
+typedef struct ucc_geth_rx_prefetched_bds {
+ qe_bd_t bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */
+} __attribute__ ((packed)) ucc_geth_rx_prefetched_bds_t;
+
+typedef struct ucc_geth_rx_bd_queues_entry {
+ u32 bdbaseptr; /* BD base pointer */
+ u32 bdptr; /* BD pointer */
+ u32 externalbdbaseptr; /* external BD base pointer */
+ u32 externalbdptr; /* external BD pointer */
+} __attribute__ ((packed)) ucc_geth_rx_bd_queues_entry_t;
+
+typedef struct ucc_geth_tx_global_pram {
+ u16 temoder;
+ u8 res0[0x38 - 0x02];
+ u32 sqptr; /* a base pointer to send queue memory region */
+ u32 schedulerbasepointer; /* a base pointer to scheduler memory
+ region */
+ u32 txrmonbaseptr; /* base pointer to Tx RMON statistics counter */
+ u32 tstate; /* tx internal state. High byte contains
+ function code */
+ u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
+ u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
+ u32 tqptr; /* a base pointer to the Tx Queues Memory
+ Region */
+ u8 res2[0x80 - 0x74];
+} __attribute__ ((packed)) ucc_geth_tx_global_pram_t;
+
+/* structure representing Extended Filtering Global Parameters in PRAM */
+typedef struct ucc_geth_exf_global_pram {
+ u32 l2pcdptr; /* individual address filter, high */
+ u8 res0[0x10 - 0x04];
+} __attribute__ ((packed)) ucc_geth_exf_global_pram_t;
+
+typedef struct ucc_geth_rx_global_pram {
+ u32 remoder; /* ethernet mode reg. */
+ u32 rqptr; /* base pointer to the Rx Queues Memory Region*/
+ u32 res0[0x1];
+ u8 res1[0x20 - 0xC];
+ u16 typeorlen; /* cutoff point below which the type/len
+ field is considered a length */
+ u8 res2[0x1];
+ u8 rxgstpack; /* acknowledgement on GRACEFUL STOP RX command*/
+ u32 rxrmonbaseptr; /* base pointer to Rx RMON statistics counter */
+ u8 res3[0x30 - 0x28];
+ u32 intcoalescingptr; /* Interrupt coalescing table pointer */
+ u8 res4[0x36 - 0x34];
+ u8 rstate; /* rx internal state. High byte contains
+ function code */
+ u8 res5[0x46 - 0x37];
+ u16 mrblr; /* max receive buffer length reg. */
+ u32 rbdqptr; /* base pointer to RxBD parameter table
+ description */
+ u16 mflr; /* max frame length reg. */
+ u16 minflr; /* min frame length reg. */
+ u16 maxd1; /* max dma1 length reg. */
+ u16 maxd2; /* max dma2 length reg. */
+ u32 ecamptr; /* external CAM address */
+ u32 l2qt; /* VLAN priority mapping table. */
+ u32 l3qt[0x8]; /* IP priority mapping table. */
+ u16 vlantype; /* vlan type */
+ u16 vlantci; /* default vlan tci */
+ u8 addressfiltering[64]; /* address filtering data structure */
+ u32 exfGlobalParam; /* base address for extended filtering global
+ parameters */
+ u8 res6[0x100 - 0xC4]; /* Initialize to zero */
+} __attribute__ ((packed)) ucc_geth_rx_global_pram_t;
+
+#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01
+
+/* structure representing InitEnet command */
+typedef struct ucc_geth_init_pram {
+ u8 resinit1;
+ u8 resinit2;
+ u8 resinit3;
+ u8 resinit4;
+ u16 resinit5;
+ u8 res1[0x1];
+ u8 largestexternallookupkeysize;
+ u32 rgftgfrxglobal;
+ u32 rxthread[ENET_INIT_PARAM_MAX_ENTRIES_RX]; /* rx threads */
+ u8 res2[0x38 - 0x30];
+ u32 txglobal; /* tx global */
+ u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */
+ u8 res3[0x1];
+} __attribute__ ((packed)) ucc_geth_init_pram_t;
+
+#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4)
+#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8)
+
+#define ENET_INIT_PARAM_RISC_MASK 0x0000003f
+#define ENET_INIT_PARAM_PTR_MASK 0x00ffffc0
+#define ENET_INIT_PARAM_SNUM_MASK 0xff000000
+#define ENET_INIT_PARAM_SNUM_SHIFT 24
+
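As a minimal sketch of how the mask and shift definitions above compose a single rxthread[]/txthread[] entry word (an inference from the masks only, not the patch's own helper; init_enet_entry() is a hypothetical name):

static inline u32 init_enet_entry(u32 snum, u32 param_ptr, u32 risc)
{
	/* SNUM in bits 31-24, parameter RAM pointer in bits 23-6 and the
	   RISC allocation in bits 5-0, per the masks defined above */
	return ((snum << ENET_INIT_PARAM_SNUM_SHIFT) & ENET_INIT_PARAM_SNUM_MASK) |
	       (param_ptr & ENET_INIT_PARAM_PTR_MASK) |
	       (risc & ENET_INIT_PARAM_RISC_MASK);
}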
+#define ENET_INIT_PARAM_MAGIC_RES_INIT1 0x06
+#define ENET_INIT_PARAM_MAGIC_RES_INIT2 0x30
+#define ENET_INIT_PARAM_MAGIC_RES_INIT3 0xff
+#define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x00
+#define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400
+
+/* structure representing 82xx Address Filtering Enet Address in PRAM */
+typedef struct ucc_geth_82xx_enet_address {
+ u8 res1[0x2];
+ u16 h; /* address (MSB) */
+ u16 m; /* address */
+ u16 l; /* address (LSB) */
+} __attribute__ ((packed)) ucc_geth_82xx_enet_address_t;
+
+/* structure representing 82xx Address Filtering PRAM */
+typedef struct ucc_geth_82xx_address_filtering_pram {
+ u32 iaddr_h; /* individual address filter, high */
+ u32 iaddr_l; /* individual address filter, low */
+ u32 gaddr_h; /* group address filter, high */
+ u32 gaddr_l; /* group address filter, low */
+ ucc_geth_82xx_enet_address_t taddr;
+ ucc_geth_82xx_enet_address_t paddr[NUM_OF_PADDRS];
+ u8 res0[0x40 - 0x38];
+} __attribute__ ((packed)) ucc_geth_82xx_address_filtering_pram_t;
+
+/* GETH Tx firmware statistics structure, used when calling
+ UCC_GETH_GetStatistics. */
+typedef struct ucc_geth_tx_firmware_statistics {
+ u32 sicoltx; /* single collision */
+ u32 mulcoltx; /* multiple collision */
+ u32 latecoltxfr; /* late collision */
+ u32 frabortduecol; /* frames aborted due to transmit collision */
+ u32 frlostinmactxer; /* frames lost due to internal MAC error
+ transmission that are not counted on any
+ other counter */
+ u32 carriersenseertx; /* carrier sense error */
+ u32 frtxok; /* frames transmitted OK */
+ u32 txfrexcessivedefer; /* frames with deferral time greater than
+ specified threshold */
+ u32 txpkts256; /* total packets (including bad) between 256
+ and 511 octets */
+ u32 txpkts512; /* total packets (including bad) between 512
+ and 1023 octets */
+ u32 txpkts1024; /* total packets (including bad) between 1024
+ and 1518 octets */
+ u32 txpktsjumbo; /* total packets (including bad) between 1024
+ and MAXLength octets */
+} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_t;
+
+/* GETH Rx firmware statistics structure, used when calling
+ UCC_GETH_GetStatistics. */
+typedef struct ucc_geth_rx_firmware_statistics {
+ u32 frrxfcser; /* frames with crc error */
+ u32 fraligner; /* frames with alignment error */
+ u32 inrangelenrxer; /* in range length error */
+ u32 outrangelenrxer; /* out of range length error */
+ u32 frtoolong; /* frame too long */
+ u32 runt; /* runt */
+ u32 verylongevent; /* very long event */
+ u32 symbolerror; /* symbol error */
+ u32 dropbsy; /* drop because of BD not ready */
+ u8 res0[0x8];
+ u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
+ or type mismatch) */
+ u32 underpkts; /* total frames less than 64 octets */
+ u32 pkts256; /* total frames (including bad) between 256 and
+ 511 octets */
+ u32 pkts512; /* total frames (including bad) between 512 and
+ 1023 octets */
+ u32 pkts1024; /* total frames (including bad) between 1024
+ and 1518 octets */
+ u32 pktsjumbo; /* total frames (including bad) between 1024
+ and MAXLength octets */
+ u32 frlossinmacer; /* frames lost because of internal MAC error
+ that is not counted in any other counter */
+ u32 pausefr; /* pause frames */
+ u8 res1[0x4];
+ u32 removevlan; /* total frames that had their VLAN tag removed
+ */
+ u32 replacevlan; /* total frames that had their VLAN tag
+ replaced */
+ u32 insertvlan; /* total frames that had their VLAN tag
+ inserted */
+} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_t;
+
+/* GETH hardware statistics structure, used when calling
+ UCC_GETH_GetStatistics. */
+typedef struct ucc_geth_hardware_statistics {
+ u32 tx64; /* Total number of frames (including bad
+ frames) transmitted that were exactly of the
+ minimal length (64 for untagged, 68 for
+ tagged, or with length exactly equal to the
+ parameter MINLength) */
+ u32 tx127; /* Total number of frames (including bad
+ frames) transmitted that were between
+ MINLength (Including FCS length==4) and 127
+ octets */
+ u32 tx255; /* Total number of frames (including bad
+ frames) transmitted that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 rx64; /* Total number of frames received including
+ bad frames that were exactly of the minimal
+ length (64 bytes) */
+ u32 rx127; /* Total number of frames (including bad
+ frames) received that were between MINLength
+ (Including FCS length==4) and 127 octets */
+ u32 rx255; /* Total number of frames (including bad
+ frames) received that were between 128
+ (Including FCS length==4) and 255 octets */
+ u32 txok; /* Total number of octets residing in frames
+ that were involved in successful
+ transmission */
+ u16 txcf; /* Total number of PAUSE control frames
+ transmitted by this MAC */
+ u32 tmca; /* Total number of frames that were transmitted
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 tbca; /* Total number of frames transmitted
+ successfully that had destination address
+ field equal to the broadcast address */
+ u32 rxfok; /* Total number of frames received OK */
+ u32 rxbok; /* Total number of octets received OK */
+ u32 rbyt; /* Total number of octets received including
+ octets in bad frames. Must be implemented in
+ HW because it includes octets in frames that
+ never even reach the UCC */
+ u32 rmca; /* Total number of frames that were received
+ successfully with the group address bit set
+ that are not broadcast frames */
+ u32 rbca; /* Total number of frames received successfully
+ that had destination address equal to the
+ broadcast address */
+} __attribute__ ((packed)) ucc_geth_hardware_statistics_t;
+
+/* UCC GETH Tx errors returned via TxConf callback */
+#define TX_ERRORS_DEF 0x0200
+#define TX_ERRORS_EXDEF 0x0100
+#define TX_ERRORS_LC 0x0080
+#define TX_ERRORS_RL 0x0040
+#define TX_ERRORS_RC_MASK 0x003C
+#define TX_ERRORS_RC_SHIFT 2
+#define TX_ERRORS_UN 0x0002
+#define TX_ERRORS_CSL 0x0001
+
+/* UCC GETH Rx errors returned via RxStore callback */
+#define RX_ERRORS_CMR 0x0200
+#define RX_ERRORS_M 0x0100
+#define RX_ERRORS_BC 0x0080
+#define RX_ERRORS_MC 0x0040
+
+/* Transmit BD. These are in addition to values defined in uccf. */
+#define T_VID 0x003c0000 /* insert VLAN id index mask. */
+#define T_DEF (((u32) TX_ERRORS_DEF ) << 16)
+#define T_EXDEF (((u32) TX_ERRORS_EXDEF ) << 16)
+#define T_LC (((u32) TX_ERRORS_LC ) << 16)
+#define T_RL (((u32) TX_ERRORS_RL ) << 16)
+#define T_RC_MASK (((u32) TX_ERRORS_RC_MASK ) << 16)
+#define T_UN (((u32) TX_ERRORS_UN ) << 16)
+#define T_CSL (((u32) TX_ERRORS_CSL ) << 16)
+#define T_ERRORS_REPORT (T_DEF | T_EXDEF | T_LC | T_RL | T_RC_MASK \
+ | T_UN | T_CSL) /* transmit errors to report */
+
+/* Receive BD. These are in addition to values defined in uccf. */
+#define R_LG 0x00200000 /* Frame length violation. */
+#define R_NO 0x00100000 /* Non-octet aligned frame. */
+#define R_SH 0x00080000 /* Short frame. */
+#define R_CR 0x00040000 /* CRC error. */
+#define R_OV 0x00020000 /* Overrun. */
+#define R_IPCH 0x00010000 /* IP checksum check failed. */
+#define R_CMR (((u32) RX_ERRORS_CMR ) << 16)
+#define R_M (((u32) RX_ERRORS_M ) << 16)
+#define R_BC (((u32) RX_ERRORS_BC ) << 16)
+#define R_MC (((u32) RX_ERRORS_MC ) << 16)
+#define R_ERRORS_REPORT (R_CMR | R_M | R_BC | R_MC) /* receive errors to
+ report */
+#define R_ERRORS_FATAL (R_LG | R_NO | R_SH | R_CR | \
+ R_OV | R_IPCH) /* receive errors to discard */
+
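A minimal sketch (assumed helper names, not part of the patch) of how the shifted error masks above apply to a 32-bit buffer-descriptor status word, separating the fatal Rx errors from those that are only reported:

static inline int rx_bd_fatal_error(u32 bd_status)
{
	/* length, alignment, CRC, overrun or IP-checksum error: discard */
	return (bd_status & R_ERRORS_FATAL) != 0;
}

static inline u32 rx_bd_reported_errors(u32 bd_status)
{
	/* errors that are only passed up via the RxStore callback */
	return bd_status & R_ERRORS_REPORT;
}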
+/* Alignments */
+#define UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT 256
+#define UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT 128
+#define UCC_GETH_THREAD_RX_PRAM_ALIGNMENT 128
+#define UCC_GETH_THREAD_TX_PRAM_ALIGNMENT 64
+#define UCC_GETH_THREAD_DATA_ALIGNMENT 256 /* spec gives values
+ based on num of
+ threads, but always
+ using the maximum is
+ easier */
+#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32
+#define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 4 /* This is a
+ guess */
+#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */
+#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */
+#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This
+ is a
+ guess
+ */
+#define UCC_GETH_RX_BD_RING_ALIGNMENT 32
+#define UCC_GETH_TX_BD_RING_ALIGNMENT 32
+#define UCC_GETH_MRBLR_ALIGNMENT 128
+#define UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT 4
+#define UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT 32
+#define UCC_GETH_RX_DATA_BUF_ALIGNMENT 64
+
+#define UCC_GETH_TAD_EF 0x80
+#define UCC_GETH_TAD_V 0x40
+#define UCC_GETH_TAD_REJ 0x20
+#define UCC_GETH_TAD_VTAG_OP_RIGHT_SHIFT 2
+#define UCC_GETH_TAD_VTAG_OP_SHIFT 6
+#define UCC_GETH_TAD_V_NON_VTAG_OP 0x20
+#define UCC_GETH_TAD_RQOS_SHIFT 0
+#define UCC_GETH_TAD_V_PRIORITY_SHIFT 5
+#define UCC_GETH_TAD_CFI 0x10
+
+#define UCC_GETH_VLAN_PRIORITY_MAX 8
+#define UCC_GETH_IP_PRIORITY_MAX 64
+#define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8
+#define UCC_GETH_RX_BD_RING_SIZE_MIN 8
+#define UCC_GETH_TX_BD_RING_SIZE_MIN 2
+
+#define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD
+
+/* Driver definitions */
+#define TX_BD_RING_LEN 0x10
+#define RX_BD_RING_LEN 0x10
+#define UCC_GETH_DEV_WEIGHT TX_BD_RING_LEN
+
+#define TX_RING_MOD_MASK(size) (size-1)
+#define RX_RING_MOD_MASK(size) (size-1)
+
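Because TX_BD_RING_LEN and RX_BD_RING_LEN are powers of two, the MOD_MASK macros let a ring index wrap with a single AND rather than a modulo; a minimal sketch (next_tx_index() is a hypothetical helper):

static inline u16 next_tx_index(u16 cur)
{
	/* TX_BD_RING_LEN is 0x10, so the mask is 0x0f and 15 wraps to 0 */
	return (cur + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
}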
+#define ENET_NUM_OCTETS_PER_ADDRESS 6
+#define ENET_GROUP_ADDR 0x01 /* Group address mask
+ for ethernet
+ addresses */
+
+#define TX_TIMEOUT (1*HZ)
+#define SKB_ALLOC_TIMEOUT 100000
+#define PHY_INIT_TIMEOUT 100000
+#define PHY_CHANGE_TIME 2
+
+/* Fast Ethernet (10/100 Mbps) */
+#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size
+ */
+#define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */
+#define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */
+#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
+ */
+#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
+#define UCC_GETH_UTFTT_INIT 128
+/* Gigabit Ethernet (1000 Mbps) */
+#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
+ FIFO size */
+#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */
+#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */
+#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual
+ FIFO size */
+#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */
+#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */
+
+#define UCC_GETH_REMODER_INIT 0 /* bits that must be
+ set */
+#define UCC_GETH_TEMODER_INIT 0xC000 /* bits that must be set */
+#define UCC_GETH_UPSMR_INIT (UPSMR_RES1) /* Start value
+ for this
+ register */
+#define UCC_GETH_MACCFG1_INIT 0
+#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)
+#define UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT \
+ (MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112)
+
+/* Ethernet speed */
+typedef enum enet_speed {
+ ENET_SPEED_10BT, /* 10 Base T */
+ ENET_SPEED_100BT, /* 100 Base T */
+ ENET_SPEED_1000BT /* 1000 Base T */
+} enet_speed_e;
+
+/* Ethernet Address Type. */
+typedef enum enet_addr_type {
+ ENET_ADDR_TYPE_INDIVIDUAL,
+ ENET_ADDR_TYPE_GROUP,
+ ENET_ADDR_TYPE_BROADCAST
+} enet_addr_type_e;
+
+/* TBI / MII Set Register */
+typedef enum enet_tbi_mii_reg {
+ ENET_TBI_MII_CR = 0x00, /* Control (CR ) */
+ ENET_TBI_MII_SR = 0x01, /* Status (SR ) */
+ ENET_TBI_MII_ANA = 0x04, /* AN advertisement (ANA ) */
+ ENET_TBI_MII_ANLPBPA = 0x05, /* AN link partner base page ability
+ (ANLPBPA) */
+ ENET_TBI_MII_ANEX = 0x06, /* AN expansion (ANEX ) */
+ ENET_TBI_MII_ANNPT = 0x07, /* AN next page transmit (ANNPT ) */
+ ENET_TBI_MII_ANLPANP = 0x08, /* AN link partner ability next page
+ (ANLPANP) */
+ ENET_TBI_MII_EXST = 0x0F, /* Extended status (EXST ) */
+ ENET_TBI_MII_JD = 0x10, /* Jitter diagnostics (JD ) */
+ ENET_TBI_MII_TBICON = 0x11 /* TBI control (TBICON ) */
+} enet_tbi_mii_reg_e;
+
+/* UCC GETH 82xx Ethernet Address Recognition Location */
+typedef enum ucc_geth_enet_address_recognition_location {
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* station
+ address */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional
+ station
+ address
+ paddr1 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR2, /* additional
+ station
+ address
+ paddr2 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR3, /* additional
+ station
+ address
+ paddr3 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_LAST, /* additional
+ station
+ address
+ paddr4 */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */
+ UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual
+ hash */
+} ucc_geth_enet_address_recognition_location_e;
+
+/* UCC GETH vlan operation tagged */
+typedef enum ucc_geth_vlan_operation_tagged {
+ UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */
+ UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG
+ = 0x1, /* Tagged - replace vid portion of q tag */
+ UCC_GETH_VLAN_OPERATION_TAGGED_IF_VID0_REPLACE_VID_WITH_DEFAULT_VALUE
+ = 0x2, /* Tagged - if vid0 replace vid with default value */
+ UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME
+ = 0x3 /* Tagged - extract q tag from frame */
+} ucc_geth_vlan_operation_tagged_e;
+
+/* UCC GETH vlan operation non-tagged */
+typedef enum ucc_geth_vlan_operation_non_tagged {
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */
+ UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged -
+ q tag insert
+ */
+} ucc_geth_vlan_operation_non_tagged_e;
+
+/* UCC GETH Rx Quality of Service Mode */
+typedef enum ucc_geth_qos_mode {
+ UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */
+ UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue
+ determined
+ by L2
+ criteria */
+ UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L3_CRITERIA = 0x2 /* queue
+ determined
+ by L3
+ criteria */
+} ucc_geth_qos_mode_e;
+
+/* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together
+ for combined functionality */
+typedef enum ucc_geth_statistics_gathering_mode {
+ UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No
+ statistics
+ gathering */
+ UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE = 0x00000001,/* Enable
+ hardware
+ statistics
+ gathering
+ */
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX = 0x00000004,/*Enable
+ firmware
+ tx
+ statistics
+ gathering
+ */
+ UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX = 0x00000008/* Enable
+ firmware
+ rx
+ statistics
+ gathering
+ */
+} ucc_geth_statistics_gathering_mode_e;
+
+/* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */
+typedef enum ucc_geth_maccfg2_pad_and_crc_mode {
+ UCC_GETH_PAD_AND_CRC_MODE_NONE
+ = MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding
+ short frames
+ nor CRC */
+ UCC_GETH_PAD_AND_CRC_MODE_CRC_ONLY
+ = MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY, /* Append
+ CRC only */
+ UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC =
+ MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC
+} ucc_geth_maccfg2_pad_and_crc_mode_e;
+
+/* UCC GETH upsmr Flow Control Mode */
+typedef enum ucc_geth_flow_control_mode {
+ UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic
+ flow control
+ */
+ UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY
+ = 0x00004000 /* Send pause frame when RxFIFO reaches its
+ emergency threshold */
+} ucc_geth_flow_control_mode_e;
+
+/* UCC GETH number of threads */
+typedef enum ucc_geth_num_of_threads {
+ UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */
+ UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */
+ UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */
+ UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */
+ UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */
+} ucc_geth_num_of_threads_e;
+
+/* UCC GETH number of station addresses */
+typedef enum ucc_geth_num_of_station_addresses {
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */
+ UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */
+} ucc_geth_num_of_station_addresses_e;
+
+typedef u8 enet_addr_t[ENET_NUM_OCTETS_PER_ADDRESS];
+
+/* UCC GETH 82xx Ethernet Address Container */
+typedef struct enet_addr_container {
+ enet_addr_t address; /* ethernet address */
+ ucc_geth_enet_address_recognition_location_e location; /* location in
+ 82xx address
+ recognition
+ hardware */
+ struct list_head node;
+} enet_addr_container_t;
+
+#define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, enet_addr_container_t, node)
+
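A minimal sketch (hypothetical function name) of the intended use of ENET_ADDR_CONT_ENTRY(): walking one of the address hash queues and recovering each enet_addr_container_t from its embedded list node.

static void walk_addr_containers(struct list_head *q)
{
	struct list_head *pos;

	list_for_each(pos, q) {
		enet_addr_container_t *cont = ENET_ADDR_CONT_ENTRY(pos);
		/* cont->address and cont->location are now accessible */
		(void)cont;
	}
}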
+/* UCC GETH Termination Action Descriptor (TAD) structure. */
+typedef struct ucc_geth_tad_params {
+ int rx_non_dynamic_extended_features_mode;
+ int reject_frame;
+ ucc_geth_vlan_operation_tagged_e vtag_op;
+ ucc_geth_vlan_operation_non_tagged_e vnontag_op;
+ ucc_geth_qos_mode_e rqos;
+ u8 vpri;
+ u16 vid;
+} ucc_geth_tad_params_t;
+
+/* GETH protocol initialization structure */
+typedef struct ucc_geth_info {
+ ucc_fast_info_t uf_info;
+ u8 numQueuesTx;
+ u8 numQueuesRx;
+ int ipCheckSumCheck;
+ int ipCheckSumGenerate;
+ int rxExtendedFiltering;
+ u32 extendedFilteringChainPointer;
+ u16 typeorlen;
+ int dynamicMaxFrameLength;
+ int dynamicMinFrameLength;
+ u8 nonBackToBackIfgPart1;
+ u8 nonBackToBackIfgPart2;
+ u8 miminumInterFrameGapEnforcement;
+ u8 backToBackInterFrameGap;
+ int ipAddressAlignment;
+ int lengthCheckRx;
+ u32 mblinterval;
+ u16 nortsrbytetime;
+ u8 fracsiz;
+ u8 strictpriorityq;
+ u8 txasap;
+ u8 extrabw;
+ int miiPreambleSupress;
+ u8 altBebTruncation;
+ int altBeb;
+ int backPressureNoBackoff;
+ int noBackoff;
+ int excessDefer;
+ u8 maxRetransmission;
+ u8 collisionWindow;
+ int pro;
+ int cap;
+ int rsh;
+ int rlpb;
+ int cam;
+ int bro;
+ int ecm;
+ int receiveFlowControl;
+ u8 maxGroupAddrInHash;
+ u8 maxIndAddrInHash;
+ u8 prel;
+ u16 maxFrameLength;
+ u16 minFrameLength;
+ u16 maxD1Length;
+ u16 maxD2Length;
+ u16 vlantype;
+ u16 vlantci;
+ u32 ecamptr;
+ u32 eventRegMask;
+ u16 pausePeriod;
+ u16 extensionField;
+ u8 phy_address;
+ u32 board_flags;
+ u32 phy_interrupt;
+ u8 weightfactor[NUM_TX_QUEUES];
+ u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
+ u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX];
+ u8 l3qt[UCC_GETH_IP_PRIORITY_MAX];
+ u32 vtagtable[UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX];
+ u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
+ u16 bdRingLenTx[NUM_TX_QUEUES];
+ u16 bdRingLenRx[NUM_RX_QUEUES];
+ enet_interface_e enet_interface;
+ ucc_geth_num_of_station_addresses_e numStationAddresses;
+ qe_fltr_largest_external_tbl_lookup_key_size_e
+ largestexternallookupkeysize;
+ ucc_geth_statistics_gathering_mode_e statisticsMode;
+ ucc_geth_vlan_operation_tagged_e vlanOperationTagged;
+ ucc_geth_vlan_operation_non_tagged_e vlanOperationNonTagged;
+ ucc_geth_qos_mode_e rxQoSMode;
+ ucc_geth_flow_control_mode_e aufc;
+ ucc_geth_maccfg2_pad_and_crc_mode_e padAndCrc;
+ ucc_geth_num_of_threads_e numThreadsTx;
+ ucc_geth_num_of_threads_e numThreadsRx;
+ qe_risc_allocation_e riscTx;
+ qe_risc_allocation_e riscRx;
+} ucc_geth_info_t;
+
+/* structure representing UCC GETH */
+typedef struct ucc_geth_private {
+ ucc_geth_info_t *ug_info;
+ ucc_fast_private_t *uccf;
+ struct net_device *dev;
+ struct net_device_stats stats; /* linux network statistics */
+ ucc_geth_t *ug_regs;
+ ucc_geth_init_pram_t *p_init_enet_param_shadow;
+ ucc_geth_exf_global_pram_t *p_exf_glbl_param;
+ u32 exf_glbl_param_offset;
+ ucc_geth_rx_global_pram_t *p_rx_glbl_pram;
+ u32 rx_glbl_pram_offset;
+ ucc_geth_tx_global_pram_t *p_tx_glbl_pram;
+ u32 tx_glbl_pram_offset;
+ ucc_geth_send_queue_mem_region_t *p_send_q_mem_reg;
+ u32 send_q_mem_reg_offset;
+ ucc_geth_thread_data_tx_t *p_thread_data_tx;
+ u32 thread_dat_tx_offset;
+ ucc_geth_thread_data_rx_t *p_thread_data_rx;
+ u32 thread_dat_rx_offset;
+ ucc_geth_scheduler_t *p_scheduler;
+ u32 scheduler_offset;
+ ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram;
+ u32 tx_fw_statistics_pram_offset;
+ ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram;
+ u32 rx_fw_statistics_pram_offset;
+ ucc_geth_rx_interrupt_coalescing_table_t *p_rx_irq_coalescing_tbl;
+ u32 rx_irq_coalescing_tbl_offset;
+ ucc_geth_rx_bd_queues_entry_t *p_rx_bd_qs_tbl;
+ u32 rx_bd_qs_tbl_offset;
+ u8 *p_tx_bd_ring[NUM_TX_QUEUES];
+ u32 tx_bd_ring_offset[NUM_TX_QUEUES];
+ u8 *p_rx_bd_ring[NUM_RX_QUEUES];
+ u32 rx_bd_ring_offset[NUM_RX_QUEUES];
+ u8 *confBd[NUM_TX_QUEUES];
+ u8 *txBd[NUM_TX_QUEUES];
+ u8 *rxBd[NUM_RX_QUEUES];
+ int badFrame[NUM_RX_QUEUES];
+ u16 cpucount[NUM_TX_QUEUES];
+ volatile u16 *p_cpucount[NUM_TX_QUEUES];
+ int indAddrRegUsed[NUM_OF_PADDRS];
+ enet_addr_t paddr[NUM_OF_PADDRS];
+ u8 numGroupAddrInHash;
+ u8 numIndAddrInHash;
+ u8 numIndAddrInReg;
+ int rx_extended_features;
+ int rx_non_dynamic_extended_features;
+ struct list_head conf_skbs;
+ struct list_head group_hash_q;
+ struct list_head ind_hash_q;
+ u32 saved_uccm;
+ spinlock_t lock;
+ /* pointers to arrays of skbuffs for tx and rx */
+ struct sk_buff **tx_skbuff[NUM_TX_QUEUES];
+ struct sk_buff **rx_skbuff[NUM_RX_QUEUES];
+ /* indices pointing to the next free skb in skb arrays */
+ u16 skb_curtx[NUM_TX_QUEUES];
+ u16 skb_currx[NUM_RX_QUEUES];
+ /* index of the first skb which hasn't been transmitted yet. */
+ u16 skb_dirtytx[NUM_TX_QUEUES];
+
+ struct work_struct tq;
+ struct timer_list phy_info_timer;
+ struct ugeth_mii_info *mii_info;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+} ucc_geth_private_t;
+
+#endif /* __UCC_GETH_H__ */
diff --git a/drivers/net/ucc_geth_phy.c b/drivers/net/ucc_geth_phy.c
new file mode 100644
index 0000000..f91028c
--- /dev/null
+++ b/drivers/net/ucc_geth_phy.c
@@ -0,0 +1,801 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ *
+ * Description:
+ * UCC GETH Driver -- PHY handling
+ *
+ * Changelog:
+ * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
+ * - Rearrange code and style fixes
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "ucc_geth.h"
+#include "ucc_geth_phy.h"
+#include <platforms/83xx/mpc8360e_pb.h>
+
+#define ugphy_printk(level, format, arg...) \
+ printk(level format "\n", ## arg)
+
+#define ugphy_dbg(format, arg...) \
+ ugphy_printk(KERN_DEBUG, format , ## arg)
+#define ugphy_err(format, arg...) \
+ ugphy_printk(KERN_ERR, format , ## arg)
+#define ugphy_info(format, arg...) \
+ ugphy_printk(KERN_INFO, format , ## arg)
+#define ugphy_warn(format, arg...) \
+ ugphy_printk(KERN_WARNING, format , ## arg)
+
+#ifdef UGETH_VERBOSE_DEBUG
+#define ugphy_vdbg ugphy_dbg
+#else
+#define ugphy_vdbg(fmt, args...) do { } while (0)
+#endif /* UGETH_VERBOSE_DEBUG */
+
+static void config_genmii_advert(struct ugeth_mii_info *mii_info);
+static void genmii_setup_forced(struct ugeth_mii_info *mii_info);
+static void genmii_restart_aneg(struct ugeth_mii_info *mii_info);
+static int gbit_config_aneg(struct ugeth_mii_info *mii_info);
+static int genmii_config_aneg(struct ugeth_mii_info *mii_info);
+static int genmii_update_link(struct ugeth_mii_info *mii_info);
+static int genmii_read_status(struct ugeth_mii_info *mii_info);
+u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum);
+void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val);
+
+static u8 *bcsr_regs = NULL;
+
+/* Write value to the PHY for this device to the register at regnum, */
+/* waiting until the write is done before it returns. All PHY */
+/* configuration has to be done through the TSEC1 MIIM regs */
+void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ ucc_mii_mng_t *mii_regs;
+ enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
+ u32 tmp_reg;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ spin_lock_irq(&ugeth->lock);
+
+ mii_regs = ugeth->mii_info->mii_regs;
+
+ /* Set this UCC to be the master of the MII management */
+ ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num);
+
+ /* Stop the MII management read cycle */
+ out_be32(&mii_regs->miimcom, 0);
+ /* Setting up the MII Management Address Register */
+ tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
+ out_be32(&mii_regs->miimadd, tmp_reg);
+
+ /* Setting up the MII Management Control Register with the value */
+ out_be32(&mii_regs->miimcon, (u32) value);
+
+ /* Wait till MII management write is complete */
+ while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY)
+ cpu_relax();
+
+ spin_unlock_irq(&ugeth->lock);
+
+ udelay(10000);
+}
+
+/* Reads from register regnum in the PHY for device dev, */
+/* returning the value. Clears miimcom first. All PHY */
+/* configuration has to be done through the TSEC1 MIIM regs */
+int read_phy_reg(struct net_device *dev, int mii_id, int regnum)
+{
+ ucc_geth_private_t *ugeth = netdev_priv(dev);
+ ucc_mii_mng_t *mii_regs;
+ enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
+ u32 tmp_reg;
+ u16 value;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ spin_lock_irq(&ugeth->lock);
+
+ mii_regs = ugeth->mii_info->mii_regs;
+
+ /* Setting up the MII Management Address Register */
+ tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
+ out_be32(&mii_regs->miimadd, tmp_reg);
+
+ /* Perform an MII management read cycle */
+ out_be32(&mii_regs->miimcom, MIIMCOM_READ_CYCLE);
+
+ /* Wait till MII management read is complete */
+ while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY)
+ cpu_relax();
+
+ udelay(10000);
+
+ /* Read MII management status */
+ value = (u16) in_be32(&mii_regs->miimstat);
+ out_be32(&mii_regs->miimcom, 0);
+ if (value == 0xffff)
+ ugphy_warn("read wrong value : mii_id %d,mii_reg %d, base %08x",
+ mii_id, mii_reg, (u32) & (mii_regs->miimcfg));
+
+ spin_unlock_irq(&ugeth->lock);
+
+ return (value);
+}
+
+void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (mii_info->phyinfo->ack_interrupt)
+ mii_info->phyinfo->ack_interrupt(mii_info);
+}
+
+void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info,
+ u32 interrupts)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ mii_info->interrupts = interrupts;
+ if (mii_info->phyinfo->config_intr)
+ mii_info->phyinfo->config_intr(mii_info);
+}
+
+/* Writes MII_ADVERTISE with the appropriate values, after
+ * sanitizing advertise to make sure only supported features
+ * are advertised
+ */
+static void config_genmii_advert(struct ugeth_mii_info *mii_info)
+{
+ u32 advertise;
+ u16 adv;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Only allow advertising what this PHY supports */
+ mii_info->advertising &= mii_info->phyinfo->features;
+ advertise = mii_info->advertising;
+
+ /* Setup standard advertisement */
+ adv = phy_read(mii_info, MII_ADVERTISE);
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ if (advertise & ADVERTISED_10baseT_Half)
+ adv |= ADVERTISE_10HALF;
+ if (advertise & ADVERTISED_10baseT_Full)
+ adv |= ADVERTISE_10FULL;
+ if (advertise & ADVERTISED_100baseT_Half)
+ adv |= ADVERTISE_100HALF;
+ if (advertise & ADVERTISED_100baseT_Full)
+ adv |= ADVERTISE_100FULL;
+ phy_write(mii_info, MII_ADVERTISE, adv);
+}
+
+static void genmii_setup_forced(struct ugeth_mii_info *mii_info)
+{
+ u16 ctrl;
+ u32 features = mii_info->phyinfo->features;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ ctrl = phy_read(mii_info, MII_BMCR);
+
+ ctrl &=
+ ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
+ ctrl |= BMCR_RESET;
+
+ switch (mii_info->speed) {
+ case SPEED_1000:
+ if (features & (SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full)) {
+ ctrl |= BMCR_SPEED1000;
+ break;
+ }
+ mii_info->speed = SPEED_100;
+ case SPEED_100:
+ if (features & (SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full)) {
+ ctrl |= BMCR_SPEED100;
+ break;
+ }
+ mii_info->speed = SPEED_10;
+ case SPEED_10:
+ if (features & (SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full))
+ break;
+ default: /* Unsupported speed! */
+ ugphy_err("%s: Bad speed!", mii_info->dev->name);
+ break;
+ }
+
+ phy_write(mii_info, MII_BMCR, ctrl);
+}
+
+/* Enable and Restart Autonegotiation */
+static void genmii_restart_aneg(struct ugeth_mii_info *mii_info)
+{
+ u16 ctl;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ ctl = phy_read(mii_info, MII_BMCR);
+ ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ phy_write(mii_info, MII_BMCR, ctl);
+}
+
+static int gbit_config_aneg(struct ugeth_mii_info *mii_info)
+{
+ u16 adv;
+ u32 advertise;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (mii_info->autoneg) {
+ /* Configure the ADVERTISE register */
+ config_genmii_advert(mii_info);
+ advertise = mii_info->advertising;
+
+ adv = phy_read(mii_info, MII_1000BASETCONTROL);
+ adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
+ MII_1000BASETCONTROL_HALFDUPLEXCAP);
+ if (advertise & SUPPORTED_1000baseT_Half)
+ adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
+ if (advertise & SUPPORTED_1000baseT_Full)
+ adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
+ phy_write(mii_info, MII_1000BASETCONTROL, adv);
+
+ /* Start/Restart aneg */
+ genmii_restart_aneg(mii_info);
+ } else
+ genmii_setup_forced(mii_info);
+
+ return 0;
+}
+
+static int genmii_config_aneg(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (mii_info->autoneg) {
+ config_genmii_advert(mii_info);
+ genmii_restart_aneg(mii_info);
+ } else
+ genmii_setup_forced(mii_info);
+
+ return 0;
+}
+
+static int genmii_update_link(struct ugeth_mii_info *mii_info)
+{
+ u16 status;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Do a fake read */
+ phy_read(mii_info, MII_BMSR);
+
+ /* Read link and autonegotiation status */
+ status = phy_read(mii_info, MII_BMSR);
+ if ((status & BMSR_LSTATUS) == 0)
+ mii_info->link = 0;
+ else
+ mii_info->link = 1;
+
+ /* If we are autonegotiating, and not done,
+ * return an error */
+ if (mii_info->autoneg && !(status & BMSR_ANEGCOMPLETE))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int genmii_read_status(struct ugeth_mii_info *mii_info)
+{
+ u16 status;
+ int err;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genmii_update_link(mii_info);
+ if (err)
+ return err;
+
+ if (mii_info->autoneg) {
+ status = phy_read(mii_info, MII_LPA);
+
+ if (status & (LPA_10FULL | LPA_100FULL))
+ mii_info->duplex = DUPLEX_FULL;
+ else
+ mii_info->duplex = DUPLEX_HALF;
+ if (status & (LPA_100FULL | LPA_100HALF))
+ mii_info->speed = SPEED_100;
+ else
+ mii_info->speed = SPEED_10;
+ mii_info->pause = 0;
+ }
+ /* On non-aneg, we assume what we put in BMCR is the speed,
+ * though magic-aneg shouldn't prevent this case from occurring
+ */
+
+ return 0;
+}
+
+static int marvell_init(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ phy_write(mii_info, 0x14, 0x0cd2);
+ phy_write(mii_info, MII_BMCR,
+ phy_read(mii_info, MII_BMCR) | BMCR_RESET);
+ msleep(4000);
+
+ return 0;
+}
+
+static int marvell_config_aneg(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* The Marvell PHY has an erratum which requires
+ * that certain registers get written in order
+ * to restart autonegotiation */
+ phy_write(mii_info, MII_BMCR, BMCR_RESET);
+
+ phy_write(mii_info, 0x1d, 0x1f);
+ phy_write(mii_info, 0x1e, 0x200c);
+ phy_write(mii_info, 0x1d, 0x5);
+ phy_write(mii_info, 0x1e, 0);
+ phy_write(mii_info, 0x1e, 0x100);
+
+ gbit_config_aneg(mii_info);
+
+ return 0;
+}
+
+static int marvell_read_status(struct ugeth_mii_info *mii_info)
+{
+ u16 status;
+ int err;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genmii_update_link(mii_info);
+ if (err)
+ return err;
+
+ /* If the link is up, read the speed and duplex */
+ /* If we aren't autonegotiating, assume speeds
+ * are as set */
+ if (mii_info->autoneg && mii_info->link) {
+ int speed;
+ status = phy_read(mii_info, MII_M1011_PHY_SPEC_STATUS);
+
+ /* Get the duplex */
+ if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX)
+ mii_info->duplex = DUPLEX_FULL;
+ else
+ mii_info->duplex = DUPLEX_HALF;
+
+ /* Get the speed */
+ speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK;
+ switch (speed) {
+ case MII_M1011_PHY_SPEC_STATUS_1000:
+ mii_info->speed = SPEED_1000;
+ break;
+ case MII_M1011_PHY_SPEC_STATUS_100:
+ mii_info->speed = SPEED_100;
+ break;
+ default:
+ mii_info->speed = SPEED_10;
+ break;
+ }
+ mii_info->pause = 0;
+ }
+
+ return 0;
+}
+
+static int marvell_ack_interrupt(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Clear the interrupts by reading the reg */
+ phy_read(mii_info, MII_M1011_IEVENT);
+
+ return 0;
+}
+
+static int marvell_config_intr(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
+ phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
+ else
+ phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
+
+ return 0;
+}
+
+static int cis820x_init(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ phy_write(mii_info, MII_CIS8201_AUX_CONSTAT,
+ MII_CIS8201_AUXCONSTAT_INIT);
+ phy_write(mii_info, MII_CIS8201_EXT_CON1, MII_CIS8201_EXTCON1_INIT);
+
+ return 0;
+}
+
+static int cis820x_read_status(struct ugeth_mii_info *mii_info)
+{
+ u16 status;
+ int err;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genmii_update_link(mii_info);
+ if (err)
+ return err;
+
+ /* If the link is up, read the speed and duplex */
+ /* If we aren't autonegotiating, assume speeds
+ * are as set */
+ if (mii_info->autoneg && mii_info->link) {
+ int speed;
+
+ status = phy_read(mii_info, MII_CIS8201_AUX_CONSTAT);
+ if (status & MII_CIS8201_AUXCONSTAT_DUPLEX)
+ mii_info->duplex = DUPLEX_FULL;
+ else
+ mii_info->duplex = DUPLEX_HALF;
+
+ speed = status & MII_CIS8201_AUXCONSTAT_SPEED;
+
+ switch (speed) {
+ case MII_CIS8201_AUXCONSTAT_GBIT:
+ mii_info->speed = SPEED_1000;
+ break;
+ case MII_CIS8201_AUXCONSTAT_100:
+ mii_info->speed = SPEED_100;
+ break;
+ default:
+ mii_info->speed = SPEED_10;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int cis820x_ack_interrupt(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ phy_read(mii_info, MII_CIS8201_ISTAT);
+
+ return 0;
+}
+
+static int cis820x_config_intr(struct ugeth_mii_info *mii_info)
+{
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
+ phy_write(mii_info, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK);
+ else
+ phy_write(mii_info, MII_CIS8201_IMASK, 0);
+
+ return 0;
+}
+
+#define DM9161_DELAY 10
+
+static int dm9161_read_status(struct ugeth_mii_info *mii_info)
+{
+ u16 status;
+ int err;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Update the link, but return if there
+ * was an error */
+ err = genmii_update_link(mii_info);
+ if (err)
+ return err;
+
+ /* If the link is up, read the speed and duplex */
+ /* If we aren't autonegotiating, assume speeds
+ * are as set */
+ if (mii_info->autoneg && mii_info->link) {
+ status = phy_read(mii_info, MII_DM9161_SCSR);
+ if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H))
+ mii_info->speed = SPEED_100;
+ else
+ mii_info->speed = SPEED_10;
+
+ if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F))
+ mii_info->duplex = DUPLEX_FULL;
+ else
+ mii_info->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+static int dm9161_config_aneg(struct ugeth_mii_info *mii_info)
+{
+ struct dm9161_private *priv = mii_info->priv;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (0 == priv->resetdone)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static void dm9161_timer(unsigned long data)
+{
+ struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data;
+ struct dm9161_private *priv = mii_info->priv;
+ u16 status = phy_read(mii_info, MII_BMSR);
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (status & BMSR_ANEGCOMPLETE) {
+ priv->resetdone = 1;
+ } else
+ mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
+}
+
+static int dm9161_init(struct ugeth_mii_info *mii_info)
+{
+ struct dm9161_private *priv;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Allocate the private data structure */
+ priv = kmalloc(sizeof(struct dm9161_private), GFP_KERNEL);
+
+ if (NULL == priv)
+ return -ENOMEM;
+
+ mii_info->priv = priv;
+
+ /* Reset is not done yet */
+ priv->resetdone = 0;
+
+ phy_write(mii_info, MII_BMCR,
+ phy_read(mii_info, MII_BMCR) | BMCR_RESET);
+
+ phy_write(mii_info, MII_BMCR,
+ phy_read(mii_info, MII_BMCR) & ~BMCR_ISOLATE);
+
+ config_genmii_advert(mii_info);
+ /* Start/Restart aneg */
+ genmii_config_aneg(mii_info);
+
+ /* Start a timer for DM9161_DELAY seconds to wait
+ * for the PHY to be ready */
+ init_timer(&priv->timer);
+ priv->timer.function = &dm9161_timer;
+ priv->timer.data = (unsigned long)mii_info;
+ mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
+
+ return 0;
+}
+
+static void dm9161_close(struct ugeth_mii_info *mii_info)
+{
+ struct dm9161_private *priv = mii_info->priv;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ del_timer_sync(&priv->timer);
+ kfree(priv);
+}
+
+static int dm9161_ack_interrupt(struct ugeth_mii_info *mii_info)
+{
+/* FIXME: These lines are for bug fixing in the MPC8325.
+Remove them from here when it's fixed */
+ if (bcsr_regs == NULL)
+ bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE);
+ bcsr_regs[14] |= 0x40;
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Clear the interrupts by reading the reg */
+ phy_read(mii_info, MII_DM9161_INTR);
+
+
+ return 0;
+}
+
+static int dm9161_config_intr(struct ugeth_mii_info *mii_info)
+{
+/* FIXME: These lines are for bug fixing in the MPC8325.
+Remove them from here when it's fixed */
+ if (bcsr_regs == NULL) {
+ bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE);
+ bcsr_regs[14] &= ~0x40;
+ }
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
+ phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_INIT);
+ else
+ phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_STOP);
+
+ return 0;
+}
+
+/* Cicada 820x */
+static struct phy_info phy_info_cis820x = {
+ .phy_id = 0x000fc440,
+ .name = "Cicada Cis8204",
+ .phy_id_mask = 0x000fffc0,
+ .features = MII_GBIT_FEATURES,
+ .init = &cis820x_init,
+ .config_aneg = &gbit_config_aneg,
+ .read_status = &cis820x_read_status,
+ .ack_interrupt = &cis820x_ack_interrupt,
+ .config_intr = &cis820x_config_intr,
+};
+
+static struct phy_info phy_info_dm9161 = {
+ .phy_id = 0x0181b880,
+ .phy_id_mask = 0x0ffffff0,
+ .name = "Davicom DM9161E",
+ .init = dm9161_init,
+ .config_aneg = dm9161_config_aneg,
+ .read_status = dm9161_read_status,
+ .close = dm9161_close,
+};
+
+static struct phy_info phy_info_dm9161a = {
+ .phy_id = 0x0181b8a0,
+ .phy_id_mask = 0x0ffffff0,
+ .name = "Davicom DM9161A",
+ .features = MII_BASIC_FEATURES,
+ .init = dm9161_init,
+ .config_aneg = dm9161_config_aneg,
+ .read_status = dm9161_read_status,
+ .ack_interrupt = dm9161_ack_interrupt,
+ .config_intr = dm9161_config_intr,
+ .close = dm9161_close,
+};
+
+static struct phy_info phy_info_marvell = {
+ .phy_id = 0x01410c00,
+ .phy_id_mask = 0xffffff00,
+ .name = "Marvell 88E11x1",
+ .features = MII_GBIT_FEATURES,
+ .init = &marvell_init,
+ .config_aneg = &marvell_config_aneg,
+ .read_status = &marvell_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+};
+
+static struct phy_info phy_info_genmii = {
+ .phy_id = 0x00000000,
+ .phy_id_mask = 0x00000000,
+ .name = "Generic MII",
+ .features = MII_BASIC_FEATURES,
+ .config_aneg = genmii_config_aneg,
+ .read_status = genmii_read_status,
+};
+
+static struct phy_info *phy_info[] = {
+ &phy_info_cis820x,
+ &phy_info_marvell,
+ &phy_info_dm9161,
+ &phy_info_dm9161a,
+ &phy_info_genmii,
+ NULL
+};
+
+u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum)
+{
+ u16 retval;
+ unsigned long flags;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ spin_lock_irqsave(&mii_info->mdio_lock, flags);
+ retval = mii_info->mdio_read(mii_info->dev, mii_info->mii_id, regnum);
+ spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
+
+ return retval;
+}
+
+void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val)
+{
+ unsigned long flags;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ spin_lock_irqsave(&mii_info->mdio_lock, flags);
+ mii_info->mdio_write(mii_info->dev, mii_info->mii_id, regnum, val);
+ spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
+}
+
+/* Use the PHY ID registers to determine what type of PHY is attached
+ * to device dev. Returns a struct phy_info structure describing that PHY.
+ */
+struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info)
+{
+ u16 phy_reg;
+ u32 phy_ID;
+ int i;
+ struct phy_info *theInfo = NULL;
+ struct net_device *dev = mii_info->dev;
+
+ ugphy_vdbg("%s: IN", __FUNCTION__);
+
+ /* Grab the bits from PHYIR1, and put them in the upper half */
+ phy_reg = phy_read(mii_info, MII_PHYSID1);
+ phy_ID = (phy_reg & 0xffff) << 16;
+
+ /* Grab the bits from PHYIR2, and put them in the lower half */
+ phy_reg = phy_read(mii_info, MII_PHYSID2);
+ phy_ID |= (phy_reg & 0xffff);
+
+ /* loop through all the known PHY types, and find one that */
+ /* matches the ID we read from the PHY. */
+ for (i = 0; phy_info[i]; i++)
+ if (phy_info[i]->phy_id == (phy_ID & phy_info[i]->phy_id_mask)){
+ theInfo = phy_info[i];
+ break;
+ }
+
+ /* This shouldn't happen, as we have generic PHY support */
+ if (theInfo == NULL) {
+ ugphy_info("%s: PHY id %x is not supported!", dev->name,
+ phy_ID);
+ return NULL;
+ } else {
+ ugphy_info("%s: PHY is %s (%x)", dev->name, theInfo->name,
+ phy_ID);
+ }
+
+ return theInfo;
+}
diff --git a/drivers/net/ucc_geth_phy.h b/drivers/net/ucc_geth_phy.h
new file mode 100644
index 0000000..2f98b8f
--- /dev/null
+++ b/drivers/net/ucc_geth_phy.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
+ *
+ * Author: Shlomi Gridish <gridish@freescale.com>
+ *
+ * Description:
+ * UCC GETH Driver -- PHY handling
+ *
+ * Changelog:
+ * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
+ * - Rearrange code and style fixes
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __UCC_GETH_PHY_H__
+#define __UCC_GETH_PHY_H__
+
+#define MII_end ((u32)-2)
+#define MII_read ((u32)-1)
+
+#define MIIMIND_BUSY 0x00000001
+#define MIIMIND_NOTVALID 0x00000004
+
+#define UGETH_AN_TIMEOUT 2000
+
+/* 1000BT control (Marvell & BCM54xx at least) */
+#define MII_1000BASETCONTROL 0x09
+#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200
+#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100
+
+/* Cicada Extended Control Register 1 */
+#define MII_CIS8201_EXT_CON1 0x17
+#define MII_CIS8201_EXTCON1_INIT 0x0000
+
+/* Cicada Interrupt Mask Register */
+#define MII_CIS8201_IMASK 0x19
+#define MII_CIS8201_IMASK_IEN 0x8000
+#define MII_CIS8201_IMASK_SPEED 0x4000
+#define MII_CIS8201_IMASK_LINK 0x2000
+#define MII_CIS8201_IMASK_DUPLEX 0x1000
+#define MII_CIS8201_IMASK_MASK 0xf000
+
+/* Cicada Interrupt Status Register */
+#define MII_CIS8201_ISTAT 0x1a
+#define MII_CIS8201_ISTAT_STATUS 0x8000
+#define MII_CIS8201_ISTAT_SPEED 0x4000
+#define MII_CIS8201_ISTAT_LINK 0x2000
+#define MII_CIS8201_ISTAT_DUPLEX 0x1000
+
+/* Cicada Auxiliary Control/Status Register */
+#define MII_CIS8201_AUX_CONSTAT 0x1c
+#define MII_CIS8201_AUXCONSTAT_INIT 0x0004
+#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020
+#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018
+#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010
+#define MII_CIS8201_AUXCONSTAT_100 0x0008
+
+/* 88E1011 PHY Status Register */
+#define MII_M1011_PHY_SPEC_STATUS 0x11
+#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000
+#define MII_M1011_PHY_SPEC_STATUS_100 0x4000
+#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000
+#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000
+#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800
+#define MII_M1011_PHY_SPEC_STATUS_LINK 0x0400
+
+#define MII_M1011_IEVENT 0x13
+#define MII_M1011_IEVENT_CLEAR 0x0000
+
+#define MII_M1011_IMASK 0x12
+#define MII_M1011_IMASK_INIT 0x6400
+#define MII_M1011_IMASK_CLEAR 0x0000
+
+#define MII_DM9161_SCR 0x10
+#define MII_DM9161_SCR_INIT 0x0610
+
+/* DM9161 Specified Configuration and Status Register */
+#define MII_DM9161_SCSR 0x11
+#define MII_DM9161_SCSR_100F 0x8000
+#define MII_DM9161_SCSR_100H 0x4000
+#define MII_DM9161_SCSR_10F 0x2000
+#define MII_DM9161_SCSR_10H 0x1000
+
+/* DM9161 Interrupt Register */
+#define MII_DM9161_INTR 0x15
+#define MII_DM9161_INTR_PEND 0x8000
+#define MII_DM9161_INTR_DPLX_MASK 0x0800
+#define MII_DM9161_INTR_SPD_MASK 0x0400
+#define MII_DM9161_INTR_LINK_MASK 0x0200
+#define MII_DM9161_INTR_MASK 0x0100
+#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
+#define MII_DM9161_INTR_SPD_CHANGE 0x0008
+#define MII_DM9161_INTR_LINK_CHANGE 0x0004
+#define MII_DM9161_INTR_INIT 0x0000
+#define MII_DM9161_INTR_STOP \
+(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
+ | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
+
+/* DM9161 10BT Configuration/Status */
+#define MII_DM9161_10BTCSR 0x12
+#define MII_DM9161_10BTCSR_INIT 0x7800
+
+#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | \
+ SUPPORTED_10baseT_Full | \
+ SUPPORTED_100baseT_Half | \
+ SUPPORTED_100baseT_Full | \
+ SUPPORTED_Autoneg | \
+ SUPPORTED_TP | \
+ SUPPORTED_MII)
+
+#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \
+ SUPPORTED_1000baseT_Half | \
+ SUPPORTED_1000baseT_Full)
+
+#define MII_READ_COMMAND 0x00000001
+
+#define MII_INTERRUPT_DISABLED 0x0
+#define MII_INTERRUPT_ENABLED 0x1
+/* Taken from mii_if_info and sungem_phy.h */
+struct ugeth_mii_info {
+ /* Information about the PHY type */
+ /* And management functions */
+ struct phy_info *phyinfo;
+
+ ucc_mii_mng_t *mii_regs;
+
+ /* forced speed & duplex (no autoneg)
+ * partner speed & duplex & pause (autoneg)
+ */
+ int speed;
+ int duplex;
+ int pause;
+
+ /* The most recently read link state */
+ int link;
+
+ /* Enabled Interrupts */
+ u32 interrupts;
+
+ u32 advertising;
+ int autoneg;
+ int mii_id;
+
+ /* private data pointer */
+ /* For use by PHYs to maintain extra state */
+ void *priv;
+
+ /* Provided by host chip */
+ struct net_device *dev;
+
+ /* A lock to ensure that only one thing can read/write
+ * the MDIO bus at a time */
+ spinlock_t mdio_lock;
+
+ /* Provided by ethernet driver */
+ int (*mdio_read) (struct net_device * dev, int mii_id, int reg);
+ void (*mdio_write) (struct net_device * dev, int mii_id, int reg,
+ int val);
+};
+
+/* struct phy_info: a structure which defines attributes for a PHY
+ *
+ * id will contain a number which represents the PHY. During
+ * startup, the driver will poll the PHY to find out what its
+ * UID--as defined by registers 2 and 3--is. The 32-bit result
+ * read from the PHY will be ANDed with phy_id_mask to
+ * discard any bits which may change based on revision numbers
+ * unimportant to functionality.
+ *
+ * There are 6 commands which take a ugeth_mii_info structure.
+ * Each PHY must declare config_aneg and read_status.
+ */
+struct phy_info {
+ u32 phy_id;
+ char *name;
+ unsigned int phy_id_mask;
+ u32 features;
+
+ /* Called to initialize the PHY */
+ int (*init) (struct ugeth_mii_info * mii_info);
+
+ /* Called to suspend the PHY for power */
+ int (*suspend) (struct ugeth_mii_info * mii_info);
+
+ /* Reconfigures autonegotiation (or disables it) */
+ int (*config_aneg) (struct ugeth_mii_info * mii_info);
+
+ /* Determines the negotiated speed and duplex */
+ int (*read_status) (struct ugeth_mii_info * mii_info);
+
+ /* Clears any pending interrupts */
+ int (*ack_interrupt) (struct ugeth_mii_info * mii_info);
+
+ /* Enables or disables interrupts */
+ int (*config_intr) (struct ugeth_mii_info * mii_info);
+
+ /* Clears up any memory if needed */
+ void (*close) (struct ugeth_mii_info * mii_info);
+};
+
+struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info);
+void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value);
+int read_phy_reg(struct net_device *dev, int mii_id, int regnum);
+void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info);
+void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info,
+ u32 interrupts);
+
+struct dm9161_private {
+ struct timer_list timer;
+ int resetdone;
+};
+
+#endif /* __UCC_GETH_PHY_H__ */
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index d3d0ec9..ae97108 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -30,8 +30,8 @@
*/
#define DRV_NAME "via-rhine"
-#define DRV_VERSION "1.4.0"
-#define DRV_RELDATE "June-27-2006"
+#define DRV_VERSION "1.4.1"
+#define DRV_RELDATE "July-24-2006"
/* A few user-configurable values.
@@ -44,6 +44,10 @@ static int max_interrupt_work = 20;
Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;
+/* Work-around for broken BIOSes: they are unable to get the chip back out of
+ power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
+static int avoid_D3;
+
/*
* In case you are looking for 'options[]' or 'full_duplex[]', they
* are gone. Use ethtool(8) instead.
@@ -63,7 +67,11 @@ static const int multicast_filter_limit = 32;
There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE 16
#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
+#ifdef CONFIG_VIA_RHINE_NAPI
+#define RX_RING_SIZE 64
+#else
#define RX_RING_SIZE 16
+#endif
/* Operational parameters that usually are not changed. */
@@ -116,9 +124,11 @@ MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
+module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
+MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
/*
Theory of Operation
@@ -396,7 +406,7 @@ static void rhine_tx_timeout(struct net_device *dev);
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void rhine_tx(struct net_device *dev);
-static void rhine_rx(struct net_device *dev);
+static int rhine_rx(struct net_device *dev, int limit);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
@@ -564,6 +574,32 @@ static void rhine_poll(struct net_device *dev)
}
#endif
+#ifdef CONFIG_VIA_RHINE_NAPI
+static int rhine_napipoll(struct net_device *dev, int *budget)
+{
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
+ int done, limit = min(dev->quota, *budget);
+
+ done = rhine_rx(dev, limit);
+ *budget -= done;
+ dev->quota -= done;
+
+ if (done < limit) {
+ netif_rx_complete(dev);
+
+ iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
+ IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
+ IntrTxDone | IntrTxError | IntrTxUnderrun |
+ IntrPCIErr | IntrStatsMax | IntrLinkChange,
+ ioaddr + IntrEnable);
+ return 0;
+ }
+ else
+ return 1;
+}
+#endif
+
static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
struct rhine_private *rp = netdev_priv(dev);
@@ -744,6 +780,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = rhine_poll;
#endif
+#ifdef CONFIG_VIA_RHINE_NAPI
+ dev->poll = rhine_napipoll;
+ dev->weight = 64;
+#endif
if (rp->quirks & rqRhineI)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
@@ -789,6 +829,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
}
}
rp->mii_if.phy_id = phy_id;
+ if (debug > 1 && avoid_D3)
+ printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
+ dev->name);
return 0;
@@ -1014,6 +1057,8 @@ static void init_registers(struct net_device *dev)
rhine_set_rx_mode(dev);
+ netif_poll_enable(dev);
+
/* Enable interrupts by setting the interrupt mask. */
iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
@@ -1268,8 +1313,18 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *
dev->name, intr_status);
if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
- IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
- rhine_rx(dev);
+ IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
+#ifdef CONFIG_VIA_RHINE_NAPI
+ iowrite16(IntrTxAborted |
+ IntrTxDone | IntrTxError | IntrTxUnderrun |
+ IntrPCIErr | IntrStatsMax | IntrLinkChange,
+ ioaddr + IntrEnable);
+
+ netif_rx_schedule(dev);
+#else
+ rhine_rx(dev, RX_RING_SIZE);
+#endif
+ }
if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
if (intr_status & IntrTxErrSummary) {
@@ -1367,13 +1422,12 @@ static void rhine_tx(struct net_device *dev)
spin_unlock(&rp->lock);
}
-/* This routine is logically part of the interrupt handler, but isolated
- for clarity and better register allocation. */
-static void rhine_rx(struct net_device *dev)
+/* Process up to limit frames from receive ring */
+static int rhine_rx(struct net_device *dev, int limit)
{
struct rhine_private *rp = netdev_priv(dev);
+ int count;
int entry = rp->cur_rx % RX_RING_SIZE;
- int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;
if (debug > 4) {
printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
@@ -1382,16 +1436,18 @@ static void rhine_rx(struct net_device *dev)
}
/* If EOP is set on the next entry, it's a new packet. Send it up. */
- while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
+ for (count = 0; count < limit; ++count) {
struct rx_desc *desc = rp->rx_head_desc;
u32 desc_status = le32_to_cpu(desc->rx_status);
int data_size = desc_status >> 16;
+ if (desc_status & DescOwn)
+ break;
+
if (debug > 4)
printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
desc_status);
- if (--boguscnt < 0)
- break;
+
if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
if ((desc_status & RxWholePkt) != RxWholePkt) {
printk(KERN_WARNING "%s: Oversized Ethernet "
@@ -1460,7 +1516,11 @@ static void rhine_rx(struct net_device *dev)
PCI_DMA_FROMDEVICE);
}
skb->protocol = eth_type_trans(skb, dev);
+#ifdef CONFIG_VIA_RHINE_NAPI
+ netif_receive_skb(skb);
+#else
netif_rx(skb);
+#endif
dev->last_rx = jiffies;
rp->stats.rx_bytes += pkt_len;
rp->stats.rx_packets++;
@@ -1487,6 +1547,8 @@ static void rhine_rx(struct net_device *dev)
}
rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
}
+
+ return count;
}
/*
@@ -1776,6 +1838,7 @@ static int rhine_close(struct net_device *dev)
spin_lock_irq(&rp->lock);
netif_stop_queue(dev);
+ netif_poll_disable(dev);
if (debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard, "
@@ -1857,7 +1920,8 @@ static void rhine_shutdown (struct pci_dev *pdev)
}
/* Hit power state D3 (sleep) */
- iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
+ if (!avoid_D3)
+ iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
/* TODO: Check use of pci_enable_wake() */
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 435e91e..6b63b35 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -118,7 +118,7 @@ static inline void openwin(card_t *card, u8 page)
static inline void set_carrier(port_t *port)
{
- if (!sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD)
+ if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD))
netif_carrier_on(port_to_dev(port));
else
netif_carrier_off(port_to_dev(port));
@@ -127,10 +127,10 @@ static inline void set_carrier(port_t *port)
static void sca_msci_intr(port_t *port)
{
- u8 stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI ST1 status */
+ u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */
- /* Reset MSCI TX underrun status bit */
- sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, port);
+ /* Reset MSCI TX underrun and CDCD (ignored) status bit */
+ sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port);
if (stat & ST1_UDRN) {
struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
@@ -138,6 +138,7 @@ static void sca_msci_intr(port_t *port)
stats->tx_fifo_errors++;
}
+ stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */
/* Reset MSCI CDCD status bit - uses ch#2 DCD input */
sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port);
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index 7caa8dc..b1ba187 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -500,8 +500,8 @@ MODULE_LICENSE("GPL");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
-int
-init_module(void)
+
+int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index dafaa5f..d500012 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -1042,6 +1042,9 @@ static int prism2_reset_port(struct net_device *dev)
dev->name, local->fragm_threshold);
}
+ /* Some firmwares lose antenna selection settings on reset */
+ (void) hostap_set_antsel(local);
+
return res;
}
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index 7f78b78..bcc7038 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -242,7 +242,7 @@ spectrum_reset(struct pcmcia_device *link, int idle)
u_int save_cor;
/* Doing it if the hardware is gone is a guaranteed crash */
- if (pcmcia_dev_present(link))
+ if (!pcmcia_dev_present(link))
return -ENODEV;
/* Save original COR value */
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 3c148ea..8a60f39 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -76,7 +76,7 @@ config HOTPLUG_PCI_IBM
config HOTPLUG_PCI_ACPI
tristate "ACPI PCI Hotplug driver"
- depends on ACPI_DOCK && HOTPLUG_PCI
+ depends on (!ACPI_DOCK && ACPI && HOTPLUG_PCI) || (ACPI_DOCK && HOTPLUG_PCI)
help
Say Y here if you have a system that supports PCI Hotplug using
ACPI.
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 02be74c..4afcaff 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -254,8 +254,8 @@ int cpci_led_off(struct slot* slot)
int cpci_configure_slot(struct slot* slot)
{
- unsigned char busnr;
- struct pci_bus *child;
+ struct pci_bus *parent;
+ int fn;
dbg("%s - enter", __FUNCTION__);
@@ -276,23 +276,53 @@ int cpci_configure_slot(struct slot* slot)
*/
n = pci_scan_slot(slot->bus, slot->devfn);
dbg("%s: pci_scan_slot returned %d", __FUNCTION__, n);
- if (n > 0)
- pci_bus_add_devices(slot->bus);
slot->dev = pci_get_slot(slot->bus, slot->devfn);
if (slot->dev == NULL) {
err("Could not find PCI device for slot %02x", slot->number);
- return 1;
+ return -ENODEV;
}
}
-
- if (slot->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- pci_read_config_byte(slot->dev, PCI_SECONDARY_BUS, &busnr);
- child = pci_add_new_bus(slot->dev->bus, slot->dev, busnr);
- pci_do_scan_bus(child);
- pci_bus_size_bridges(child);
+ parent = slot->dev->bus;
+
+ for (fn = 0; fn < 8; fn++) {
+ struct pci_dev *dev;
+
+ dev = pci_get_slot(parent, PCI_DEVFN(PCI_SLOT(slot->devfn), fn));
+ if (!dev)
+ continue;
+ if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
+ (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
+ /* Find an unused bus number for the new bridge */
+ struct pci_bus *child;
+ unsigned char busnr, start = parent->secondary;
+ unsigned char end = parent->subordinate;
+
+ for (busnr = start; busnr <= end; busnr++) {
+ if (!pci_find_bus(pci_domain_nr(parent),
+ busnr))
+ break;
+ }
+ if (busnr >= end) {
+ err("No free bus for hot-added bridge\n");
+ pci_dev_put(dev);
+ continue;
+ }
+ child = pci_add_new_bus(parent, dev, busnr);
+ if (!child) {
+ err("Cannot add new bus for %s\n",
+ pci_name(dev));
+ pci_dev_put(dev);
+ continue;
+ }
+ child->subordinate = pci_do_scan_bus(child);
+ pci_bus_size_bridges(child);
+ }
+ pci_dev_put(dev);
}
- pci_bus_assign_resources(slot->dev->bus);
+ pci_bus_assign_resources(parent);
+ pci_bus_add_devices(parent);
+ pci_enable_bridges(parent);
dbg("%s - exit", __FUNCTION__);
return 0;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 10e1a90..474e9cd 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -139,9 +139,8 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
/**
* pci_match_device - Tell if a PCI device structure has a matching
* PCI device id structure
- * @ids: array of PCI device id structures to search in
- * @dev: the PCI device structure to match against
* @drv: the PCI driver to match against
+ * @dev: the PCI device structure to match against
*
* Used by a driver to check whether a PCI device present in the
* system is in its list of supported devices. Returns the matching
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 2d967b4..b497038 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -438,6 +438,7 @@ static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev)
pci_read_config_dword(dev, 0x48, &region);
quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO");
}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi );
/*
@@ -1091,7 +1092,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asu
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc );
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc );
static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
{
@@ -1518,6 +1518,63 @@ static void __devinit quirk_netmos(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos);
+static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
+{
+ u16 command;
+ u32 bar;
+ u8 __iomem *csr;
+ u8 cmd_hi;
+
+ switch (dev->device) {
+ /* PCI IDs taken from drivers/net/e100.c */
+ case 0x1029:
+ case 0x1030 ... 0x1034:
+ case 0x1038 ... 0x103E:
+ case 0x1050 ... 0x1057:
+ case 0x1059:
+ case 0x1064 ... 0x106B:
+ case 0x1091 ... 0x1095:
+ case 0x1209:
+ case 0x1229:
+ case 0x2449:
+ case 0x2459:
+ case 0x245D:
+ case 0x27DC:
+ break;
+ default:
+ return;
+ }
+
+ /*
+ * Some firmware hands off the e100 with interrupts enabled,
+ * which can cause a flood of interrupts if packets are
+ * received before the driver attaches to the device. So
+ * disable all e100 interrupts here. The driver will
+ * re-enable them when it's ready.
+ */
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar);
+
+ if (!(command & PCI_COMMAND_MEMORY) || !bar)
+ return;
+
+ csr = ioremap(bar, 8);
+ if (!csr) {
+ printk(KERN_WARNING "PCI: Can't map %s e100 registers\n",
+ pci_name(dev));
+ return;
+ }
+
+ cmd_hi = readb(csr + 3);
+ if (cmd_hi == 0) {
+ printk(KERN_WARNING "PCI: Firmware left %s e100 interrupts "
+ "enabled, disabling\n", pci_name(dev));
+ writeb(1, csr + 3);
+ }
+
+ iounmap(csr);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt);
static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
{
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index d6d1bff..2c7de79 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -69,12 +69,12 @@ static void s3c_rtc_setaie(int to)
pr_debug("%s: aie=%d\n", __FUNCTION__, to);
- tmp = readb(S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
+ tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
if (to)
tmp |= S3C2410_RTCALM_ALMEN;
- writeb(tmp, S3C2410_RTCALM);
+ writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
}
static void s3c_rtc_setpie(int to)
@@ -84,12 +84,12 @@ static void s3c_rtc_setpie(int to)
pr_debug("%s: pie=%d\n", __FUNCTION__, to);
spin_lock_irq(&s3c_rtc_pie_lock);
- tmp = readb(S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE;
+ tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE;
if (to)
tmp |= S3C2410_TICNT_ENABLE;
- writeb(tmp, S3C2410_TICNT);
+ writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
spin_unlock_irq(&s3c_rtc_pie_lock);
}
@@ -98,13 +98,13 @@ static void s3c_rtc_setfreq(int freq)
unsigned int tmp;
spin_lock_irq(&s3c_rtc_pie_lock);
- tmp = readb(S3C2410_TICNT) & S3C2410_TICNT_ENABLE;
+ tmp = readb(s3c_rtc_base + S3C2410_TICNT) & S3C2410_TICNT_ENABLE;
s3c_rtc_freq = freq;
tmp |= (128 / freq)-1;
- writeb(tmp, S3C2410_TICNT);
+ writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
spin_unlock_irq(&s3c_rtc_pie_lock);
}
@@ -113,14 +113,15 @@ static void s3c_rtc_setfreq(int freq)
static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
{
unsigned int have_retried = 0;
+ void __iomem *base = s3c_rtc_base;
retry_get_time:
- rtc_tm->tm_min = readb(S3C2410_RTCMIN);
- rtc_tm->tm_hour = readb(S3C2410_RTCHOUR);
- rtc_tm->tm_mday = readb(S3C2410_RTCDATE);
- rtc_tm->tm_mon = readb(S3C2410_RTCMON);
- rtc_tm->tm_year = readb(S3C2410_RTCYEAR);
- rtc_tm->tm_sec = readb(S3C2410_RTCSEC);
+ rtc_tm->tm_min = readb(base + S3C2410_RTCMIN);
+ rtc_tm->tm_hour = readb(base + S3C2410_RTCHOUR);
+ rtc_tm->tm_mday = readb(base + S3C2410_RTCDATE);
+ rtc_tm->tm_mon = readb(base + S3C2410_RTCMON);
+ rtc_tm->tm_year = readb(base + S3C2410_RTCYEAR);
+ rtc_tm->tm_sec = readb(base + S3C2410_RTCSEC);
/* the only way to work out whether the system was mid-update
* when we read it is to check the second counter, and if it
@@ -151,17 +152,26 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
{
- /* the rtc gets round the y2k problem by just not supporting it */
+ void __iomem *base = s3c_rtc_base;
+ int year = tm->tm_year - 100;
- if (tm->tm_year < 100)
+ pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n",
+ tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ /* we get around y2k by simply not supporting it */
+
+ if (year < 0 || year >= 100) {
+ dev_err(dev, "rtc only supports 100 years\n");
return -EINVAL;
+ }
- writeb(BIN2BCD(tm->tm_sec), S3C2410_RTCSEC);
- writeb(BIN2BCD(tm->tm_min), S3C2410_RTCMIN);
- writeb(BIN2BCD(tm->tm_hour), S3C2410_RTCHOUR);
- writeb(BIN2BCD(tm->tm_mday), S3C2410_RTCDATE);
- writeb(BIN2BCD(tm->tm_mon + 1), S3C2410_RTCMON);
- writeb(BIN2BCD(tm->tm_year - 100), S3C2410_RTCYEAR);
+ writeb(BIN2BCD(tm->tm_sec), base + S3C2410_RTCSEC);
+ writeb(BIN2BCD(tm->tm_min), base + S3C2410_RTCMIN);
+ writeb(BIN2BCD(tm->tm_hour), base + S3C2410_RTCHOUR);
+ writeb(BIN2BCD(tm->tm_mday), base + S3C2410_RTCDATE);
+ writeb(BIN2BCD(tm->tm_mon + 1), base + S3C2410_RTCMON);
+ writeb(BIN2BCD(year), base + S3C2410_RTCYEAR);
return 0;
}
@@ -169,16 +179,17 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rtc_time *alm_tm = &alrm->time;
+ void __iomem *base = s3c_rtc_base;
unsigned int alm_en;
- alm_tm->tm_sec = readb(S3C2410_ALMSEC);
- alm_tm->tm_min = readb(S3C2410_ALMMIN);
- alm_tm->tm_hour = readb(S3C2410_ALMHOUR);
- alm_tm->tm_mon = readb(S3C2410_ALMMON);
- alm_tm->tm_mday = readb(S3C2410_ALMDATE);
- alm_tm->tm_year = readb(S3C2410_ALMYEAR);
+ alm_tm->tm_sec = readb(base + S3C2410_ALMSEC);
+ alm_tm->tm_min = readb(base + S3C2410_ALMMIN);
+ alm_tm->tm_hour = readb(base + S3C2410_ALMHOUR);
+ alm_tm->tm_mon = readb(base + S3C2410_ALMMON);
+ alm_tm->tm_mday = readb(base + S3C2410_ALMDATE);
+ alm_tm->tm_year = readb(base + S3C2410_ALMYEAR);
- alm_en = readb(S3C2410_RTCALM);
+ alm_en = readb(base + S3C2410_RTCALM);
pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
alm_en,
@@ -226,6 +237,7 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rtc_time *tm = &alrm->time;
+ void __iomem *base = s3c_rtc_base;
unsigned int alrm_en;
pr_debug("s3c_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
@@ -234,32 +246,32 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec);
- alrm_en = readb(S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
- writeb(0x00, S3C2410_RTCALM);
+ alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
+ writeb(0x00, base + S3C2410_RTCALM);
if (tm->tm_sec < 60 && tm->tm_sec >= 0) {
alrm_en |= S3C2410_RTCALM_SECEN;
- writeb(BIN2BCD(tm->tm_sec), S3C2410_ALMSEC);
+ writeb(BIN2BCD(tm->tm_sec), base + S3C2410_ALMSEC);
}
if (tm->tm_min < 60 && tm->tm_min >= 0) {
alrm_en |= S3C2410_RTCALM_MINEN;
- writeb(BIN2BCD(tm->tm_min), S3C2410_ALMMIN);
+ writeb(BIN2BCD(tm->tm_min), base + S3C2410_ALMMIN);
}
if (tm->tm_hour < 24 && tm->tm_hour >= 0) {
alrm_en |= S3C2410_RTCALM_HOUREN;
- writeb(BIN2BCD(tm->tm_hour), S3C2410_ALMHOUR);
+ writeb(BIN2BCD(tm->tm_hour), base + S3C2410_ALMHOUR);
}
pr_debug("setting S3C2410_RTCALM to %08x\n", alrm_en);
- writeb(alrm_en, S3C2410_RTCALM);
+ writeb(alrm_en, base + S3C2410_RTCALM);
if (0) {
- alrm_en = readb(S3C2410_RTCALM);
+ alrm_en = readb(base + S3C2410_RTCALM);
alrm_en &= ~S3C2410_RTCALM_ALMEN;
- writeb(alrm_en, S3C2410_RTCALM);
+ writeb(alrm_en, base + S3C2410_RTCALM);
disable_irq_wake(s3c_rtc_alarmno);
}
@@ -319,8 +331,8 @@ static int s3c_rtc_ioctl(struct device *dev,
static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
{
- unsigned int rtcalm = readb(S3C2410_RTCALM);
- unsigned int ticnt = readb (S3C2410_TICNT);
+ unsigned int rtcalm = readb(s3c_rtc_base + S3C2410_RTCALM);
+ unsigned int ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
seq_printf(seq, "alarm_IRQ\t: %s\n",
(rtcalm & S3C2410_RTCALM_ALMEN) ? "yes" : "no" );
@@ -387,39 +399,40 @@ static struct rtc_class_ops s3c_rtcops = {
static void s3c_rtc_enable(struct platform_device *pdev, int en)
{
+ void __iomem *base = s3c_rtc_base;
unsigned int tmp;
if (s3c_rtc_base == NULL)
return;
if (!en) {
- tmp = readb(S3C2410_RTCCON);
- writeb(tmp & ~S3C2410_RTCCON_RTCEN, S3C2410_RTCCON);
+ tmp = readb(base + S3C2410_RTCCON);
+ writeb(tmp & ~S3C2410_RTCCON_RTCEN, base + S3C2410_RTCCON);
- tmp = readb(S3C2410_TICNT);
- writeb(tmp & ~S3C2410_TICNT_ENABLE, S3C2410_TICNT);
+ tmp = readb(base + S3C2410_TICNT);
+ writeb(tmp & ~S3C2410_TICNT_ENABLE, base + S3C2410_TICNT);
} else {
/* re-enable the device, and check it is ok */
- if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){
+ if ((readb(base+S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0){
dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
- tmp = readb(S3C2410_RTCCON);
- writeb(tmp | S3C2410_RTCCON_RTCEN , S3C2410_RTCCON);
+ tmp = readb(base + S3C2410_RTCCON);
+ writeb(tmp|S3C2410_RTCCON_RTCEN, base+S3C2410_RTCCON);
}
- if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){
+ if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)){
dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n");
- tmp = readb(S3C2410_RTCCON);
- writeb(tmp& ~S3C2410_RTCCON_CNTSEL , S3C2410_RTCCON);
+ tmp = readb(base + S3C2410_RTCCON);
+ writeb(tmp& ~S3C2410_RTCCON_CNTSEL, base+S3C2410_RTCCON);
}
- if ((readb(S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){
+ if ((readb(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)){
dev_info(&pdev->dev, "removing RTCCON_CLKRST\n");
- tmp = readb(S3C2410_RTCCON);
- writeb(tmp & ~S3C2410_RTCCON_CLKRST, S3C2410_RTCCON);
+ tmp = readb(base + S3C2410_RTCCON);
+ writeb(tmp & ~S3C2410_RTCCON_CLKRST, base+S3C2410_RTCCON);
}
}
}
@@ -475,8 +488,8 @@ static int s3c_rtc_probe(struct platform_device *pdev)
}
s3c_rtc_mem = request_mem_region(res->start,
- res->end-res->start+1,
- pdev->name);
+ res->end-res->start+1,
+ pdev->name);
if (s3c_rtc_mem == NULL) {
dev_err(&pdev->dev, "failed to reserve memory region\n");
@@ -495,7 +508,8 @@ static int s3c_rtc_probe(struct platform_device *pdev)
s3c_rtc_enable(pdev, 1);
- pr_debug("s3c2410_rtc: RTCCON=%02x\n", readb(S3C2410_RTCCON));
+ pr_debug("s3c2410_rtc: RTCCON=%02x\n",
+ readb(s3c_rtc_base + S3C2410_RTCCON));
s3c_rtc_setfreq(s3c_rtc_freq);
@@ -543,7 +557,7 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
/* save TICNT for anyone using periodic interrupts */
- ticnt_save = readb(S3C2410_TICNT);
+ ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
/* calculate time delta for suspend */
@@ -567,7 +581,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
rtc_tm_to_time(&tm, &time.tv_sec);
restore_time_delta(&s3c_rtc_delta, &time);
- writeb(ticnt_save, S3C2410_TICNT);
+ writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
return 0;
}
#else
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 9d0c6e1..9af02c7 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -54,11 +54,11 @@ struct dasd_devmap {
*/
struct dasd_server_ssid_map {
struct list_head list;
- struct server_id {
+ struct system_id {
char vendor[4];
char serial[15];
+ __u16 ssid;
} sid;
- __u16 ssid;
};
static struct list_head dasd_server_ssid_list;
@@ -904,14 +904,14 @@ dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
return -ENOMEM;
strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1);
strncpy(srv->sid.serial, uid->serial, sizeof(srv->sid.serial) - 1);
- srv->ssid = uid->ssid;
+ srv->sid.ssid = uid->ssid;
/* server is already contained ? */
spin_lock(&dasd_devmap_lock);
devmap->uid = *uid;
list_for_each_entry(tmp, &dasd_server_ssid_list, list) {
if (!memcmp(&srv->sid, &tmp->sid,
- sizeof(struct dasd_server_ssid_map))) {
+ sizeof(struct system_id))) {
kfree(srv);
srv = NULL;
break;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 957ed5d..b7a7fac 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -607,7 +607,7 @@ dasd_eckd_psf_ssc(struct dasd_device *device)
* Valide storage server of current device.
*/
static int
-dasd_eckd_validate_server(struct dasd_device *device)
+dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid)
{
int rc;
@@ -616,11 +616,11 @@ dasd_eckd_validate_server(struct dasd_device *device)
return 0;
rc = dasd_eckd_psf_ssc(device);
- if (rc)
- /* may be requested feature is not available on server,
- * therefore just report error and go ahead */
- DEV_MESSAGE(KERN_INFO, device,
- "Perform Subsystem Function returned rc=%d", rc);
+ /* maybe the requested feature is not available on the server,
+ * therefore just report the error and go ahead */
+ DEV_MESSAGE(KERN_INFO, device,
+ "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d",
+ uid->vendor, uid->serial, uid->ssid, rc);
/* RE-Read Configuration Data */
return dasd_eckd_read_conf(device);
}
@@ -666,7 +666,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
return rc;
rc = dasd_set_uid(device->cdev, &uid);
if (rc == 1) /* new server found */
- rc = dasd_eckd_validate_server(device);
+ rc = dasd_eckd_validate_server(device, &uid);
if (rc)
return rc;
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 5fff1f9..e1327b8 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -8510,9 +8510,9 @@ static int
qeth_ipv6_init(void)
{
qeth_old_arp_constructor = arp_tbl.constructor;
- write_lock(&arp_tbl.lock);
+ write_lock_bh(&arp_tbl.lock);
arp_tbl.constructor = qeth_arp_constructor;
- write_unlock(&arp_tbl.lock);
+ write_unlock_bh(&arp_tbl.lock);
arp_direct_ops = (struct neigh_ops*)
kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
@@ -8528,9 +8528,9 @@ qeth_ipv6_init(void)
static void
qeth_ipv6_uninit(void)
{
- write_lock(&arp_tbl.lock);
+ write_lock_bh(&arp_tbl.lock);
arp_tbl.constructor = qeth_old_arp_constructor;
- write_unlock(&arp_tbl.lock);
+ write_unlock_bh(&arp_tbl.lock);
kfree(arp_direct_ops);
}
#endif /* CONFIG_QETH_IPV6 */
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9cd789b..adc9d8f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -112,6 +112,105 @@ _zfcp_hex_dump(char *addr, int count)
printk("\n");
}
+
+/****************************************************************/
+/****** Functions to handle the request ID hash table ********/
+/****************************************************************/
+
+#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF
+
+static int zfcp_reqlist_init(struct zfcp_adapter *adapter)
+{
+ int i;
+
+ adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
+ GFP_KERNEL);
+
+ if (!adapter->req_list)
+ return -ENOMEM;
+
+ for (i=0; i<REQUEST_LIST_SIZE; i++)
+ INIT_LIST_HEAD(&adapter->req_list[i]);
+
+ return 0;
+}
+
+static void zfcp_reqlist_free(struct zfcp_adapter *adapter)
+{
+ struct zfcp_fsf_req *request, *tmp;
+ unsigned int i;
+
+ for (i=0; i<REQUEST_LIST_SIZE; i++) {
+ if (list_empty(&adapter->req_list[i]))
+ continue;
+
+ list_for_each_entry_safe(request, tmp,
+ &adapter->req_list[i], list)
+ list_del(&request->list);
+ }
+
+ kfree(adapter->req_list);
+}
+
+void zfcp_reqlist_add(struct zfcp_adapter *adapter,
+ struct zfcp_fsf_req *fsf_req)
+{
+ unsigned int i;
+
+ i = fsf_req->req_id % REQUEST_LIST_SIZE;
+ list_add_tail(&fsf_req->list, &adapter->req_list[i]);
+}
+
+void zfcp_reqlist_remove(struct zfcp_adapter *adapter, unsigned long req_id)
+{
+ struct zfcp_fsf_req *request, *tmp;
+ unsigned int i, counter;
+ u64 dbg_tmp[2];
+
+ i = req_id % REQUEST_LIST_SIZE;
+ BUG_ON(list_empty(&adapter->req_list[i]));
+
+ counter = 0;
+ list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) {
+ if (request->req_id == req_id) {
+ dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active);
+ dbg_tmp[1] = (u64) counter;
+ debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
+ list_del(&request->list);
+ break;
+ }
+ counter++;
+ }
+}
+
+struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter,
+ unsigned long req_id)
+{
+ struct zfcp_fsf_req *request, *tmp;
+ unsigned int i;
+
+ i = req_id % REQUEST_LIST_SIZE;
+
+ list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list)
+ if (request->req_id == req_id)
+ return request;
+
+ return NULL;
+}
+
+int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
+{
+ unsigned int i;
+
+ for (i=0; i<REQUEST_LIST_SIZE; i++)
+ if (!list_empty(&adapter->req_list[i]))
+ return 0;
+
+ return 1;
+}
+
+#undef ZFCP_LOG_AREA
+
/****************************************************************/
/************** Uncategorised Functions *************************/
/****************************************************************/
@@ -961,8 +1060,12 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
INIT_LIST_HEAD(&adapter->port_remove_lh);
/* initialize list of fsf requests */
- spin_lock_init(&adapter->fsf_req_list_lock);
- INIT_LIST_HEAD(&adapter->fsf_req_list_head);
+ spin_lock_init(&adapter->req_list_lock);
+ retval = zfcp_reqlist_init(adapter);
+ if (retval) {
+ ZFCP_LOG_INFO("request list initialization failed\n");
+ goto failed_low_mem_buffers;
+ }
/* initialize debug locks */
@@ -1041,8 +1144,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
* !0 - struct zfcp_adapter data structure could not be removed
* (e.g. still used)
* locks: adapter list write lock is assumed to be held by caller
- * adapter->fsf_req_list_lock is taken and released within this
- * function and must not be held on entry
*/
void
zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
@@ -1054,14 +1155,14 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
dev_set_drvdata(&adapter->ccw_device->dev, NULL);
/* sanity check: no pending FSF requests */
- spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
- retval = !list_empty(&adapter->fsf_req_list_head);
- spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
- if (retval) {
+ spin_lock_irqsave(&adapter->req_list_lock, flags);
+ retval = zfcp_reqlist_isempty(adapter);
+ spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+ if (!retval) {
ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, "
"%i requests outstanding\n",
zfcp_get_busid_by_adapter(adapter), adapter,
- atomic_read(&adapter->fsf_reqs_active));
+ atomic_read(&adapter->reqs_active));
retval = -EBUSY;
goto out;
}
@@ -1087,6 +1188,7 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
zfcp_free_low_mem_buffers(adapter);
/* free memory of adapter data structure and queues */
zfcp_qdio_free_queues(adapter);
+ zfcp_reqlist_free(adapter);
kfree(adapter->fc_stats);
kfree(adapter->stats_reset_data);
ZFCP_LOG_TRACE("freeing adapter structure\n");
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 57d8e4b..fdabade 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -164,6 +164,11 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
retval = zfcp_adapter_scsi_register(adapter);
if (retval)
goto out_scsi_register;
+
+ /* initialize request counter */
+ BUG_ON(!zfcp_reqlist_isempty(adapter));
+ adapter->req_no = 0;
+
zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING,
ZFCP_SET);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 2df512a..94d1b74d 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -52,7 +52,7 @@
/********************* GENERAL DEFINES *********************************/
/* zfcp version number, it consists of major, minor, and patch-level number */
-#define ZFCP_VERSION "4.7.0"
+#define ZFCP_VERSION "4.8.0"
/**
* zfcp_sg_to_address - determine kernel address from struct scatterlist
@@ -80,7 +80,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list)
#define REQUEST_LIST_SIZE 128
/********************* SCSI SPECIFIC DEFINES *********************************/
-#define ZFCP_SCSI_ER_TIMEOUT (100*HZ)
+#define ZFCP_SCSI_ER_TIMEOUT (10*HZ)
/********************* CIO/QDIO SPECIFIC DEFINES *****************************/
@@ -886,11 +886,11 @@ struct zfcp_adapter {
struct list_head port_remove_lh; /* head of ports to be
removed */
u32 ports; /* number of remote ports */
- struct timer_list scsi_er_timer; /* SCSI err recovery watch */
- struct list_head fsf_req_list_head; /* head of FSF req list */
- spinlock_t fsf_req_list_lock; /* lock for ops on list of
- FSF requests */
- atomic_t fsf_reqs_active; /* # active FSF reqs */
+ struct timer_list scsi_er_timer; /* SCSI err recovery watch */
+ atomic_t reqs_active; /* # active FSF reqs */
+ unsigned long req_no; /* unique FSF req number */
+ struct list_head *req_list; /* list of pending reqs */
+ spinlock_t req_list_lock; /* request list lock */
struct zfcp_qdio_queue request_queue; /* request queue */
u32 fsf_req_seq_no; /* FSF cmnd seq number */
wait_queue_head_t request_wq; /* can be used to wait for
@@ -986,6 +986,7 @@ struct zfcp_unit {
/* FSF request */
struct zfcp_fsf_req {
struct list_head list; /* list of FSF requests */
+ unsigned long req_id; /* unique request ID */
struct zfcp_adapter *adapter; /* adapter request belongs to */
u8 sbal_number; /* nr of SBALs free for use */
u8 sbal_first; /* first SBAL for this request */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 8ec8da0..7f60b6f 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -64,8 +64,8 @@ static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int);
static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int);
static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *);
-static int zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
-static int zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
+static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
+static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
@@ -93,10 +93,9 @@ static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *);
static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *);
static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *);
-static int zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
-static int zfcp_erp_action_dismiss_port(struct zfcp_port *);
-static int zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
-static int zfcp_erp_action_dismiss(struct zfcp_erp_action *);
+static void zfcp_erp_action_dismiss_port(struct zfcp_port *);
+static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
+static void zfcp_erp_action_dismiss(struct zfcp_erp_action *);
static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *,
struct zfcp_port *, struct zfcp_unit *);
@@ -135,29 +134,39 @@ zfcp_fsf_request_timeout_handler(unsigned long data)
zfcp_erp_adapter_reopen(adapter, 0);
}
-/*
- * function: zfcp_fsf_scsi_er_timeout_handler
- *
- * purpose: This function needs to be called whenever a SCSI error recovery
- * action (abort/reset) does not return.
- * Re-opening the adapter means that the command can be returned
- * by zfcp (it is guarranteed that it does not return via the
- * adapter anymore). The buffer can then be used again.
- *
- * returns: sod all
+/**
+ * zfcp_fsf_scsi_er_timeout_handler - timeout handler for scsi eh tasks
+ *
+ * This function needs to be called whenever a SCSI error recovery
+ * action (abort/reset) does not return. Re-opening the adapter means
+ * that the abort/reset command can be returned by zfcp. It won't complete
+ * via the adapter anymore (because qdio queues are closed). If ERP is
+ * already running on this adapter it will be stopped.
*/
-void
-zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
+void zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
+ unsigned long flags;
ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. "
"Restarting all operations on the adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout");
- zfcp_erp_adapter_reopen(adapter, 0);
- return;
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
+ &adapter->status)) {
+ zfcp_erp_modify_adapter_status(adapter,
+ ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN,
+ ZFCP_CLEAR);
+ zfcp_erp_action_dismiss_adapter(adapter);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+ /* dismiss all pending requests including requests for ERP */
+ zfcp_fsf_req_dismiss_all(adapter);
+ adapter->fsf_req_seq_no = 0;
+ } else
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+ zfcp_erp_adapter_reopen(adapter, 0);
}
/*
@@ -670,17 +679,10 @@ zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask)
return retval;
}
-/*
- * function:
- *
- * purpose: disable I/O,
- * return any open requests and clean them up,
- * aim: no pending and incoming I/O
- *
- * returns:
+/**
+ * zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests
*/
-static void
-zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
+static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
{
debug_text_event(adapter->erp_dbf, 6, "a_bl");
zfcp_erp_modify_adapter_status(adapter,
@@ -688,15 +690,10 @@ zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
clear_mask, ZFCP_CLEAR);
}
-/*
- * function:
- *
- * purpose: enable I/O
- *
- * returns:
+/**
+ * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests
*/
-static void
-zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
+static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
{
debug_text_event(adapter->erp_dbf, 6, "a_ubl");
atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
@@ -848,18 +845,16 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
struct zfcp_adapter *adapter = erp_action->adapter;
if (erp_action->fsf_req) {
- /* take lock to ensure that request is not being deleted meanwhile */
- spin_lock(&adapter->fsf_req_list_lock);
- /* check whether fsf req does still exist */
- list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list)
- if (fsf_req == erp_action->fsf_req)
- break;
- if (fsf_req && (fsf_req->erp_action == erp_action)) {
+ /* take lock to ensure that request is not deleted meanwhile */
+ spin_lock(&adapter->req_list_lock);
+ if ((!zfcp_reqlist_ismember(adapter,
+ erp_action->fsf_req->req_id)) &&
+ (fsf_req->erp_action == erp_action)) {
/* fsf_req still exists */
debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
debug_event(adapter->erp_dbf, 3, &fsf_req,
sizeof (unsigned long));
- /* dismiss fsf_req of timed out or dismissed erp_action */
+ /* dismiss fsf_req of timed out/dismissed erp_action */
if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED |
ZFCP_STATUS_ERP_TIMEDOUT)) {
debug_text_event(adapter->erp_dbf, 3,
@@ -892,30 +887,22 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
*/
erp_action->fsf_req = NULL;
}
- spin_unlock(&adapter->fsf_req_list_lock);
+ spin_unlock(&adapter->req_list_lock);
} else
debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq");
return retval;
}
-/*
- * purpose: generic handler for asynchronous events related to erp_action events
- * (normal completion, time-out, dismissing, retry after
- * low memory condition)
- *
- * note: deletion of timer is not required (e.g. in case of a time-out),
- * but a second try does no harm,
- * we leave it in here to allow for greater simplification
+/**
+ * zfcp_erp_async_handler_nolock - complete erp_action
*
- * returns: 0 - there was an action to handle
- * !0 - otherwise
+ * Used for normal completion, time-out, dismissal and failure after
+ * low memory condition.
*/
-static int
-zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
- unsigned long set_mask)
+static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
+ unsigned long set_mask)
{
- int retval;
struct zfcp_adapter *adapter = erp_action->adapter;
if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
@@ -926,43 +913,26 @@ zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
del_timer(&erp_action->timer);
erp_action->status |= set_mask;
zfcp_erp_action_ready(erp_action);
- retval = 0;
} else {
/* action is ready or gone - nothing to do */
debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone");
debug_event(adapter->erp_dbf, 3, &erp_action->action,
sizeof (int));
- retval = 1;
}
-
- return retval;
}
-/*
- * purpose: generic handler for asynchronous events related to erp_action
- * events (normal completion, time-out, dismissing, retry after
- * low memory condition)
- *
- * note: deletion of timer is not required (e.g. in case of a time-out),
- * but a second try does no harm,
- * we leave it in here to allow for greater simplification
- *
- * returns: 0 - there was an action to handle
- * !0 - otherwise
+/**
+ * zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking
*/
-int
-zfcp_erp_async_handler(struct zfcp_erp_action *erp_action,
- unsigned long set_mask)
+void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action,
+ unsigned long set_mask)
{
struct zfcp_adapter *adapter = erp_action->adapter;
unsigned long flags;
- int retval;
write_lock_irqsave(&adapter->erp_lock, flags);
- retval = zfcp_erp_async_handler_nolock(erp_action, set_mask);
+ zfcp_erp_async_handler_nolock(erp_action, set_mask);
write_unlock_irqrestore(&adapter->erp_lock, flags);
-
- return retval;
}
/*
@@ -999,17 +969,15 @@ zfcp_erp_timeout_handler(unsigned long data)
zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT);
}
-/*
- * purpose: is called for an erp_action which needs to be ended
- * though not being done,
- * this is usually required if an higher is generated,
- * action gets an appropriate flag and will be processed
- * accordingly
+/**
+ * zfcp_erp_action_dismiss - dismiss an erp_action
*
- * locks: erp_lock held (thus we need to call another handler variant)
+ * adapter->erp_lock must be held
+ *
+ * Dismissal of an erp_action is usually required if an erp_action of
+ * higher priority is generated.
*/
-static int
-zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
+static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
{
struct zfcp_adapter *adapter = erp_action->adapter;
@@ -1017,8 +985,6 @@ zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED);
-
- return 0;
}
int
@@ -2074,18 +2040,12 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
return retval;
}
-/*
- * function: zfcp_qdio_cleanup
- *
- * purpose: cleans up QDIO operation for the specified adapter
- *
- * returns: 0 - successful cleanup
- * !0 - failed cleanup
+/**
+ * zfcp_erp_adapter_strategy_close_qdio - close qdio queues for an adapter
*/
-int
+static void
zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
{
- int retval = ZFCP_ERP_SUCCEEDED;
int first_used;
int used_count;
struct zfcp_adapter *adapter = erp_action->adapter;
@@ -2094,15 +2054,13 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO "
"queues on adapter %s\n",
zfcp_get_busid_by_adapter(adapter));
- retval = ZFCP_ERP_FAILED;
- goto out;
+ return;
}
/*
* Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that
* do_QDIO won't be called while qdio_shutdown is in progress.
*/
-
write_lock_irq(&adapter->request_queue.queue_lock);
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
write_unlock_irq(&adapter->request_queue.queue_lock);
@@ -2134,8 +2092,6 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
adapter->request_queue.free_index = 0;
atomic_set(&adapter->request_queue.free_count, 0);
adapter->request_queue.distance_from_int = 0;
- out:
- return retval;
}
static int
@@ -2258,11 +2214,11 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
"%s)\n", zfcp_get_busid_by_adapter(adapter));
ret = ZFCP_ERP_FAILED;
}
- if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) {
- ZFCP_LOG_INFO("error: exchange port data failed (adapter "
+
+ /* don't treat as error for the sake of compatibility */
+ if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status))
+ ZFCP_LOG_INFO("warning: exchange port data failed (adapter "
"%s\n", zfcp_get_busid_by_adapter(adapter));
- ret = ZFCP_ERP_FAILED;
- }
return ret;
}
@@ -2292,18 +2248,12 @@ zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action
return retval;
}
-/*
- * function: zfcp_fsf_cleanup
- *
- * purpose: cleanup FSF operation for specified adapter
- *
- * returns: 0 - FSF operation successfully cleaned up
- * !0 - failed to cleanup FSF operation for this adapter
+/**
+ * zfcp_erp_adapter_strategy_close_fsf - stop FSF operations for an adapter
*/
-static int
+static void
zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
{
- int retval = ZFCP_ERP_SUCCEEDED;
struct zfcp_adapter *adapter = erp_action->adapter;
/*
@@ -2317,8 +2267,6 @@ zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
/* all ports and units are closed */
zfcp_erp_modify_adapter_status(adapter,
ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
-
- return retval;
}
/*
@@ -3293,10 +3241,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
}
-static int
-zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
+void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
{
- int retval = 0;
struct zfcp_port *port;
debug_text_event(adapter->erp_dbf, 5, "a_actab");
@@ -3305,14 +3251,10 @@ zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
else
list_for_each_entry(port, &adapter->port_list_head, list)
zfcp_erp_action_dismiss_port(port);
-
- return retval;
}
-static int
-zfcp_erp_action_dismiss_port(struct zfcp_port *port)
+static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
{
- int retval = 0;
struct zfcp_unit *unit;
struct zfcp_adapter *adapter = port->adapter;
@@ -3323,22 +3265,16 @@ zfcp_erp_action_dismiss_port(struct zfcp_port *port)
else
list_for_each_entry(unit, &port->unit_list_head, list)
zfcp_erp_action_dismiss_unit(unit);
-
- return retval;
}
-static int
-zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
+static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
{
- int retval = 0;
struct zfcp_adapter *adapter = unit->port->adapter;
debug_text_event(adapter->erp_dbf, 5, "u_actab");
debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status))
zfcp_erp_action_dismiss(&unit->erp_action);
-
- return retval;
}
static inline void
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index d023660..146d7a2 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -63,7 +63,6 @@ extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *);
extern void zfcp_qdio_free_queues(struct zfcp_adapter *);
extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *,
struct zfcp_fsf_req *);
-extern int zfcp_qdio_reqid_check(struct zfcp_adapter *, void *);
extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req
(struct zfcp_fsf_req *, int, int);
@@ -140,6 +139,7 @@ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int);
extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int);
extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int);
extern void zfcp_erp_adapter_failed(struct zfcp_adapter *);
+extern void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int);
extern int zfcp_erp_port_reopen(struct zfcp_port *, int);
@@ -156,7 +156,7 @@ extern void zfcp_erp_unit_failed(struct zfcp_unit *);
extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
extern int zfcp_erp_thread_kill(struct zfcp_adapter *);
extern int zfcp_erp_wait(struct zfcp_adapter *);
-extern int zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
+extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
extern int zfcp_test_link(struct zfcp_port *);
@@ -190,5 +190,10 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
struct zfcp_fsf_req *);
extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
struct scsi_cmnd *);
+extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *);
+extern void zfcp_reqlist_remove(struct zfcp_adapter *, unsigned long);
+extern struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *,
+ unsigned long);
+extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
#endif /* ZFCP_EXT_H */
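
The zfcp_reqlist_* helpers declared above live outside this diff (in the driver's common code), so their bodies are not shown here. Below is a minimal sketch of how such a request hash table might look, assuming REQUEST_LIST_SIZE buckets indexed by req_id modulo the bucket count; zfcp_reqlist_hash is a hypothetical name, and callers are assumed to hold adapter->req_list_lock. This is an illustrative sketch, not the committed implementation.

/* hypothetical bucket selection -- callers hold adapter->req_list_lock */
static int zfcp_reqlist_hash(unsigned long req_id)
{
	return req_id % REQUEST_LIST_SIZE;
}

void zfcp_reqlist_add(struct zfcp_adapter *adapter,
		      struct zfcp_fsf_req *fsf_req)
{
	int idx = zfcp_reqlist_hash(fsf_req->req_id);

	list_add_tail(&fsf_req->list, &adapter->req_list[idx]);
}

struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter,
					   unsigned long req_id)
{
	struct zfcp_fsf_req *request;
	int idx = zfcp_reqlist_hash(req_id);

	list_for_each_entry(request, &adapter->req_list[idx], list)
		if (request->req_id == req_id)
			return request;
	return NULL;
}

void zfcp_reqlist_remove(struct zfcp_adapter *adapter, unsigned long req_id)
{
	struct zfcp_fsf_req *request = zfcp_reqlist_ismember(adapter, req_id);

	if (request)
		list_del(&request->list);
}

int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
{
	int i;

	for (i = 0; i < REQUEST_LIST_SIZE; i++)
		if (!list_empty(&adapter->req_list[i]))
			return 0;
	return 1;
}
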
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 31db2b0..ff2eacf 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -49,7 +49,6 @@ static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *,
struct fsf_link_down_info *);
static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
-static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *);
/* association between FSF command and FSF QTCB type */
static u32 fsf_qtcb_type[] = {
@@ -146,47 +145,48 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
kfree(fsf_req);
}
-/*
- * function:
- *
- * purpose:
- *
- * returns:
- *
- * note: qdio queues shall be down (no ongoing inbound processing)
+/**
+ * zfcp_fsf_req_dismiss - dismiss a single fsf request
*/
-int
-zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
+static void zfcp_fsf_req_dismiss(struct zfcp_adapter *adapter,
+ struct zfcp_fsf_req *fsf_req,
+ unsigned int counter)
{
- struct zfcp_fsf_req *fsf_req, *tmp;
- unsigned long flags;
- LIST_HEAD(remove_queue);
+ u64 dbg_tmp[2];
- spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
- list_splice_init(&adapter->fsf_req_list_head, &remove_queue);
- atomic_set(&adapter->fsf_reqs_active, 0);
- spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
-
- list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) {
- list_del(&fsf_req->list);
- zfcp_fsf_req_dismiss(fsf_req);
- }
-
- return 0;
+ dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active);
+ dbg_tmp[1] = (u64) counter;
+ debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
+ list_del(&fsf_req->list);
+ fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+ zfcp_fsf_req_complete(fsf_req);
}
-/*
- * function:
- *
- * purpose:
- *
- * returns:
+/**
+ * zfcp_fsf_req_dismiss_all - dismiss all remaining fsf requests
*/
-static void
-zfcp_fsf_req_dismiss(struct zfcp_fsf_req *fsf_req)
+int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
{
- fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
- zfcp_fsf_req_complete(fsf_req);
+ struct zfcp_fsf_req *request, *tmp;
+ unsigned long flags;
+ unsigned int i, counter;
+
+ spin_lock_irqsave(&adapter->req_list_lock, flags);
+ atomic_set(&adapter->reqs_active, 0);
+ for (i=0; i<REQUEST_LIST_SIZE; i++) {
+ if (list_empty(&adapter->req_list[i]))
+ continue;
+
+ counter = 0;
+ list_for_each_entry_safe(request, tmp,
+ &adapter->req_list[i], list) {
+ zfcp_fsf_req_dismiss(adapter, request, counter);
+ counter++;
+ }
+ }
+ spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+
+ return 0;
}
/*
@@ -4592,12 +4592,14 @@ static inline void
zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
{
if (likely(fsf_req->qtcb != NULL)) {
- fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no;
- fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req;
+ fsf_req->qtcb->prefix.req_seq_no =
+ fsf_req->adapter->fsf_req_seq_no;
+ fsf_req->qtcb->prefix.req_id = fsf_req->req_id;
fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION;
- fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command];
+ fsf_req->qtcb->prefix.qtcb_type =
+ fsf_qtcb_type[fsf_req->fsf_command];
fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
- fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req;
+ fsf_req->qtcb->header.req_handle = fsf_req->req_id;
fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command;
}
}
@@ -4654,6 +4656,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
{
volatile struct qdio_buffer_element *sbale;
struct zfcp_fsf_req *fsf_req = NULL;
+ unsigned long flags;
int ret = 0;
struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
@@ -4668,6 +4671,12 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
fsf_req->adapter = adapter;
fsf_req->fsf_command = fsf_cmd;
+ INIT_LIST_HEAD(&fsf_req->list);
+
+ /* unique request id */
+ spin_lock_irqsave(&adapter->req_list_lock, flags);
+ fsf_req->req_id = adapter->req_no++;
+ spin_unlock_irqrestore(&adapter->req_list_lock, flags);
zfcp_fsf_req_qtcb_init(fsf_req);
@@ -4707,7 +4716,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
/* setup common SBALE fields */
- sbale[0].addr = fsf_req;
+ sbale[0].addr = (void *) fsf_req->req_id;
sbale[0].flags |= SBAL_FLAGS0_COMMAND;
if (likely(fsf_req->qtcb != NULL)) {
sbale[1].addr = (void *) fsf_req->qtcb;
@@ -4747,7 +4756,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
volatile struct qdio_buffer_element *sbale;
int inc_seq_no;
int new_distance_from_int;
- unsigned long flags;
+ u64 dbg_tmp[2];
int retval = 0;
adapter = fsf_req->adapter;
@@ -4761,10 +4770,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr,
sbale[1].length);
- /* put allocated FSF request at list tail */
- spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
- list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head);
- spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
+ /* put allocated FSF request into hash table */
+ spin_lock(&adapter->req_list_lock);
+ zfcp_reqlist_add(adapter, fsf_req);
+ spin_unlock(&adapter->req_list_lock);
inc_seq_no = (fsf_req->qtcb != NULL);
@@ -4803,6 +4812,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
QDIO_FLAG_SYNC_OUTPUT,
0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
+ dbg_tmp[0] = (unsigned long) sbale[0].addr;
+ dbg_tmp[1] = (u64) retval;
+ debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
+
if (unlikely(retval)) {
/* Queues are down..... */
retval = -EIO;
@@ -4812,22 +4825,17 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
*/
if (timer)
del_timer(timer);
- spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
- list_del(&fsf_req->list);
- spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
- /*
- * adjust the number of free SBALs in request queue as well as
- * position of first one
- */
+ spin_lock(&adapter->req_list_lock);
+ zfcp_reqlist_remove(adapter, fsf_req->req_id);
+ spin_unlock(&adapter->req_list_lock);
+ /* undo changes in request queue made for this request */
zfcp_qdio_zero_sbals(req_queue->buffer,
fsf_req->sbal_first, fsf_req->sbal_number);
atomic_add(fsf_req->sbal_number, &req_queue->free_count);
- req_queue->free_index -= fsf_req->sbal_number; /* increase */
+ req_queue->free_index -= fsf_req->sbal_number;
req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q;
req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
- ZFCP_LOG_DEBUG
- ("error: do_QDIO failed. Buffers could not be enqueued "
- "to request queue.\n");
+ zfcp_erp_adapter_reopen(adapter, 0);
} else {
req_queue->distance_from_int = new_distance_from_int;
/*
@@ -4843,7 +4851,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
adapter->fsf_req_seq_no++;
/* count FSF requests pending */
- atomic_inc(&adapter->fsf_reqs_active);
+ atomic_inc(&adapter->reqs_active);
}
return retval;
}
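
The hunks above replace pointer-based request tracking with a numeric request id: the id is generated under req_list_lock, written into the QTCB and the SBALE, and the request is filed in the per-adapter table. A condensed sketch of that send-side bookkeeping follows; zfcp_fsf_req_track is a hypothetical helper name, and the two separate critical sections of the patch are folded into one here purely for illustration.

/*
 * Hypothetical helper condensing the id assignment, hash-table insert
 * and SBALE setup performed by zfcp_fsf_req_create()/..._send() above.
 */
static void zfcp_fsf_req_track(struct zfcp_adapter *adapter,
			       struct zfcp_fsf_req *fsf_req,
			       volatile struct qdio_buffer_element *sbale)
{
	unsigned long flags;

	spin_lock_irqsave(&adapter->req_list_lock, flags);
	fsf_req->req_id = adapter->req_no++;	/* unique per adapter */
	zfcp_reqlist_add(adapter, fsf_req);	/* response path finds it again */
	spin_unlock_irqrestore(&adapter->req_list_lock, flags);

	/* the id, not a kernel pointer, is handed to the hardware */
	sbale[0].addr = (void *) fsf_req->req_id;
}

On the inbound path, zfcp_qdio_reqid_check() in the zfcp_qdio.c hunk below resolves the id back to the request via zfcp_reqlist_ismember() before completing it.
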
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 49ea5ad..dbd9f48 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -282,6 +282,37 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device,
return;
}
+/**
+ * zfcp_qdio_reqid_check - checks for valid reqids or unsolicited status
+ */
+static int zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
+ unsigned long req_id)
+{
+ struct zfcp_fsf_req *fsf_req;
+ unsigned long flags;
+
+ debug_long_event(adapter->erp_dbf, 4, req_id);
+
+ spin_lock_irqsave(&adapter->req_list_lock, flags);
+ fsf_req = zfcp_reqlist_ismember(adapter, req_id);
+
+ if (!fsf_req) {
+ spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+ ZFCP_LOG_NORMAL("error: unknown request id (%ld).\n", req_id);
+ zfcp_erp_adapter_reopen(adapter, 0);
+ return -EINVAL;
+ }
+
+ zfcp_reqlist_remove(adapter, req_id);
+ atomic_dec(&adapter->reqs_active);
+ spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+
+ /* finish the FSF request */
+ zfcp_fsf_req_complete(fsf_req);
+
+ return 0;
+}
+
/*
* function: zfcp_qdio_response_handler
*
@@ -344,7 +375,7 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
/* look for QDIO request identifiers in SB */
buffere = &buffer->element[buffere_index];
retval = zfcp_qdio_reqid_check(adapter,
- (void *) buffere->addr);
+ (unsigned long) buffere->addr);
if (retval) {
ZFCP_LOG_NORMAL("bug: unexpected inbound "
@@ -415,52 +446,6 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
return;
}
-/*
- * function: zfcp_qdio_reqid_check
- *
- * purpose: checks for valid reqids or unsolicited status
- *
- * returns: 0 - valid request id or unsolicited status
- * !0 - otherwise
- */
-int
-zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr)
-{
- struct zfcp_fsf_req *fsf_req;
- unsigned long flags;
-
- /* invalid (per convention used in this driver) */
- if (unlikely(!sbale_addr)) {
- ZFCP_LOG_NORMAL("bug: invalid reqid\n");
- return -EINVAL;
- }
-
- /* valid request id and thus (hopefully :) valid fsf_req address */
- fsf_req = (struct zfcp_fsf_req *) sbale_addr;
-
- /* serialize with zfcp_fsf_req_dismiss_all */
- spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
- if (list_empty(&adapter->fsf_req_list_head)) {
- spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
- return 0;
- }
- list_del(&fsf_req->list);
- atomic_dec(&adapter->fsf_reqs_active);
- spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
-
- if (unlikely(adapter != fsf_req->adapter)) {
- ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, "
- "fsf_req->adapter=%p, adapter=%p)\n",
- fsf_req, fsf_req->adapter, adapter);
- return -EINVAL;
- }
-
- /* finish the FSF request */
- zfcp_fsf_req_complete(fsf_req);
-
- return 0;
-}
-
/**
* zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue
* @queue: queue from which SBALE should be returned
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 671f4a6..1bb5508 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -30,7 +30,6 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *,
void (*done) (struct scsi_cmnd *));
static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
-static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *);
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
static int zfcp_task_management_function(struct zfcp_unit *, u8,
struct scsi_cmnd *);
@@ -46,30 +45,22 @@ struct zfcp_data zfcp_data = {
.scsi_host_template = {
.name = ZFCP_NAME,
.proc_name = "zfcp",
- .proc_info = NULL,
- .detect = NULL,
.slave_alloc = zfcp_scsi_slave_alloc,
.slave_configure = zfcp_scsi_slave_configure,
.slave_destroy = zfcp_scsi_slave_destroy,
.queuecommand = zfcp_scsi_queuecommand,
.eh_abort_handler = zfcp_scsi_eh_abort_handler,
.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
- .eh_bus_reset_handler = zfcp_scsi_eh_bus_reset_handler,
+ .eh_bus_reset_handler = zfcp_scsi_eh_host_reset_handler,
.eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
.can_queue = 4096,
.this_id = -1,
- /*
- * FIXME:
- * one less? can zfcp_create_sbale cope with it?
- */
.sg_tablesize = ZFCP_MAX_SBALES_PER_REQ,
.cmd_per_lun = 1,
- .unchecked_isa_dma = 0,
.use_clustering = 1,
.sdev_attrs = zfcp_sysfs_sdev_attrs,
},
.driver_version = ZFCP_VERSION,
- /* rest initialised with zeros */
};
/* Find start of Response Information in FCP response unit*/
@@ -176,8 +167,14 @@ zfcp_scsi_slave_alloc(struct scsi_device *sdp)
return retval;
}
-static void
-zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
+/**
+ * zfcp_scsi_slave_destroy - called when scsi device is removed
+ *
+ * Remove reference to associated scsi device for a zfcp_unit.
+ * Mark zfcp_unit as failed. The scsi device might be deleted via sysfs
+ * or a scan for this device might have failed.
+ */
+static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
{
struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
@@ -185,6 +182,7 @@ zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
sdpnt->hostdata = NULL;
unit->device = NULL;
+ zfcp_erp_unit_failed(unit);
zfcp_unit_put(unit);
} else {
ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at "
@@ -549,35 +547,38 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
}
/**
- * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter)
+ * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset
+ *
+ * If ERP is already running it will be stopped.
*/
-int
-zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt)
+int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
- struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
- struct zfcp_adapter *adapter = unit->port->adapter;
-
- ZFCP_LOG_NORMAL("bus reset because of problems with "
- "unit 0x%016Lx\n", unit->fcp_lun);
- zfcp_erp_adapter_reopen(adapter, 0);
- zfcp_erp_wait(adapter);
-
- return SUCCESS;
-}
+ struct zfcp_unit *unit;
+ struct zfcp_adapter *adapter;
+ unsigned long flags;
-/**
- * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter)
- */
-int
-zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
-{
- struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
- struct zfcp_adapter *adapter = unit->port->adapter;
+ unit = (struct zfcp_unit*) scpnt->device->hostdata;
+ adapter = unit->port->adapter;
- ZFCP_LOG_NORMAL("host reset because of problems with "
+ ZFCP_LOG_NORMAL("host/bus reset because of problems with "
"unit 0x%016Lx\n", unit->fcp_lun);
- zfcp_erp_adapter_reopen(adapter, 0);
- zfcp_erp_wait(adapter);
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
+ &adapter->status)) {
+ zfcp_erp_modify_adapter_status(adapter,
+ ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN,
+ ZFCP_CLEAR);
+ zfcp_erp_action_dismiss_adapter(adapter);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+ zfcp_fsf_req_dismiss_all(adapter);
+ adapter->fsf_req_seq_no = 0;
+ zfcp_erp_adapter_reopen(adapter, 0);
+ } else {
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+ zfcp_erp_adapter_reopen(adapter, 0);
+ zfcp_erp_wait(adapter);
+ }
return SUCCESS;
}
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index 98bd227..5630868 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -1146,7 +1146,7 @@ static struct sbus_dev sun4_esp_dev;
static int __init esp_sun4_probe(struct scsi_host_template *tpnt)
{
if (sun4_esp_physaddr) {
- memset(&sun4_esp_dev, 0, sizeof(esp_dev));
+ memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev));
sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr;
sun4_esp_dev.irqs[0] = 4;
sun4_esp_dev.resource[0].start = sun4_esp_physaddr;
@@ -1162,6 +1162,7 @@ static int __init esp_sun4_probe(struct scsi_host_template *tpnt)
static int __devexit esp_sun4_remove(void)
{
+ struct of_device *dev = &sun4_esp_dev.ofdev;
struct esp *esp = dev_get_drvdata(&dev->dev);
return esp_remove_common(esp);
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index ab2f8b2..bcb3444 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -45,10 +45,6 @@ static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
static const char driver_ver[] = "v1.0 (060426)";
-static DEFINE_SPINLOCK(hptiop_hba_list_lock);
-static LIST_HEAD(hptiop_hba_list);
-static int hptiop_cdev_major = -1;
-
static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
@@ -577,7 +573,7 @@ static int hptiop_reset_hba(struct hptiop_hba *hba)
if (atomic_xchg(&hba->resetting, 1) == 0) {
atomic_inc(&hba->reset_count);
writel(IOPMU_INBOUND_MSG0_RESET,
- &hba->iop->outbound_msgaddr0);
+ &hba->iop->inbound_msgaddr0);
hptiop_pci_posting_flush(hba->iop);
}
@@ -620,532 +616,11 @@ static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
return queue_depth;
}
-struct hptiop_getinfo {
- char __user *buffer;
- loff_t buflength;
- loff_t bufoffset;
- loff_t buffillen;
- loff_t filpos;
-};
-
-static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo,
- char *data, int datalen)
-{
- if (pinfo->filpos < pinfo->bufoffset) {
- if (pinfo->filpos + datalen <= pinfo->bufoffset) {
- pinfo->filpos += datalen;
- return;
- } else {
- data += (pinfo->bufoffset - pinfo->filpos);
- datalen -= (pinfo->bufoffset - pinfo->filpos);
- pinfo->filpos = pinfo->bufoffset;
- }
- }
-
- pinfo->filpos += datalen;
- if (pinfo->buffillen == pinfo->buflength)
- return;
-
- if (pinfo->buflength - pinfo->buffillen < datalen)
- datalen = pinfo->buflength - pinfo->buffillen;
-
- if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen))
- return;
-
- pinfo->buffillen += datalen;
-}
-
-static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...)
-{
- va_list args;
- char buf[128];
- int len;
-
- va_start(args, fmt);
- len = vsnprintf(buf, sizeof(buf), fmt, args);
- va_end(args);
- hptiop_copy_mem_info(pinfo, buf, len);
- return len;
-}
-
-static void hptiop_ioctl_done(struct hpt_ioctl_k *arg)
-{
- arg->done = NULL;
- wake_up(&arg->hba->ioctl_wq);
-}
-
-static void hptiop_do_ioctl(struct hpt_ioctl_k *arg)
-{
- struct hptiop_hba *hba = arg->hba;
- u32 val;
- struct hpt_iop_request_ioctl_command __iomem *req;
- int ioctl_retry = 0;
-
- dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no);
-
- /*
- * check (in + out) buff size from application.
- * outbuf must be dword aligned.
- */
- if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size >
- hba->max_request_size
- - sizeof(struct hpt_iop_request_header)
- - 4 * sizeof(u32)) {
- dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n",
- hba->host->host_no,
- arg->inbuf_size, arg->outbuf_size);
- arg->result = HPT_IOCTL_RESULT_FAILED;
- return;
- }
-
-retry:
- spin_lock_irq(hba->host->host_lock);
-
- val = readl(&hba->iop->inbound_queue);
- if (val == IOPMU_QUEUE_EMPTY) {
- spin_unlock_irq(hba->host->host_lock);
- dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no);
- arg->result = -1;
- return;
- }
-
- req = (struct hpt_iop_request_ioctl_command __iomem *)
- ((unsigned long)hba->iop + val);
-
- writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code),
- &req->ioctl_code);
- writel(arg->inbuf_size, &req->inbuf_size);
- writel(arg->outbuf_size, &req->outbuf_size);
-
- /*
- * use the buffer on the IOP local memory first, then copy it
- * back to host.
- * the caller's request buffer shoudl be little-endian.
- */
- if (arg->inbuf_size)
- memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size);
-
- /* correct the controller ID for IOP */
- if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO ||
- arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 ||
- arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO)
- && arg->inbuf_size >= sizeof(u32))
- writel(0, req->buf);
-
- writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type);
- writel(0, &req->header.flags);
- writel(offsetof(struct hpt_iop_request_ioctl_command, buf)
- + arg->inbuf_size, &req->header.size);
- writel((u32)(unsigned long)arg, &req->header.context);
- writel(BITS_PER_LONG > 32 ? (u32)((unsigned long)arg>>32) : 0,
- &req->header.context_hi32);
- writel(IOP_RESULT_PENDING, &req->header.result);
-
- arg->result = HPT_IOCTL_RESULT_FAILED;
- arg->done = hptiop_ioctl_done;
-
- writel(val, &hba->iop->inbound_queue);
- hptiop_pci_posting_flush(hba->iop);
-
- spin_unlock_irq(hba->host->host_lock);
-
- wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ);
-
- if (arg->done != NULL) {
- hptiop_reset_hba(hba);
- if (ioctl_retry++ < 3)
- goto retry;
- }
-
- dprintk("hpt_iop_ioctl %x result %d\n",
- arg->ioctl_code, arg->result);
-}
-
-static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf,
- u32 insize, void *outbuf, u32 outsize)
-{
- struct hpt_ioctl_k arg;
- arg.hba = hba;
- arg.ioctl_code = code;
- arg.inbuf = inbuf;
- arg.outbuf = outbuf;
- arg.inbuf_size = insize;
- arg.outbuf_size = outsize;
- arg.bytes_returned = NULL;
- hptiop_do_ioctl(&arg);
- return arg.result;
-}
-
-static inline int hpt_id_valid(__le32 id)
-{
- return id != 0 && id != cpu_to_le32(0xffffffff);
-}
-
-static int hptiop_get_controller_info(struct hptiop_hba *hba,
- struct hpt_controller_info *pinfo)
-{
- int id = 0;
-
- return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO,
- &id, sizeof(int), pinfo, sizeof(*pinfo));
-}
-
-
-static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus,
- struct hpt_channel_info *pinfo)
-{
- u32 ids[2];
-
- ids[0] = 0;
- ids[1] = bus;
- return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO,
- ids, sizeof(ids), pinfo, sizeof(*pinfo));
-
-}
-
-static int hptiop_get_logical_devices(struct hptiop_hba *hba,
- __le32 *pids, int maxcount)
-{
- int i;
- u32 count = maxcount - 1;
-
- if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES,
- &count, sizeof(u32),
- pids, sizeof(u32) * maxcount))
- return -1;
-
- maxcount = le32_to_cpu(pids[0]);
- for (i = 0; i < maxcount; i++)
- pids[i] = pids[i+1];
-
- return maxcount;
-}
-
-static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id,
- struct hpt_logical_device_info_v3 *pinfo)
-{
- return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3,
- &id, sizeof(u32),
- pinfo, sizeof(*pinfo));
-}
-
-static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo)
-{
- static char s[64];
- u32 flags = le32_to_cpu(devinfo->u.array.flags);
- u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress);
- u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress);
-
- if (flags & ARRAY_FLAG_DISABLED)
- return "Disabled";
- else if (flags & ARRAY_FLAG_TRANSFORMING)
- sprintf(s, "Expanding/Migrating %d.%d%%%s%s",
- trans_prog / 100,
- trans_prog % 100,
- (flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))?
- ", Critical" : "",
- ((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
- !(flags & ARRAY_FLAG_REBUILDING) &&
- !(flags & ARRAY_FLAG_INITIALIZING))?
- ", Unintialized" : "");
- else if ((flags & ARRAY_FLAG_BROKEN) &&
- devinfo->u.array.array_type != AT_RAID6)
- return "Critical";
- else if (flags & ARRAY_FLAG_REBUILDING)
- sprintf(s,
- (flags & ARRAY_FLAG_NEEDINITIALIZING)?
- "%sBackground initializing %d.%d%%" :
- "%sRebuilding %d.%d%%",
- (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
- reb_prog / 100,
- reb_prog % 100);
- else if (flags & ARRAY_FLAG_VERIFYING)
- sprintf(s, "%sVerifying %d.%d%%",
- (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
- reb_prog / 100,
- reb_prog % 100);
- else if (flags & ARRAY_FLAG_INITIALIZING)
- sprintf(s, "%sForground initializing %d.%d%%",
- (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
- reb_prog / 100,
- reb_prog % 100);
- else if (flags & ARRAY_FLAG_NEEDTRANSFORM)
- sprintf(s,"%s%s%s", "Need Expanding/Migrating",
- (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
- ((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
- !(flags & ARRAY_FLAG_REBUILDING) &&
- !(flags & ARRAY_FLAG_INITIALIZING))?
- ", Unintialized" : "");
- else if (flags & ARRAY_FLAG_NEEDINITIALIZING &&
- !(flags & ARRAY_FLAG_REBUILDING) &&
- !(flags & ARRAY_FLAG_INITIALIZING))
- sprintf(s,"%sUninitialized",
- (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "");
- else if ((flags & ARRAY_FLAG_NEEDBUILDING) ||
- (flags & ARRAY_FLAG_BROKEN))
- return "Critical";
- else
- return "Normal";
- return s;
-}
-
-static void hptiop_dump_devinfo(struct hptiop_hba *hba,
- struct hptiop_getinfo *pinfo, __le32 id, int indent)
-{
- struct hpt_logical_device_info_v3 devinfo;
- int i;
- u64 capacity;
-
- for (i = 0; i < indent; i++)
- hptiop_copy_info(pinfo, "\t");
-
- if (hptiop_get_device_info_v3(hba, id, &devinfo)) {
- hptiop_copy_info(pinfo, "unknown\n");
- return;
- }
-
- switch (devinfo.type) {
-
- case LDT_DEVICE: {
- struct hd_driveid *driveid;
- u32 flags = le32_to_cpu(devinfo.u.device.flags);
-
- driveid = (struct hd_driveid *)devinfo.u.device.ident;
- /* model[] is 40 chars long, but we just want 20 chars here */
- driveid->model[20] = 0;
-
- if (indent)
- if (flags & DEVICE_FLAG_DISABLED)
- hptiop_copy_info(pinfo,"Missing\n");
- else
- hptiop_copy_info(pinfo, "CH%d %s\n",
- devinfo.u.device.path_id + 1,
- driveid->model);
- else {
- capacity = le64_to_cpu(devinfo.capacity) * 512;
- do_div(capacity, 1000000);
- hptiop_copy_info(pinfo,
- "CH%d %s, %lluMB, %s %s%s%s%s\n",
- devinfo.u.device.path_id + 1,
- driveid->model,
- capacity,
- (flags & DEVICE_FLAG_DISABLED)?
- "Disabled" : "Normal",
- devinfo.u.device.read_ahead_enabled?
- "[RA]" : "",
- devinfo.u.device.write_cache_enabled?
- "[WC]" : "",
- devinfo.u.device.TCQ_enabled?
- "[TCQ]" : "",
- devinfo.u.device.NCQ_enabled?
- "[NCQ]" : ""
- );
- }
- break;
- }
-
- case LDT_ARRAY:
- if (devinfo.target_id != INVALID_TARGET_ID)
- hptiop_copy_info(pinfo, "[DISK %d_%d] ",
- devinfo.vbus_id, devinfo.target_id);
-
- capacity = le64_to_cpu(devinfo.capacity) * 512;
- do_div(capacity, 1000000);
- hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n",
- devinfo.u.array.name,
- devinfo.u.array.array_type==AT_RAID0? "RAID0" :
- devinfo.u.array.array_type==AT_RAID1? "RAID1" :
- devinfo.u.array.array_type==AT_RAID5? "RAID5" :
- devinfo.u.array.array_type==AT_RAID6? "RAID6" :
- devinfo.u.array.array_type==AT_JBOD? "JBOD" :
- "unknown",
- capacity,
- get_array_status(&devinfo));
- for (i = 0; i < devinfo.u.array.ndisk; i++) {
- if (hpt_id_valid(devinfo.u.array.members[i])) {
- if (cpu_to_le16(1<<i) &
- devinfo.u.array.critical_members)
- hptiop_copy_info(pinfo, "\t*");
- hptiop_dump_devinfo(hba, pinfo,
- devinfo.u.array.members[i], indent+1);
- }
- else
- hptiop_copy_info(pinfo, "\tMissing\n");
- }
- if (id == devinfo.u.array.transform_source) {
- hptiop_copy_info(pinfo, "\tExpanding/Migrating to:\n");
- hptiop_dump_devinfo(hba, pinfo,
- devinfo.u.array.transform_target, indent+1);
- }
- break;
- }
-}
-
static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}
-static ssize_t hptiop_cdev_read(struct file *filp, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct hptiop_hba *hba = filp->private_data;
- struct hptiop_getinfo info;
- int i, j, ndev;
- struct hpt_controller_info con_info;
- struct hpt_channel_info chan_info;
- __le32 ids[32];
-
- info.buffer = buf;
- info.buflength = count;
- info.bufoffset = ppos ? *ppos : 0;
- info.filpos = 0;
- info.buffillen = 0;
-
- if (hptiop_get_controller_info(hba, &con_info))
- return -EIO;
-
- for (i = 0; i < con_info.num_buses; i++) {
- if (hptiop_get_channel_info(hba, i, &chan_info) == 0) {
- if (hpt_id_valid(chan_info.devices[0]))
- hptiop_dump_devinfo(hba, &info,
- chan_info.devices[0], 0);
- if (hpt_id_valid(chan_info.devices[1]))
- hptiop_dump_devinfo(hba, &info,
- chan_info.devices[1], 0);
- }
- }
-
- ndev = hptiop_get_logical_devices(hba, ids,
- sizeof(ids) / sizeof(ids[0]));
-
- /*
- * if hptiop_get_logical_devices fails, ndev==-1 and it just
- * output nothing here
- */
- for (j = 0; j < ndev; j++)
- hptiop_dump_devinfo(hba, &info, ids[j], 0);
-
- if (ppos)
- *ppos += info.buffillen;
-
- return info.buffillen;
-}
-
-static int hptiop_cdev_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct hptiop_hba *hba = file->private_data;
- struct hpt_ioctl_u ioctl_u;
- struct hpt_ioctl_k ioctl_k;
- u32 bytes_returned;
- int err = -EINVAL;
-
- if (copy_from_user(&ioctl_u,
- (void __user *)arg, sizeof(struct hpt_ioctl_u)))
- return -EINVAL;
-
- if (ioctl_u.magic != HPT_IOCTL_MAGIC)
- return -EINVAL;
-
- ioctl_k.ioctl_code = ioctl_u.ioctl_code;
- ioctl_k.inbuf = NULL;
- ioctl_k.inbuf_size = ioctl_u.inbuf_size;
- ioctl_k.outbuf = NULL;
- ioctl_k.outbuf_size = ioctl_u.outbuf_size;
- ioctl_k.hba = hba;
- ioctl_k.bytes_returned = &bytes_returned;
-
- /* verify user buffer */
- if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ,
- ioctl_u.inbuf, ioctl_k.inbuf_size)) ||
- (ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE,
- ioctl_u.outbuf, ioctl_k.outbuf_size)) ||
- (ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE,
- ioctl_u.bytes_returned, sizeof(u32))) ||
- ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) {
-
- dprintk("scsi%d: got bad user address\n", hba->host->host_no);
- return -EINVAL;
- }
-
- /* map buffer to kernel. */
- if (ioctl_k.inbuf_size) {
- ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL);
- if (!ioctl_k.inbuf) {
- dprintk("scsi%d: fail to alloc inbuf\n",
- hba->host->host_no);
- err = -ENOMEM;
- goto err_exit;
- }
-
- if (copy_from_user(ioctl_k.inbuf,
- ioctl_u.inbuf, ioctl_k.inbuf_size)) {
- goto err_exit;
- }
- }
-
- if (ioctl_k.outbuf_size) {
- ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL);
- if (!ioctl_k.outbuf) {
- dprintk("scsi%d: fail to alloc outbuf\n",
- hba->host->host_no);
- err = -ENOMEM;
- goto err_exit;
- }
- }
-
- hptiop_do_ioctl(&ioctl_k);
-
- if (ioctl_k.result == HPT_IOCTL_RESULT_OK) {
- if (ioctl_k.outbuf_size &&
- copy_to_user(ioctl_u.outbuf,
- ioctl_k.outbuf, ioctl_k.outbuf_size))
- goto err_exit;
-
- if (ioctl_u.bytes_returned &&
- copy_to_user(ioctl_u.bytes_returned,
- &bytes_returned, sizeof(u32)))
- goto err_exit;
-
- err = 0;
- }
-
-err_exit:
- kfree(ioctl_k.inbuf);
- kfree(ioctl_k.outbuf);
-
- return err;
-}
-
-static int hptiop_cdev_open(struct inode *inode, struct file *file)
-{
- struct hptiop_hba *hba;
- unsigned i = 0, minor = iminor(inode);
- int ret = -ENODEV;
-
- spin_lock(&hptiop_hba_list_lock);
- list_for_each_entry(hba, &hptiop_hba_list, link) {
- if (i == minor) {
- file->private_data = hba;
- ret = 0;
- goto out;
- }
- i++;
- }
-
-out:
- spin_unlock(&hptiop_hba_list_lock);
- return ret;
-}
-
-static struct file_operations hptiop_cdev_fops = {
- .owner = THIS_MODULE,
- .read = hptiop_cdev_read,
- .ioctl = hptiop_cdev_ioctl,
- .open = hptiop_cdev_open,
-};
-
static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
{
struct Scsi_Host *host = class_to_shost(class_dev);
@@ -1296,19 +771,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
goto unmap_pci_bar;
}
- if (scsi_add_host(host, &pcidev->dev)) {
- printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
- hba->host->host_no);
- goto unmap_pci_bar;
- }
-
pci_set_drvdata(pcidev, host);
if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
driver_name, hba)) {
printk(KERN_ERR "scsi%d: request irq %d failed\n",
hba->host->host_no, pcidev->irq);
- goto remove_scsi_host;
+ goto unmap_pci_bar;
}
/* Allocate request mem */
@@ -1355,9 +824,12 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
if (hptiop_initialize_iop(hba))
goto free_request_mem;
- spin_lock(&hptiop_hba_list_lock);
- list_add_tail(&hba->link, &hptiop_hba_list);
- spin_unlock(&hptiop_hba_list_lock);
+ if (scsi_add_host(host, &pcidev->dev)) {
+ printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
+ hba->host->host_no);
+ goto free_request_mem;
+ }
+
scsi_scan_host(host);
@@ -1372,9 +844,6 @@ free_request_mem:
free_request_irq:
free_irq(hba->pcidev->irq, hba);
-remove_scsi_host:
- scsi_remove_host(host);
-
unmap_pci_bar:
iounmap(hba->iop);
@@ -1422,10 +891,6 @@ static void hptiop_remove(struct pci_dev *pcidev)
scsi_remove_host(host);
- spin_lock(&hptiop_hba_list_lock);
- list_del_init(&hba->link);
- spin_unlock(&hptiop_hba_list_lock);
-
hptiop_shutdown(pcidev);
free_irq(hba->pcidev->irq, hba);
@@ -1462,27 +927,12 @@ static struct pci_driver hptiop_pci_driver = {
static int __init hptiop_module_init(void)
{
- int error;
-
printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
-
- error = pci_register_driver(&hptiop_pci_driver);
- if (error < 0)
- return error;
-
- hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops);
- if (hptiop_cdev_major < 0) {
- printk(KERN_WARNING "unable to register hptiop device.\n");
- return hptiop_cdev_major;
- }
-
- return 0;
+ return pci_register_driver(&hptiop_pci_driver);
}
static void __exit hptiop_module_exit(void)
{
- dprintk("hptiop_module_exit\n");
- unregister_chrdev(hptiop_cdev_major, "hptiop");
pci_unregister_driver(&hptiop_pci_driver);
}
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index f7b5d73..94d1de5 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -517,7 +517,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
/* No more interrupts */
if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred);
- local_irq_enable();
+ local_irq_enable_in_hardirq();
if (status.b.check)
rq->errors++;
idescsi_end_request (drive, 1, 0);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 848fb2a..058f094 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -43,13 +43,10 @@
#include "iscsi_tcp.h"
-#define ISCSI_TCP_VERSION "1.0-595"
-
MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
"Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI/TCP data-path");
MODULE_LICENSE("GPL");
-MODULE_VERSION(ISCSI_TCP_VERSION);
/* #define DEBUG_TCP */
#define DEBUG_ASSERT
@@ -185,11 +182,19 @@ iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn)
* must be called with session lock
*/
static void
-__iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
+iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
+ struct iscsi_r2t_info *r2t;
struct scsi_cmnd *sc;
+ /* flush ctask's r2t queues */
+ while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
+ __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
+ sizeof(void*));
+ debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
+ }
+
sc = ctask->sc;
if (unlikely(!sc))
return;
@@ -374,6 +379,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
spin_unlock(&session->lock);
return 0;
}
+
rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
BUG_ON(!rc);
@@ -399,7 +405,7 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
tcp_ctask->exp_r2tsn = r2tsn + 1;
tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
__kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
- __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*));
+ list_move_tail(&ctask->running, &conn->xmitqueue);
scsi_queue_work(session->host, &conn->xmitwork);
conn->r2t_pdus_cnt++;
@@ -477,6 +483,8 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
case ISCSI_OP_SCSI_DATA_IN:
tcp_conn->in.ctask = session->cmds[itt];
rc = iscsi_data_rsp(conn, tcp_conn->in.ctask);
+ if (rc)
+ return rc;
/* fall through */
case ISCSI_OP_SCSI_CMD_RSP:
tcp_conn->in.ctask = session->cmds[itt];
@@ -484,7 +492,7 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
goto copy_hdr;
spin_lock(&session->lock);
- __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask);
+ iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
spin_unlock(&session->lock);
break;
@@ -500,13 +508,28 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
break;
case ISCSI_OP_LOGIN_RSP:
case ISCSI_OP_TEXT_RSP:
- case ISCSI_OP_LOGOUT_RSP:
- case ISCSI_OP_NOOP_IN:
case ISCSI_OP_REJECT:
case ISCSI_OP_ASYNC_EVENT:
+ /*
+ * It is possible that we could get a PDU with a buffer larger
+ * than 8K, but there are no targets that currently do this.
+ * For now we fail until we find a vendor that needs it
+ */
+ if (DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH <
+ tcp_conn->in.datalen) {
+ printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
+ "but conn buffer is only %u (opcode %0x)\n",
+ tcp_conn->in.datalen,
+ DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, opcode);
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
+
if (tcp_conn->in.datalen)
goto copy_hdr;
/* fall through */
+ case ISCSI_OP_LOGOUT_RSP:
+ case ISCSI_OP_NOOP_IN:
case ISCSI_OP_SCSI_TMFUNC_RSP:
rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
break;
@@ -523,7 +546,7 @@ copy_hdr:
* skbs to complete the command then we have to copy the header
* for later use
*/
- if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <
+ if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <=
(tcp_conn->in.datalen + tcp_conn->in.padding +
(conn->datadgst_en ? 4 : 0))) {
debug_tcp("Copying header for later use. in.copy %d in.datalen"
@@ -614,9 +637,9 @@ iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
* byte counters.
**/
static inline int
-iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn)
+iscsi_tcp_copy(struct iscsi_conn *conn)
{
- void *buf = tcp_conn->data;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
int buf_size = tcp_conn->in.datalen;
int buf_left = buf_size - tcp_conn->data_copied;
int size = min(tcp_conn->in.copy, buf_left);
@@ -627,7 +650,7 @@ iscsi_tcp_copy(struct iscsi_tcp_conn *tcp_conn)
BUG_ON(size <= 0);
rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
- (char*)buf + tcp_conn->data_copied, size);
+ (char*)conn->data + tcp_conn->data_copied, size);
BUG_ON(rc);
tcp_conn->in.offset += size;
@@ -745,10 +768,11 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
done:
/* check for non-exceptional status */
if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
- debug_scsi("done [sc %lx res %d itt 0x%x]\n",
- (long)sc, sc->result, ctask->itt);
+ debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
+ (long)sc, sc->result, ctask->itt,
+ tcp_conn->in.hdr->flags);
spin_lock(&conn->session->lock);
- __iscsi_ctask_cleanup(conn, ctask);
+ iscsi_tcp_cleanup_ctask(conn, ctask);
__iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
spin_unlock(&conn->session->lock);
}
@@ -769,26 +793,25 @@ iscsi_data_recv(struct iscsi_conn *conn)
break;
case ISCSI_OP_SCSI_CMD_RSP:
spin_lock(&conn->session->lock);
- __iscsi_ctask_cleanup(conn, tcp_conn->in.ctask);
+ iscsi_tcp_cleanup_ctask(conn, tcp_conn->in.ctask);
spin_unlock(&conn->session->lock);
case ISCSI_OP_TEXT_RSP:
case ISCSI_OP_LOGIN_RSP:
- case ISCSI_OP_NOOP_IN:
case ISCSI_OP_ASYNC_EVENT:
case ISCSI_OP_REJECT:
/*
* Collect data segment to the connection's data
* placeholder
*/
- if (iscsi_tcp_copy(tcp_conn)) {
+ if (iscsi_tcp_copy(conn)) {
rc = -EAGAIN;
goto exit;
}
- rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, tcp_conn->data,
+ rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data,
tcp_conn->in.datalen);
if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP)
- iscsi_recv_digest_update(tcp_conn, tcp_conn->data,
+ iscsi_recv_digest_update(tcp_conn, conn->data,
tcp_conn->in.datalen);
break;
default:
@@ -843,7 +866,7 @@ more:
if (rc == -EAGAIN)
goto nomore;
else {
- iscsi_conn_failure(conn, rc);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return 0;
}
}
@@ -897,7 +920,7 @@ more:
if (rc) {
if (rc == -EAGAIN)
goto again;
- iscsi_conn_failure(conn, rc);
+ iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return 0;
}
tcp_conn->in.copy -= tcp_conn->in.padding;
@@ -1028,9 +1051,8 @@ iscsi_conn_set_callbacks(struct iscsi_conn *conn)
}
static void
-iscsi_conn_restore_callbacks(struct iscsi_conn *conn)
+iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
{
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct sock *sk = tcp_conn->sock->sk;
/* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
@@ -1308,7 +1330,7 @@ iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
ctask->imm_count -
ctask->unsol_count;
- debug_scsi("cmd [itt %x total %d imm %d imm_data %d "
+ debug_scsi("cmd [itt 0x%x total %d imm %d imm_data %d "
"r2t_data %d]\n",
ctask->itt, ctask->total_length, ctask->imm_count,
ctask->unsol_count, tcp_ctask->r2t_data_count);
@@ -1636,7 +1658,7 @@ handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
}
solicit_again:
/*
- * send Data-Out whitnin this R2T sequence.
+ * send Data-Out within this R2T sequence.
*/
if (!r2t->data_count)
goto data_out_done;
@@ -1731,7 +1753,7 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_data_task *dtask = tcp_ctask->dtask;
- int sent, rc;
+ int sent = 0, rc;
tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
@@ -1900,27 +1922,32 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
/* initial operational parameters */
tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
- tcp_conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
-
- /* allocate initial PDU receive place holder */
- if (tcp_conn->data_size <= PAGE_SIZE)
- tcp_conn->data = kmalloc(tcp_conn->data_size, GFP_KERNEL);
- else
- tcp_conn->data = (void*)__get_free_pages(GFP_KERNEL,
- get_order(tcp_conn->data_size));
- if (!tcp_conn->data)
- goto max_recv_dlenght_alloc_fail;
return cls_conn;
-max_recv_dlenght_alloc_fail:
- kfree(tcp_conn);
tcp_conn_alloc_fail:
iscsi_conn_teardown(cls_conn);
return NULL;
}
static void
+iscsi_tcp_release_conn(struct iscsi_conn *conn)
+{
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+
+ if (!tcp_conn->sock)
+ return;
+
+ sock_hold(tcp_conn->sock->sk);
+ iscsi_conn_restore_callbacks(tcp_conn);
+ sock_put(tcp_conn->sock->sk);
+
+ sock_release(tcp_conn->sock);
+ tcp_conn->sock = NULL;
+ conn->recv_lock = NULL;
+}
+
+static void
iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
@@ -1930,6 +1957,7 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
if (conn->hdrdgst_en || conn->datadgst_en)
digest = 1;
+ iscsi_tcp_release_conn(conn);
iscsi_conn_teardown(cls_conn);
/* now free tcp_conn */
@@ -1944,15 +1972,18 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
crypto_free_tfm(tcp_conn->data_rx_tfm);
}
- /* free conn->data, size = MaxRecvDataSegmentLength */
- if (tcp_conn->data_size <= PAGE_SIZE)
- kfree(tcp_conn->data);
- else
- free_pages((unsigned long)tcp_conn->data,
- get_order(tcp_conn->data_size));
kfree(tcp_conn);
}
+static void
+iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+{
+ struct iscsi_conn *conn = cls_conn->dd_data;
+
+ iscsi_conn_stop(cls_conn, flag);
+ iscsi_tcp_release_conn(conn);
+}
+
static int
iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
@@ -2001,52 +2032,6 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
return 0;
}
-static void
-iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
-{
- struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
- struct iscsi_r2t_info *r2t;
-
- /* flush ctask's r2t queues */
- while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*)))
- __kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
- sizeof(void*));
-
- __iscsi_ctask_cleanup(conn, ctask);
-}
-
-static void
-iscsi_tcp_suspend_conn_rx(struct iscsi_conn *conn)
-{
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
- struct sock *sk;
-
- if (!tcp_conn->sock)
- return;
-
- sk = tcp_conn->sock->sk;
- write_lock_bh(&sk->sk_callback_lock);
- set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
- write_unlock_bh(&sk->sk_callback_lock);
-}
-
-static void
-iscsi_tcp_terminate_conn(struct iscsi_conn *conn)
-{
- struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-
- if (!tcp_conn->sock)
- return;
-
- sock_hold(tcp_conn->sock->sk);
- iscsi_conn_restore_callbacks(conn);
- sock_put(tcp_conn->sock->sk);
-
- sock_release(tcp_conn->sock);
- tcp_conn->sock = NULL;
- conn->recv_lock = NULL;
-}
-
/* called with host lock */
static void
iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
@@ -2057,6 +2042,7 @@ iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
sizeof(struct iscsi_hdr));
tcp_mtask->xmstate = XMSTATE_IMM_HDR;
+ tcp_mtask->sent = 0;
if (mtask->data_count)
iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data,
@@ -2138,39 +2124,6 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
int value;
switch(param) {
- case ISCSI_PARAM_MAX_RECV_DLENGTH: {
- char *saveptr = tcp_conn->data;
- gfp_t flags = GFP_KERNEL;
-
- sscanf(buf, "%d", &value);
- if (tcp_conn->data_size >= value) {
- iscsi_set_param(cls_conn, param, buf, buflen);
- break;
- }
-
- spin_lock_bh(&session->lock);
- if (conn->stop_stage == STOP_CONN_RECOVER)
- flags = GFP_ATOMIC;
- spin_unlock_bh(&session->lock);
-
- if (value <= PAGE_SIZE)
- tcp_conn->data = kmalloc(value, flags);
- else
- tcp_conn->data = (void*)__get_free_pages(flags,
- get_order(value));
- if (tcp_conn->data == NULL) {
- tcp_conn->data = saveptr;
- return -ENOMEM;
- }
- if (tcp_conn->data_size <= PAGE_SIZE)
- kfree(saveptr);
- else
- free_pages((unsigned long)saveptr,
- get_order(tcp_conn->data_size));
- iscsi_set_param(cls_conn, param, buf, buflen);
- tcp_conn->data_size = value;
- break;
- }
case ISCSI_PARAM_HDRDGST_EN:
iscsi_set_param(cls_conn, param, buf, buflen);
tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
@@ -2361,8 +2314,7 @@ static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
}
static struct scsi_host_template iscsi_sht = {
- .name = "iSCSI Initiator over TCP/IP, v"
- ISCSI_TCP_VERSION,
+ .name = "iSCSI Initiator over TCP/IP",
.queuecommand = iscsi_queuecommand,
.change_queue_depth = iscsi_change_queue_depth,
.can_queue = ISCSI_XMIT_CMDS_MAX - 1,
@@ -2414,10 +2366,7 @@ static struct iscsi_transport iscsi_tcp_transport = {
.get_conn_param = iscsi_tcp_conn_get_param,
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_conn_start,
- .stop_conn = iscsi_conn_stop,
- /* these are called as part of conn recovery */
- .suspend_conn_recv = iscsi_tcp_suspend_conn_rx,
- .terminate_conn = iscsi_tcp_terminate_conn,
+ .stop_conn = iscsi_tcp_conn_stop,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
.get_stats = iscsi_conn_get_stats,
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 8083028..6a4ee70 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -78,8 +78,6 @@ struct iscsi_tcp_conn {
char hdrext[4*sizeof(__u16) +
sizeof(__u32)];
int data_copied;
- char *data; /* data placeholder */
- int data_size; /* actual recv_dlength */
int stop_stage; /* conn_stop() flag: *
* stop to recover, *
* stop to terminate */
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 7e6e031..5884cd2 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -189,6 +189,7 @@ static void iscsi_complete_command(struct iscsi_session *session,
{
struct scsi_cmnd *sc = ctask->sc;
+ ctask->state = ISCSI_TASK_COMPLETED;
ctask->sc = NULL;
list_del_init(&ctask->running);
__kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
@@ -275,6 +276,25 @@ out:
return rc;
}
+static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
+{
+ struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
+
+ conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
+ conn->tmfrsp_pdus_cnt++;
+
+ if (conn->tmabort_state != TMABORT_INITIAL)
+ return;
+
+ if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
+ conn->tmabort_state = TMABORT_SUCCESS;
+ else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
+ conn->tmabort_state = TMABORT_NOT_FOUND;
+ else
+ conn->tmabort_state = TMABORT_FAILED;
+ wake_up(&conn->ehwait);
+}
+
/**
* __iscsi_complete_pdu - complete pdu
* @conn: iscsi conn
@@ -340,6 +360,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
switch(opcode) {
case ISCSI_OP_LOGOUT_RSP:
+ if (datalen) {
+ rc = ISCSI_ERR_PROTO;
+ break;
+ }
conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
/* fall through */
case ISCSI_OP_LOGIN_RSP:
@@ -348,7 +372,8 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
* login related PDU's exp_statsn is handled in
* userspace
*/
- rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen);
+ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+ rc = ISCSI_ERR_CONN_FAILED;
list_del(&mtask->running);
if (conn->login_mtask != mtask)
__kfifo_put(session->mgmtpool.queue,
@@ -360,25 +385,17 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
break;
}
- conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
- conn->tmfrsp_pdus_cnt++;
- if (conn->tmabort_state == TMABORT_INITIAL) {
- conn->tmabort_state =
- ((struct iscsi_tm_rsp *)hdr)->
- response == ISCSI_TMF_RSP_COMPLETE ?
- TMABORT_SUCCESS:TMABORT_FAILED;
- /* unblock eh_abort() */
- wake_up(&conn->ehwait);
- }
+ iscsi_tmf_rsp(conn, hdr);
break;
case ISCSI_OP_NOOP_IN:
- if (hdr->ttt != ISCSI_RESERVED_TAG) {
+ if (hdr->ttt != ISCSI_RESERVED_TAG || datalen) {
rc = ISCSI_ERR_PROTO;
break;
}
conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
- rc = iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen);
+ if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
+ rc = ISCSI_ERR_CONN_FAILED;
list_del(&mtask->running);
if (conn->login_mtask != mtask)
__kfifo_put(session->mgmtpool.queue,
@@ -391,14 +408,21 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
} else if (itt == ISCSI_RESERVED_TAG) {
switch(opcode) {
case ISCSI_OP_NOOP_IN:
- if (!datalen) {
- rc = iscsi_check_assign_cmdsn(session,
- (struct iscsi_nopin*)hdr);
- if (!rc && hdr->ttt != ISCSI_RESERVED_TAG)
- rc = iscsi_recv_pdu(conn->cls_conn,
- hdr, NULL, 0);
- } else
+ if (datalen) {
rc = ISCSI_ERR_PROTO;
+ break;
+ }
+
+ rc = iscsi_check_assign_cmdsn(session,
+ (struct iscsi_nopin*)hdr);
+ if (rc)
+ break;
+
+ if (hdr->ttt == ISCSI_RESERVED_TAG)
+ break;
+
+ if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0))
+ rc = ISCSI_ERR_CONN_FAILED;
break;
case ISCSI_OP_REJECT:
/* we need sth like iscsi_reject_rsp()*/
@@ -568,20 +592,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
}
/* process command queue */
- while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask,
- sizeof(void*))) {
+ spin_lock_bh(&conn->session->lock);
+ while (!list_empty(&conn->xmitqueue)) {
/*
* iscsi tcp may readd the task to the xmitqueue to send
* write data
*/
- spin_lock_bh(&conn->session->lock);
- if (list_empty(&conn->ctask->running))
- list_add_tail(&conn->ctask->running, &conn->run_list);
+ conn->ctask = list_entry(conn->xmitqueue.next,
+ struct iscsi_cmd_task, running);
+ conn->ctask->state = ISCSI_TASK_RUNNING;
+ list_move_tail(conn->xmitqueue.next, &conn->run_list);
spin_unlock_bh(&conn->session->lock);
+
rc = tt->xmit_cmd_task(conn, conn->ctask);
if (rc)
goto again;
+ spin_lock_bh(&conn->session->lock);
}
+ spin_unlock_bh(&conn->session->lock);
/* done with this ctask */
conn->ctask = NULL;
@@ -691,6 +719,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
sc->SCp.phase = session->age;
sc->SCp.ptr = (char *)ctask;
+ ctask->state = ISCSI_TASK_PENDING;
ctask->mtask = NULL;
ctask->conn = conn;
ctask->sc = sc;
@@ -700,7 +729,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
session->tt->init_cmd_task(ctask);
- __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*));
+ list_add_tail(&ctask->running, &conn->xmitqueue);
debug_scsi(
"ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n",
sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
@@ -977,31 +1006,27 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc,
/*
* xmit mutex and session lock must be held
*/
-#define iscsi_remove_task(tasktype) \
-static struct iscsi_##tasktype * \
-iscsi_remove_##tasktype(struct kfifo *fifo, uint32_t itt) \
-{ \
- int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*); \
- struct iscsi_##tasktype *task; \
- \
- debug_scsi("searching %d tasks\n", nr_tasks); \
- \
- for (i = 0; i < nr_tasks; i++) { \
- __kfifo_get(fifo, (void*)&task, sizeof(void*)); \
- debug_scsi("check task %u\n", task->itt); \
- \
- if (task->itt == itt) { \
- debug_scsi("matched task\n"); \
- return task; \
- } \
- \
- __kfifo_put(fifo, (void*)&task, sizeof(void*)); \
- } \
- return NULL; \
-}
+static struct iscsi_mgmt_task *
+iscsi_remove_mgmt_task(struct kfifo *fifo, uint32_t itt)
+{
+ int i, nr_tasks = __kfifo_len(fifo) / sizeof(void*);
+ struct iscsi_mgmt_task *task;
-iscsi_remove_task(mgmt_task);
-iscsi_remove_task(cmd_task);
+ debug_scsi("searching %d tasks\n", nr_tasks);
+
+ for (i = 0; i < nr_tasks; i++) {
+ __kfifo_get(fifo, (void*)&task, sizeof(void*));
+ debug_scsi("check task %u\n", task->itt);
+
+ if (task->itt == itt) {
+ debug_scsi("matched task\n");
+ return task;
+ }
+
+ __kfifo_put(fifo, (void*)&task, sizeof(void*));
+ }
+ return NULL;
+}
static int iscsi_ctask_mtask_cleanup(struct iscsi_cmd_task *ctask)
{
@@ -1027,12 +1052,13 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
{
struct scsi_cmnd *sc;
- conn->session->tt->cleanup_cmd_task(conn, ctask);
- iscsi_ctask_mtask_cleanup(ctask);
-
sc = ctask->sc;
if (!sc)
return;
+
+ conn->session->tt->cleanup_cmd_task(conn, ctask);
+ iscsi_ctask_mtask_cleanup(ctask);
+
sc->result = err;
sc->resid = sc->request_bufflen;
iscsi_complete_command(conn->session, ctask);
@@ -1043,7 +1069,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
struct iscsi_conn *conn = ctask->conn;
struct iscsi_session *session = conn->session;
- struct iscsi_cmd_task *pending_ctask;
int rc;
conn->eh_abort_cnt++;
@@ -1061,8 +1086,11 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
goto failed;
/* ctask completed before time out */
- if (!ctask->sc)
- goto success;
+ if (!ctask->sc) {
+ spin_unlock_bh(&session->lock);
+ debug_scsi("sc completed while abort in progress\n");
+ goto success_rel_mutex;
+ }
/* what should we do here ? */
if (conn->ctask == ctask) {
@@ -1071,17 +1099,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
goto failed;
}
- /* check for the easy pending cmd abort */
- pending_ctask = iscsi_remove_cmd_task(conn->xmitqueue, ctask->itt);
- if (pending_ctask) {
- /* iscsi_tcp queues write transfers on the xmitqueue */
- if (list_empty(&pending_ctask->running)) {
- debug_scsi("found pending task\n");
- goto success;
- } else
- __kfifo_put(conn->xmitqueue, (void*)&pending_ctask,
- sizeof(void*));
- }
+ if (ctask->state == ISCSI_TASK_PENDING)
+ goto success_cleanup;
conn->tmabort_state = TMABORT_INITIAL;
@@ -1089,25 +1108,31 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
rc = iscsi_exec_abort_task(sc, ctask);
spin_lock_bh(&session->lock);
- iscsi_ctask_mtask_cleanup(ctask);
if (rc || sc->SCp.phase != session->age ||
session->state != ISCSI_STATE_LOGGED_IN)
goto failed;
+ iscsi_ctask_mtask_cleanup(ctask);
- /* ctask completed before tmf abort response */
- if (!ctask->sc) {
- debug_scsi("sc completed while abort in progress\n");
- goto success;
- }
-
- if (conn->tmabort_state != TMABORT_SUCCESS) {
+ switch (conn->tmabort_state) {
+ case TMABORT_SUCCESS:
+ goto success_cleanup;
+ case TMABORT_NOT_FOUND:
+ if (!ctask->sc) {
+ /* ctask completed before tmf abort response */
+ spin_unlock_bh(&session->lock);
+ debug_scsi("sc completed while abort in progress\n");
+ goto success_rel_mutex;
+ }
+ /* fall through */
+ default:
+ /* timedout or failed */
spin_unlock_bh(&session->lock);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
spin_lock_bh(&session->lock);
goto failed;
}
-success:
+success_cleanup:
debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
spin_unlock_bh(&session->lock);
@@ -1121,6 +1146,7 @@ success:
spin_unlock(&session->lock);
write_unlock_bh(conn->recv_lock);
+success_rel_mutex:
mutex_unlock(&conn->xmitmutex);
return SUCCESS;
@@ -1263,6 +1289,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
if (cmd_task_size)
ctask->dd_data = &ctask[1];
ctask->itt = cmd_i;
+ INIT_LIST_HEAD(&ctask->running);
}
spin_lock_init(&session->lock);
@@ -1282,6 +1309,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit,
if (mgmt_task_size)
mtask->dd_data = &mtask[1];
mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
+ INIT_LIST_HEAD(&mtask->running);
}
if (scsi_add_host(shost, NULL))
@@ -1322,15 +1350,18 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
+ struct module *owner = cls_session->transport->owner;
scsi_remove_host(shost);
iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
+ kfree(session->targetname);
+
iscsi_destroy_session(cls_session);
scsi_host_put(shost);
- module_put(cls_session->transport->owner);
+ module_put(owner);
}
EXPORT_SYMBOL_GPL(iscsi_session_teardown);
@@ -1361,12 +1392,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
conn->tmabort_state = TMABORT_INITIAL;
INIT_LIST_HEAD(&conn->run_list);
INIT_LIST_HEAD(&conn->mgmt_run_list);
-
- /* initialize general xmit PDU commands queue */
- conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*),
- GFP_KERNEL, NULL);
- if (conn->xmitqueue == ERR_PTR(-ENOMEM))
- goto xmitqueue_alloc_fail;
+ INIT_LIST_HEAD(&conn->xmitqueue);
/* initialize general immediate & non-immediate PDU commands queue */
conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
@@ -1394,7 +1420,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
data = kmalloc(DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL);
if (!data)
goto login_mtask_data_alloc_fail;
- conn->login_mtask->data = data;
+ conn->login_mtask->data = conn->data = data;
init_timer(&conn->tmabort_timer);
mutex_init(&conn->xmitmutex);
@@ -1410,8 +1436,6 @@ login_mtask_alloc_fail:
mgmtqueue_alloc_fail:
kfifo_free(conn->immqueue);
immqueue_alloc_fail:
- kfifo_free(conn->xmitqueue);
-xmitqueue_alloc_fail:
iscsi_destroy_conn(cls_conn);
return NULL;
}
@@ -1432,12 +1456,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
mutex_lock(&conn->xmitmutex);
- if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE) {
- if (session->tt->suspend_conn_recv)
- session->tt->suspend_conn_recv(conn);
-
- session->tt->terminate_conn(conn);
- }
spin_lock_bh(&session->lock);
conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
@@ -1474,7 +1492,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
}
spin_lock_bh(&session->lock);
- kfree(conn->login_mtask->data);
+ kfree(conn->data);
+ kfree(conn->persistent_address);
__kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
sizeof(void*));
list_del(&conn->item);
@@ -1489,7 +1508,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
spin_unlock_bh(&session->lock);
- kfifo_free(conn->xmitqueue);
kfifo_free(conn->immqueue);
kfifo_free(conn->mgmtqueue);
@@ -1572,7 +1590,7 @@ static void fail_all_commands(struct iscsi_conn *conn)
struct iscsi_cmd_task *ctask, *tmp;
/* flush pending */
- while (__kfifo_get(conn->xmitqueue, (void*)&ctask, sizeof(void*))) {
+ list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
debug_scsi("failing pending sc %p itt 0x%x\n", ctask->sc,
ctask->itt);
fail_command(conn, ctask, DID_BUS_BUSY << 16);
@@ -1615,8 +1633,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
spin_unlock_bh(&session->lock);
- if (session->tt->suspend_conn_recv)
- session->tt->suspend_conn_recv(conn);
+ write_lock_bh(conn->recv_lock);
+ set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+ write_unlock_bh(conn->recv_lock);
mutex_lock(&conn->xmitmutex);
/*
@@ -1635,7 +1654,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
}
}
- session->tt->terminate_conn(conn);
/*
* flush queues.
*/
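
[Editor's note] The libiscsi hunks above replace the kfifo-based xmitqueue with a plain list plus an explicit per-task state (PENDING, RUNNING, COMPLETED), so eh_abort can simply test ctask->state instead of fishing tasks back out of a FIFO. Below is a minimal, self-contained userspace C sketch of that pattern — not driver code; the struct, function, and lock names are illustrative only.

/*
 * Userspace analogue of the xmitqueue conversion: tasks carry an
 * explicit state and sit on a plain linked list instead of a kfifo,
 * so an abort path can cheaply check task->state under the lock.
 */
#include <pthread.h>
#include <stdio.h>

enum task_state { TASK_PENDING, TASK_RUNNING, TASK_COMPLETED };

struct cmd_task {
	int itt;                 /* initiator task tag */
	enum task_state state;
	struct cmd_task *next;   /* singly linked "xmitqueue" */
};

static struct cmd_task *xmitqueue;                        /* pending commands */
static pthread_mutex_t session_lock = PTHREAD_MUTEX_INITIALIZER;

static void queuecommand(struct cmd_task *t)
{
	pthread_mutex_lock(&session_lock);
	t->state = TASK_PENDING;
	t->next = xmitqueue;                              /* LIFO for brevity */
	xmitqueue = t;
	pthread_mutex_unlock(&session_lock);
}

static void data_xmit(void)
{
	pthread_mutex_lock(&session_lock);
	while (xmitqueue) {
		struct cmd_task *t = xmitqueue;

		xmitqueue = t->next;
		t->state = TASK_RUNNING;                  /* moved to run list */
		pthread_mutex_unlock(&session_lock);

		printf("xmit itt 0x%x\n", t->itt);        /* "send" the command */
		t->state = TASK_COMPLETED;

		pthread_mutex_lock(&session_lock);
	}
	pthread_mutex_unlock(&session_lock);
}

/* Cheap abort: a task still in PENDING state never reached the wire. */
static int eh_abort(struct cmd_task *t)
{
	int pending;

	pthread_mutex_lock(&session_lock);
	pending = (t->state == TASK_PENDING);
	pthread_mutex_unlock(&session_lock);
	return pending;                                   /* 1: abort trivially succeeds */
}

int main(void)
{
	struct cmd_task a = { .itt = 1 }, b = { .itt = 2 };

	queuecommand(&a);
	queuecommand(&b);
	printf("abort itt 1 while pending: %d\n", eh_abort(&a));
	data_xmit();
	printf("abort itt 1 after xmit: %d\n", eh_abort(&a));
	return 0;
}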
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5c68cdd..d384c16 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -222,7 +222,7 @@ lpfc_issue_lip(struct Scsi_Host *host)
pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->mb.mbxOwner = OWN_HOST;
- mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
@@ -884,7 +884,7 @@ sysfs_mbox_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
phba->sysfs_mbox.mbox == NULL ) {
sysfs_mbox_idle(phba);
spin_unlock_irq(host->host_lock);
- return -EINVAL;
+ return -EAGAIN;
}
}
@@ -1000,14 +1000,15 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
spin_unlock_irq(phba->host->host_lock);
rc = lpfc_sli_issue_mbox_wait (phba,
phba->sysfs_mbox.mbox,
- phba->fc_ratov * 2);
+ lpfc_mbox_tmo_val(phba,
+ phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
spin_lock_irq(phba->host->host_lock);
}
if (rc != MBX_SUCCESS) {
sysfs_mbox_idle(phba);
spin_unlock_irq(host->host_lock);
- return -ENODEV;
+ return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
}
phba->sysfs_mbox.state = SMBOX_READING;
}
@@ -1016,7 +1017,7 @@ sysfs_mbox_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
printk(KERN_WARNING "mbox_read: Bad State\n");
sysfs_mbox_idle(phba);
spin_unlock_irq(host->host_lock);
- return -EINVAL;
+ return -EAGAIN;
}
memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
@@ -1210,8 +1211,10 @@ lpfc_get_stats(struct Scsi_Host *shost)
struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
struct lpfc_sli *psli = &phba->sli;
struct fc_host_statistics *hs = &phba->link_stats;
+ struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
+ unsigned long seconds;
int rc = 0;
pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -1272,22 +1275,103 @@ lpfc_get_stats(struct Scsi_Host *shost)
hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
hs->error_frames = pmb->un.varRdLnk.crcCnt;
+ hs->link_failure_count -= lso->link_failure_count;
+ hs->loss_of_sync_count -= lso->loss_of_sync_count;
+ hs->loss_of_signal_count -= lso->loss_of_signal_count;
+ hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
+ hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
+ hs->invalid_crc_count -= lso->invalid_crc_count;
+ hs->error_frames -= lso->error_frames;
+
if (phba->fc_topology == TOPOLOGY_LOOP) {
hs->lip_count = (phba->fc_eventTag >> 1);
+ hs->lip_count -= lso->link_events;
hs->nos_count = -1;
} else {
hs->lip_count = -1;
hs->nos_count = (phba->fc_eventTag >> 1);
+ hs->nos_count -= lso->link_events;
}
hs->dumped_frames = -1;
-/* FIX ME */
- /*hs->SecondsSinceLastReset = (jiffies - lpfc_loadtime) / HZ;*/
+ seconds = get_seconds();
+ if (seconds < psli->stats_start)
+ hs->seconds_since_last_reset = seconds +
+ ((unsigned long)-1 - psli->stats_start);
+ else
+ hs->seconds_since_last_reset = seconds - psli->stats_start;
return hs;
}
+static void
+lpfc_reset_stats(struct Scsi_Host *shost)
+{
+ struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata;
+ struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
+ LPFC_MBOXQ_t *pmboxq;
+ MAILBOX_t *pmb;
+ int rc = 0;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return;
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+
+ pmb = &pmboxq->mb;
+ pmb->mbxCommand = MBX_READ_STATUS;
+ pmb->mbxOwner = OWN_HOST;
+ pmb->un.varWords[0] = 0x1; /* reset request */
+ pmboxq->context1 = NULL;
+
+ if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+ if (rc != MBX_SUCCESS) {
+ if (rc == MBX_TIMEOUT)
+ pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ else
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return;
+ }
+
+ memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
+ pmb->mbxCommand = MBX_READ_LNK_STAT;
+ pmb->mbxOwner = OWN_HOST;
+ pmboxq->context1 = NULL;
+
+ if ((phba->fc_flag & FC_OFFLINE_MODE) ||
+ (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+ rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
+
+ if (rc != MBX_SUCCESS) {
+ if (rc == MBX_TIMEOUT)
+ pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ else
+ mempool_free( pmboxq, phba->mbox_mem_pool);
+ return;
+ }
+
+ lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
+ lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
+ lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
+ lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
+ lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
+ lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
+ lso->error_frames = pmb->un.varRdLnk.crcCnt;
+ lso->link_events = (phba->fc_eventTag >> 1);
+
+ psli->stats_start = get_seconds();
+
+ return;
+}
/*
* The LPFC driver treats linkdown handling as target loss events so there
@@ -1431,8 +1515,7 @@ struct fc_function_template lpfc_transport_functions = {
*/
.get_fc_host_stats = lpfc_get_stats,
-
- /* the LPFC driver doesn't support resetting stats yet */
+ .reset_fc_host_stats = lpfc_reset_stats,
.dd_fcrport_size = sizeof(struct lpfc_rport_data),
.show_rport_maxframe_size = 1,
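
[Editor's note] The lpfc_attr.c hunks above implement reset_fc_host_stats by snapshotting the hardware link counters into per-port offsets and subtracting them on every read, and they report seconds_since_last_reset from a saved start time with a wraparound check. A small userspace sketch of that bookkeeping, with illustrative names, is shown below.

/*
 * "Reset statistics via offsets": the hardware counters are never
 * cleared; a snapshot is subtracted instead, and the reset time is
 * remembered so elapsed time survives a wrap of the seconds counter.
 */
#include <stdio.h>

struct lnk_stat { unsigned long link_failures; };

static struct lnk_stat offsets;          /* snapshot taken at reset          */
static unsigned long stats_start;        /* seconds value at last reset      */

static unsigned long hw_link_failures;   /* stands in for the HBA counter    */
static unsigned long now_seconds;        /* stands in for get_seconds()      */

static void reset_stats(void)
{
	offsets.link_failures = hw_link_failures;
	stats_start = now_seconds;
}

static void get_stats(void)
{
	unsigned long elapsed;

	if (now_seconds < stats_start)       /* seconds counter wrapped */
		elapsed = now_seconds + ((unsigned long)-1 - stats_start);
	else
		elapsed = now_seconds - stats_start;

	printf("link failures since reset: %lu (over %lu s)\n",
	       hw_link_failures - offsets.link_failures, elapsed);
}

int main(void)
{
	hw_link_failures = 7; now_seconds = 100; reset_stats();
	hw_link_failures = 9; now_seconds = 160; get_stats();
	return 0;
}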
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 517e9e4..2a17646 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -127,6 +127,7 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
int lpfc_mem_alloc(struct lpfc_hba *);
void lpfc_mem_free(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index b65ee57..bbb7310 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -131,6 +131,7 @@ lpfc_ct_unsol_event(struct lpfc_hba * phba,
}
ct_unsol_event_exit_piocbq:
+ list_del(&head);
if (pmbuf) {
list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
lpfc_mbuf_free(phba, matp->virt, matp->phys);
@@ -481,7 +482,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0239 NameServer Rsp "
+ "%d:0208 NameServer Rsp "
"Data: x%x\n",
phba->brd_no,
phba->fc_flag);
@@ -588,13 +589,9 @@ lpfc_get_hba_sym_node_name(struct lpfc_hba * phba, uint8_t * symbp)
lpfc_decode_firmware_rev(phba, fwrev, 0);
- if (phba->Port[0]) {
- sprintf(symbp, "Emulex %s Port %s FV%s DV%s", phba->ModelName,
- phba->Port, fwrev, lpfc_release_version);
- } else {
- sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
- fwrev, lpfc_release_version);
- }
+ sprintf(symbp, "Emulex %s FV%s DV%s", phba->ModelName,
+ fwrev, lpfc_release_version);
+ return;
}
/*
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b89f6cb..3567de61 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1848,9 +1848,12 @@ static void
lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
struct lpfc_iocbq * rspiocb)
{
+ IOCB_t *irsp;
struct lpfc_nodelist *ndlp;
LPFC_MBOXQ_t *mbox = NULL;
+ irsp = &rspiocb->iocb;
+
ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
if (cmdiocb->context_un.mbox)
mbox = cmdiocb->context_un.mbox;
@@ -1893,9 +1896,15 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
mempool_free( mbox, phba->mbox_mem_pool);
} else {
mempool_free( mbox, phba->mbox_mem_pool);
- if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
- lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
- ndlp = NULL;
+ /* Do not call NO_LIST for lpfc_els_abort'ed ELS cmds */
+ if (!((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+ ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
+ (irsp->un.ulpWord[4] == IOERR_LINK_DOWN) ||
+ (irsp->un.ulpWord[4] == IOERR_SLI_DOWN)))) {
+ if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
+ lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
+ ndlp = NULL;
+ }
}
}
}
@@ -2839,7 +2848,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
/* Xmit ELS RPS ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0128 Xmit ELS RPS ACC response tag x%x "
+ "%d:0118 Xmit ELS RPS ACC response tag x%x "
"Data: x%x x%x x%x x%x x%x\n",
phba->brd_no,
elsiocb->iocb.ulpIoTag,
@@ -2948,7 +2957,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_hba * phba, uint16_t cmdsize,
/* Xmit ELS RPL ACC response tag <ulpIoTag> */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "%d:0128 Xmit ELS RPL ACC response tag x%x "
+ "%d:0120 Xmit ELS RPL ACC response tag x%x "
"Data: x%x x%x x%x x%x x%x\n",
phba->brd_no,
elsiocb->iocb.ulpIoTag,
@@ -3109,7 +3118,7 @@ lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
struct lpfc_nodelist *ndlp, *next_ndlp;
/* FAN received */
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:265 FAN received\n",
+ lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "%d:0265 FAN received\n",
phba->brd_no);
icmd = &cmdiocb->iocb;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 4d6cf99..b2f1552 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1557,6 +1557,8 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
}
+
+ spin_lock_irq(phba->host->host_lock);
list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
@@ -1569,6 +1571,7 @@ lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
mempool_free(mb, phba->mbox_mem_pool);
}
}
+ spin_unlock_irq(phba->host->host_lock);
lpfc_els_abort(phba,ndlp,0);
spin_lock_irq(phba->host->host_lock);
@@ -1782,7 +1785,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
/* LOG change to REGLOGIN */
/* FIND node DID reglogin */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0931 FIND node DID reglogin"
+ "%d:0901 FIND node DID reglogin"
" Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
@@ -1805,7 +1808,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
/* LOG change to PRLI */
/* FIND node DID prli */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0931 FIND node DID prli "
+ "%d:0902 FIND node DID prli "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
@@ -1828,7 +1831,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
/* LOG change to NPR */
/* FIND node DID npr */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0931 FIND node DID npr "
+ "%d:0903 FIND node DID npr "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
@@ -1851,7 +1854,7 @@ lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
/* LOG change to UNUSED */
/* FIND node DID unused */
lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
- "%d:0931 FIND node DID unused "
+ "%d:0905 FIND node DID unused "
"Data: x%p x%x x%x x%x\n",
phba->brd_no,
ndlp, ndlp->nlp_DID,
@@ -2335,7 +2338,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!initlinkmbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0226 Device Discovery "
+ "%d:0206 Device Discovery "
"completion error\n",
phba->brd_no);
phba->hba_state = LPFC_HBA_ERROR;
@@ -2365,7 +2368,7 @@ lpfc_disc_timeout_handler(struct lpfc_hba *phba)
if (!clearlambox) {
clrlaerr = 1;
lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
- "%d:0226 Device Discovery "
+ "%d:0207 Device Discovery "
"completion error\n",
phba->brd_no);
phba->hba_state = LPFC_HBA_ERROR;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ef47b82..f6948ff 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1379,6 +1379,7 @@ lpfc_offline(struct lpfc_hba * phba)
/* stop all timers associated with this hba */
lpfc_stop_timer(phba);
phba->work_hba_events = 0;
+ phba->work_ha = 0;
lpfc_printf_log(phba,
KERN_WARNING,
@@ -1616,7 +1617,11 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_free_iocbq;
}
- /* We can rely on a queue depth attribute only after SLI HBA setup */
+ /*
+ * Set initial can_queue value since 0 is no longer supported and
+ * scsi_add_host will fail. This will be adjusted later based on the
+ * max xri value determined in hba setup.
+ */
host->can_queue = phba->cfg_hba_queue_depth - 10;
/* Tell the midlayer we support 16 byte commands */
@@ -1656,6 +1661,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_free_irq;
}
+ /*
+ * hba setup may have changed the hba_queue_depth so we need to adjust
+ * the value of can_queue.
+ */
+ host->can_queue = phba->cfg_hba_queue_depth - 10;
+
lpfc_discovery_wait(phba);
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index e42f22a..4d016c2 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -651,3 +651,19 @@ lpfc_mbox_get(struct lpfc_hba * phba)
return mbq;
}
+
+int
+lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
+{
+ switch (cmd) {
+ case MBX_WRITE_NV: /* 0x03 */
+ case MBX_UPDATE_CFG: /* 0x1B */
+ case MBX_DOWN_LOAD: /* 0x1C */
+ case MBX_DEL_LD_ENTRY: /* 0x1D */
+ case MBX_LOAD_AREA: /* 0x81 */
+ case MBX_FLASH_WR_ULA: /* 0x98 */
+ case MBX_LOAD_EXP_ROM: /* 0x9C */
+ return LPFC_MBOX_TMO_FLASH_CMD;
+ }
+ return LPFC_MBOX_TMO;
+}
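
[Editor's note] lpfc_mbox_tmo_val above picks a much longer timeout for flash-type mailbox commands; the lpfc_sli.c hunks later in this diff use that value both to arm the mailbox timer and to size the polling loop (the budget becomes tmo * 1000 iterations of a 1 ms delay). The following is a userspace analogue of that idea — command codes and names here are made up for illustration.

/*
 * Per-command timeout selection plus a millisecond polling budget,
 * mirroring the switch in lpfc_mbox_tmo_val and the MBX_POLL loop.
 */
#include <stdio.h>

#define TMO_DEFAULT      30     /* seconds */
#define TMO_FLASH_CMD   300     /* flash writes/erases can be very slow */

enum { CMD_READ_STATUS = 1, CMD_WRITE_FLASH = 2 };

static int cmd_tmo_val(int cmd)
{
	switch (cmd) {
	case CMD_WRITE_FLASH:
		return TMO_FLASH_CMD;
	}
	return TMO_DEFAULT;
}

/* Poll a "done" flag for up to the command's timeout, one step per ms. */
static int poll_for_completion(int cmd, const volatile int *done)
{
	long budget_ms = (long)cmd_tmo_val(cmd) * 1000;

	while (!*done) {
		if (budget_ms-- <= 0)
			return -1;              /* timed out */
		/* in the driver this step is mdelay(1); elided here */
	}
	return 0;
}

int main(void)
{
	volatile int done = 1;

	printf("read tmo %d s, flash tmo %d s, poll rc %d\n",
	       cmd_tmo_val(CMD_READ_STATUS), cmd_tmo_val(CMD_WRITE_FLASH),
	       poll_for_completion(CMD_READ_STATUS, &done));
	return 0;
}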
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bd0b0e2..20449a8 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -179,7 +179,7 @@ lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
- "%d:0201 Abort outstanding I/O on NPort x%x "
+ "%d:0205 Abort outstanding I/O on NPort x%x "
"Data: x%x x%x x%x\n",
phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->nlp_state, ndlp->nlp_rpi);
@@ -393,6 +393,20 @@ lpfc_rcv_plogi(struct lpfc_hba * phba,
mbox->context2 = ndlp;
ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
+ /*
+ * If there is an outstanding PLOGI issued, abort it before
+	 * sending the ACC rsp for the received PLOGI. If the pending
+	 * PLOGI is not canceled here, it will be rejected by the
+	 * remote port and retried. On a configuration with a single
+	 * discovery thread, this causes a huge delay in discovery.
+	 * It also leaves multiple state machines running in parallel
+	 * for this node.
+ */
+ if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
+ /* software abort outstanding PLOGI */
+ lpfc_els_abort(phba, ndlp, 1);
+ }
+
lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
return 1;
@@ -1601,7 +1615,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
lpfc_rcv_padisc(phba, ndlp, cmdiocb);
- if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
+ /*
+	 * Do not start discovery if discovery is about to start
+	 * or is already in progress for this node. Starting discovery
+	 * here will skew the count of discovery threads.
+ */
+ if ((!(ndlp->nlp_flag & NLP_DELAY_TMO)) &&
+ (ndlp->nlp_flag & NLP_NPR_2B_DISC)){
if (ndlp->nlp_flag & NLP_NPR_ADISC) {
ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a760a44..a8816a8 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -21,6 +21,7 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -841,6 +842,21 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
return 0;
}
+static void
+lpfc_block_error_handler(struct scsi_cmnd *cmnd)
+{
+ struct Scsi_Host *shost = cmnd->device->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+
+ spin_lock_irq(shost->host_lock);
+ while (rport->port_state == FC_PORTSTATE_BLOCKED) {
+ spin_unlock_irq(shost->host_lock);
+ msleep(1000);
+ spin_lock_irq(shost->host_lock);
+ }
+ spin_unlock_irq(shost->host_lock);
+ return;
+}
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
@@ -855,6 +871,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
unsigned int loop_count = 0;
int ret = SUCCESS;
+ lpfc_block_error_handler(cmnd);
spin_lock_irq(shost->host_lock);
lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
@@ -957,6 +974,7 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
int ret = FAILED;
int cnt, loopcnt;
+ lpfc_block_error_handler(cmnd);
spin_lock_irq(shost->host_lock);
/*
* If target is not in a MAPPED state, delay the reset until
@@ -1073,6 +1091,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
int cnt, loopcnt;
struct lpfc_scsi_buf * lpfc_cmd;
+ lpfc_block_error_handler(cmnd);
spin_lock_irq(shost->host_lock);
lpfc_cmd = lpfc_get_scsi_buf(phba);
@@ -1104,7 +1123,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
ndlp->rport->dd_data);
if (ret != SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
- "%d:0713 Bus Reset on target %d failed\n",
+ "%d:0700 Bus Reset on target %d failed\n",
phba->brd_no, i);
err_count++;
}
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 350a625..70f4d5a 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -320,7 +320,8 @@ lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
kfree(old_arr);
return iotag;
}
- }
+ } else
+ spin_unlock_irq(phba->host->host_lock);
lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
"%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
@@ -969,9 +970,11 @@ void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
* resources need to be recovered.
*/
if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
- printk(KERN_INFO "%s: IOCB cmd 0x%x processed."
- " Skipping completion\n", __FUNCTION__,
- irsp->ulpCommand);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "%d:0314 IOCB cmd 0x%x"
+ " processed. Skipping"
+ " completion", phba->brd_no,
+ irsp->ulpCommand);
break;
}
@@ -1104,7 +1107,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
if (unlikely(irsp->ulpStatus)) {
/* Rsp ring <ringno> error: IOCB */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "%d:0326 Rsp Ring %d error: IOCB Data: "
+ "%d:0336 Rsp Ring %d error: IOCB Data: "
"x%x x%x x%x x%x x%x x%x x%x x%x\n",
phba->brd_no, pring->ringno,
irsp->un.ulpWord[0], irsp->un.ulpWord[1],
@@ -1122,9 +1125,11 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
* resources need to be recovered.
*/
if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
- printk(KERN_INFO "%s: IOCB cmd 0x%x processed. "
- "Skipping completion\n", __FUNCTION__,
- irsp->ulpCommand);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "%d:0333 IOCB cmd 0x%x"
+ " processed. Skipping"
+ " completion\n", phba->brd_no,
+ irsp->ulpCommand);
break;
}
@@ -1155,7 +1160,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
} else {
/* Unknown IOCB command */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "%d:0321 Unknown IOCB command "
+ "%d:0334 Unknown IOCB command "
"Data: x%x, x%x x%x x%x x%x\n",
phba->brd_no, type, irsp->ulpCommand,
irsp->ulpStatus, irsp->ulpIoTag,
@@ -1238,7 +1243,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
- "%d:0312 Ring %d handler: portRspPut %d "
+ "%d:0303 Ring %d handler: portRspPut %d "
"is bigger then rsp ring %d\n",
phba->brd_no,
pring->ringno, portRspPut, portRspMax);
@@ -1383,7 +1388,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
- "%d:0321 Unknown IOCB command "
+ "%d:0335 Unknown IOCB command "
"Data: x%x x%x x%x x%x\n",
phba->brd_no,
irsp->ulpCommand,
@@ -1399,11 +1404,11 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
next_iocb,
&saveq->list,
list) {
+ list_del(&rspiocbp->list);
lpfc_sli_release_iocbq(phba,
rspiocbp);
}
}
-
lpfc_sli_release_iocbq(phba, saveq);
}
}
@@ -1711,15 +1716,13 @@ lpfc_sli_brdreset(struct lpfc_hba * phba)
phba->fc_myDID = 0;
phba->fc_prevDID = 0;
- psli->sli_flag = 0;
-
/* Turn off parity checking and serr during the physical reset */
pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
pci_write_config_word(phba->pcidev, PCI_COMMAND,
(cfg_value &
~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
- psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+ psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
/* Now toggle INITFF bit in the Host Control Register */
writel(HC_INITFF, phba->HCregaddr);
mdelay(1);
@@ -1760,7 +1763,7 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
/* Restart HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "%d:0328 Restart HBA Data: x%x x%x\n", phba->brd_no,
+ "%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
phba->hba_state, psli->sli_flag);
word0 = 0;
@@ -1792,6 +1795,9 @@ lpfc_sli_brdrestart(struct lpfc_hba * phba)
spin_unlock_irq(phba->host->host_lock);
+ memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
+ psli->stats_start = get_seconds();
+
if (skip_post)
mdelay(100);
else
@@ -1902,6 +1908,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba)
}
while (resetcount < 2 && !done) {
+ spin_lock_irq(phba->host->host_lock);
+ phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irq(phba->host->host_lock);
phba->hba_state = LPFC_STATE_UNKNOWN;
lpfc_sli_brdrestart(phba);
msleep(2500);
@@ -1909,6 +1918,9 @@ lpfc_sli_hba_setup(struct lpfc_hba * phba)
if (rc)
break;
+ spin_lock_irq(phba->host->host_lock);
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ spin_unlock_irq(phba->host->host_lock);
resetcount++;
/* Call pre CONFIG_PORT mailbox command initialization. A value of 0
@@ -2194,7 +2206,8 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
return (MBX_NOT_FINISHED);
}
/* timeout active mbox command */
- mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO);
+ mod_timer(&psli->mbox_tmo, (jiffies +
+ (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
}
/* Mailbox cmd <cmd> issue */
@@ -2254,7 +2267,6 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
break;
case MBX_POLL:
- i = 0;
psli->mbox_active = NULL;
if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
/* First read mbox status word */
@@ -2268,11 +2280,14 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
/* Read the HBA Host Attention Register */
ha_copy = readl(phba->HAregaddr);
+ i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
+ i *= 1000; /* Convert to ms */
+
/* Wait for command to complete */
while (((word0 & OWN_CHIP) == OWN_CHIP) ||
(!(ha_copy & HA_MBATT) &&
(phba->hba_state > LPFC_WARM_START))) {
- if (i++ >= 100) {
+ if (i-- <= 0) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(phba->host->host_lock,
drvr_flag);
@@ -2290,7 +2305,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
/* Can be in interrupt context, do not sleep */
/* (or might be called with interrupts disabled) */
- mdelay(i);
+ mdelay(1);
spin_lock_irqsave(phba->host->host_lock, drvr_flag);
@@ -3005,7 +3020,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
if (timeleft == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "%d:0329 IOCB wait timeout error - no "
+ "%d:0338 IOCB wait timeout error - no "
"wake response Data x%x\n",
phba->brd_no, timeout);
retval = IOCB_TIMEDOUT;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index d8ef0d2..e26de6809 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -172,6 +172,18 @@ struct lpfc_sli_stat {
uint32_t mbox_busy; /* Mailbox cmd busy */
};
+/* Structure to store link status values when port stats are reset */
+struct lpfc_lnk_stat {
+ uint32_t link_failure_count;
+ uint32_t loss_of_sync_count;
+ uint32_t loss_of_signal_count;
+ uint32_t prim_seq_protocol_err_count;
+ uint32_t invalid_tx_word_count;
+ uint32_t invalid_crc_count;
+ uint32_t error_frames;
+ uint32_t link_events;
+};
+
/* Structure used to hold SLI information */
struct lpfc_sli {
uint32_t num_rings;
@@ -201,6 +213,8 @@ struct lpfc_sli {
struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */
size_t iocbq_lookup_len; /* current lengs of the array */
uint16_t last_iotag; /* last allocated IOTAG */
+ unsigned long stats_start; /* in seconds */
+ struct lpfc_lnk_stat lnk_stat_offsets;
};
/* Given a pointer to the start of the ring, and the slot number of
@@ -211,3 +225,9 @@ struct lpfc_sli {
#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox
command */
+#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write
+					 * or erase cmds. This is especially
+					 * long because a single command can
+					 * spawn multiple flash erases.
+ */
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 10e89c6..c7091ea 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.1.7"
+#define LPFC_DRIVER_VERSION "8.1.9"
#define LPFC_DRIVER_NAME "lpfc"
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index 4675343..8cd0bd1 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -37,6 +37,12 @@
#define LSI_MAX_CHANNELS 16
#define LSI_MAX_LOGICAL_DRIVES_64LD (64+1)
+#define HBA_SIGNATURE_64_BIT 0x299
+#define PCI_CONF_AMISIG64 0xa4
+
+#define MEGA_SCSI_INQ_EVPD 1
+#define MEGA_INVALID_FIELD_IN_CDB 0x24
+
/**
* scb_t - scsi command control block
diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h
index bdaee14..b8aa342 100644
--- a/drivers/scsi/megaraid/megaraid_ioctl.h
+++ b/drivers/scsi/megaraid/megaraid_ioctl.h
@@ -132,6 +132,10 @@ typedef struct uioc {
/* Driver Data: */
void __user * user_data;
uint32_t user_data_len;
+
+ /* 64bit alignment */
+ uint32_t pad_for_64bit_align;
+
mraid_passthru_t __user *user_pthru;
mraid_passthru_t *pthru32;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 9271513..cd982c8 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_mbox.c
- * Version : v2.20.4.8 (Apr 11 2006)
+ * Version : v2.20.4.9 (Jul 16 2006)
*
* Authors:
* Atul Mukker <Atul.Mukker@lsil.com>
@@ -720,6 +720,7 @@ megaraid_init_mbox(adapter_t *adapter)
struct pci_dev *pdev;
mraid_device_t *raid_dev;
int i;
+ uint32_t magic64;
adapter->ito = MBOX_TIMEOUT;
@@ -863,12 +864,33 @@ megaraid_init_mbox(adapter_t *adapter)
// Set the DMA mask to 64-bit. All supported controllers as capable of
// DMA in this range
- if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK) != 0) {
-
- con_log(CL_ANN, (KERN_WARNING
- "megaraid: could not set DMA mask for 64-bit.\n"));
+ pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64);
+
+ if (((magic64 == HBA_SIGNATURE_64_BIT) &&
+ ((adapter->pdev->subsystem_device !=
+ PCI_SUBSYS_ID_MEGARAID_SATA_150_6) ||
+ (adapter->pdev->subsystem_device !=
+ PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+ adapter->pdev->device == PCI_DEVICE_ID_VERDE) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+ adapter->pdev->device == PCI_DEVICE_ID_DOBSON) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
+ adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
+ adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
+ (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
+ adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
+ if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: DMA mask for 64-bit failed\n"));
- goto out_free_sysfs_res;
+ if (pci_set_dma_mask (adapter->pdev, DMA_32BIT_MASK)) {
+ con_log(CL_ANN, (KERN_WARNING
+ "megaraid: 32-bit DMA mask failed\n"));
+ goto out_free_sysfs_res;
+ }
+ }
}
// setup tasklet for DPC
@@ -1622,6 +1644,14 @@ megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
rdev->last_disp |= (1L << SCP2CHANNEL(scp));
}
+ if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
+ scp->sense_buffer[0] = 0x70;
+ scp->sense_buffer[2] = ILLEGAL_REQUEST;
+ scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
+ scp->result = CHECK_CONDITION << 1;
+ return NULL;
+ }
+
/* Fall through */
case READ_CAPACITY:
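
[Editor's note] The megaraid_mbox.c hunk above fails INQUIRY commands that request an EVPD page by filling in fixed-format sense data (ILLEGAL REQUEST / INVALID FIELD IN CDB) and completing the command with CHECK CONDITION. A standalone sketch of building that sense buffer follows; the macro names are placeholders, though the byte values are the standard SCSI ones.

/*
 * Reject an EVPD INQUIRY locally with fixed-format sense data instead
 * of passing it to firmware that cannot handle it.
 */
#include <stdio.h>
#include <string.h>

#define SENSE_FIXED_FMT        0x70
#define SENSE_KEY_ILLEGAL_REQ  0x05
#define ASC_INVALID_FIELD_CDB  0x24
#define INQ_EVPD_BIT           0x01

/* Returns 1 and fills sense[] if the CDB asks for a vital product data page. */
static int reject_evpd_inquiry(const unsigned char *cdb,
			       unsigned char sense[18])
{
	if (!(cdb[1] & INQ_EVPD_BIT))
		return 0;

	memset(sense, 0, 18);
	sense[0]  = SENSE_FIXED_FMT;          /* response code           */
	sense[2]  = SENSE_KEY_ILLEGAL_REQ;    /* sense key               */
	sense[12] = ASC_INVALID_FIELD_CDB;    /* additional sense code   */
	return 1;                             /* caller reports CHECK CONDITION */
}

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0x01, 0x80, 0, 36, 0 }; /* INQUIRY, EVPD */
	unsigned char sense[18];

	printf("rejected: %d, key 0x%02x asc 0x%02x\n",
	       reject_evpd_inquiry(cdb, sense), sense[2], sense[12]);
	return 0;
}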
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index 868fb0e..2b5a328 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -21,8 +21,8 @@
#include "megaraid_ioctl.h"
-#define MEGARAID_VERSION "2.20.4.8"
-#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)"
+#define MEGARAID_VERSION "2.20.4.9"
+#define MEGARAID_EXT_VERSION "(Release Date: Sun Jul 16 12:27:22 EST 2006)"
/*
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index e8f534f..d85b9a8 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -10,7 +10,7 @@
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_mm.c
- * Version : v2.20.2.6 (Mar 7 2005)
+ * Version : v2.20.2.7 (Jul 16 2006)
*
* Common management module
*/
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h
index 3d9e67d..c8762b2 100644
--- a/drivers/scsi/megaraid/megaraid_mm.h
+++ b/drivers/scsi/megaraid/megaraid_mm.h
@@ -27,9 +27,9 @@
#include "megaraid_ioctl.h"
-#define LSI_COMMON_MOD_VERSION "2.20.2.6"
+#define LSI_COMMON_MOD_VERSION "2.20.2.7"
#define LSI_COMMON_MOD_EXT_VERSION \
- "(Release Date: Mon Mar 7 00:01:03 EST 2005)"
+ "(Release Date: Sun Jul 16 00:01:03 EST 2006)"
#define LSI_DBGLVL dbglevel
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 139ea0e..0930260 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -487,6 +487,7 @@ typedef struct {
#define MBA_IP_RCV_BUFFER_EMPTY 0x8026 /* IP receive buffer queue empty. */
#define MBA_IP_HDR_DATA_SPLIT 0x8027 /* IP header/data splitting feature */
/* used. */
+#define MBA_TRACE_NOTIFICATION 0x8028 /* Trace/Diagnostic notification. */
#define MBA_POINT_TO_POINT 0x8030 /* Point to point mode. */
#define MBA_CMPLT_1_16BIT 0x8031 /* Completion 1 16bit IOSB. */
#define MBA_CMPLT_2_16BIT 0x8032 /* Completion 2 16bit IOSB. */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 9758dba..8596491 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3063,6 +3063,7 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
int
qla2x00_abort_isp(scsi_qla_host_t *ha)
{
+ int rval;
unsigned long flags = 0;
uint16_t cnt;
srb_t *sp;
@@ -3119,6 +3120,16 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
ha->isp_abort_cnt = 0;
clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
+
+ if (ha->eft) {
+ rval = qla2x00_trace_control(ha, TC_ENABLE,
+ ha->eft_dma, EFT_NUM_BUFFERS);
+ if (rval) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to reinitialize EFT "
+ "(%d).\n", rval);
+ }
+ }
} else { /* failed the ISP abort */
ha->flags.online = 1;
if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 2b60a27..c5b3c61 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -471,6 +471,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
mrk24->nport_handle = cpu_to_le16(loop_id);
mrk24->lun[1] = LSB(lun);
mrk24->lun[2] = MSB(lun);
+ host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
} else {
SET_TARGET_ID(ha, mrk->target, loop_id);
mrk->lun = cpu_to_le16(lun);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 795bf15..de06131 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -587,6 +587,11 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
"%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
break;
+
+ case MBA_TRACE_NOTIFICATION:
+ DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
+ ha->host_no, mb[1], mb[2]));
+ break;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ec7ebb6..65cbe2f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -744,7 +744,6 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
- srb_t *sp;
int ret;
unsigned int id, lun;
unsigned long serial;
@@ -755,8 +754,7 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
lun = cmd->device->lun;
serial = cmd->serial_number;
- sp = (srb_t *) CMD_SP(cmd);
- if (!sp || !fcport)
+ if (!fcport)
return ret;
qla_printk(KERN_INFO, ha,
@@ -875,7 +873,6 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
- srb_t *sp;
int ret;
unsigned int id, lun;
unsigned long serial;
@@ -886,8 +883,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
lun = cmd->device->lun;
serial = cmd->serial_number;
- sp = (srb_t *) CMD_SP(cmd);
- if (!sp || !fcport)
+ if (!fcport)
return ret;
qla_printk(KERN_INFO, ha,
@@ -936,7 +932,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *ha = to_qla_host(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
- srb_t *sp;
int ret;
unsigned int id, lun;
unsigned long serial;
@@ -947,8 +942,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
lun = cmd->device->lun;
serial = cmd->serial_number;
- sp = (srb_t *) CMD_SP(cmd);
- if (!sp || !fcport)
+ if (!fcport)
return ret;
qla_printk(KERN_INFO, ha,
@@ -2244,9 +2238,6 @@ qla2x00_do_dpc(void *data)
next_loopid = 0;
list_for_each_entry(fcport, &ha->fcports, list) {
- if (fcport->port_type != FCT_TARGET)
- continue;
-
/*
* If the port is not ONLINE then try to login
* to it if we haven't run out of retries.
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index d2d6834..9712590 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.01.05-k3"
+#define QLA2XXX_VERSION "8.01.07-k1"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 1
-#define QLA_DRIVER_PATCH_VER 5
+#define QLA_DRIVER_PATCH_VER 7
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 6a5b731..a8ed5a2 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -460,7 +460,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
* Return value:
* SUCCESS or FAILED or NEEDS_RETRY
**/
-static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense)
+static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
+ int cmnd_size, int timeout, int copy_sense)
{
struct scsi_device *sdev = scmd->device;
struct Scsi_Host *shost = sdev->host;
@@ -490,6 +491,9 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout, int copy_sense
old_cmd_len = scmd->cmd_len;
old_use_sg = scmd->use_sg;
+ memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
+ memcpy(scmd->cmnd, cmnd, cmnd_size);
+
if (copy_sense) {
int gfp_mask = GFP_ATOMIC;
@@ -610,8 +614,7 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)
static unsigned char generic_sense[6] =
{REQUEST_SENSE, 0, 0, 0, 252, 0};
- memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense));
- return scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 1);
+ return scsi_send_eh_cmnd(scmd, generic_sense, 6, SENSE_TIMEOUT, 1);
}
/**
@@ -736,10 +739,7 @@ static int scsi_eh_tur(struct scsi_cmnd *scmd)
int retry_cnt = 1, rtn;
retry_tur:
- memcpy(scmd->cmnd, tur_command, sizeof(tur_command));
-
-
- rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT, 0);
+ rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
__FUNCTION__, scmd, rtn));
@@ -839,8 +839,8 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
if (scmd->device->allow_restart) {
int rtn;
- memcpy(scmd->cmnd, stu_command, sizeof(stu_command));
- rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT, 0);
+ rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
+ START_UNIT_TIMEOUT, 0);
if (rtn == SUCCESS)
return 0;
}
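
[Editor's note] The scsi_error.c hunks above change scsi_send_eh_cmnd() so callers pass the command bytes and length instead of copying into scmd->cmnd themselves; the helper zeroes the whole CDB before copying, so short recovery commands cannot leave stale trailing bytes behind. A trivial userspace sketch of that calling convention, with illustrative names, follows.

/*
 * Pass the command and its size explicitly; clear the full CDB first.
 */
#include <stdio.h>
#include <string.h>

#define MAX_CDB_LEN 16

static unsigned char current_cdb[MAX_CDB_LEN];

static void send_eh_cmnd(const unsigned char *cmnd, int cmnd_size)
{
	memset(current_cdb, 0, sizeof(current_cdb));
	memcpy(current_cdb, cmnd, cmnd_size);
	/* ... issue current_cdb to the device, wait, restore the old command ... */
}

int main(void)
{
	static const unsigned char tur[6] = { 0x00, 0, 0, 0, 0, 0 }; /* TEST UNIT READY */

	send_eh_cmnd(tur, sizeof(tur));
	printf("CDB[0]=0x%02x, issued %zu bytes\n", current_cdb[0], sizeof(tur));
	return 0;
}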
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 7b9e8fa..2ecd141 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -34,6 +34,7 @@
#define ISCSI_SESSION_ATTRS 11
#define ISCSI_CONN_ATTRS 11
#define ISCSI_HOST_ATTRS 0
+#define ISCSI_TRANSPORT_VERSION "1.1-646"
struct iscsi_internal {
int daemon_pid;
@@ -634,13 +635,13 @@ mempool_zone_get_skb(struct mempool_zone *zone)
}
static int
-iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb)
+iscsi_broadcast_skb(struct mempool_zone *zone, struct sk_buff *skb, gfp_t gfp)
{
unsigned long flags;
int rc;
skb_get(skb);
- rc = netlink_broadcast(nls, skb, 0, 1, GFP_KERNEL);
+ rc = netlink_broadcast(nls, skb, 0, 1, gfp);
if (rc < 0) {
mempool_free(skb, zone->pool);
printk(KERN_ERR "iscsi: can not broadcast skb (%d)\n", rc);
@@ -749,7 +750,7 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
ev->r.connerror.cid = conn->cid;
ev->r.connerror.sid = iscsi_conn_get_sid(conn);
- iscsi_broadcast_skb(conn->z_error, skb);
+ iscsi_broadcast_skb(conn->z_error, skb, GFP_ATOMIC);
dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n",
error);
@@ -895,7 +896,7 @@ int iscsi_if_destroy_session_done(struct iscsi_cls_conn *conn)
* this will occur if the daemon is not up, so we just warn
* the user and when the daemon is restarted it will handle it
*/
- rc = iscsi_broadcast_skb(conn->z_pdu, skb);
+ rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL);
if (rc < 0)
dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
"session destruction event. Check iscsi daemon\n");
@@ -958,7 +959,7 @@ int iscsi_if_create_session_done(struct iscsi_cls_conn *conn)
* this will occur if the daemon is not up, so we just warn
* the user and when the daemon is restarted it will handle it
*/
- rc = iscsi_broadcast_skb(conn->z_pdu, skb);
+ rc = iscsi_broadcast_skb(conn->z_pdu, skb, GFP_KERNEL);
if (rc < 0)
dev_printk(KERN_ERR, &conn->dev, "Cannot notify userspace of "
"session creation event. Check iscsi daemon\n");
@@ -1613,6 +1614,9 @@ static __init int iscsi_transport_init(void)
{
int err;
+ printk(KERN_INFO "Loading iSCSI transport class v%s.",
+ ISCSI_TRANSPORT_VERSION);
+
err = class_register(&iscsi_transport_class);
if (err)
return err;
@@ -1678,3 +1682,4 @@ MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu>, "
"Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI Transport Interface");
MODULE_LICENSE("GPL");
+MODULE_VERSION(ISCSI_TRANSPORT_VERSION);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 65eef338..34f9343e 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -18,8 +18,8 @@
*
*/
-static int sg_version_num = 30533; /* 2 digits for each component */
-#define SG_VERSION_STR "3.5.33"
+static int sg_version_num = 30534; /* 2 digits for each component */
+#define SG_VERSION_STR "3.5.34"
/*
* D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
@@ -60,7 +60,7 @@ static int sg_version_num = 30533; /* 2 digits for each component */
#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
-static char *sg_version_date = "20050908";
+static char *sg_version_date = "20060818";
static int sg_proc_init(void);
static void sg_proc_cleanup(void);
@@ -1164,7 +1164,7 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
len = vma->vm_end - sa;
len = (len < sg->length) ? len : sg->length;
if (offset < len) {
- page = sg->page;
+ page = virt_to_page(page_address(sg->page) + offset);
get_page(page); /* increment page count */
break;
}
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 8c50507..739d3ef 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -2084,7 +2084,7 @@ static struct pci_device_id sym2_id_table[] __devinitdata = {
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C860,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1510,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
+ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SCSI<<8, 0xffff00, 0UL },
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C896,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C895,
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index dc673e1..cfe20f7 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -886,6 +886,15 @@ static int sunsab_console_setup(struct console *con, char *options)
unsigned long flags;
unsigned int baud, quot;
+ /*
+ * The console framework calls us for each and every port
+ * registered. Defer the console setup until the requested
+ * port has been properly discovered. A bit of a hack,
+ * though...
+ */
+ if (up->port.type != PORT_SUNSAB)
+ return -1;
+
printk("Console: ttyS%d (SAB82532)\n",
(sunsab_reg.minor - 64) + con->index);
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 47bc3d5..d34f336 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -1146,6 +1146,9 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
unsigned long flags;
int baud, brg;
+ if (up->port.type != PORT_SUNZILOG)
+ return -1;
+
printk(KERN_INFO "Console: ttyS%d (SunZilog zs%d)\n",
(sunzilog_reg.minor - 64) + con->index, con->index);
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index a4062a6..9c46746 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -208,7 +208,7 @@ static int cypress_probe(struct usb_interface *interface,
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
- dev_err(&dev->udev->dev, "Out of memory!\n");
+ dev_err(&interface->dev, "Out of memory!\n");
goto error;
}
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index efbbc0a..65e4d04 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -79,7 +79,6 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(SAGEM_VENDOR_ID, SAGEM_PRODUCT_ID) },
{ USB_DEVICE(LEADTEK_VENDOR_ID, LEADTEK_9531_PRODUCT_ID) },
{ USB_DEVICE(SPEEDDRAGON_VENDOR_ID, SPEEDDRAGON_PRODUCT_ID) },
- { USB_DEVICE(OTI_VENDOR_ID, OTI_PRODUCT_ID) },
{ USB_DEVICE(DATAPILOT_U2_VENDOR_ID, DATAPILOT_U2_PRODUCT_ID) },
{ USB_DEVICE(BELKIN_VENDOR_ID, BELKIN_PRODUCT_ID) },
{ } /* Terminating entry */
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index a692ac6..55195e7 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -82,10 +82,6 @@
#define SPEEDDRAGON_VENDOR_ID 0x0e55
#define SPEEDDRAGON_PRODUCT_ID 0x110b
-/* Ours Technology Inc DKU-5 clone, chipset: Prolific Technology Inc */
-#define OTI_VENDOR_ID 0x0ea0
-#define OTI_PRODUCT_ID 0x6858
-
/* DATAPILOT Universal-2 Phone Cable */
#define DATAPILOT_U2_VENDOR_ID 0x0731
#define DATAPILOT_U2_PRODUCT_ID 0x2003
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index fd158e0..4a803d6 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1261,7 +1261,7 @@ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
* Tested on hardware version 1.10.
* Entry is needed only for the initializer function override.
*/
-UNUSUAL_DEV( 0x1019, 0x0c55, 0x0000, 0x9999,
+UNUSUAL_DEV( 0x1019, 0x0c55, 0x0110, 0x0110,
"Desknote",
"UCR-61S2B",
US_SC_DEVICE, US_PR_DEVICE, usb_stor_ucr61s2b_init,
diff --git a/drivers/video/imacfb.c b/drivers/video/imacfb.c
index b485bece..18ea4a5 100644
--- a/drivers/video/imacfb.c
+++ b/drivers/video/imacfb.c
@@ -71,10 +71,10 @@ static int set_system(struct dmi_system_id *id)
static struct dmi_system_id __initdata dmi_system_table[] = {
{ set_system, "iMac4,1", {
DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."),
- DMI_MATCH(DMI_BIOS_VERSION,"iMac4,1") }, (void*)M_I17},
+ DMI_MATCH(DMI_PRODUCT_NAME,"iMac4,1") }, (void*)M_I17},
{ set_system, "MacBookPro1,1", {
DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."),
- DMI_MATCH(DMI_BIOS_VERSION,"MacBookPro1,1") }, (void*)M_I17},
+ DMI_MATCH(DMI_PRODUCT_NAME,"MacBookPro1,1") }, (void*)M_I17},
{ set_system, "MacBook1,1", {
DMI_MATCH(DMI_BIOS_VENDOR,"Apple Computer, Inc."),
DMI_MATCH(DMI_PRODUCT_NAME,"MacBook1,1")}, (void *)M_MACBOOK},
diff --git a/drivers/video/matrox/g450_pll.c b/drivers/video/matrox/g450_pll.c
index 440272a..7c76e07 100644
--- a/drivers/video/matrox/g450_pll.c
+++ b/drivers/video/matrox/g450_pll.c
@@ -331,7 +331,15 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll,
tmp |= M1064_XPIXCLKCTRL_PLL_UP;
}
matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp);
+#ifdef __powerpc__
+ /* This is necessary to avoid jitter on PowerPC
+ * (OpenFirmware) systems, but apparently
+ * introduces jitter, at least on a x86-64
+ * using DVI.
+ * A simple workaround is disable for non-PPC.
+ */
matroxfb_DAC_out(PMINFO M1064_XDVICLKCTRL, 0);
+#endif /* __powerpc__ */
matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl);
matroxfb_DAC_unlock_irqrestore(flags);