author     James Bottomley <James.Bottomley@SteelEye.com>   2006-08-29 09:22:51 -0500
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-08-29 09:52:29 -0500
commit     2908d778ab3e244900c310974e1fc1c69066e450 (patch)
tree       440d56e98414cd2a8ca711dcd6424df1982d474e
parent     f4ad7b5807385ad1fed0347d966e51a797cd1013 (diff)
[SCSI] aic94xx: new driver
This is the end point of the separate aic94xx driver development, based
on the original driver and transport class from Luben Tuikov
<ltuikov@yahoo.com>.
The log of the separate development is:
Alexis Bruemmer:
o aic94xx: fix hotplug/unplug for expanderless systems
o aic94xx: disable split completion timer/setting by default
o aic94xx: wide port off expander support
o aic94xx: remove various inline functions
o aic94xx: use bitops
o aic94xx: remove queue comment
o aic94xx: remove sas_common.c
o aic94xx: sas remove depot's
o aic94xx: use available list_for_each_entry_safe_reverse()
o aic94xx: sas header file merge
James Bottomley:
o aic94xx: fix TF_TMF_NO_CTX processing
o aic94xx: convert to request_firmware interface
o aic94xx: fix hotplug/unplug
o aic94xx: add link error counts to the expander phys
o aic94xx: add transport class phy reset capability
o aic94xx: remove local_attached flag
o Remove README
o Fixup Makefile variable for libsas rename
o Rename sas->libsas
o aic94xx: correct return code for sas_discover_event
o aic94xx: use parent backlink port
o aic94xx: remove channel abstraction
o aic94xx: fix routing algorithms
o aic94xx: add backlink port
o aic94xx: fix cascaded expander properties
o aic94xx: fix sleep under lock
o aic94xx: fix panic on module removal in complex topology
o aic94xx: make use of the new sas_port
o rename sas_port to asd_sas_port
o Fix for eh_strategy_handler move
o aic94xx: move entirely over to correct transport class formulation
o remove last vestiges of sas_rphy_alloc()
o update for eh_timed_out move
o Preliminary expander support for aic94xx
o sas: remove event thread
o minor warning cleanups
o remove last vestiges of id mapping arrays
o Further updates
o Convert aic94xx over entirely to the transport class end device and
o update aic94xx/sas to use the new sas transport class end device
o [PATCH] aic94xx: attaching to the sas transport class
o Add missing completion removal from prior patch
o [PATCH] aic94xx: attaching to the sas transport class
o Build fixes from akpm
Jeff Garzik:
o [scsi aic94xx] Remove ->owner from PCI info table
Luben Tuikov:
o initial aic94xx driver
Mike Anderson:
o aic94xx: fix panic on module insertion
o aic94xx: stub out SATA_DEV case
o aic94xx: compile warning cleanups
o aic94xx: sas_alloc_task
o aic94xx: ref count update
o aic94xx nexus loss time value
o [PATCH] aic94xx: driver assertion in non-x86 BIOS env
Randy Dunlap:
o libsas: externs not needed
Robert Tarte:
o aic94xx: sequence patch - fixes SATA support
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
37 files changed, 18866 insertions, 1 deletions
diff --git a/Documentation/scsi/libsas.txt b/Documentation/scsi/libsas.txt new file mode 100644 index 0000000..9e2078b --- /dev/null +++ b/Documentation/scsi/libsas.txt @@ -0,0 +1,484 @@ +SAS Layer +--------- + +The SAS Layer is a management infrastructure which manages +SAS LLDDs. It sits between SCSI Core and SAS LLDDs. The +layout is as follows: while SCSI Core is concerned with +SAM/SPC issues, and a SAS LLDD+sequencer is concerned with +phy/OOB/link management, the SAS layer is concerned with: + + * SAS Phy/Port/HA event management (LLDD generates, + SAS Layer processes), + * SAS Port management (creation/destruction), + * SAS Domain discovery and revalidation, + * SAS Domain device management, + * SCSI Host registration/unregistration, + * Device registration with SCSI Core (SAS) or libata + (SATA), and + * Expander management and exporting expander control + to user space. + +A SAS LLDD is a PCI device driver. It is concerned with +phy/OOB management, and vendor specific tasks and generates +events to the SAS layer. + +The SAS Layer does most SAS tasks as outlined in the SAS 1.1 +spec. + +The sas_ha_struct describes the SAS LLDD to the SAS layer. +Most of it is used by the SAS Layer but a few fields need to +be initialized by the LLDDs. + +After initializing your hardware, from the probe() function +you call sas_register_ha(). It will register your LLDD with +the SCSI subsystem, creating a SCSI host and it will +register your SAS driver with the sysfs SAS tree it creates. +It will then return. Then you enable your phys to actually +start OOB (at which point your driver will start calling the +notify_* event callbacks). + +Structure descriptions: + +struct sas_phy -------------------- +Normally this is statically embedded to your driver's +phy structure: + struct my_phy { + blah; + struct sas_phy sas_phy; + bleh; + }; +And then all the phys are an array of my_phy in your HA +struct (shown below). + +Then as you go along and initialize your phys you also +initialize the sas_phy struct, along with your own +phy structure. + +In general, the phys are managed by the LLDD and the ports +are managed by the SAS layer. So the phys are initialized +and updated by the LLDD and the ports are initialized and +updated by the SAS layer. + +There is a scheme where the LLDD can RW certain fields, +and the SAS layer can only read such ones, and vice versa. +The idea is to avoid unnecessary locking. + +enabled -- must be set (0/1) +id -- must be set [0,MAX_PHYS) +class, proto, type, role, oob_mode, linkrate -- must be set +oob_mode -- you set this when OOB has finished and then notify +the SAS Layer. + +sas_addr -- this normally points to an array holding the sas +address of the phy, possibly somewhere in your my_phy +struct. + +attached_sas_addr -- set this when you (LLDD) receive an +IDENTIFY frame or a FIS frame, _before_ notifying the SAS +layer. The idea is that sometimes the LLDD may want to fake +or provide a different SAS address on that phy/port and this +allows it to do this. At best you should copy the sas +address from the IDENTIFY frame or maybe generate a SAS +address for SATA directly attached devices. The Discover +process may later change this. + +frame_rcvd -- this is where you copy the IDENTIFY/FIS frame +when you get it; you lock, copy, set frame_rcvd_size and +unlock the lock, and then call the event. It is a pointer +since there's no way to know your hw frame size _exactly_, +so you define the actual array in your phy struct and let +this pointer point to it. 
You copy the frame from your +DMAable memory to that area holding the lock. + +sas_prim -- this is where primitives go when they're +received. See sas.h. Grab the lock, set the primitive, +release the lock, notify. + +port -- this points to the sas_port if the phy belongs +to a port -- the LLDD only reads this. It points to the +sas_port this phy is part of. Set by the SAS Layer. + +ha -- may be set; the SAS layer sets it anyway. + +lldd_phy -- you should set this to point to your phy so you +can find your way around faster when the SAS layer calls one +of your callbacks and passes you a phy. If the sas_phy is +embedded you can also use container_of -- whatever you +prefer. + + +struct sas_port -------------------- +The LLDD doesn't set any fields of this struct -- it only +reads them. They should be self explanatory. + +phy_mask is 32 bit, this should be enough for now, as I +haven't heard of a HA having more than 8 phys. + +lldd_port -- I haven't found use for that -- maybe other +LLDD who wish to have internal port representation can make +use of this. + + +struct sas_ha_struct -------------------- +It normally is statically declared in your own LLDD +structure describing your adapter: +struct my_sas_ha { + blah; + struct sas_ha_struct sas_ha; + struct my_phy phys[MAX_PHYS]; + struct sas_port sas_ports[MAX_PHYS]; /* (1) */ + bleh; +}; + +(1) If your LLDD doesn't have its own port representation. + +What needs to be initialized (sample function given below). + +pcidev +sas_addr -- since the SAS layer doesn't want to mess with + memory allocation, etc, this points to statically + allocated array somewhere (say in your host adapter + structure) and holds the SAS address of the host + adapter as given by you or the manufacturer, etc. +sas_port +sas_phy -- an array of pointers to structures. (see + note above on sas_addr). + These must be set. See more notes below. +num_phys -- the number of phys present in the sas_phy array, + and the number of ports present in the sas_port + array. There can be a maximum num_phys ports (one per + port) so we drop the num_ports, and only use + num_phys. + +The event interface: + + /* LLDD calls these to notify the class of an event. */ + void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event); + void (*notify_port_event)(struct sas_phy *, enum port_event); + void (*notify_phy_event)(struct sas_phy *, enum phy_event); + +When sas_register_ha() returns, those are set and can be +called by the LLDD to notify the SAS layer of such events +the SAS layer. + +The port notification: + + /* The class calls these to notify the LLDD of an event. */ + void (*lldd_port_formed)(struct sas_phy *); + void (*lldd_port_deformed)(struct sas_phy *); + +If the LLDD wants notification when a port has been formed +or deformed it sets those to a function satisfying the type. + +A SAS LLDD should also implement at least one of the Task +Management Functions (TMFs) described in SAM: + + /* Task Management Functions. Must be called from process context. */ + int (*lldd_abort_task)(struct sas_task *); + int (*lldd_abort_task_set)(struct domain_device *, u8 *lun); + int (*lldd_clear_aca)(struct domain_device *, u8 *lun); + int (*lldd_clear_task_set)(struct domain_device *, u8 *lun); + int (*lldd_I_T_nexus_reset)(struct domain_device *); + int (*lldd_lu_reset)(struct domain_device *, u8 *lun); + int (*lldd_query_task)(struct sas_task *); + +For more information please read SAM from T10.org. 
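As an illustration of how an LLDD uses the event interface above, here
is a minimal, purely hypothetical sketch of a driver reporting a
received IDENTIFY frame from its tasklet. The my_phy wrapper, the hw_*
fields and the frame_rcvd_lock name are invented for the example; the
frame_rcvd, frame_rcvd_size and attached_sas_addr handling follows the
phy description earlier in this document:

static void my_phy_bytes_dmaed(struct my_phy *phy)
{
	struct sas_phy *sas_phy = &phy->sas_phy;
	unsigned long flags;

	/* Copy the IDENTIFY frame out of our DMAable buffer while
	 * holding the phy's frame lock, and record its size. */
	spin_lock_irqsave(&sas_phy->frame_rcvd_lock, flags);
	memcpy(sas_phy->frame_rcvd, phy->hw_id_frame, phy->hw_id_frame_len);
	sas_phy->frame_rcvd_size = phy->hw_id_frame_len;
	/* attached_sas_addr must be set _before_ notifying the layer. */
	memcpy(sas_phy->attached_sas_addr, phy->hw_attached_addr,
	       SAS_ADDR_SIZE);
	spin_unlock_irqrestore(&sas_phy->frame_rcvd_lock, flags);

	/* Only now hand the event to the SAS layer. */
	sas_phy->ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

The point is the ordering: the frame and the attached SAS address are
filled in first, under the lock, and the SAS layer is told about the
event only afterwards.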
+ +Port and Adapter management: + + /* Port and Adapter management */ + int (*lldd_clear_nexus_port)(struct sas_port *); + int (*lldd_clear_nexus_ha)(struct sas_ha_struct *); + +A SAS LLDD should implement at least one of those. + +Phy management: + + /* Phy management */ + int (*lldd_control_phy)(struct sas_phy *, enum phy_func); + +lldd_ha -- set this to point to your HA struct. You can also +use container_of if you embedded it as shown above. + +A sample initialization and registration function +can look like this (called last thing from probe()) +*but* before you enable the phys to do OOB: + +static int register_sas_ha(struct my_sas_ha *my_ha) +{ + int i; + static struct sas_phy *sas_phys[MAX_PHYS]; + static struct sas_port *sas_ports[MAX_PHYS]; + + my_ha->sas_ha.sas_addr = &my_ha->sas_addr[0]; + + for (i = 0; i < MAX_PHYS; i++) { + sas_phys[i] = &my_ha->phys[i].sas_phy; + sas_ports[i] = &my_ha->sas_ports[i]; + } + + my_ha->sas_ha.sas_phy = sas_phys; + my_ha->sas_ha.sas_port = sas_ports; + my_ha->sas_ha.num_phys = MAX_PHYS; + + my_ha->sas_ha.lldd_port_formed = my_port_formed; + + my_ha->sas_ha.lldd_dev_found = my_dev_found; + my_ha->sas_ha.lldd_dev_gone = my_dev_gone; + + my_ha->sas_ha.lldd_max_execute_num = lldd_max_execute_num; (1) + + my_ha->sas_ha.lldd_queue_size = ha_can_queue; + my_ha->sas_ha.lldd_execute_task = my_execute_task; + + my_ha->sas_ha.lldd_abort_task = my_abort_task; + my_ha->sas_ha.lldd_abort_task_set = my_abort_task_set; + my_ha->sas_ha.lldd_clear_aca = my_clear_aca; + my_ha->sas_ha.lldd_clear_task_set = my_clear_task_set; + my_ha->sas_ha.lldd_I_T_nexus_reset= NULL; (2) + my_ha->sas_ha.lldd_lu_reset = my_lu_reset; + my_ha->sas_ha.lldd_query_task = my_query_task; + + my_ha->sas_ha.lldd_clear_nexus_port = my_clear_nexus_port; + my_ha->sas_ha.lldd_clear_nexus_ha = my_clear_nexus_ha; + + my_ha->sas_ha.lldd_control_phy = my_control_phy; + + return sas_register_ha(&my_ha->sas_ha); +} + +(1) This is normally a LLDD parameter, something of the +lines of a task collector. What it tells the SAS Layer is +whether the SAS layer should run in Direct Mode (default: +value 0 or 1) or Task Collector Mode (value greater than 1). + +In Direct Mode, the SAS Layer calls Execute Task as soon as +it has a command to send to the SDS, _and_ this is a single +command, i.e. not linked. + +Some hardware (e.g. aic94xx) has the capability to DMA more +than one task at a time (interrupt) from host memory. Task +Collector Mode is an optional feature for HAs which support +this in their hardware. (Again, it is completely optional +even if your hardware supports it.) + +In Task Collector Mode, the SAS Layer would do _natural_ +coalescing of tasks and at the appropriate moment it would +call your driver to DMA more than one task in a single HA +interrupt. DMBS may want to use this by insmod/modprobe +setting the lldd_max_execute_num to something greater than +1. + +(2) SAS 1.1 does not define I_T Nexus Reset TMF. + +Events +------ + +Events are _the only way_ a SAS LLDD notifies the SAS layer +of anything. There is no other method or way a LLDD to tell +the SAS layer of anything happening internally or in the SAS +domain. + +Phy events: + PHYE_LOSS_OF_SIGNAL, (C) + PHYE_OOB_DONE, + PHYE_OOB_ERROR, (C) + PHYE_SPINUP_HOLD. + +Port events, passed on a _phy_: + PORTE_BYTES_DMAED, (M) + PORTE_BROADCAST_RCVD, (E) + PORTE_LINK_RESET_ERR, (C) + PORTE_TIMER_EVENT, (C) + PORTE_HARD_RESET. 
+ +Host Adapter event: + HAE_RESET + +A SAS LLDD should be able to generate + - at least one event from group C (choice), + - events marked M (mandatory) are mandatory (only one), + - events marked E (expander) if it wants the SAS layer + to handle domain revalidation (only one such). + - Unmarked events are optional. + +Meaning: + +HAE_RESET -- when your HA got internal error and was reset. + +PORTE_BYTES_DMAED -- on receiving an IDENTIFY/FIS frame +PORTE_BROADCAST_RCVD -- on receiving a primitive +PORTE_LINK_RESET_ERR -- timer expired, loss of signal, loss +of DWS, etc. (*) +PORTE_TIMER_EVENT -- DWS reset timeout timer expired (*) +PORTE_HARD_RESET -- Hard Reset primitive received. + +PHYE_LOSS_OF_SIGNAL -- the device is gone (*) +PHYE_OOB_DONE -- OOB went fine and oob_mode is valid +PHYE_OOB_ERROR -- Error while doing OOB, the device probably +got disconnected. (*) +PHYE_SPINUP_HOLD -- SATA is present, COMWAKE not sent. + +(*) should set/clear the appropriate fields in the phy, + or alternatively call the inlined sas_phy_disconnected() + which is just a helper, from their tasklet. + +The Execute Command SCSI RPC: + + int (*lldd_execute_task)(struct sas_task *, int num, + unsigned long gfp_flags); + +Used to queue a task to the SAS LLDD. @task is the tasks to +be executed. @num should be the number of tasks being +queued at this function call (they are linked listed via +task::list), @gfp_mask should be the gfp_mask defining the +context of the caller. + +This function should implement the Execute Command SCSI RPC, +or if you're sending a SCSI Task as linked commands, you +should also use this function. + +That is, when lldd_execute_task() is called, the command(s) +go out on the transport *immediately*. There is *no* +queuing of any sort and at any level in a SAS LLDD. + +The use of task::list is two-fold, one for linked commands, +the other discussed below. + +It is possible to queue up more than one task at a time, by +initializing the list element of struct sas_task, and +passing the number of tasks enlisted in this manner in num. + +Returns: -SAS_QUEUE_FULL, -ENOMEM, nothing was queued; + 0, the task(s) were queued. + +If you want to pass num > 1, then either +A) you're the only caller of this function and keep track + of what you've queued to the LLDD, or +B) you know what you're doing and have a strategy of + retrying. + +As opposed to queuing one task at a time (function call), +batch queuing of tasks, by having num > 1, greatly +simplifies LLDD code, sequencer code, and _hardware design_, +and has some performance advantages in certain situations +(DBMS). + +The LLDD advertises if it can take more than one command at +a time at lldd_execute_task(), by setting the +lldd_max_execute_num parameter (controlled by "collector" +module parameter in aic94xx SAS LLDD). + +You should leave this to the default 1, unless you know what +you're doing. + +This is a function of the LLDD, to which the SAS layer can +cater to. + +int lldd_queue_size + The host adapter's queue size. This is the maximum +number of commands the lldd can have pending to domain +devices on behalf of all upper layers submitting through +lldd_execute_task(). + +You really want to set this to something (much) larger than +1. + +This _really_ has absolutely nothing to do with queuing. +There is no queuing in SAS LLDDs. 
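As a sketch of the lldd_execute_task() calling convention described
above, a driver's entry point might look roughly like the following.
my_build_and_post() is not a real function, it stands in for whatever
the hardware needs; a production driver must also unwind any tasks it
has already accepted if a later task in a num > 1 batch fails, so that
the "nothing was queued" return semantics above hold:

static int my_execute_task(struct sas_task *task, int num,
			   unsigned long gfp_flags)
{
	struct sas_task *t = task;
	int i, res;

	for (i = 0; i < num; i++) {
		/* One hardware request per task; for num > 1 the
		 * tasks are linked through task->list. */
		res = my_build_and_post(t, gfp_flags);
		if (res)
			return res;	/* e.g. -SAS_QUEUE_FULL, -ENOMEM */
		t = list_entry(t->list.next, struct sas_task, list);
	}
	return 0;
}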
+ +struct sas_task { + dev -- the device this task is destined to + list -- must be initialized (INIT_LIST_HEAD) + task_proto -- _one_ of enum sas_proto + scatter -- pointer to scatter gather list array + num_scatter -- number of elements in scatter + total_xfer_len -- total number of bytes expected to be transfered + data_dir -- PCI_DMA_... + task_done -- callback when the task has finished execution +}; + +When an external entity, entity other than the LLDD or the +SAS Layer, wants to work with a struct domain_device, it +_must_ call kobject_get() when getting a handle on the +device and kobject_put() when it is done with the device. + +This does two things: + A) implements proper kfree() for the device; + B) increments/decrements the kref for all players: + domain_device + all domain_device's ... (if past an expander) + port + host adapter + pci device + and up the ladder, etc. + +DISCOVERY +--------- + +The sysfs tree has the following purposes: + a) It shows you the physical layout of the SAS domain at + the current time, i.e. how the domain looks in the + physical world right now. + b) Shows some device parameters _at_discovery_time_. + +This is a link to the tree(1) program, very useful in +viewing the SAS domain: +ftp://mama.indstate.edu/linux/tree/ +I expect user space applications to actually create a +graphical interface of this. + +That is, the sysfs domain tree doesn't show or keep state if +you e.g., change the meaning of the READY LED MEANING +setting, but it does show you the current connection status +of the domain device. + +Keeping internal device state changes is responsibility of +upper layers (Command set drivers) and user space. + +When a device or devices are unplugged from the domain, this +is reflected in the sysfs tree immediately, and the device(s) +removed from the system. + +The structure domain_device describes any device in the SAS +domain. It is completely managed by the SAS layer. A task +points to a domain device, this is how the SAS LLDD knows +where to send the task(s) to. A SAS LLDD only reads the +contents of the domain_device structure, but it never creates +or destroys one. + +Expander management from User Space +----------------------------------- + +In each expander directory in sysfs, there is a file called +"smp_portal". It is a binary sysfs attribute file, which +implements an SMP portal (Note: this is *NOT* an SMP port), +to which user space applications can send SMP requests and +receive SMP responses. + +Functionality is deceptively simple: + +1. Build the SMP frame you want to send. The format and layout + is described in the SAS spec. Leave the CRC field equal 0. +open(2) +2. Open the expander's SMP portal sysfs file in RW mode. +write(2) +3. Write the frame you built in 1. +read(2) +4. Read the amount of data you expect to receive for the frame you built. + If you receive different amount of data you expected to receive, + then there was some kind of error. +close(2) +All this process is shown in detail in the function do_smp_func() +and its callers, in the file "expander_conf.c". + +The kernel functionality is implemented in the file +"sas_expander.c". + +The program "expander_conf.c" implements this. It takes one +argument, the sysfs file name of the SMP portal to the +expander, and gives expander information, including routing +tables. + +The SMP portal gives you complete control of the expander, +so please be careful. 
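For illustration only, a stripped-down user-space program that follows
the four steps above could look like this. It is not expander_conf.c;
the request bytes encode SMP REPORT GENERAL (frame type 0x40, function
0x00) with the CRC left zero, and the 32-byte read is sized for that
function's expected response -- consult the SAS spec for other
functions:

/* smp_report_general.c -- minimal sketch, no real error handling.
 * Usage: smp_report_general <path to the expander's smp_portal file>
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	unsigned char req[8] = { 0x40, 0x00 };	/* rest, incl. CRC, is 0 */
	unsigned char resp[32];
	ssize_t n;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDWR);			/* 2. open the portal */
	if (fd < 0)
		return 1;
	if (write(fd, req, sizeof(req)) != (ssize_t)sizeof(req))
		return 1;				/* 3. send the frame */
	n = read(fd, resp, sizeof(resp));		/* 4. read the response */
	if (n < 3)
		return 1;
	printf("read %zd bytes, function result 0x%02x\n", n, resp[2]);
	close(fd);
	return 0;
}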
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index d61662c..7de5fdf 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -209,7 +209,7 @@ config SCSI_LOGGING there should be no noticeable performance impact as long as you have logging turned off. -menu "SCSI Transport Attributes" +menu "SCSI Transports" depends on SCSI config SCSI_SPI_ATTRS @@ -242,6 +242,8 @@ config SCSI_SAS_ATTRS If you wish to export transport-specific information about each attached SAS device to sysfs, say Y. +source "drivers/scsi/libsas/Kconfig" + endmenu menu "SCSI low-level drivers" @@ -431,6 +433,7 @@ config SCSI_AIC7XXX_OLD module will be called aic7xxx_old. source "drivers/scsi/aic7xxx/Kconfig.aic79xx" +source "drivers/scsi/aic94xx/Kconfig" # All the I2O code and drivers do not seem to be 64bit safe. config SCSI_DPT_I2O diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index b2de9bf..83da70d 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_SCSI_SPI_ATTRS) += scsi_transport_spi.o obj-$(CONFIG_SCSI_FC_ATTRS) += scsi_transport_fc.o obj-$(CONFIG_SCSI_ISCSI_ATTRS) += scsi_transport_iscsi.o obj-$(CONFIG_SCSI_SAS_ATTRS) += scsi_transport_sas.o +obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/ obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o @@ -68,6 +69,7 @@ obj-$(CONFIG_SCSI_AIC7XXX) += aic7xxx/ obj-$(CONFIG_SCSI_AIC79XX) += aic7xxx/ obj-$(CONFIG_SCSI_AACRAID) += aacraid/ obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o +obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/ obj-$(CONFIG_SCSI_IPS) += ips.o obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o diff --git a/drivers/scsi/aic94xx/Kconfig b/drivers/scsi/aic94xx/Kconfig new file mode 100644 index 0000000..0ed391d --- /dev/null +++ b/drivers/scsi/aic94xx/Kconfig @@ -0,0 +1,41 @@ +# +# Kernel configuration file for aic94xx SAS/SATA driver. +# +# Copyright (c) 2005 Adaptec, Inc. All rights reserved. +# Copyright (c) 2005 Luben Tuikov <luben_tuikov@adaptec.com> +# +# This file is licensed under GPLv2. +# +# This file is part of the aic94xx driver. +# +# The aic94xx driver is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; version 2 of the +# License. +# +# The aic94xx driver is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Aic94xx Driver; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# +# + +config SCSI_AIC94XX + tristate "Adaptec AIC94xx SAS/SATA support" + depends on PCI + select SCSI_SAS_LIBSAS + help + This driver supports Adaptec's SAS/SATA 3Gb/s 64 bit PCI-X + AIC94xx chip based host adapters. + +config AIC94XX_DEBUG + bool "Compile in debug mode" + default y + depends on SCSI_AIC94XX + help + Compiles the aic94xx driver in debug mode. In debug mode, + the driver prints some messages to the console. diff --git a/drivers/scsi/aic94xx/Makefile b/drivers/scsi/aic94xx/Makefile new file mode 100644 index 0000000..e6b7012 --- /dev/null +++ b/drivers/scsi/aic94xx/Makefile @@ -0,0 +1,39 @@ +# +# Makefile for Adaptec aic94xx SAS/SATA driver. +# +# Copyright (C) 2005 Adaptec, Inc. All rights reserved. 
+# Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> +# +# This file is licensed under GPLv2. +# +# This file is part of the the aic94xx driver. +# +# The aic94xx driver is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; version 2 of the +# License. +# +# The aic94xx driver is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with the aic94xx driver; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +ifeq ($(CONFIG_AIC94XX_DEBUG),y) + EXTRA_CFLAGS += -DASD_DEBUG -DASD_ENTER_EXIT +endif + +obj-$(CONFIG_SCSI_AIC94XX) += aic94xx.o +aic94xx-y += aic94xx_init.o \ + aic94xx_hwi.o \ + aic94xx_reg.o \ + aic94xx_sds.o \ + aic94xx_seq.o \ + aic94xx_dump.o \ + aic94xx_scb.o \ + aic94xx_dev.o \ + aic94xx_tmf.o \ + aic94xx_task.o diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h new file mode 100644 index 0000000..cb7caf1 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx.h @@ -0,0 +1,114 @@ +/* + * Aic94xx SAS/SATA driver header file. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * $Id: //depot/aic94xx/aic94xx.h#31 $ + */ + +#ifndef _AIC94XX_H_ +#define _AIC94XX_H_ + +#include <linux/slab.h> +#include <linux/ctype.h> +#include <scsi/libsas.h> + +#define ASD_DRIVER_NAME "aic94xx" +#define ASD_DRIVER_DESCRIPTION "Adaptec aic94xx SAS/SATA driver" + +#define asd_printk(fmt, ...) printk(KERN_NOTICE ASD_DRIVER_NAME ": " fmt, ## __VA_ARGS__) + +#ifdef ASD_ENTER_EXIT +#define ENTER printk(KERN_NOTICE "%s: ENTER %s\n", ASD_DRIVER_NAME, \ + __FUNCTION__) +#define EXIT printk(KERN_NOTICE "%s: --EXIT %s\n", ASD_DRIVER_NAME, \ + __FUNCTION__) +#else +#define ENTER +#define EXIT +#endif + +#ifdef ASD_DEBUG +#define ASD_DPRINTK asd_printk +#else +#define ASD_DPRINTK(fmt, ...) 
+#endif + +/* 2*ITNL timeout + 1 second */ +#define AIC94XX_SCB_TIMEOUT (5*HZ) + +extern kmem_cache_t *asd_dma_token_cache; +extern kmem_cache_t *asd_ascb_cache; +extern char sas_addr_str[2*SAS_ADDR_SIZE + 1]; + +static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr) +{ + int i; + for (i = 0; i < SAS_ADDR_SIZE; i++, p += 2) + snprintf(p, 3, "%02X", sas_addr[i]); + *p = '\0'; +} + +static inline void asd_destringify_sas_addr(u8 *sas_addr, const char *p) +{ + int i; + for (i = 0; i < SAS_ADDR_SIZE; i++) { + u8 h, l; + if (!*p) + break; + h = isdigit(*p) ? *p-'0' : *p-'A'+10; + p++; + l = isdigit(*p) ? *p-'0' : *p-'A'+10; + p++; + sas_addr[i] = (h<<4) | l; + } +} + +struct asd_ha_struct; +struct asd_ascb; + +int asd_read_ocm(struct asd_ha_struct *asd_ha); +int asd_read_flash(struct asd_ha_struct *asd_ha); + +int asd_dev_found(struct domain_device *dev); +void asd_dev_gone(struct domain_device *dev); + +void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id); + +int asd_execute_task(struct sas_task *, int num, unsigned long gfp_flags); + +/* ---------- TMFs ---------- */ +int asd_abort_task(struct sas_task *); +int asd_abort_task_set(struct domain_device *, u8 *lun); +int asd_clear_aca(struct domain_device *, u8 *lun); +int asd_clear_task_set(struct domain_device *, u8 *lun); +int asd_lu_reset(struct domain_device *, u8 *lun); +int asd_query_task(struct sas_task *); + +/* ---------- Adapter and Port management ---------- */ +int asd_clear_nexus_port(struct asd_sas_port *port); +int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha); + +/* ---------- Phy Management ---------- */ +int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func); + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c new file mode 100644 index 0000000..6f8901b --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_dev.c @@ -0,0 +1,353 @@ +/* + * Aic94xx SAS/SATA DDB management + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * $Id: //depot/aic94xx/aic94xx_dev.c#21 $ + */ + +#include "aic94xx.h" +#include "aic94xx_hwi.h" +#include "aic94xx_reg.h" +#include "aic94xx_sas.h" + +#define FIND_FREE_DDB(_ha) find_first_zero_bit((_ha)->hw_prof.ddb_bitmap, \ + (_ha)->hw_prof.max_ddbs) +#define SET_DDB(_ddb, _ha) set_bit(_ddb, (_ha)->hw_prof.ddb_bitmap) +#define CLEAR_DDB(_ddb, _ha) clear_bit(_ddb, (_ha)->hw_prof.ddb_bitmap) + +static inline int asd_get_ddb(struct asd_ha_struct *asd_ha) +{ + unsigned long flags; + int ddb, i; + + spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); + ddb = FIND_FREE_DDB(asd_ha); + if (ddb >= asd_ha->hw_prof.max_ddbs) { + ddb = -ENOMEM; + spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); + goto out; + } + SET_DDB(ddb, asd_ha); + spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); + + for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i+= 4) + asd_ddbsite_write_dword(asd_ha, ddb, i, 0); +out: + return ddb; +} + +#define INIT_CONN_TAG offsetof(struct asd_ddb_ssp_smp_target_port, init_conn_tag) +#define DEST_SAS_ADDR offsetof(struct asd_ddb_ssp_smp_target_port, dest_sas_addr) +#define SEND_QUEUE_HEAD offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_head) +#define DDB_TYPE offsetof(struct asd_ddb_ssp_smp_target_port, ddb_type) +#define CONN_MASK offsetof(struct asd_ddb_ssp_smp_target_port, conn_mask) +#define DDB_TARG_FLAGS offsetof(struct asd_ddb_ssp_smp_target_port, flags) +#define DDB_TARG_FLAGS2 offsetof(struct asd_ddb_stp_sata_target_port, flags2) +#define EXEC_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, exec_queue_tail) +#define SEND_QUEUE_TAIL offsetof(struct asd_ddb_ssp_smp_target_port, send_queue_tail) +#define SISTER_DDB offsetof(struct asd_ddb_ssp_smp_target_port, sister_ddb) +#define MAX_CCONN offsetof(struct asd_ddb_ssp_smp_target_port, max_concurrent_conn) +#define NUM_CTX offsetof(struct asd_ddb_ssp_smp_target_port, num_contexts) +#define ATA_CMD_SCBPTR offsetof(struct asd_ddb_stp_sata_target_port, ata_cmd_scbptr) +#define SATA_TAG_ALLOC_MASK offsetof(struct asd_ddb_stp_sata_target_port, sata_tag_alloc_mask) +#define NUM_SATA_TAGS offsetof(struct asd_ddb_stp_sata_target_port, num_sata_tags) +#define SATA_STATUS offsetof(struct asd_ddb_stp_sata_target_port, sata_status) +#define NCQ_DATA_SCB_PTR offsetof(struct asd_ddb_stp_sata_target_port, ncq_data_scb_ptr) +#define ITNL_TIMEOUT offsetof(struct asd_ddb_ssp_smp_target_port, itnl_timeout) + +static inline void asd_free_ddb(struct asd_ha_struct *asd_ha, int ddb) +{ + unsigned long flags; + + if (!ddb || ddb >= 0xFFFF) + return; + asd_ddbsite_write_byte(asd_ha, ddb, DDB_TYPE, DDB_TYPE_UNUSED); + spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); + CLEAR_DDB(ddb, asd_ha); + spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags); +} + +static inline void asd_set_ddb_type(struct domain_device *dev) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + int ddb = (int) (unsigned long) dev->lldd_dev; + + if (dev->dev_type == SATA_PM_PORT) + asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT); + else if (dev->tproto) + asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET); + else + asd_ddbsite_write_byte(asd_ha,ddb,DDB_TYPE,DDB_TYPE_INITIATOR); +} + +static int asd_init_sata_tag_ddb(struct domain_device *dev) +{ + struct asd_ha_struct *asd_ha = 
dev->port->ha->lldd_ha; + int ddb, i; + + ddb = asd_get_ddb(asd_ha); + if (ddb < 0) + return ddb; + + for (i = 0; i < sizeof(struct asd_ddb_sata_tag); i += 2) + asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF); + + asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev, + SISTER_DDB, ddb); + return 0; +} + +static inline int asd_init_sata(struct domain_device *dev) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + int ddb = (int) (unsigned long) dev->lldd_dev; + u32 qdepth = 0; + int res = 0; + + asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF); + if ((dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) && + dev->sata_dev.identify_device && + dev->sata_dev.identify_device[10] != 0) { + u16 w75 = le16_to_cpu(dev->sata_dev.identify_device[75]); + u16 w76 = le16_to_cpu(dev->sata_dev.identify_device[76]); + + if (w76 & 0x100) /* NCQ? */ + qdepth = (w75 & 0x1F) + 1; + asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK, + (1<<qdepth)-1); + asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth); + } + if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM || + dev->dev_type == SATA_PM_PORT) { + struct dev_to_host_fis *fis = (struct dev_to_host_fis *) + dev->frame_rcvd; + asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status); + } + asd_ddbsite_write_word(asd_ha, ddb, NCQ_DATA_SCB_PTR, 0xFFFF); + if (qdepth > 0) + res = asd_init_sata_tag_ddb(dev); + return res; +} + +static int asd_init_target_ddb(struct domain_device *dev) +{ + int ddb, i; + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + u8 flags = 0; + + ddb = asd_get_ddb(asd_ha); + if (ddb < 0) + return ddb; + + dev->lldd_dev = (void *) (unsigned long) ddb; + + asd_ddbsite_write_byte(asd_ha, ddb, 0, DDB_TP_CONN_TYPE); + asd_ddbsite_write_byte(asd_ha, ddb, 1, 0); + asd_ddbsite_write_word(asd_ha, ddb, INIT_CONN_TAG, 0xFFFF); + for (i = 0; i < SAS_ADDR_SIZE; i++) + asd_ddbsite_write_byte(asd_ha, ddb, DEST_SAS_ADDR+i, + dev->sas_addr[i]); + asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_HEAD, 0xFFFF); + asd_set_ddb_type(dev); + asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask); + if (dev->port->oob_mode != SATA_OOB_MODE) { + flags |= OPEN_REQUIRED; + if ((dev->dev_type == SATA_DEV) || + (dev->tproto & SAS_PROTO_STP)) { + struct smp_resp *rps_resp = &dev->sata_dev.rps_resp; + if (rps_resp->frame_type == SMP_RESPONSE && + rps_resp->function == SMP_REPORT_PHY_SATA && + rps_resp->result == SMP_RESP_FUNC_ACC) { + if (rps_resp->rps.affil_valid) + flags |= STP_AFFIL_POL; + if (rps_resp->rps.affil_supp) + flags |= SUPPORTS_AFFIL; + } + } else { + flags |= CONCURRENT_CONN_SUPP; + if (!dev->parent && + (dev->dev_type == EDGE_DEV || + dev->dev_type == FANOUT_DEV)) + asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN, + 4); + else + asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN, + dev->pathways); + asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1); + } + } + if (dev->dev_type == SATA_PM) + flags |= SATA_MULTIPORT; + asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags); + + flags = 0; + if (dev->tproto & SAS_PROTO_STP) + flags |= STP_CL_POL_NO_TX; + asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS2, flags); + + asd_ddbsite_write_word(asd_ha, ddb, EXEC_QUEUE_TAIL, 0xFFFF); + asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF); + asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); + + if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTO_STP)) { + i = asd_init_sata(dev); + if (i < 0) { + asd_free_ddb(asd_ha, ddb); + return i; + } + } + + if (dev->dev_type 
== SAS_END_DEV) { + struct sas_end_device *rdev = rphy_to_end_device(dev->rphy); + if (rdev->I_T_nexus_loss_timeout > 0) + asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT, + min(rdev->I_T_nexus_loss_timeout, + (u16)ITNL_TIMEOUT_CONST)); + else + asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT, + (u16)ITNL_TIMEOUT_CONST); + } + return 0; +} + +static int asd_init_sata_pm_table_ddb(struct domain_device *dev) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + int ddb, i; + + ddb = asd_get_ddb(asd_ha); + if (ddb < 0) + return ddb; + + for (i = 0; i < 32; i += 2) + asd_ddbsite_write_word(asd_ha, ddb, i, 0xFFFF); + + asd_ddbsite_write_word(asd_ha, (int) (unsigned long) dev->lldd_dev, + SISTER_DDB, ddb); + + return 0; +} + +#define PM_PORT_FLAGS offsetof(struct asd_ddb_sata_pm_port, pm_port_flags) +#define PARENT_DDB offsetof(struct asd_ddb_sata_pm_port, parent_ddb) + +/** + * asd_init_sata_pm_port_ddb -- SATA Port Multiplier Port + * dev: pointer to domain device + * + * For SATA Port Multiplier Ports we need to allocate one SATA Port + * Multiplier Port DDB and depending on whether the target on it + * supports SATA II NCQ, one SATA Tag DDB. + */ +static int asd_init_sata_pm_port_ddb(struct domain_device *dev) +{ + int ddb, i, parent_ddb, pmtable_ddb; + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + u8 flags; + + ddb = asd_get_ddb(asd_ha); + if (ddb < 0) + return ddb; + + asd_set_ddb_type(dev); + flags = (dev->sata_dev.port_no << 4) | PM_PORT_SET; + asd_ddbsite_write_byte(asd_ha, ddb, PM_PORT_FLAGS, flags); + asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); + asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF); + asd_init_sata(dev); + + parent_ddb = (int) (unsigned long) dev->parent->lldd_dev; + asd_ddbsite_write_word(asd_ha, ddb, PARENT_DDB, parent_ddb); + pmtable_ddb = asd_ddbsite_read_word(asd_ha, parent_ddb, SISTER_DDB); + asd_ddbsite_write_word(asd_ha, pmtable_ddb, dev->sata_dev.port_no,ddb); + + if (asd_ddbsite_read_byte(asd_ha, ddb, NUM_SATA_TAGS) > 0) { + i = asd_init_sata_tag_ddb(dev); + if (i < 0) { + asd_free_ddb(asd_ha, ddb); + return i; + } + } + return 0; +} + +static int asd_init_initiator_ddb(struct domain_device *dev) +{ + return -ENODEV; +} + +/** + * asd_init_sata_pm_ddb -- SATA Port Multiplier + * dev: pointer to domain device + * + * For STP and direct-attached SATA Port Multipliers we need + * one target port DDB entry and one SATA PM table DDB entry. 
+ */ +static int asd_init_sata_pm_ddb(struct domain_device *dev) +{ + int res = 0; + + res = asd_init_target_ddb(dev); + if (res) + goto out; + res = asd_init_sata_pm_table_ddb(dev); + if (res) + asd_free_ddb(dev->port->ha->lldd_ha, + (int) (unsigned long) dev->lldd_dev); +out: + return res; +} + +int asd_dev_found(struct domain_device *dev) +{ + int res = 0; + + switch (dev->dev_type) { + case SATA_PM: + res = asd_init_sata_pm_ddb(dev); + break; + case SATA_PM_PORT: + res = asd_init_sata_pm_port_ddb(dev); + break; + default: + if (dev->tproto) + res = asd_init_target_ddb(dev); + else + res = asd_init_initiator_ddb(dev); + } + return res; +} + +void asd_dev_gone(struct domain_device *dev) +{ + int ddb, sister_ddb; + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + + ddb = (int) (unsigned long) dev->lldd_dev; + sister_ddb = asd_ddbsite_read_word(asd_ha, ddb, SISTER_DDB); + + if (sister_ddb != 0xFFFF) + asd_free_ddb(asd_ha, sister_ddb); + asd_free_ddb(asd_ha, ddb); + dev->lldd_dev = NULL; +} diff --git a/drivers/scsi/aic94xx/aic94xx_dump.c b/drivers/scsi/aic94xx/aic94xx_dump.c new file mode 100644 index 0000000..e6ade59 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_dump.c @@ -0,0 +1,959 @@ +/* + * Aic94xx SAS/SATA driver dump interface. + * + * Copyright (C) 2004 Adaptec, Inc. All rights reserved. + * Copyright (C) 2004 David Chaw <david_chaw@adaptec.com> + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * 2005/07/14/LT Complete overhaul of this file. Update pages, register + * locations, names, etc. Make use of macros. Print more information. + * Print all cseq and lseq mip and mdp. 
+ * + */ + +#include "linux/pci.h" +#include "aic94xx.h" +#include "aic94xx_reg.h" +#include "aic94xx_reg_def.h" +#include "aic94xx_sas.h" + +#include "aic94xx_dump.h" + +#ifdef ASD_DEBUG + +#define MD(x) (1 << (x)) +#define MODE_COMMON (1 << 31) +#define MODE_0_7 (0xFF) + +static const struct lseq_cio_regs { + char *name; + u32 offs; + u8 width; + u32 mode; +} LSEQmCIOREGS[] = { + {"LmMnSCBPTR", 0x20, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) }, + {"LmMnDDBPTR", 0x22, 16, MD(0)|MD(1)|MD(2)|MD(3)|MD(4) }, + {"LmREQMBX", 0x30, 32, MODE_COMMON }, + {"LmRSPMBX", 0x34, 32, MODE_COMMON }, + {"LmMnINT", 0x38, 32, MODE_0_7 }, + {"LmMnINTEN", 0x3C, 32, MODE_0_7 }, + {"LmXMTPRIMD", 0x40, 32, MODE_COMMON }, + {"LmXMTPRIMCS", 0x44, 8, MODE_COMMON }, + {"LmCONSTAT", 0x45, 8, MODE_COMMON }, + {"LmMnDMAERRS", 0x46, 8, MD(0)|MD(1) }, + {"LmMnSGDMAERRS", 0x47, 8, MD(0)|MD(1) }, + {"LmMnEXPHDRP", 0x48, 8, MD(0) }, + {"LmMnSASAALIGN", 0x48, 8, MD(1) }, + {"LmMnMSKHDRP", 0x49, 8, MD(0) }, + {"LmMnSTPALIGN", 0x49, 8, MD(1) }, + {"LmMnRCVHDRP", 0x4A, 8, MD(0) }, + {"LmMnXMTHDRP", 0x4A, 8, MD(1) }, + {"LmALIGNMODE", 0x4B, 8, MD(1) }, + {"LmMnEXPRCVCNT", 0x4C, 32, MD(0) }, + {"LmMnXMTCNT", 0x4C, 32, MD(1) }, + {"LmMnCURRTAG", 0x54, 16, MD(0) }, + {"LmMnPREVTAG", 0x56, 16, MD(0) }, + {"LmMnACKOFS", 0x58, 8, MD(1) }, + {"LmMnXFRLVL", 0x59, 8, MD(0)|MD(1) }, + {"LmMnSGDMACTL", 0x5A, 8, MD(0)|MD(1) }, + {"LmMnSGDMASTAT", 0x5B, 8, MD(0)|MD(1) }, + {"LmMnDDMACTL", 0x5C, 8, MD(0)|MD(1) }, + {"LmMnDDMASTAT", 0x5D, 8, MD(0)|MD(1) }, + {"LmMnDDMAMODE", 0x5E, 16, MD(0)|MD(1) }, + {"LmMnPIPECTL", 0x61, 8, MD(0)|MD(1) }, + {"LmMnACTSCB", 0x62, 16, MD(0)|MD(1) }, + {"LmMnSGBHADR", 0x64, 8, MD(0)|MD(1) }, + {"LmMnSGBADR", 0x65, 8, MD(0)|MD(1) }, + {"LmMnSGDCNT", 0x66, 8, MD(0)|MD(1) }, + {"LmMnSGDMADR", 0x68, 32, MD(0)|MD(1) }, + {"LmMnSGDMADR", 0x6C, 32, MD(0)|MD(1) }, + {"LmMnXFRCNT", 0x70, 32, MD(0)|MD(1) }, + {"LmMnXMTCRC", 0x74, 32, MD(1) }, + {"LmCURRTAG", 0x74, 16, MD(0) }, + {"LmPREVTAG", 0x76, 16, MD(0) }, + {"LmMnDPSEL", 0x7B, 8, MD(0)|MD(1) }, + {"LmDPTHSTAT", 0x7C, 8, MODE_COMMON }, + {"LmMnHOLDLVL", 0x7D, 8, MD(0) }, + {"LmMnSATAFS", 0x7E, 8, MD(1) }, + {"LmMnCMPLTSTAT", 0x7F, 8, MD(0)|MD(1) }, + {"LmPRMSTAT0", 0x80, 32, MODE_COMMON }, + {"LmPRMSTAT1", 0x84, 32, MODE_COMMON }, + {"LmGPRMINT", 0x88, 8, MODE_COMMON }, + {"LmMnCURRSCB", 0x8A, 16, MD(0) }, + {"LmPRMICODE", 0x8C, 32, MODE_COMMON }, + {"LmMnRCVCNT", 0x90, 16, MD(0) }, + {"LmMnBUFSTAT", 0x92, 16, MD(0) }, + {"LmMnXMTHDRSIZE",0x92, 8, MD(1) }, + {"LmMnXMTSIZE", 0x93, 8, MD(1) }, + {"LmMnTGTXFRCNT", 0x94, 32, MD(0) }, + {"LmMnEXPROFS", 0x98, 32, MD(0) }, + {"LmMnXMTROFS", 0x98, 32, MD(1) }, + {"LmMnRCVROFS", 0x9C, 32, MD(0) }, + {"LmCONCTL", 0xA0, 16, MODE_COMMON }, + {"LmBITLTIMER", 0xA2, 16, MODE_COMMON }, + {"LmWWNLOW", 0xA8, 32, MODE_COMMON }, + {"LmWWNHIGH", 0xAC, 32, MODE_COMMON }, + {"LmMnFRMERR", 0xB0, 32, MD(0) }, + {"LmMnFRMERREN", 0xB4, 32, MD(0) }, + {"LmAWTIMER", 0xB8, 16, MODE_COMMON }, + {"LmAWTCTL", 0xBA, 8, MODE_COMMON }, + {"LmMnHDRCMPS", 0xC0, 32, MD(0) }, + {"LmMnXMTSTAT", 0xC4, 8, MD(1) }, + {"LmHWTSTATEN", 0xC5, 8, MODE_COMMON }, + {"LmMnRRDYRC", 0xC6, 8, MD(0) }, + {"LmMnRRDYTC", 0xC6, 8, MD(1) }, + {"LmHWTSTAT", 0xC7, 8, MODE_COMMON }, + {"LmMnDATABUFADR",0xC8, 16, MD(0)|MD(1) }, + {"LmDWSSTATUS", 0xCB, 8, MODE_COMMON }, + {"LmMnACTSTAT", 0xCE, 16, MD(0)|MD(1) }, + {"LmMnREQSCB", 0xD2, 16, MD(0)|MD(1) }, + {"LmXXXPRIM", 0xD4, 32, MODE_COMMON }, + {"LmRCVASTAT", 0xD9, 8, MODE_COMMON }, + {"LmINTDIS1", 0xDA, 8, MODE_COMMON }, + 
{"LmPSTORESEL", 0xDB, 8, MODE_COMMON }, + {"LmPSTORE", 0xDC, 32, MODE_COMMON }, + {"LmPRIMSTAT0EN", 0xE0, 32, MODE_COMMON }, + {"LmPRIMSTAT1EN", 0xE4, 32, MODE_COMMON }, + {"LmDONETCTL", 0xF2, 16, MODE_COMMON }, + {NULL, 0, 0, 0 } +}; +/* +static struct lseq_cio_regs LSEQmOOBREGS[] = { + {"OOB_BFLTR" ,0x100, 8, MD(5)}, + {"OOB_INIT_MIN" ,0x102,16, MD(5)}, + {"OOB_INIT_MAX" ,0x104,16, MD(5)}, + {"OOB_INIT_NEG" ,0x106,16, MD(5)}, + {"OOB_SAS_MIN" ,0x108,16, MD(5)}, + {"OOB_SAS_MAX" ,0x10A,16, MD(5)}, + {"OOB_SAS_NEG" ,0x10C,16, MD(5)}, + {"OOB_WAKE_MIN" ,0x10E,16, MD(5)}, + {"OOB_WAKE_MAX" ,0x110,16, MD(5)}, + {"OOB_WAKE_NEG" ,0x112,16, MD(5)}, + {"OOB_IDLE_MAX" ,0x114,16, MD(5)}, + {"OOB_BURST_MAX" ,0x116,16, MD(5)}, + {"OOB_XMIT_BURST" ,0x118, 8, MD(5)}, + {"OOB_SEND_PAIRS" ,0x119, 8, MD(5)}, + {"OOB_INIT_IDLE" ,0x11A, 8, MD(5)}, + {"OOB_INIT_NEGO" ,0x11C, 8, MD(5)}, + {"OOB_SAS_IDLE" ,0x11E, 8, MD(5)}, + {"OOB_SAS_NEGO" ,0x120, 8, MD(5)}, + {"OOB_WAKE_IDLE" ,0x122, 8, MD(5)}, + {"OOB_WAKE_NEGO" ,0x124, 8, MD(5)}, + {"OOB_DATA_KBITS" ,0x126, 8, MD(5)}, + {"OOB_BURST_DATA" ,0x128,32, MD(5)}, + {"OOB_ALIGN_0_DATA" ,0x12C,32, MD(5)}, + {"OOB_ALIGN_1_DATA" ,0x130,32, MD(5)}, + {"OOB_SYNC_DATA" ,0x134,32, MD(5)}, + {"OOB_D10_2_DATA" ,0x138,32, MD(5)}, + {"OOB_PHY_RST_CNT" ,0x13C,32, MD(5)}, + {"OOB_SIG_GEN" ,0x140, 8, MD(5)}, + {"OOB_XMIT" ,0x141, 8, MD(5)}, + {"FUNCTION_MAKS" ,0x142, 8, MD(5)}, + {"OOB_MODE" ,0x143, 8, MD(5)}, + {"CURRENT_STATUS" ,0x144, 8, MD(5)}, + {"SPEED_MASK" ,0x145, 8, MD(5)}, + {"PRIM_COUNT" ,0x146, 8, MD(5)}, + {"OOB_SIGNALS" ,0x148, 8, MD(5)}, + {"OOB_DATA_DET" ,0x149, 8, MD(5)}, + {"OOB_TIME_OUT" ,0x14C, 8, MD(5)}, + {"OOB_TIMER_ENABLE" ,0x14D, 8, MD(5)}, + {"OOB_STATUS" ,0x14E, 8, MD(5)}, + {"HOT_PLUG_DELAY" ,0x150, 8, MD(5)}, + {"RCD_DELAY" ,0x151, 8, MD(5)}, + {"COMSAS_TIMER" ,0x152, 8, MD(5)}, + {"SNTT_DELAY" ,0x153, 8, MD(5)}, + {"SPD_CHNG_DELAY" ,0x154, 8, MD(5)}, + {"SNLT_DELAY" ,0x155, 8, MD(5)}, + {"SNWT_DELAY" ,0x156, 8, MD(5)}, + {"ALIGN_DELAY" ,0x157, 8, MD(5)}, + {"INT_ENABLE_0" ,0x158, 8, MD(5)}, + {"INT_ENABLE_1" ,0x159, 8, MD(5)}, + {"INT_ENABLE_2" ,0x15A, 8, MD(5)}, + {"INT_ENABLE_3" ,0x15B, 8, MD(5)}, + {"OOB_TEST_REG" ,0x15C, 8, MD(5)}, + {"PHY_CONTROL_0" ,0x160, 8, MD(5)}, + {"PHY_CONTROL_1" ,0x161, 8, MD(5)}, + {"PHY_CONTROL_2" ,0x162, 8, MD(5)}, + {"PHY_CONTROL_3" ,0x163, 8, MD(5)}, + {"PHY_OOB_CAL_TX" ,0x164, 8, MD(5)}, + {"PHY_OOB_CAL_RX" ,0x165, 8, MD(5)}, + {"OOB_PHY_CAL_TX" ,0x166, 8, MD(5)}, + {"OOB_PHY_CAL_RX" ,0x167, 8, MD(5)}, + {"PHY_CONTROL_4" ,0x168, 8, MD(5)}, + {"PHY_TEST" ,0x169, 8, MD(5)}, + {"PHY_PWR_CTL" ,0x16A, 8, MD(5)}, + {"PHY_PWR_DELAY" ,0x16B, 8, MD(5)}, + {"OOB_SM_CON" ,0x16C, 8, MD(5)}, + {"ADDR_TRAP_1" ,0x16D, 8, MD(5)}, + {"ADDR_NEXT_1" ,0x16E, 8, MD(5)}, + {"NEXT_ST_1" ,0x16F, 8, MD(5)}, + {"OOB_SM_STATE" ,0x170, 8, MD(5)}, + {"ADDR_TRAP_2" ,0x171, 8, MD(5)}, + {"ADDR_NEXT_2" ,0x172, 8, MD(5)}, + {"NEXT_ST_2" ,0x173, 8, MD(5)}, + {NULL, 0, 0, 0 } +}; +*/ +#define STR_8BIT " %30s[0x%04x]:0x%02x\n" +#define STR_16BIT " %30s[0x%04x]:0x%04x\n" +#define STR_32BIT " %30s[0x%04x]:0x%08x\n" +#define STR_64BIT " %30s[0x%04x]:0x%llx\n" + +#define PRINT_REG_8bit(_ha, _n, _r) asd_printk(STR_8BIT, #_n, _n, \ + asd_read_reg_byte(_ha, _r)) +#define PRINT_REG_16bit(_ha, _n, _r) asd_printk(STR_16BIT, #_n, _n, \ + asd_read_reg_word(_ha, _r)) +#define PRINT_REG_32bit(_ha, _n, _r) asd_printk(STR_32BIT, #_n, _n, \ + asd_read_reg_dword(_ha, _r)) + +#define PRINT_CREG_8bit(_ha, _n) asd_printk(STR_8BIT, #_n, _n, \ + asd_read_reg_byte(_ha, 
C##_n)) +#define PRINT_CREG_16bit(_ha, _n) asd_printk(STR_16BIT, #_n, _n, \ + asd_read_reg_word(_ha, C##_n)) +#define PRINT_CREG_32bit(_ha, _n) asd_printk(STR_32BIT, #_n, _n, \ + asd_read_reg_dword(_ha, C##_n)) + +#define MSTR_8BIT " Mode:%02d %30s[0x%04x]:0x%02x\n" +#define MSTR_16BIT " Mode:%02d %30s[0x%04x]:0x%04x\n" +#define MSTR_32BIT " Mode:%02d %30s[0x%04x]:0x%08x\n" + +#define PRINT_MREG_8bit(_ha, _m, _n, _r) asd_printk(MSTR_8BIT, _m, #_n, _n, \ + asd_read_reg_byte(_ha, _r)) +#define PRINT_MREG_16bit(_ha, _m, _n, _r) asd_printk(MSTR_16BIT, _m, #_n, _n, \ + asd_read_reg_word(_ha, _r)) +#define PRINT_MREG_32bit(_ha, _m, _n, _r) asd_printk(MSTR_32BIT, _m, #_n, _n, \ + asd_read_reg_dword(_ha, _r)) + +/* can also be used for MD when the register is mode aware already */ +#define PRINT_MIS_byte(_ha, _n) asd_printk(STR_8BIT, #_n,CSEQ_##_n-CMAPPEDSCR,\ + asd_read_reg_byte(_ha, CSEQ_##_n)) +#define PRINT_MIS_word(_ha, _n) asd_printk(STR_16BIT,#_n,CSEQ_##_n-CMAPPEDSCR,\ + asd_read_reg_word(_ha, CSEQ_##_n)) +#define PRINT_MIS_dword(_ha, _n) \ + asd_printk(STR_32BIT,#_n,CSEQ_##_n-CMAPPEDSCR,\ + asd_read_reg_dword(_ha, CSEQ_##_n)) +#define PRINT_MIS_qword(_ha, _n) \ + asd_printk(STR_64BIT, #_n,CSEQ_##_n-CMAPPEDSCR, \ + (unsigned long long)(((u64)asd_read_reg_dword(_ha, CSEQ_##_n)) \ + | (((u64)asd_read_reg_dword(_ha, (CSEQ_##_n)+4))<<32))) + +#define CMDP_REG(_n, _m) (_m*(CSEQ_PAGE_SIZE*2)+CSEQ_##_n) +#define PRINT_CMDP_word(_ha, _n) \ +asd_printk("%20s 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", \ + #_n, \ + asd_read_reg_word(_ha, CMDP_REG(_n, 0)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 1)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 2)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 3)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 4)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 5)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 6)), \ + asd_read_reg_word(_ha, CMDP_REG(_n, 7))) + +#define PRINT_CMDP_byte(_ha, _n) \ +asd_printk("%20s 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", \ + #_n, \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 0)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 1)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 2)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 3)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 4)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 5)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 6)), \ + asd_read_reg_byte(_ha, CMDP_REG(_n, 7))) + +static void asd_dump_cseq_state(struct asd_ha_struct *asd_ha) +{ + int mode; + + asd_printk("CSEQ STATE\n"); + + asd_printk("ARP2 REGISTERS\n"); + + PRINT_CREG_32bit(asd_ha, ARP2CTL); + PRINT_CREG_32bit(asd_ha, ARP2INT); + PRINT_CREG_32bit(asd_ha, ARP2INTEN); + PRINT_CREG_8bit(asd_ha, MODEPTR); + PRINT_CREG_8bit(asd_ha, ALTMODE); + PRINT_CREG_8bit(asd_ha, FLAG); + PRINT_CREG_8bit(asd_ha, ARP2INTCTL); + PRINT_CREG_16bit(asd_ha, STACK); + PRINT_CREG_16bit(asd_ha, PRGMCNT); + PRINT_CREG_16bit(asd_ha, ACCUM); + PRINT_CREG_16bit(asd_ha, SINDEX); + PRINT_CREG_16bit(asd_ha, DINDEX); + PRINT_CREG_8bit(asd_ha, SINDIR); + PRINT_CREG_8bit(asd_ha, DINDIR); + PRINT_CREG_8bit(asd_ha, JUMLDIR); + PRINT_CREG_8bit(asd_ha, ARP2HALTCODE); + PRINT_CREG_16bit(asd_ha, CURRADDR); + PRINT_CREG_16bit(asd_ha, LASTADDR); + PRINT_CREG_16bit(asd_ha, NXTLADDR); + + asd_printk("IOP REGISTERS\n"); + + PRINT_REG_32bit(asd_ha, BISTCTL1, CBISTCTL); + PRINT_CREG_32bit(asd_ha, MAPPEDSCR); + + asd_printk("CIO REGISTERS\n"); + + for (mode = 0; mode < 9; mode++) + PRINT_MREG_16bit(asd_ha, mode, MnSCBPTR, CMnSCBPTR(mode)); + PRINT_MREG_16bit(asd_ha, 15, MnSCBPTR, CMnSCBPTR(15)); + + for 
(mode = 0; mode < 9; mode++) + PRINT_MREG_16bit(asd_ha, mode, MnDDBPTR, CMnDDBPTR(mode)); + PRINT_MREG_16bit(asd_ha, 15, MnDDBPTR, CMnDDBPTR(15)); + + for (mode = 0; mode < 8; mode++) + PRINT_MREG_32bit(asd_ha, mode, MnREQMBX, CMnREQMBX(mode)); + for (mode = 0; mode < 8; mode++) + PRINT_MREG_32bit(asd_ha, mode, MnRSPMBX, CMnRSPMBX(mode)); + for (mode = 0; mode < 8; mode++) + PRINT_MREG_32bit(asd_ha, mode, MnINT, CMnINT(mode)); + for (mode = 0; mode < 8; mode++) + PRINT_MREG_32bit(asd_ha, mode, MnINTEN, CMnINTEN(mode)); + + PRINT_CREG_8bit(asd_ha, SCRATCHPAGE); + for (mode = 0; mode < 8; mode++) + PRINT_MREG_8bit(asd_ha, mode, MnSCRATCHPAGE, + CMnSCRATCHPAGE(mode)); + + PRINT_REG_32bit(asd_ha, CLINKCON, CLINKCON); + PRINT_REG_8bit(asd_ha, CCONMSK, CCONMSK); + PRINT_REG_8bit(asd_ha, CCONEXIST, CCONEXIST); + PRINT_REG_16bit(asd_ha, CCONMODE, CCONMODE); + PRINT_REG_32bit(asd_ha, CTIMERCALC, CTIMERCALC); + PRINT_REG_8bit(asd_ha, CINTDIS, CINTDIS); + + asd_printk("SCRATCH MEMORY\n"); + + asd_printk("MIP 4 >>>>>\n"); + PRINT_MIS_word(asd_ha, Q_EXE_HEAD); + PRINT_MIS_word(asd_ha, Q_EXE_TAIL); + PRINT_MIS_word(asd_ha, Q_DONE_HEAD); + PRINT_MIS_word(asd_ha, Q_DONE_TAIL); + PRINT_MIS_word(asd_ha, Q_SEND_HEAD); + PRINT_MIS_word(asd_ha, Q_SEND_TAIL); + PRINT_MIS_word(asd_ha, Q_DMA2CHIM_HEAD); + PRINT_MIS_word(asd_ha, Q_DMA2CHIM_TAIL); + PRINT_MIS_word(asd_ha, Q_COPY_HEAD); + PRINT_MIS_word(asd_ha, Q_COPY_TAIL); + PRINT_MIS_word(asd_ha, REG0); + PRINT_MIS_word(asd_ha, REG1); + PRINT_MIS_dword(asd_ha, REG2); + PRINT_MIS_byte(asd_ha, LINK_CTL_Q_MAP); + PRINT_MIS_byte(asd_ha, MAX_CSEQ_MODE); + PRINT_MIS_byte(asd_ha, FREE_LIST_HACK_COUNT); + + asd_printk("MIP 5 >>>>\n"); + PRINT_MIS_qword(asd_ha, EST_NEXUS_REQ_QUEUE); + PRINT_MIS_qword(asd_ha, EST_NEXUS_REQ_COUNT); + PRINT_MIS_word(asd_ha, Q_EST_NEXUS_HEAD); + PRINT_MIS_word(asd_ha, Q_EST_NEXUS_TAIL); + PRINT_MIS_word(asd_ha, NEED_EST_NEXUS_SCB); + PRINT_MIS_byte(asd_ha, EST_NEXUS_REQ_HEAD); + PRINT_MIS_byte(asd_ha, EST_NEXUS_REQ_TAIL); + PRINT_MIS_byte(asd_ha, EST_NEXUS_SCB_OFFSET); + + asd_printk("MIP 6 >>>>\n"); + PRINT_MIS_word(asd_ha, INT_ROUT_RET_ADDR0); + PRINT_MIS_word(asd_ha, INT_ROUT_RET_ADDR1); + PRINT_MIS_word(asd_ha, INT_ROUT_SCBPTR); + PRINT_MIS_byte(asd_ha, INT_ROUT_MODE); + PRINT_MIS_byte(asd_ha, ISR_SCRATCH_FLAGS); + PRINT_MIS_word(asd_ha, ISR_SAVE_SINDEX); + PRINT_MIS_word(asd_ha, ISR_SAVE_DINDEX); + PRINT_MIS_word(asd_ha, Q_MONIRTT_HEAD); + PRINT_MIS_word(asd_ha, Q_MONIRTT_TAIL); + PRINT_MIS_byte(asd_ha, FREE_SCB_MASK); + PRINT_MIS_word(asd_ha, BUILTIN_FREE_SCB_HEAD); + PRINT_MIS_word(asd_ha, BUILTIN_FREE_SCB_TAIL); + PRINT_MIS_word(asd_ha, EXTENDED_FREE_SCB_HEAD); + PRINT_MIS_word(asd_ha, EXTENDED_FREE_SCB_TAIL); + + asd_printk("MIP 7 >>>>\n"); + PRINT_MIS_qword(asd_ha, EMPTY_REQ_QUEUE); + PRINT_MIS_qword(asd_ha, EMPTY_REQ_COUNT); + PRINT_MIS_word(asd_ha, Q_EMPTY_HEAD); + PRINT_MIS_word(asd_ha, Q_EMPTY_TAIL); + PRINT_MIS_word(asd_ha, NEED_EMPTY_SCB); + PRINT_MIS_byte(asd_ha, EMPTY_REQ_HEAD); + PRINT_MIS_byte(asd_ha, EMPTY_REQ_TAIL); + PRINT_MIS_byte(asd_ha, EMPTY_SCB_OFFSET); + PRINT_MIS_word(asd_ha, PRIMITIVE_DATA); + PRINT_MIS_dword(asd_ha, TIMEOUT_CONST); + + asd_printk("MDP 0 >>>>\n"); + asd_printk("%-20s %6s %6s %6s %6s %6s %6s %6s %6s\n", + "Mode: ", "0", "1", "2", "3", "4", "5", "6", "7"); + PRINT_CMDP_word(asd_ha, LRM_SAVE_SINDEX); + PRINT_CMDP_word(asd_ha, LRM_SAVE_SCBPTR); + PRINT_CMDP_word(asd_ha, Q_LINK_HEAD); + PRINT_CMDP_word(asd_ha, Q_LINK_TAIL); + PRINT_CMDP_byte(asd_ha, LRM_SAVE_SCRPAGE); + + asd_printk("MDP 0 Mode 8 
>>>>\n"); + PRINT_MIS_word(asd_ha, RET_ADDR); + PRINT_MIS_word(asd_ha, RET_SCBPTR); + PRINT_MIS_word(asd_ha, SAVE_SCBPTR); + PRINT_MIS_word(asd_ha, EMPTY_TRANS_CTX); + PRINT_MIS_word(asd_ha, RESP_LEN); + PRINT_MIS_word(asd_ha, TMF_SCBPTR); + PRINT_MIS_word(asd_ha, GLOBAL_PREV_SCB); + PRINT_MIS_word(asd_ha, GLOBAL_HEAD); + PRINT_MIS_word(asd_ha, CLEAR_LU_HEAD); + PRINT_MIS_byte(asd_ha, TMF_OPCODE); + PRINT_MIS_byte(asd_ha, SCRATCH_FLAGS); + PRINT_MIS_word(asd_ha, HSB_SITE); + PRINT_MIS_word(asd_ha, FIRST_INV_SCB_SITE); + PRINT_MIS_word(asd_ha, FIRST_INV_DDB_SITE); + + asd_printk("MDP 1 Mode 8 >>>>\n"); + PRINT_MIS_qword(asd_ha, LUN_TO_CLEAR); + PRINT_MIS_qword(asd_ha, LUN_TO_CHECK); + + asd_printk("MDP 2 Mode 8 >>>>\n"); + PRINT_MIS_qword(asd_ha, HQ_NEW_POINTER); + PRINT_MIS_qword(asd_ha, HQ_DONE_BASE); + PRINT_MIS_dword(asd_ha, HQ_DONE_POINTER); + PRINT_MIS_byte(asd_ha, HQ_DONE_PASS); +} + +#define PRINT_LREG_8bit(_h, _lseq, _n) \ + asd_printk(STR_8BIT, #_n, _n, asd_read_reg_byte(_h, Lm##_n(_lseq))) +#define PRINT_LREG_16bit(_h, _lseq, _n) \ + asd_printk(STR_16BIT, #_n, _n, asd_read_reg_word(_h, Lm##_n(_lseq))) +#define PRINT_LREG_32bit(_h, _lseq, _n) \ + asd_printk(STR_32BIT, #_n, _n, asd_read_reg_dword(_h, Lm##_n(_lseq))) + +#define PRINT_LMIP_byte(_h, _lseq, _n) \ + asd_printk(STR_8BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \ + asd_read_reg_byte(_h, LmSEQ_##_n(_lseq))) +#define PRINT_LMIP_word(_h, _lseq, _n) \ + asd_printk(STR_16BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \ + asd_read_reg_word(_h, LmSEQ_##_n(_lseq))) +#define PRINT_LMIP_dword(_h, _lseq, _n) \ + asd_printk(STR_32BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \ + asd_read_reg_dword(_h, LmSEQ_##_n(_lseq))) +#define PRINT_LMIP_qword(_h, _lseq, _n) \ + asd_printk(STR_64BIT, #_n, LmSEQ_##_n(_lseq)-LmSCRATCH(_lseq), \ + (unsigned long long)(((unsigned long long) \ + asd_read_reg_dword(_h, LmSEQ_##_n(_lseq))) \ + | (((unsigned long long) \ + asd_read_reg_dword(_h, LmSEQ_##_n(_lseq)+4))<<32))) + +static void asd_print_lseq_cio_reg(struct asd_ha_struct *asd_ha, + u32 lseq_cio_addr, int i) +{ + switch (LSEQmCIOREGS[i].width) { + case 8: + asd_printk("%20s[0x%x]: 0x%02x\n", LSEQmCIOREGS[i].name, + LSEQmCIOREGS[i].offs, + asd_read_reg_byte(asd_ha, lseq_cio_addr + + LSEQmCIOREGS[i].offs)); + + break; + case 16: + asd_printk("%20s[0x%x]: 0x%04x\n", LSEQmCIOREGS[i].name, + LSEQmCIOREGS[i].offs, + asd_read_reg_word(asd_ha, lseq_cio_addr + + LSEQmCIOREGS[i].offs)); + + break; + case 32: + asd_printk("%20s[0x%x]: 0x%08x\n", LSEQmCIOREGS[i].name, + LSEQmCIOREGS[i].offs, + asd_read_reg_dword(asd_ha, lseq_cio_addr + + LSEQmCIOREGS[i].offs)); + break; + } +} + +static void asd_dump_lseq_state(struct asd_ha_struct *asd_ha, int lseq) +{ + u32 moffs; + int mode; + + asd_printk("LSEQ %d STATE\n", lseq); + + asd_printk("LSEQ%d: ARP2 REGISTERS\n", lseq); + PRINT_LREG_32bit(asd_ha, lseq, ARP2CTL); + PRINT_LREG_32bit(asd_ha, lseq, ARP2INT); + PRINT_LREG_32bit(asd_ha, lseq, ARP2INTEN); + PRINT_LREG_8bit(asd_ha, lseq, MODEPTR); + PRINT_LREG_8bit(asd_ha, lseq, ALTMODE); + PRINT_LREG_8bit(asd_ha, lseq, FLAG); + PRINT_LREG_8bit(asd_ha, lseq, ARP2INTCTL); + PRINT_LREG_16bit(asd_ha, lseq, STACK); + PRINT_LREG_16bit(asd_ha, lseq, PRGMCNT); + PRINT_LREG_16bit(asd_ha, lseq, ACCUM); + PRINT_LREG_16bit(asd_ha, lseq, SINDEX); + PRINT_LREG_16bit(asd_ha, lseq, DINDEX); + PRINT_LREG_8bit(asd_ha, lseq, SINDIR); + PRINT_LREG_8bit(asd_ha, lseq, DINDIR); + PRINT_LREG_8bit(asd_ha, lseq, JUMLDIR); + PRINT_LREG_8bit(asd_ha, lseq, ARP2HALTCODE); + 
PRINT_LREG_16bit(asd_ha, lseq, CURRADDR); + PRINT_LREG_16bit(asd_ha, lseq, LASTADDR); + PRINT_LREG_16bit(asd_ha, lseq, NXTLADDR); + + asd_printk("LSEQ%d: IOP REGISTERS\n", lseq); + + PRINT_LREG_32bit(asd_ha, lseq, MODECTL); + PRINT_LREG_32bit(asd_ha, lseq, DBGMODE); + PRINT_LREG_32bit(asd_ha, lseq, CONTROL); + PRINT_REG_32bit(asd_ha, BISTCTL0, LmBISTCTL0(lseq)); + PRINT_REG_32bit(asd_ha, BISTCTL1, LmBISTCTL1(lseq)); + + asd_printk("LSEQ%d: CIO REGISTERS\n", lseq); + asd_printk("Mode common:\n"); + + for (mode = 0; mode < 8; mode++) { + u32 lseq_cio_addr = LmSEQ_PHY_BASE(mode, lseq); + int i; + + for (i = 0; LSEQmCIOREGS[i].name; i++) + if (LSEQmCIOREGS[i].mode == MODE_COMMON) + asd_print_lseq_cio_reg(asd_ha,lseq_cio_addr,i); + } + + asd_printk("Mode unique:\n"); + for (mode = 0; mode < 8; mode++) { + u32 lseq_cio_addr = LmSEQ_PHY_BASE(mode, lseq); + int i; + + asd_printk("Mode %d\n", mode); + for (i = 0; LSEQmCIOREGS[i].name; i++) { + if (!(LSEQmCIOREGS[i].mode & (1 << mode))) + continue; + asd_print_lseq_cio_reg(asd_ha, lseq_cio_addr, i); + } + } + + asd_printk("SCRATCH MEMORY\n"); + + asd_printk("LSEQ%d MIP 0 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_HEAD); + PRINT_LMIP_word(asd_ha, lseq, Q_TGTXFR_TAIL); + PRINT_LMIP_byte(asd_ha, lseq, LINK_NUMBER); + PRINT_LMIP_byte(asd_ha, lseq, SCRATCH_FLAGS); + PRINT_LMIP_qword(asd_ha, lseq, CONNECTION_STATE); + PRINT_LMIP_word(asd_ha, lseq, CONCTL); + PRINT_LMIP_byte(asd_ha, lseq, CONSTAT); + PRINT_LMIP_byte(asd_ha, lseq, CONNECTION_MODES); + PRINT_LMIP_word(asd_ha, lseq, REG1_ISR); + PRINT_LMIP_word(asd_ha, lseq, REG2_ISR); + PRINT_LMIP_word(asd_ha, lseq, REG3_ISR); + PRINT_LMIP_qword(asd_ha, lseq,REG0_ISR); + + asd_printk("LSEQ%d MIP 1 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR0); + PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR1); + PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR2); + PRINT_LMIP_word(asd_ha, lseq, EST_NEXUS_SCBPTR3); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE0); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE1); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE2); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_OPCODE3); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_HEAD); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_SCB_TAIL); + PRINT_LMIP_byte(asd_ha, lseq, EST_NEXUS_BUF_AVAIL); + PRINT_LMIP_dword(asd_ha, lseq, TIMEOUT_CONST); + PRINT_LMIP_word(asd_ha, lseq, ISR_SAVE_SINDEX); + PRINT_LMIP_word(asd_ha, lseq, ISR_SAVE_DINDEX); + + asd_printk("LSEQ%d MIP 2 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR0); + PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR1); + PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR2); + PRINT_LMIP_word(asd_ha, lseq, EMPTY_SCB_PTR3); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD0); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD1); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD2); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_OPCD3); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_HEAD); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_SCB_TAIL); + PRINT_LMIP_byte(asd_ha, lseq, EMPTY_BUFS_AVAIL); + + asd_printk("LSEQ%d MIP 3 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TMR_TOUT_CONST); + PRINT_LMIP_dword(asd_ha, lseq, SATA_INTERLOCK_TIMEOUT); + PRINT_LMIP_dword(asd_ha, lseq, SRST_ASSERT_TIMEOUT); + PRINT_LMIP_dword(asd_ha, lseq, RCV_FIS_TIMEOUT); + PRINT_LMIP_dword(asd_ha, lseq, ONE_MILLISEC_TIMEOUT); + PRINT_LMIP_dword(asd_ha, lseq, TEN_MS_COMINIT_TIMEOUT); + PRINT_LMIP_dword(asd_ha, lseq, SMP_RCV_TIMEOUT); + + for (mode = 0; mode < 3; mode++) { + 
asd_printk("LSEQ%d MDP 0 MODE %d >>>>\n", lseq, mode); + moffs = mode * LSEQ_MODE_SCRATCH_SIZE; + + asd_printk(STR_16BIT, "RET_ADDR", 0, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq) + + moffs)); + asd_printk(STR_16BIT, "REG0_MODE", 2, + asd_read_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq) + + moffs)); + asd_printk(STR_16BIT, "MODE_FLAGS", 4, + asd_read_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq) + + moffs)); + asd_printk(STR_16BIT, "RET_ADDR2", 0x6, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq) + + moffs)); + asd_printk(STR_16BIT, "RET_ADDR1", 0x8, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq) + + moffs)); + asd_printk(STR_8BIT, "OPCODE_TO_CSEQ", 0xB, + asd_read_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq) + + moffs)); + asd_printk(STR_16BIT, "DATA_TO_CSEQ", 0xC, + asd_read_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq) + + moffs)); + } + + asd_printk("LSEQ%d MDP 0 MODE 5 >>>>\n", lseq); + moffs = LSEQ_MODE5_PAGE0_OFFSET; + asd_printk(STR_16BIT, "RET_ADDR", 0, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq) + moffs)); + asd_printk(STR_16BIT, "REG0_MODE", 2, + asd_read_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq) + moffs)); + asd_printk(STR_16BIT, "MODE_FLAGS", 4, + asd_read_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq) + moffs)); + asd_printk(STR_16BIT, "RET_ADDR2", 0x6, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq) + moffs)); + asd_printk(STR_16BIT, "RET_ADDR1", 0x8, + asd_read_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq) + moffs)); + asd_printk(STR_8BIT, "OPCODE_TO_CSEQ", 0xB, + asd_read_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq) + moffs)); + asd_printk(STR_16BIT, "DATA_TO_CSEQ", 0xC, + asd_read_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq) + moffs)); + + asd_printk("LSEQ%d MDP 0 MODE 0 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, FIRST_INV_DDB_SITE); + PRINT_LMIP_word(asd_ha, lseq, EMPTY_TRANS_CTX); + PRINT_LMIP_word(asd_ha, lseq, RESP_LEN); + PRINT_LMIP_word(asd_ha, lseq, FIRST_INV_SCB_SITE); + PRINT_LMIP_dword(asd_ha, lseq, INTEN_SAVE); + PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_FRM_LEN); + PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_PROTOCOL); + PRINT_LMIP_byte(asd_ha, lseq, RESP_STATUS); + PRINT_LMIP_byte(asd_ha, lseq, LAST_LOADED_SGE); + PRINT_LMIP_byte(asd_ha, lseq, SAVE_SCBPTR); + + asd_printk("LSEQ%d MDP 0 MODE 1 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, Q_XMIT_HEAD); + PRINT_LMIP_word(asd_ha, lseq, M1_EMPTY_TRANS_CTX); + PRINT_LMIP_word(asd_ha, lseq, INI_CONN_TAG); + PRINT_LMIP_byte(asd_ha, lseq, FAILED_OPEN_STATUS); + PRINT_LMIP_byte(asd_ha, lseq, XMIT_REQUEST_TYPE); + PRINT_LMIP_byte(asd_ha, lseq, M1_RESP_STATUS); + PRINT_LMIP_byte(asd_ha, lseq, M1_LAST_LOADED_SGE); + PRINT_LMIP_word(asd_ha, lseq, M1_SAVE_SCBPTR); + + asd_printk("LSEQ%d MDP 0 MODE 2 >>>>\n", lseq); + PRINT_LMIP_word(asd_ha, lseq, PORT_COUNTER); + PRINT_LMIP_word(asd_ha, lseq, PM_TABLE_PTR); + PRINT_LMIP_word(asd_ha, lseq, SATA_INTERLOCK_TMR_SAVE); + PRINT_LMIP_word(asd_ha, lseq, IP_BITL); + PRINT_LMIP_word(asd_ha, lseq, COPY_SMP_CONN_TAG); + PRINT_LMIP_byte(asd_ha, lseq, P0M2_OFFS1AH); + + asd_printk("LSEQ%d MDP 0 MODE 4/5 >>>>\n", lseq); + PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_STATUS); + PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_MODE); + PRINT_LMIP_word(asd_ha, lseq, Q_LINK_HEAD); + PRINT_LMIP_byte(asd_ha, lseq, LINK_RST_ERR); + PRINT_LMIP_byte(asd_ha, lseq, SAVED_OOB_SIGNALS); + PRINT_LMIP_byte(asd_ha, lseq, SAS_RESET_MODE); + PRINT_LMIP_byte(asd_ha, lseq, LINK_RESET_RETRY_COUNT); + PRINT_LMIP_byte(asd_ha, lseq, NUM_LINK_RESET_RETRIES); + PRINT_LMIP_word(asd_ha, lseq, OOB_INT_ENABLES); + PRINT_LMIP_word(asd_ha, lseq, 
NOTIFY_TIMER_TIMEOUT); + PRINT_LMIP_word(asd_ha, lseq, NOTIFY_TIMER_DOWN_COUNT); + + asd_printk("LSEQ%d MDP 1 MODE 0 >>>>\n", lseq); + PRINT_LMIP_qword(asd_ha, lseq, SG_LIST_PTR_ADDR0); + PRINT_LMIP_qword(asd_ha, lseq, SG_LIST_PTR_ADDR1); + + asd_printk("LSEQ%d MDP 1 MODE 1 >>>>\n", lseq); + PRINT_LMIP_qword(asd_ha, lseq, M1_SG_LIST_PTR_ADDR0); + PRINT_LMIP_qword(asd_ha, lseq, M1_SG_LIST_PTR_ADDR1); + + asd_printk("LSEQ%d MDP 1 MODE 2 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, INVALID_DWORD_COUNT); + PRINT_LMIP_dword(asd_ha, lseq, DISPARITY_ERROR_COUNT); + PRINT_LMIP_dword(asd_ha, lseq, LOSS_OF_SYNC_COUNT); + + asd_printk("LSEQ%d MDP 1 MODE 4/5 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, FRAME_TYPE_MASK); + PRINT_LMIP_dword(asd_ha, lseq, HASHED_SRC_ADDR_MASK_PRINT); + PRINT_LMIP_byte(asd_ha, lseq, NUM_FILL_BYTES_MASK); + PRINT_LMIP_word(asd_ha, lseq, TAG_MASK); + PRINT_LMIP_word(asd_ha, lseq, TARGET_PORT_XFER_TAG); + PRINT_LMIP_dword(asd_ha, lseq, DATA_OFFSET); + + asd_printk("LSEQ%d MDP 2 MODE 0 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, SMP_RCV_TIMER_TERM_TS); + PRINT_LMIP_byte(asd_ha, lseq, DEVICE_BITS); + PRINT_LMIP_word(asd_ha, lseq, SDB_DDB); + PRINT_LMIP_word(asd_ha, lseq, SDB_NUM_TAGS); + PRINT_LMIP_word(asd_ha, lseq, SDB_CURR_TAG); + + asd_printk("LSEQ%d MDP 2 MODE 1 >>>>\n", lseq); + PRINT_LMIP_qword(asd_ha, lseq, TX_ID_ADDR_FRAME); + PRINT_LMIP_dword(asd_ha, lseq, OPEN_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, SRST_AS_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, LAST_LOADED_SG_EL); + + asd_printk("LSEQ%d MDP 2 MODE 2 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, CLOSE_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, BREAK_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, DWS_RESET_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, SATA_INTERLOCK_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, MCTL_TIMER_TERM_TS); + + asd_printk("LSEQ%d MDP 2 MODE 4/5 >>>>\n", lseq); + PRINT_LMIP_dword(asd_ha, lseq, COMINIT_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, RCV_ID_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, RCV_FIS_TIMER_TERM_TS); + PRINT_LMIP_dword(asd_ha, lseq, DEV_PRES_TIMER_TERM_TS); +} + +/** + * asd_dump_ddb_site -- dump a CSEQ DDB site + * @asd_ha: pointer to host adapter structure + * @site_no: site number of interest + */ +void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no) +{ + if (site_no >= asd_ha->hw_prof.max_ddbs) + return; + +#define DDB_FIELDB(__name) \ + asd_ddbsite_read_byte(asd_ha, site_no, \ + offsetof(struct asd_ddb_ssp_smp_target_port, __name)) +#define DDB2_FIELDB(__name) \ + asd_ddbsite_read_byte(asd_ha, site_no, \ + offsetof(struct asd_ddb_stp_sata_target_port, __name)) +#define DDB_FIELDW(__name) \ + asd_ddbsite_read_word(asd_ha, site_no, \ + offsetof(struct asd_ddb_ssp_smp_target_port, __name)) + +#define DDB_FIELDD(__name) \ + asd_ddbsite_read_dword(asd_ha, site_no, \ + offsetof(struct asd_ddb_ssp_smp_target_port, __name)) + + asd_printk("DDB: 0x%02x\n", site_no); + asd_printk("conn_type: 0x%02x\n", DDB_FIELDB(conn_type)); + asd_printk("conn_rate: 0x%02x\n", DDB_FIELDB(conn_rate)); + asd_printk("init_conn_tag: 0x%04x\n", be16_to_cpu(DDB_FIELDW(init_conn_tag))); + asd_printk("send_queue_head: 0x%04x\n", be16_to_cpu(DDB_FIELDW(send_queue_head))); + asd_printk("sq_suspended: 0x%02x\n", DDB_FIELDB(sq_suspended)); + asd_printk("DDB Type: 0x%02x\n", DDB_FIELDB(ddb_type)); + asd_printk("AWT Default: 0x%04x\n", DDB_FIELDW(awt_def)); + asd_printk("compat_features: 0x%02x\n", DDB_FIELDB(compat_features)); + 
asd_printk("Pathway Blocked Count: 0x%02x\n", + DDB_FIELDB(pathway_blocked_count)); + asd_printk("arb_wait_time: 0x%04x\n", DDB_FIELDW(arb_wait_time)); + asd_printk("more_compat_features: 0x%08x\n", + DDB_FIELDD(more_compat_features)); + asd_printk("Conn Mask: 0x%02x\n", DDB_FIELDB(conn_mask)); + asd_printk("flags: 0x%02x\n", DDB_FIELDB(flags)); + asd_printk("flags2: 0x%02x\n", DDB2_FIELDB(flags2)); + asd_printk("ExecQ Tail: 0x%04x\n",DDB_FIELDW(exec_queue_tail)); + asd_printk("SendQ Tail: 0x%04x\n",DDB_FIELDW(send_queue_tail)); + asd_printk("Active Task Count: 0x%04x\n", + DDB_FIELDW(active_task_count)); + asd_printk("ITNL Reason: 0x%02x\n", DDB_FIELDB(itnl_reason)); + asd_printk("ITNL Timeout Const: 0x%04x\n", DDB_FIELDW(itnl_timeout)); + asd_printk("ITNL timestamp: 0x%08x\n", DDB_FIELDD(itnl_timestamp)); +} + +void asd_dump_ddb_0(struct asd_ha_struct *asd_ha) +{ +#define DDB0_FIELDB(__name) \ + asd_ddbsite_read_byte(asd_ha, 0, \ + offsetof(struct asd_ddb_seq_shared, __name)) +#define DDB0_FIELDW(__name) \ + asd_ddbsite_read_word(asd_ha, 0, \ + offsetof(struct asd_ddb_seq_shared, __name)) + +#define DDB0_FIELDD(__name) \ + asd_ddbsite_read_dword(asd_ha,0 , \ + offsetof(struct asd_ddb_seq_shared, __name)) + +#define DDB0_FIELDA(__name, _o) \ + asd_ddbsite_read_byte(asd_ha, 0, \ + offsetof(struct asd_ddb_seq_shared, __name)+_o) + + + asd_printk("DDB: 0\n"); + asd_printk("q_free_ddb_head:%04x\n", DDB0_FIELDW(q_free_ddb_head)); + asd_printk("q_free_ddb_tail:%04x\n", DDB0_FIELDW(q_free_ddb_tail)); + asd_printk("q_free_ddb_cnt:%04x\n", DDB0_FIELDW(q_free_ddb_cnt)); + asd_printk("q_used_ddb_head:%04x\n", DDB0_FIELDW(q_used_ddb_head)); + asd_printk("q_used_ddb_tail:%04x\n", DDB0_FIELDW(q_used_ddb_tail)); + asd_printk("shared_mem_lock:%04x\n", DDB0_FIELDW(shared_mem_lock)); + asd_printk("smp_conn_tag:%04x\n", DDB0_FIELDW(smp_conn_tag)); + asd_printk("est_nexus_buf_cnt:%04x\n", DDB0_FIELDW(est_nexus_buf_cnt)); + asd_printk("est_nexus_buf_thresh:%04x\n", + DDB0_FIELDW(est_nexus_buf_thresh)); + asd_printk("conn_not_active:%02x\n", DDB0_FIELDB(conn_not_active)); + asd_printk("phy_is_up:%02x\n", DDB0_FIELDB(phy_is_up)); + asd_printk("port_map_by_links:%02x %02x %02x %02x " + "%02x %02x %02x %02x\n", + DDB0_FIELDA(port_map_by_links, 0), + DDB0_FIELDA(port_map_by_links, 1), + DDB0_FIELDA(port_map_by_links, 2), + DDB0_FIELDA(port_map_by_links, 3), + DDB0_FIELDA(port_map_by_links, 4), + DDB0_FIELDA(port_map_by_links, 5), + DDB0_FIELDA(port_map_by_links, 6), + DDB0_FIELDA(port_map_by_links, 7)); +} + +static void asd_dump_scb_site(struct asd_ha_struct *asd_ha, u16 site_no) +{ + +#define SCB_FIELDB(__name) \ + asd_scbsite_read_byte(asd_ha, site_no, sizeof(struct scb_header) \ + + offsetof(struct initiate_ssp_task, __name)) +#define SCB_FIELDW(__name) \ + asd_scbsite_read_word(asd_ha, site_no, sizeof(struct scb_header) \ + + offsetof(struct initiate_ssp_task, __name)) +#define SCB_FIELDD(__name) \ + asd_scbsite_read_dword(asd_ha, site_no, sizeof(struct scb_header) \ + + offsetof(struct initiate_ssp_task, __name)) + + asd_printk("Total Xfer Len: 0x%08x.\n", SCB_FIELDD(total_xfer_len)); + asd_printk("Frame Type: 0x%02x.\n", SCB_FIELDB(ssp_frame.frame_type)); + asd_printk("Tag: 0x%04x.\n", SCB_FIELDW(ssp_frame.tag)); + asd_printk("Target Port Xfer Tag: 0x%04x.\n", + SCB_FIELDW(ssp_frame.tptt)); + asd_printk("Data Offset: 0x%08x.\n", SCB_FIELDW(ssp_frame.data_offs)); + asd_printk("Retry Count: 0x%02x.\n", SCB_FIELDB(retry_count)); +} + +/** + * asd_dump_scb_sites -- dump currently used CSEQ SCB sites + * 
@asd_ha: pointer to host adapter struct + */ +void asd_dump_scb_sites(struct asd_ha_struct *asd_ha) +{ + u16 site_no; + + for (site_no = 0; site_no < asd_ha->hw_prof.max_scbs; site_no++) { + u8 opcode; + + if (!SCB_SITE_VALID(site_no)) + continue; + + /* We are only interested in SCB sites currently used. + */ + opcode = asd_scbsite_read_byte(asd_ha, site_no, + offsetof(struct scb_header, + opcode)); + if (opcode == 0xFF) + continue; + + asd_printk("\nSCB: 0x%x\n", site_no); + asd_dump_scb_site(asd_ha, site_no); + } +} + +/** + * ads_dump_seq_state -- dump CSEQ and LSEQ states + * @asd_ha: pointer to host adapter structure + * @lseq_mask: mask of LSEQs of interest + */ +void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask) +{ + int lseq; + + asd_dump_cseq_state(asd_ha); + + if (lseq_mask != 0) + for_each_sequencer(lseq_mask, lseq_mask, lseq) + asd_dump_lseq_state(asd_ha, lseq); +} + +void asd_dump_frame_rcvd(struct asd_phy *phy, + struct done_list_struct *dl) +{ + unsigned long flags; + int i; + + switch ((dl->status_block[1] & 0x70) >> 3) { + case SAS_PROTO_STP: + ASD_DPRINTK("STP proto device-to-host FIS:\n"); + break; + default: + case SAS_PROTO_SSP: + ASD_DPRINTK("SAS proto IDENTIFY:\n"); + break; + } + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + for (i = 0; i < phy->sas_phy.frame_rcvd_size; i+=4) + ASD_DPRINTK("%02x: %02x %02x %02x %02x\n", + i, + phy->frame_rcvd[i], + phy->frame_rcvd[i+1], + phy->frame_rcvd[i+2], + phy->frame_rcvd[i+3]); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); +} + +static inline void asd_dump_scb(struct asd_ascb *ascb, int ind) +{ + asd_printk("scb%d: vaddr: 0x%p, dma_handle: 0x%llx, next: 0x%llx, " + "index:%d, opcode:0x%02x\n", + ind, ascb->dma_scb.vaddr, + (unsigned long long)ascb->dma_scb.dma_handle, + (unsigned long long) + le64_to_cpu(ascb->scb->header.next_scb), + le16_to_cpu(ascb->scb->header.index), + ascb->scb->header.opcode); +} + +void asd_dump_scb_list(struct asd_ascb *ascb, int num) +{ + int i = 0; + + asd_printk("dumping %d scbs:\n", num); + + asd_dump_scb(ascb, i++); + --num; + + if (num > 0 && !list_empty(&ascb->list)) { + struct list_head *el; + + list_for_each(el, &ascb->list) { + struct asd_ascb *s = list_entry(el, struct asd_ascb, + list); + asd_dump_scb(s, i++); + if (--num <= 0) + break; + } + } +} + +#endif /* ASD_DEBUG */ diff --git a/drivers/scsi/aic94xx/aic94xx_dump.h b/drivers/scsi/aic94xx/aic94xx_dump.h new file mode 100644 index 0000000..0c388e7 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_dump.h @@ -0,0 +1,52 @@ +/* + * Aic94xx SAS/SATA driver dump header file. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef _AIC94XX_DUMP_H_ +#define _AIC94XX_DUMP_H_ + +#ifdef ASD_DEBUG + +void asd_dump_ddb_0(struct asd_ha_struct *asd_ha); +void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, u16 site_no); +void asd_dump_scb_sites(struct asd_ha_struct *asd_ha); +void asd_dump_seq_state(struct asd_ha_struct *asd_ha, u8 lseq_mask); +void asd_dump_frame_rcvd(struct asd_phy *phy, + struct done_list_struct *dl); +void asd_dump_scb_list(struct asd_ascb *ascb, int num); +#else /* ASD_DEBUG */ + +static inline void asd_dump_ddb_0(struct asd_ha_struct *asd_ha) { } +static inline void asd_dump_target_ddb(struct asd_ha_struct *asd_ha, + u16 site_no) { } +static inline void asd_dump_scb_sites(struct asd_ha_struct *asd_ha) { } +static inline void asd_dump_seq_state(struct asd_ha_struct *asd_ha, + u8 lseq_mask) { } +static inline void asd_dump_frame_rcvd(struct asd_phy *phy, + struct done_list_struct *dl) { } +static inline void asd_dump_scb_list(struct asd_ascb *ascb, int num) { } +#endif /* ASD_DEBUG */ + +#endif /* _AIC94XX_DUMP_H_ */ diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c new file mode 100644 index 0000000..075cea8 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_hwi.c @@ -0,0 +1,1376 @@ +/* + * Aic94xx SAS/SATA driver hardware interface. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/module.h> + +#include "aic94xx.h" +#include "aic94xx_reg.h" +#include "aic94xx_hwi.h" +#include "aic94xx_seq.h" +#include "aic94xx_dump.h" + +u32 MBAR0_SWB_SIZE; + +/* ---------- Initialization ---------- */ + +static void asd_get_user_sas_addr(struct asd_ha_struct *asd_ha) +{ + extern char sas_addr_str[]; + /* If the user has specified a WWN it overrides other settings + */ + if (sas_addr_str[0] != '\0') + asd_destringify_sas_addr(asd_ha->hw_prof.sas_addr, + sas_addr_str); + else if (asd_ha->hw_prof.sas_addr[0] != 0) + asd_stringify_sas_addr(sas_addr_str, asd_ha->hw_prof.sas_addr); +} + +static void asd_propagate_sas_addr(struct asd_ha_struct *asd_ha) +{ + int i; + + for (i = 0; i < ASD_MAX_PHYS; i++) { + if (asd_ha->hw_prof.phy_desc[i].sas_addr[0] == 0) + continue; + /* Set a phy's address only if it has none. 
+ */ + ASD_DPRINTK("setting phy%d addr to %llx\n", i, + SAS_ADDR(asd_ha->hw_prof.sas_addr)); + memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr, + asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE); + } +} + +/* ---------- PHY initialization ---------- */ + +static void asd_init_phy_identify(struct asd_phy *phy) +{ + phy->identify_frame = phy->id_frm_tok->vaddr; + + memset(phy->identify_frame, 0, sizeof(*phy->identify_frame)); + + phy->identify_frame->dev_type = SAS_END_DEV; + if (phy->sas_phy.role & PHY_ROLE_INITIATOR) + phy->identify_frame->initiator_bits = phy->sas_phy.iproto; + if (phy->sas_phy.role & PHY_ROLE_TARGET) + phy->identify_frame->target_bits = phy->sas_phy.tproto; + memcpy(phy->identify_frame->sas_addr, phy->phy_desc->sas_addr, + SAS_ADDR_SIZE); + phy->identify_frame->phy_id = phy->sas_phy.id; +} + +static int asd_init_phy(struct asd_phy *phy) +{ + struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + sas_phy->enabled = 1; + sas_phy->class = SAS; + sas_phy->iproto = SAS_PROTO_ALL; + sas_phy->tproto = 0; + sas_phy->type = PHY_TYPE_PHYSICAL; + sas_phy->role = PHY_ROLE_INITIATOR; + sas_phy->oob_mode = OOB_NOT_CONNECTED; + sas_phy->linkrate = PHY_LINKRATE_NONE; + + phy->id_frm_tok = asd_alloc_coherent(asd_ha, + sizeof(*phy->identify_frame), + GFP_KERNEL); + if (!phy->id_frm_tok) { + asd_printk("no mem for IDENTIFY for phy%d\n", sas_phy->id); + return -ENOMEM; + } else + asd_init_phy_identify(phy); + + memset(phy->frame_rcvd, 0, sizeof(phy->frame_rcvd)); + + return 0; +} + +static int asd_init_phys(struct asd_ha_struct *asd_ha) +{ + u8 i; + u8 phy_mask = asd_ha->hw_prof.enabled_phys; + + for (i = 0; i < ASD_MAX_PHYS; i++) { + struct asd_phy *phy = &asd_ha->phys[i]; + + phy->phy_desc = &asd_ha->hw_prof.phy_desc[i]; + + phy->sas_phy.enabled = 0; + phy->sas_phy.id = i; + phy->sas_phy.sas_addr = &phy->phy_desc->sas_addr[0]; + phy->sas_phy.frame_rcvd = &phy->frame_rcvd[0]; + phy->sas_phy.ha = &asd_ha->sas_ha; + phy->sas_phy.lldd_phy = phy; + } + + /* Now enable and initialize only the enabled phys. */ + for_each_phy(phy_mask, phy_mask, i) { + int err = asd_init_phy(&asd_ha->phys[i]); + if (err) + return err; + } + + return 0; +} + +/* ---------- Sliding windows ---------- */ + +static int asd_init_sw(struct asd_ha_struct *asd_ha) +{ + struct pci_dev *pcidev = asd_ha->pcidev; + int err; + u32 v; + + /* Unlock MBARs */ + err = pci_read_config_dword(pcidev, PCI_CONF_MBAR_KEY, &v); + if (err) { + asd_printk("couldn't access conf. space of %s\n", + pci_name(pcidev)); + goto Err; + } + if (v) + err = pci_write_config_dword(pcidev, PCI_CONF_MBAR_KEY, v); + if (err) { + asd_printk("couldn't write to MBAR_KEY of %s\n", + pci_name(pcidev)); + goto Err; + } + + /* Set sliding windows A, B and C to point to proper internal + * memory regions. 
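+	 * Window A maps the main internal register space (REG_BASE_ADDR),
+	 * window B the central sequencer CIO space (REG_BASE_ADDR_CSEQCIO)
+	 * and window C the EXSI space (REG_BASE_ADDR_EXSI), matching the
+	 * config writes below.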
+ */ + pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWA, REG_BASE_ADDR); + pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWB, + REG_BASE_ADDR_CSEQCIO); + pci_write_config_dword(pcidev, PCI_CONF_MBAR0_SWC, REG_BASE_ADDR_EXSI); + asd_ha->io_handle[0].swa_base = REG_BASE_ADDR; + asd_ha->io_handle[0].swb_base = REG_BASE_ADDR_CSEQCIO; + asd_ha->io_handle[0].swc_base = REG_BASE_ADDR_EXSI; + MBAR0_SWB_SIZE = asd_ha->io_handle[0].len - 0x80; + if (!asd_ha->iospace) { + /* MBAR1 will point to OCM (On Chip Memory) */ + pci_write_config_dword(pcidev, PCI_CONF_MBAR1, OCM_BASE_ADDR); + asd_ha->io_handle[1].swa_base = OCM_BASE_ADDR; + } + spin_lock_init(&asd_ha->iolock); +Err: + return err; +} + +/* ---------- SCB initialization ---------- */ + +/** + * asd_init_scbs - manually allocate the first SCB. + * @asd_ha: pointer to host adapter structure + * + * This allocates the very first SCB which would be sent to the + * sequencer for execution. Its bus address is written to + * CSEQ_Q_NEW_POINTER, mode page 2, mode 8. Since the bus address of + * the _next_ scb to be DMA-ed to the host adapter is read from the last + * SCB DMA-ed to the host adapter, we have to always stay one step + * ahead of the sequencer and keep one SCB already allocated. + */ +static int asd_init_scbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int bitmap_bytes; + + /* allocate the index array and bitmap */ + asd_ha->seq.tc_index_bitmap_bits = asd_ha->hw_prof.max_scbs; + asd_ha->seq.tc_index_array = kzalloc(asd_ha->seq.tc_index_bitmap_bits* + sizeof(void *), GFP_KERNEL); + if (!asd_ha->seq.tc_index_array) + return -ENOMEM; + + bitmap_bytes = (asd_ha->seq.tc_index_bitmap_bits+7)/8; + bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long); + asd_ha->seq.tc_index_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL); + if (!asd_ha->seq.tc_index_bitmap) + return -ENOMEM; + + spin_lock_init(&seq->tc_index_lock); + + seq->next_scb.size = sizeof(struct scb); + seq->next_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, GFP_KERNEL, + &seq->next_scb.dma_handle); + if (!seq->next_scb.vaddr) { + kfree(asd_ha->seq.tc_index_bitmap); + kfree(asd_ha->seq.tc_index_array); + asd_ha->seq.tc_index_bitmap = NULL; + asd_ha->seq.tc_index_array = NULL; + return -ENOMEM; + } + + seq->pending = 0; + spin_lock_init(&seq->pend_q_lock); + INIT_LIST_HEAD(&seq->pend_q); + + return 0; +} + +static inline void asd_get_max_scb_ddb(struct asd_ha_struct *asd_ha) +{ + asd_ha->hw_prof.max_scbs = asd_get_cmdctx_size(asd_ha)/ASD_SCB_SIZE; + asd_ha->hw_prof.max_ddbs = asd_get_devctx_size(asd_ha)/ASD_DDB_SIZE; + ASD_DPRINTK("max_scbs:%d, max_ddbs:%d\n", + asd_ha->hw_prof.max_scbs, + asd_ha->hw_prof.max_ddbs); +} + +/* ---------- Done List initialization ---------- */ + +static void asd_dl_tasklet_handler(unsigned long); + +static int asd_init_dl(struct asd_ha_struct *asd_ha) +{ + asd_ha->seq.actual_dl + = asd_alloc_coherent(asd_ha, + ASD_DL_SIZE * sizeof(struct done_list_struct), + GFP_KERNEL); + if (!asd_ha->seq.actual_dl) + return -ENOMEM; + asd_ha->seq.dl = asd_ha->seq.actual_dl->vaddr; + asd_ha->seq.dl_toggle = ASD_DEF_DL_TOGGLE; + asd_ha->seq.dl_next = 0; + tasklet_init(&asd_ha->seq.dl_tasklet, asd_dl_tasklet_handler, + (unsigned long) asd_ha); + + return 0; +} + +/* ---------- EDB and ESCB init ---------- */ + +static int asd_alloc_edbs(struct asd_ha_struct *asd_ha, unsigned int gfp_flags) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int i; + + seq->edb_arr = kmalloc(seq->num_edbs*sizeof(*seq->edb_arr), gfp_flags); + if 
(!seq->edb_arr) + return -ENOMEM; + + for (i = 0; i < seq->num_edbs; i++) { + seq->edb_arr[i] = asd_alloc_coherent(asd_ha, ASD_EDB_SIZE, + gfp_flags); + if (!seq->edb_arr[i]) + goto Err_unroll; + memset(seq->edb_arr[i]->vaddr, 0, ASD_EDB_SIZE); + } + + ASD_DPRINTK("num_edbs:%d\n", seq->num_edbs); + + return 0; + +Err_unroll: + for (i-- ; i >= 0; i--) + asd_free_coherent(asd_ha, seq->edb_arr[i]); + kfree(seq->edb_arr); + seq->edb_arr = NULL; + + return -ENOMEM; +} + +static int asd_alloc_escbs(struct asd_ha_struct *asd_ha, + unsigned int gfp_flags) +{ + struct asd_seq_data *seq = &asd_ha->seq; + struct asd_ascb *escb; + int i, escbs; + + seq->escb_arr = kmalloc(seq->num_escbs*sizeof(*seq->escb_arr), + gfp_flags); + if (!seq->escb_arr) + return -ENOMEM; + + escbs = seq->num_escbs; + escb = asd_ascb_alloc_list(asd_ha, &escbs, gfp_flags); + if (!escb) { + asd_printk("couldn't allocate list of escbs\n"); + goto Err; + } + seq->num_escbs -= escbs; /* subtract what was not allocated */ + ASD_DPRINTK("num_escbs:%d\n", seq->num_escbs); + + for (i = 0; i < seq->num_escbs; i++, escb = list_entry(escb->list.next, + struct asd_ascb, + list)) { + seq->escb_arr[i] = escb; + escb->scb->header.opcode = EMPTY_SCB; + } + + return 0; +Err: + kfree(seq->escb_arr); + seq->escb_arr = NULL; + return -ENOMEM; + +} + +static void asd_assign_edbs2escbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int i, k, z = 0; + + for (i = 0; i < seq->num_escbs; i++) { + struct asd_ascb *ascb = seq->escb_arr[i]; + struct empty_scb *escb = &ascb->scb->escb; + + ascb->edb_index = z; + + escb->num_valid = ASD_EDBS_PER_SCB; + + for (k = 0; k < ASD_EDBS_PER_SCB; k++) { + struct sg_el *eb = &escb->eb[k]; + struct asd_dma_tok *edb = seq->edb_arr[z++]; + + memset(eb, 0, sizeof(*eb)); + eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle)); + eb->size = cpu_to_le32(((u32) edb->size)); + } + } +} + +/** + * asd_init_escbs -- allocate and initialize empty scbs + * @asd_ha: pointer to host adapter structure + * + * An empty SCB has sg_elements of ASD_EDBS_PER_SCB (7) buffers. + * They transport sense data, etc. + */ +static int asd_init_escbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int err = 0; + + /* Allocate two empty data buffers (edb) per sequencer. */ + int edbs = 2*(1+asd_ha->hw_prof.num_phys); + + seq->num_escbs = (edbs+ASD_EDBS_PER_SCB-1)/ASD_EDBS_PER_SCB; + seq->num_edbs = seq->num_escbs * ASD_EDBS_PER_SCB; + + err = asd_alloc_edbs(asd_ha, GFP_KERNEL); + if (err) { + asd_printk("couldn't allocate edbs\n"); + return err; + } + + err = asd_alloc_escbs(asd_ha, GFP_KERNEL); + if (err) { + asd_printk("couldn't allocate escbs\n"); + return err; + } + + asd_assign_edbs2escbs(asd_ha); + /* In order to insure that normal SCBs do not overfill sequencer + * memory and leave no space for escbs (halting condition), + * we increment pending here by the number of escbs. However, + * escbs are never pending. + */ + seq->pending = seq->num_escbs; + seq->can_queue = 1 + (asd_ha->hw_prof.max_scbs - seq->pending)/2; + + return 0; +} + +/* ---------- HW initialization ---------- */ + +/** + * asd_chip_hardrst -- hard reset the chip + * @asd_ha: pointer to host adapter structure + * + * This takes 16 cycles and is synchronous to CFCLK, which runs + * at 200 MHz, so this should take at most 80 nanoseconds. 
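+ * The loop below nevertheless polls CHIMINT for HARDRSTDET for up to
+ * 100 microseconds (100 iterations of udelay(1)) before giving up.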
+ */
+int asd_chip_hardrst(struct asd_ha_struct *asd_ha)
+{
+	int i;
+	int count = 100;
+	u32 reg;
+
+	for (i = 0 ; i < 4 ; i++) {
+		asd_write_reg_dword(asd_ha, COMBIST, HARDRST);
+	}
+
+	do {
+		udelay(1);
+		reg = asd_read_reg_dword(asd_ha, CHIMINT);
+		if (reg & HARDRSTDET) {
+			asd_write_reg_dword(asd_ha, CHIMINT,
+					    HARDRSTDET|PORRSTDET);
+			return 0;
+		}
+	} while (--count > 0);
+
+	return -ENODEV;
+}
+
+/**
+ * asd_init_chip -- initialize the chip
+ * @asd_ha: pointer to host adapter structure
+ *
+ * Hard resets the chip, disables HA interrupts, downloads the sequencer
+ * microcode and starts the sequencers. The caller has to explicitly
+ * enable HA interrupts with asd_enable_ints(asd_ha).
+ */
+static int asd_init_chip(struct asd_ha_struct *asd_ha)
+{
+	int err;
+
+	err = asd_chip_hardrst(asd_ha);
+	if (err) {
+		asd_printk("couldn't hard reset %s\n",
+			   pci_name(asd_ha->pcidev));
+		goto out;
+	}
+
+	asd_disable_ints(asd_ha);
+
+	err = asd_init_seqs(asd_ha);
+	if (err) {
+		asd_printk("couldn't init seqs for %s\n",
+			   pci_name(asd_ha->pcidev));
+		goto out;
+	}
+
+	err = asd_start_seqs(asd_ha);
+	if (err) {
+		asd_printk("couldn't start seqs for %s\n",
+			   pci_name(asd_ha->pcidev));
+		goto out;
+	}
+out:
+	return err;
+}
+
+#define MAX_DEVS ((OCM_MAX_SIZE) / (ASD_DDB_SIZE))
+
+static int max_devs = 0;
+module_param_named(max_devs, max_devs, int, S_IRUGO);
+MODULE_PARM_DESC(max_devs, "\n"
+	"\tMaximum number of SAS devices to support (not LUs).\n"
+	"\tDefault: 2176, Maximum: 65663.\n");
+
+static int max_cmnds = 0;
+module_param_named(max_cmnds, max_cmnds, int, S_IRUGO);
+MODULE_PARM_DESC(max_cmnds, "\n"
+	"\tMaximum number of commands queuable.\n"
+	"\tDefault: 512, Maximum: 66047.\n");
+
+static void asd_extend_devctx_ocm(struct asd_ha_struct *asd_ha)
+{
+	unsigned long dma_addr = OCM_BASE_ADDR;
+	u32 d;
+
+	dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
+	asd_write_reg_addr(asd_ha, DEVCTXBASE, (dma_addr_t) dma_addr);
+	d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
+	d |= 4;
+	asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
+	asd_ha->hw_prof.max_ddbs += MAX_DEVS;
+}
+
+static int asd_extend_devctx(struct asd_ha_struct *asd_ha)
+{
+	dma_addr_t dma_handle;
+	unsigned long dma_addr;
+	u32 d;
+	int size;
+
+	asd_extend_devctx_ocm(asd_ha);
+
+	asd_ha->hw_prof.ddb_ext = NULL;
+	if (max_devs <= asd_ha->hw_prof.max_ddbs || max_devs > 0xFFFF) {
+		max_devs = asd_ha->hw_prof.max_ddbs;
+		return 0;
+	}
+
+	size = (max_devs - asd_ha->hw_prof.max_ddbs + 1) * ASD_DDB_SIZE;
+
+	asd_ha->hw_prof.ddb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
+	if (!asd_ha->hw_prof.ddb_ext) {
+		asd_printk("couldn't allocate memory for %d devices\n",
+			   max_devs);
+		max_devs = asd_ha->hw_prof.max_ddbs;
+		return -ENOMEM;
+	}
+	dma_handle = asd_ha->hw_prof.ddb_ext->dma_handle;
+	dma_addr = ALIGN((unsigned long) dma_handle, ASD_DDB_SIZE);
+	dma_addr -= asd_ha->hw_prof.max_ddbs * ASD_DDB_SIZE;
+	dma_handle = (dma_addr_t) dma_addr;
+	asd_write_reg_addr(asd_ha, DEVCTXBASE, dma_handle);
+	d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
+	d &= ~4;
+	asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
+
+	asd_ha->hw_prof.max_ddbs = max_devs;
+
+	return 0;
+}
+
+static int asd_extend_cmdctx(struct asd_ha_struct *asd_ha)
+{
+	dma_addr_t dma_handle;
+	unsigned long dma_addr;
+	u32 d;
+	int size;
+
+	asd_ha->hw_prof.scb_ext = NULL;
+	if (max_cmnds <= asd_ha->hw_prof.max_scbs || max_cmnds > 0xFFFF) {
+		max_cmnds = asd_ha->hw_prof.max_scbs;
+		return 0;
+	}
+
+	size = (max_cmnds - asd_ha->hw_prof.max_scbs + 1) * ASD_SCB_SIZE;
+
+	asd_ha->hw_prof.scb_ext = asd_alloc_coherent(asd_ha, size, GFP_KERNEL);
+	if (!asd_ha->hw_prof.scb_ext) {
+		asd_printk("couldn't allocate memory for %d commands\n",
+			   max_cmnds);
+		max_cmnds = asd_ha->hw_prof.max_scbs;
+		return -ENOMEM;
+	}
+	dma_handle = asd_ha->hw_prof.scb_ext->dma_handle;
+	dma_addr = ALIGN((unsigned long) dma_handle, ASD_SCB_SIZE);
+	dma_addr -= asd_ha->hw_prof.max_scbs * ASD_SCB_SIZE;
+	dma_handle = (dma_addr_t) dma_addr;
+	asd_write_reg_addr(asd_ha, CMDCTXBASE, dma_handle);
+	d = asd_read_reg_dword(asd_ha, CTXDOMAIN);
+	d &= ~1;
+	asd_write_reg_dword(asd_ha, CTXDOMAIN, d);
+
+	asd_ha->hw_prof.max_scbs = max_cmnds;
+
+	return 0;
+}
+
+/**
+ * asd_init_ctxmem -- initialize context memory
+ * @asd_ha: pointer to host adapter structure
+ *
+ * This function sets the maximum number of SCBs and
+ * DDBs which can be used by the sequencer. This is normally
+ * 512 and 128 respectively. If support for more SCBs or more DDBs
+ * is required then CMDCTXBASE, DEVCTXBASE and CTXDOMAIN are
+ * initialized here to extend context memory to point to host memory,
+ * thus allowing unlimited support for SCBs and DDBs -- only limited
+ * by host memory.
+ */
+static int asd_init_ctxmem(struct asd_ha_struct *asd_ha)
+{
+	int bitmap_bytes;
+
+	asd_get_max_scb_ddb(asd_ha);
+	asd_extend_devctx(asd_ha);
+	asd_extend_cmdctx(asd_ha);
+
+	/* The kernel wants bitmaps to be unsigned long sized. */
+	bitmap_bytes = (asd_ha->hw_prof.max_ddbs+7)/8;
+	bitmap_bytes = BITS_TO_LONGS(bitmap_bytes*8)*sizeof(unsigned long);
+	asd_ha->hw_prof.ddb_bitmap = kzalloc(bitmap_bytes, GFP_KERNEL);
+	if (!asd_ha->hw_prof.ddb_bitmap)
+		return -ENOMEM;
+	spin_lock_init(&asd_ha->hw_prof.ddb_lock);
+
+	return 0;
+}
+
+int asd_init_hw(struct asd_ha_struct *asd_ha)
+{
+	int err;
+	u32 v;
+
+	err = asd_init_sw(asd_ha);
+	if (err)
+		return err;
+
+	err = pci_read_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, &v);
+	if (err) {
+		asd_printk("couldn't read PCIC_HSTPCIX_CNTRL of %s\n",
+			   pci_name(asd_ha->pcidev));
+		return err;
+	}
+	err = pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
+				     v | SC_TMR_DIS);
+	if (err) {
+		asd_printk("couldn't disable split completion timer of %s\n",
+			   pci_name(asd_ha->pcidev));
+		return err;
+	}
+
+	err = asd_read_ocm(asd_ha);
+	if (err) {
+		asd_printk("couldn't read ocm(%d)\n", err);
+		/* While suspicious, it is not an error that we
+		 * couldn't read the OCM. */
+	}
+
+	err = asd_read_flash(asd_ha);
+	if (err) {
+		asd_printk("couldn't read flash(%d)\n", err);
+		/* While suspicious, it is not an error that we
+		 * couldn't read FLASH memory.
+ */ + } + + asd_init_ctxmem(asd_ha); + + asd_get_user_sas_addr(asd_ha); + if (!asd_ha->hw_prof.sas_addr[0]) { + asd_printk("No SAS Address provided for %s\n", + pci_name(asd_ha->pcidev)); + err = -ENODEV; + goto Out; + } + + asd_propagate_sas_addr(asd_ha); + + err = asd_init_phys(asd_ha); + if (err) { + asd_printk("couldn't initialize phys for %s\n", + pci_name(asd_ha->pcidev)); + goto Out; + } + + err = asd_init_scbs(asd_ha); + if (err) { + asd_printk("couldn't initialize scbs for %s\n", + pci_name(asd_ha->pcidev)); + goto Out; + } + + err = asd_init_dl(asd_ha); + if (err) { + asd_printk("couldn't initialize the done list:%d\n", + err); + goto Out; + } + + err = asd_init_escbs(asd_ha); + if (err) { + asd_printk("couldn't initialize escbs\n"); + goto Out; + } + + err = asd_init_chip(asd_ha); + if (err) { + asd_printk("couldn't init the chip\n"); + goto Out; + } +Out: + return err; +} + +/* ---------- Chip reset ---------- */ + +/** + * asd_chip_reset -- reset the host adapter, etc + * @asd_ha: pointer to host adapter structure of interest + * + * Called from the ISR. Hard reset the chip. Let everything + * timeout. This should be no different than hot-unplugging the + * host adapter. Once everything times out we'll init the chip with + * a call to asd_init_chip() and enable interrupts with asd_enable_ints(). + * XXX finish. + */ +static void asd_chip_reset(struct asd_ha_struct *asd_ha) +{ + struct sas_ha_struct *sas_ha = &asd_ha->sas_ha; + + ASD_DPRINTK("chip reset for %s\n", pci_name(asd_ha->pcidev)); + asd_chip_hardrst(asd_ha); + sas_ha->notify_ha_event(sas_ha, HAE_RESET); +} + +/* ---------- Done List Routines ---------- */ + +static void asd_dl_tasklet_handler(unsigned long data) +{ + struct asd_ha_struct *asd_ha = (struct asd_ha_struct *) data; + struct asd_seq_data *seq = &asd_ha->seq; + unsigned long flags; + + while (1) { + struct done_list_struct *dl = &seq->dl[seq->dl_next]; + struct asd_ascb *ascb; + + if ((dl->toggle & DL_TOGGLE_MASK) != seq->dl_toggle) + break; + + /* find the aSCB */ + spin_lock_irqsave(&seq->tc_index_lock, flags); + ascb = asd_tc_index_find(seq, (int)le16_to_cpu(dl->index)); + spin_unlock_irqrestore(&seq->tc_index_lock, flags); + if (unlikely(!ascb)) { + ASD_DPRINTK("BUG:sequencer:dl:no ascb?!\n"); + goto next_1; + } else if (ascb->scb->header.opcode == EMPTY_SCB) { + goto out; + } else if (!ascb->uldd_timer && !del_timer(&ascb->timer)) { + goto next_1; + } + spin_lock_irqsave(&seq->pend_q_lock, flags); + list_del_init(&ascb->list); + seq->pending--; + spin_unlock_irqrestore(&seq->pend_q_lock, flags); + out: + ascb->tasklet_complete(ascb, dl); + + next_1: + seq->dl_next = (seq->dl_next + 1) & (ASD_DL_SIZE-1); + if (!seq->dl_next) + seq->dl_toggle ^= DL_TOGGLE_MASK; + } +} + +/* ---------- Interrupt Service Routines ---------- */ + +/** + * asd_process_donelist_isr -- schedule processing of done list entries + * @asd_ha: pointer to host adapter structure + */ +static inline void asd_process_donelist_isr(struct asd_ha_struct *asd_ha) +{ + tasklet_schedule(&asd_ha->seq.dl_tasklet); +} + +/** + * asd_com_sas_isr -- process device communication interrupt (COMINT) + * @asd_ha: pointer to host adapter structure + */ +static inline void asd_com_sas_isr(struct asd_ha_struct *asd_ha) +{ + u32 comstat = asd_read_reg_dword(asd_ha, COMSTAT); + + /* clear COMSTAT int */ + asd_write_reg_dword(asd_ha, COMSTAT, 0xFFFFFFFF); + + if (comstat & CSBUFPERR) { + asd_printk("%s: command/status buffer dma parity error\n", + pci_name(asd_ha->pcidev)); + } else if (comstat & CSERR) { 
+ int i; + u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR); + dmaerr &= 0xFF; + asd_printk("%s: command/status dma error, DMAERR: 0x%02x, " + "CSDMAADR: 0x%04x, CSDMAADR+4: 0x%04x\n", + pci_name(asd_ha->pcidev), + dmaerr, + asd_read_reg_dword(asd_ha, CSDMAADR), + asd_read_reg_dword(asd_ha, CSDMAADR+4)); + asd_printk("CSBUFFER:\n"); + for (i = 0; i < 8; i++) { + asd_printk("%08x %08x %08x %08x\n", + asd_read_reg_dword(asd_ha, CSBUFFER), + asd_read_reg_dword(asd_ha, CSBUFFER+4), + asd_read_reg_dword(asd_ha, CSBUFFER+8), + asd_read_reg_dword(asd_ha, CSBUFFER+12)); + } + asd_dump_seq_state(asd_ha, 0); + } else if (comstat & OVLYERR) { + u32 dmaerr = asd_read_reg_dword(asd_ha, DMAERR); + dmaerr = (dmaerr >> 8) & 0xFF; + asd_printk("%s: overlay dma error:0x%x\n", + pci_name(asd_ha->pcidev), + dmaerr); + } + asd_chip_reset(asd_ha); +} + +static inline void asd_arp2_err(struct asd_ha_struct *asd_ha, u32 dchstatus) +{ + static const char *halt_code[256] = { + "UNEXPECTED_INTERRUPT0", + "UNEXPECTED_INTERRUPT1", + "UNEXPECTED_INTERRUPT2", + "UNEXPECTED_INTERRUPT3", + "UNEXPECTED_INTERRUPT4", + "UNEXPECTED_INTERRUPT5", + "UNEXPECTED_INTERRUPT6", + "UNEXPECTED_INTERRUPT7", + "UNEXPECTED_INTERRUPT8", + "UNEXPECTED_INTERRUPT9", + "UNEXPECTED_INTERRUPT10", + [11 ... 19] = "unknown[11,19]", + "NO_FREE_SCB_AVAILABLE", + "INVALID_SCB_OPCODE", + "INVALID_MBX_OPCODE", + "INVALID_ATA_STATE", + "ATA_QUEUE_FULL", + "ATA_TAG_TABLE_FAULT", + "ATA_TAG_MASK_FAULT", + "BAD_LINK_QUEUE_STATE", + "DMA2CHIM_QUEUE_ERROR", + "EMPTY_SCB_LIST_FULL", + "unknown[30]", + "IN_USE_SCB_ON_FREE_LIST", + "BAD_OPEN_WAIT_STATE", + "INVALID_STP_AFFILIATION", + "unknown[34]", + "EXEC_QUEUE_ERROR", + "TOO_MANY_EMPTIES_NEEDED", + "EMPTY_REQ_QUEUE_ERROR", + "Q_MONIRTT_MGMT_ERROR", + "TARGET_MODE_FLOW_ERROR", + "DEVICE_QUEUE_NOT_FOUND", + "START_IRTT_TIMER_ERROR", + "ABORT_TASK_ILLEGAL_REQ", + [43 ... 
255] = "unknown[43,255]" + }; + + if (dchstatus & CSEQINT) { + u32 arp2int = asd_read_reg_dword(asd_ha, CARP2INT); + + if (arp2int & (ARP2WAITTO|ARP2ILLOPC|ARP2PERR|ARP2CIOPERR)) { + asd_printk("%s: CSEQ arp2int:0x%x\n", + pci_name(asd_ha->pcidev), + arp2int); + } else if (arp2int & ARP2HALTC) + asd_printk("%s: CSEQ halted: %s\n", + pci_name(asd_ha->pcidev), + halt_code[(arp2int>>16)&0xFF]); + else + asd_printk("%s: CARP2INT:0x%x\n", + pci_name(asd_ha->pcidev), + arp2int); + } + if (dchstatus & LSEQINT_MASK) { + int lseq; + u8 lseq_mask = dchstatus & LSEQINT_MASK; + + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + u32 arp2int = asd_read_reg_dword(asd_ha, + LmARP2INT(lseq)); + if (arp2int & (ARP2WAITTO | ARP2ILLOPC | ARP2PERR + | ARP2CIOPERR)) { + asd_printk("%s: LSEQ%d arp2int:0x%x\n", + pci_name(asd_ha->pcidev), + lseq, arp2int); + /* XXX we should only do lseq reset */ + } else if (arp2int & ARP2HALTC) + asd_printk("%s: LSEQ%d halted: %s\n", + pci_name(asd_ha->pcidev), + lseq,halt_code[(arp2int>>16)&0xFF]); + else + asd_printk("%s: LSEQ%d ARP2INT:0x%x\n", + pci_name(asd_ha->pcidev), lseq, + arp2int); + } + } + asd_chip_reset(asd_ha); +} + +/** + * asd_dch_sas_isr -- process device channel interrupt (DEVINT) + * @asd_ha: pointer to host adapter structure + */ +static inline void asd_dch_sas_isr(struct asd_ha_struct *asd_ha) +{ + u32 dchstatus = asd_read_reg_dword(asd_ha, DCHSTATUS); + + if (dchstatus & CFIFTOERR) { + asd_printk("%s: CFIFTOERR\n", pci_name(asd_ha->pcidev)); + asd_chip_reset(asd_ha); + } else + asd_arp2_err(asd_ha, dchstatus); +} + +/** + * ads_rbi_exsi_isr -- process external system interface interrupt (INITERR) + * @asd_ha: pointer to host adapter structure + */ +static inline void asd_rbi_exsi_isr(struct asd_ha_struct *asd_ha) +{ + u32 stat0r = asd_read_reg_dword(asd_ha, ASISTAT0R); + + if (!(stat0r & ASIERR)) { + asd_printk("hmm, EXSI interrupted but no error?\n"); + return; + } + + if (stat0r & ASIFMTERR) { + asd_printk("ASI SEEPROM format error for %s\n", + pci_name(asd_ha->pcidev)); + } else if (stat0r & ASISEECHKERR) { + u32 stat1r = asd_read_reg_dword(asd_ha, ASISTAT1R); + asd_printk("ASI SEEPROM checksum 0x%x error for %s\n", + stat1r & CHECKSUM_MASK, + pci_name(asd_ha->pcidev)); + } else { + u32 statr = asd_read_reg_dword(asd_ha, ASIERRSTATR); + + if (!(statr & CPI2ASIMSTERR_MASK)) { + ASD_DPRINTK("hmm, ASIERR?\n"); + return; + } else { + u32 addr = asd_read_reg_dword(asd_ha, ASIERRADDR); + u32 data = asd_read_reg_dword(asd_ha, ASIERRDATAR); + + asd_printk("%s: CPI2 xfer err: addr: 0x%x, wdata: 0x%x, " + "count: 0x%x, byteen: 0x%x, targerr: 0x%x " + "master id: 0x%x, master err: 0x%x\n", + pci_name(asd_ha->pcidev), + addr, data, + (statr & CPI2ASIBYTECNT_MASK) >> 16, + (statr & CPI2ASIBYTEEN_MASK) >> 12, + (statr & CPI2ASITARGERR_MASK) >> 8, + (statr & CPI2ASITARGMID_MASK) >> 4, + (statr & CPI2ASIMSTERR_MASK)); + } + } + asd_chip_reset(asd_ha); +} + +/** + * asd_hst_pcix_isr -- process host interface interrupts + * @asd_ha: pointer to host adapter structure + * + * Asserted on PCIX errors: target abort, etc. 
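+ * Received and unexpected split completions are cleared and otherwise
+ * ignored; every other condition handled here ends in a chip reset.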
+ */ +static inline void asd_hst_pcix_isr(struct asd_ha_struct *asd_ha) +{ + u16 status; + u32 pcix_status; + u32 ecc_status; + + pci_read_config_word(asd_ha->pcidev, PCI_STATUS, &status); + pci_read_config_dword(asd_ha->pcidev, PCIX_STATUS, &pcix_status); + pci_read_config_dword(asd_ha->pcidev, ECC_CTRL_STAT, &ecc_status); + + if (status & PCI_STATUS_DETECTED_PARITY) + asd_printk("parity error for %s\n", pci_name(asd_ha->pcidev)); + else if (status & PCI_STATUS_REC_MASTER_ABORT) + asd_printk("master abort for %s\n", pci_name(asd_ha->pcidev)); + else if (status & PCI_STATUS_REC_TARGET_ABORT) + asd_printk("target abort for %s\n", pci_name(asd_ha->pcidev)); + else if (status & PCI_STATUS_PARITY) + asd_printk("data parity for %s\n", pci_name(asd_ha->pcidev)); + else if (pcix_status & RCV_SCE) { + asd_printk("received split completion error for %s\n", + pci_name(asd_ha->pcidev)); + pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status); + /* XXX: Abort task? */ + return; + } else if (pcix_status & UNEXP_SC) { + asd_printk("unexpected split completion for %s\n", + pci_name(asd_ha->pcidev)); + pci_write_config_dword(asd_ha->pcidev,PCIX_STATUS,pcix_status); + /* ignore */ + return; + } else if (pcix_status & SC_DISCARD) + asd_printk("split completion discarded for %s\n", + pci_name(asd_ha->pcidev)); + else if (ecc_status & UNCOR_ECCERR) + asd_printk("uncorrectable ECC error for %s\n", + pci_name(asd_ha->pcidev)); + asd_chip_reset(asd_ha); +} + +/** + * asd_hw_isr -- host adapter interrupt service routine + * @irq: ignored + * @dev_id: pointer to host adapter structure + * @regs: ignored + * + * The ISR processes done list entries and level 3 error handling. + */ +irqreturn_t asd_hw_isr(int irq, void *dev_id, struct pt_regs *regs) +{ + struct asd_ha_struct *asd_ha = dev_id; + u32 chimint = asd_read_reg_dword(asd_ha, CHIMINT); + + if (!chimint) + return IRQ_NONE; + + asd_write_reg_dword(asd_ha, CHIMINT, chimint); + (void) asd_read_reg_dword(asd_ha, CHIMINT); + + if (chimint & DLAVAIL) + asd_process_donelist_isr(asd_ha); + if (chimint & COMINT) + asd_com_sas_isr(asd_ha); + if (chimint & DEVINT) + asd_dch_sas_isr(asd_ha); + if (chimint & INITERR) + asd_rbi_exsi_isr(asd_ha); + if (chimint & HOSTERR) + asd_hst_pcix_isr(asd_ha); + + return IRQ_HANDLED; +} + +/* ---------- SCB handling ---------- */ + +static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha, + unsigned int gfp_flags) +{ + extern kmem_cache_t *asd_ascb_cache; + struct asd_seq_data *seq = &asd_ha->seq; + struct asd_ascb *ascb; + unsigned long flags; + + ascb = kmem_cache_alloc(asd_ascb_cache, gfp_flags); + + if (ascb) { + memset(ascb, 0, sizeof(*ascb)); + ascb->dma_scb.size = sizeof(struct scb); + ascb->dma_scb.vaddr = dma_pool_alloc(asd_ha->scb_pool, + gfp_flags, + &ascb->dma_scb.dma_handle); + if (!ascb->dma_scb.vaddr) { + kmem_cache_free(asd_ascb_cache, ascb); + return NULL; + } + memset(ascb->dma_scb.vaddr, 0, sizeof(struct scb)); + asd_init_ascb(asd_ha, ascb); + + spin_lock_irqsave(&seq->tc_index_lock, flags); + ascb->tc_index = asd_tc_index_get(seq, ascb); + spin_unlock_irqrestore(&seq->tc_index_lock, flags); + if (ascb->tc_index == -1) + goto undo; + + ascb->scb->header.index = cpu_to_le16((u16)ascb->tc_index); + } + + return ascb; +undo: + dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr, + ascb->dma_scb.dma_handle); + kmem_cache_free(asd_ascb_cache, ascb); + ASD_DPRINTK("no index for ascb\n"); + return NULL; +} + +/** + * asd_ascb_alloc_list -- allocate a list of aSCBs + * @asd_ha: pointer to host 
adapter structure + * @num: pointer to integer number of aSCBs + * @gfp_flags: GFP_ flags. + * + * This is the only function which is used to allocate aSCBs. + * It can allocate one or many. If more than one, then they form + * a linked list in two ways: by their list field of the ascb struct + * and by the next_scb field of the scb_header. + * + * Returns NULL if no memory was available, else pointer to a list + * of ascbs. When this function returns, @num would be the number + * of SCBs which were not able to be allocated, 0 if all requested + * were able to be allocated. + */ +struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct + *asd_ha, int *num, + unsigned int gfp_flags) +{ + struct asd_ascb *first = NULL; + + for ( ; *num > 0; --*num) { + struct asd_ascb *ascb = asd_ascb_alloc(asd_ha, gfp_flags); + + if (!ascb) + break; + else if (!first) + first = ascb; + else { + struct asd_ascb *last = list_entry(first->list.prev, + struct asd_ascb, + list); + list_add_tail(&ascb->list, &first->list); + last->scb->header.next_scb = + cpu_to_le64(((u64)ascb->dma_scb.dma_handle)); + } + } + + return first; +} + +/** + * asd_swap_head_scb -- swap the head scb + * @asd_ha: pointer to host adapter structure + * @ascb: pointer to the head of an ascb list + * + * The sequencer knows the DMA address of the next SCB to be DMAed to + * the host adapter, from initialization or from the last list DMAed. + * seq->next_scb keeps the address of this SCB. The sequencer will + * DMA to the host adapter this list of SCBs. But the head (first + * element) of this list is not known to the sequencer. Here we swap + * the head of the list with the known SCB (memcpy()). + * Only one memcpy() is required per list so it is in our interest + * to keep the list of SCB as long as possible so that the ratio + * of number of memcpy calls to the number of SCB DMA-ed is as small + * as possible. + * + * LOCKING: called with the pending list lock held. + */ +static inline void asd_swap_head_scb(struct asd_ha_struct *asd_ha, + struct asd_ascb *ascb) +{ + struct asd_seq_data *seq = &asd_ha->seq; + struct asd_ascb *last = list_entry(ascb->list.prev, + struct asd_ascb, + list); + struct asd_dma_tok t = ascb->dma_scb; + + memcpy(seq->next_scb.vaddr, ascb->scb, sizeof(*ascb->scb)); + ascb->dma_scb = seq->next_scb; + ascb->scb = ascb->dma_scb.vaddr; + seq->next_scb = t; + last->scb->header.next_scb = + cpu_to_le64(((u64)seq->next_scb.dma_handle)); +} + +/** + * asd_start_timers -- (add and) start timers of SCBs + * @list: pointer to struct list_head of the scbs + * @to: timeout in jiffies + * + * If an SCB in the @list has no timer function, assign the default + * one, then start the timer of the SCB. This function is + * intended to be called from asd_post_ascb_list(), just prior to + * posting the SCBs to the sequencer. + */ +static inline void asd_start_scb_timers(struct list_head *list) +{ + struct asd_ascb *ascb; + list_for_each_entry(ascb, list, list) { + if (!ascb->uldd_timer) { + ascb->timer.data = (unsigned long) ascb; + ascb->timer.function = asd_ascb_timedout; + ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT; + add_timer(&ascb->timer); + } + } +} + +/** + * asd_post_ascb_list -- post a list of 1 or more aSCBs to the host adapter + * @asd_ha: pointer to a host adapter structure + * @ascb: pointer to the first aSCB in the list + * @num: number of aSCBs in the list (to be posted) + * + * See queueing comment in asd_post_escb_list(). 
+ * + * Additional note on queuing: In order to minimize the ratio of memcpy() + * to the number of ascbs sent, we try to batch-send as many ascbs as possible + * in one go. + * Two cases are possible: + * A) can_queue >= num, + * B) can_queue < num. + * Case A: we can send the whole batch at once. Increment "pending" + * in the beginning of this function, when it is checked, in order to + * eliminate races when this function is called by multiple processes. + * Case B: should never happen if the managing layer considers + * lldd_queue_size. + */ +int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, + int num) +{ + unsigned long flags; + LIST_HEAD(list); + int can_queue; + + spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); + can_queue = asd_ha->hw_prof.max_scbs - asd_ha->seq.pending; + if (can_queue >= num) + asd_ha->seq.pending += num; + else + can_queue = 0; + + if (!can_queue) { + spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); + asd_printk("%s: scb queue full\n", pci_name(asd_ha->pcidev)); + return -SAS_QUEUE_FULL; + } + + asd_swap_head_scb(asd_ha, ascb); + + __list_add(&list, ascb->list.prev, &ascb->list); + + asd_start_scb_timers(&list); + + asd_ha->seq.scbpro += num; + list_splice_init(&list, asd_ha->seq.pend_q.prev); + asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro); + spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); + + return 0; +} + +/** + * asd_post_escb_list -- post a list of 1 or more empty scb + * @asd_ha: pointer to a host adapter structure + * @ascb: pointer to the first empty SCB in the list + * @num: number of aSCBs in the list (to be posted) + * + * This is essentially the same as asd_post_ascb_list, but we do not + * increment pending, add those to the pending list or get indexes. + * See asd_init_escbs() and asd_init_post_escbs(). + * + * Since sending a list of ascbs is a superset of sending a single + * ascb, this function exists to generalize this. More specifically, + * when sending a list of those, we want to do only a _single_ + * memcpy() at swap head, as opposed to for each ascb sent (in the + * case of sending them one by one). That is, we want to minimize the + * ratio of memcpy() operations to the number of ascbs sent. The same + * logic applies to asd_post_ascb_list(). + */ +int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, + int num) +{ + unsigned long flags; + + spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); + asd_swap_head_scb(asd_ha, ascb); + asd_ha->seq.scbpro += num; + asd_write_reg_dword(asd_ha, SCBPRO, (u32)asd_ha->seq.scbpro); + spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); + + return 0; +} + +/* ---------- LED ---------- */ + +/** + * asd_turn_led -- turn on/off an LED + * @asd_ha: pointer to host adapter structure + * @phy_id: the PHY id whose LED we want to manupulate + * @op: 1 to turn on, 0 to turn off + */ +void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op) +{ + if (phy_id < ASD_MAX_PHYS) { + u32 v = asd_read_reg_dword(asd_ha, LmCONTROL(phy_id)); + if (op) + v |= LEDPOL; + else + v &= ~LEDPOL; + asd_write_reg_dword(asd_ha, LmCONTROL(phy_id), v); + } +} + +/** + * asd_control_led -- enable/disable an LED on the board + * @asd_ha: pointer to host adapter structure + * @phy_id: integer, the phy id + * @op: integer, 1 to enable, 0 to disable the LED + * + * First we output enable the LED, then we set the source + * to be an external module. 
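+ * The phy id doubles as the bit index into the GPIOOER and GPIOCNFGR
+ * registers.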
+ */ +void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op) +{ + if (phy_id < ASD_MAX_PHYS) { + u32 v; + + v = asd_read_reg_dword(asd_ha, GPIOOER); + if (op) + v |= (1 << phy_id); + else + v &= ~(1 << phy_id); + asd_write_reg_dword(asd_ha, GPIOOER, v); + + v = asd_read_reg_dword(asd_ha, GPIOCNFGR); + if (op) + v |= (1 << phy_id); + else + v &= ~(1 << phy_id); + asd_write_reg_dword(asd_ha, GPIOCNFGR, v); + } +} + +/* ---------- PHY enable ---------- */ + +static int asd_enable_phy(struct asd_ha_struct *asd_ha, int phy_id) +{ + struct asd_phy *phy = &asd_ha->phys[phy_id]; + + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, INT_ENABLE_2), 0); + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, HOT_PLUG_DELAY), + HOTPLUG_DELAY_TIMEOUT); + + /* Get defaults from manuf. sector */ + /* XXX we need defaults for those in case MS is broken. */ + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_0), + phy->phy_desc->phy_control_0); + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_1), + phy->phy_desc->phy_control_1); + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_2), + phy->phy_desc->phy_control_2); + asd_write_reg_byte(asd_ha, LmSEQ_OOB_REG(phy_id, PHY_CONTROL_3), + phy->phy_desc->phy_control_3); + + asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(phy_id), + ASD_COMINIT_TIMEOUT); + + asd_write_reg_addr(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(phy_id), + phy->id_frm_tok->dma_handle); + + asd_control_led(asd_ha, phy_id, 1); + + return 0; +} + +int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask) +{ + u8 phy_m; + u8 i; + int num = 0, k; + struct asd_ascb *ascb; + struct asd_ascb *ascb_list; + + if (!phy_mask) { + asd_printk("%s called with phy_mask of 0!?\n", __FUNCTION__); + return 0; + } + + for_each_phy(phy_mask, phy_m, i) { + num++; + asd_enable_phy(asd_ha, i); + } + + k = num; + ascb_list = asd_ascb_alloc_list(asd_ha, &k, GFP_KERNEL); + if (!ascb_list) { + asd_printk("no memory for control phy ascb list\n"); + return -ENOMEM; + } + num -= k; + + ascb = ascb_list; + for_each_phy(phy_mask, phy_m, i) { + asd_build_control_phy(ascb, i, ENABLE_PHY); + ascb = list_entry(ascb->list.next, struct asd_ascb, list); + } + ASD_DPRINTK("posting %d control phy scbs\n", num); + k = asd_post_ascb_list(asd_ha, ascb_list, num); + if (k) + asd_ascb_free_list(ascb_list); + + return k; +} diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h new file mode 100644 index 0000000..c7d5053 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_hwi.h @@ -0,0 +1,397 @@ +/* + * Aic94xx SAS/SATA driver hardware interface header file. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef _AIC94XX_HWI_H_ +#define _AIC94XX_HWI_H_ + +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> + +#include <scsi/libsas.h> + +#include "aic94xx.h" +#include "aic94xx_sas.h" + +/* Define ASD_MAX_PHYS to the maximum phys ever. Currently 8. */ +#define ASD_MAX_PHYS 8 +#define ASD_PCBA_SN_SIZE 12 + +/* Those are to be further named properly, the "RAZORx" part, and + * subsequently included in include/linux/pci_ids.h. + */ +#define PCI_DEVICE_ID_ADAPTEC2_RAZOR10 0x410 +#define PCI_DEVICE_ID_ADAPTEC2_RAZOR12 0x412 +#define PCI_DEVICE_ID_ADAPTEC2_RAZOR1E 0x41E +#define PCI_DEVICE_ID_ADAPTEC2_RAZOR30 0x430 +#define PCI_DEVICE_ID_ADAPTEC2_RAZOR32 0x432 +#define PCI_DEVICE_ID_ADAPTEC2_RAZOR3E 0x43E +#define PCI_DEVICE_ID_ADAPTEC2_RAZOR3F 0x43F + +struct asd_ha_addrspace { + void __iomem *addr; + unsigned long start; /* pci resource start */ + unsigned long len; /* pci resource len */ + unsigned long flags; /* pci resource flags */ + + /* addresses internal to the host adapter */ + u32 swa_base; /* mmspace 1 (MBAR1) uses this only */ + u32 swb_base; + u32 swc_base; +}; + +struct bios_struct { + int present; + u8 maj; + u8 min; + u32 bld; +}; + +struct unit_element_struct { + u16 num; + u16 size; + void *area; +}; + +struct flash_struct { + u32 bar; + int present; + int wide; + u8 manuf; + u8 dev_id; + u8 sec_prot; + + u32 dir_offs; +}; + +struct asd_phy_desc { + /* From CTRL-A settings, then set to what is appropriate */ + u8 sas_addr[SAS_ADDR_SIZE]; + u8 max_sas_lrate; + u8 min_sas_lrate; + u8 max_sata_lrate; + u8 min_sata_lrate; + u8 flags; +#define ASD_CRC_DIS 1 +#define ASD_SATA_SPINUP_HOLD 2 + + u8 phy_control_0; /* mode 5 reg 0x160 */ + u8 phy_control_1; /* mode 5 reg 0x161 */ + u8 phy_control_2; /* mode 5 reg 0x162 */ + u8 phy_control_3; /* mode 5 reg 0x163 */ +}; + +struct asd_dma_tok { + void *vaddr; + dma_addr_t dma_handle; + size_t size; +}; + +struct hw_profile { + struct bios_struct bios; + struct unit_element_struct ue; + struct flash_struct flash; + + u8 sas_addr[SAS_ADDR_SIZE]; + char pcba_sn[ASD_PCBA_SN_SIZE+1]; + + u8 enabled_phys; /* mask of enabled phys */ + struct asd_phy_desc phy_desc[ASD_MAX_PHYS]; + u32 max_scbs; /* absolute sequencer scb queue size */ + struct asd_dma_tok *scb_ext; + u32 max_ddbs; + struct asd_dma_tok *ddb_ext; + + spinlock_t ddb_lock; + void *ddb_bitmap; + + int num_phys; /* ENABLEABLE */ + int max_phys; /* REPORTED + ENABLEABLE */ + + unsigned addr_range; /* max # of addrs; max # of possible ports */ + unsigned port_name_base; + unsigned dev_name_base; + unsigned sata_name_base; +}; + +struct asd_ascb { + struct list_head list; + struct asd_ha_struct *ha; + + struct scb *scb; /* equals dma_scb->vaddr */ + struct asd_dma_tok dma_scb; + struct asd_dma_tok *sg_arr; + + void (*tasklet_complete)(struct asd_ascb *, struct done_list_struct *); + u8 uldd_timer:1; + + /* internally generated command */ + struct timer_list timer; + struct completion completion; + u8 tag_valid:1; + __be16 tag; /* error recovery only */ + + /* If this is an Empty SCB, index of first edb in seq->edb_arr. */ + int edb_index; + + /* Used by the timer timeout function. 
*/ + int tc_index; + + void *uldd_task; +}; + +#define ASD_DL_SIZE_BITS 0x8 +#define ASD_DL_SIZE (1<<(2+ASD_DL_SIZE_BITS)) +#define ASD_DEF_DL_TOGGLE 0x01 + +struct asd_seq_data { + spinlock_t pend_q_lock; + u16 scbpro; + int pending; + struct list_head pend_q; + int can_queue; /* per adapter */ + struct asd_dma_tok next_scb; /* next scb to be delivered to CSEQ */ + + spinlock_t tc_index_lock; + void **tc_index_array; + void *tc_index_bitmap; + int tc_index_bitmap_bits; + + struct tasklet_struct dl_tasklet; + struct done_list_struct *dl; /* array of done list entries, equals */ + struct asd_dma_tok *actual_dl; /* actual_dl->vaddr */ + int dl_toggle; + int dl_next; + + int num_edbs; + struct asd_dma_tok **edb_arr; + int num_escbs; + struct asd_ascb **escb_arr; /* array of pointers to escbs */ +}; + +/* This is the Host Adapter structure. It describes the hardware + * SAS adapter. + */ +struct asd_ha_struct { + struct pci_dev *pcidev; + const char *name; + + struct sas_ha_struct sas_ha; + + u8 revision_id; + + int iospace; + spinlock_t iolock; + struct asd_ha_addrspace io_handle[2]; + + struct hw_profile hw_prof; + + struct asd_phy phys[ASD_MAX_PHYS]; + struct asd_sas_port ports[ASD_MAX_PHYS]; + + struct dma_pool *scb_pool; + + struct asd_seq_data seq; /* sequencer related */ +}; + +/* ---------- Common macros ---------- */ + +#define ASD_BUSADDR_LO(__dma_handle) ((u32)(__dma_handle)) +#define ASD_BUSADDR_HI(__dma_handle) (((sizeof(dma_addr_t))==8) \ + ? ((u32)((__dma_handle) >> 32)) \ + : ((u32)0)) + +#define dev_to_asd_ha(__dev) pci_get_drvdata(to_pci_dev(__dev)) +#define SCB_SITE_VALID(__site_no) (((__site_no) & 0xF0FF) != 0x00FF \ + && ((__site_no) & 0xF0FF) > 0x001F) +/* For each bit set in __lseq_mask, set __lseq to equal the bit + * position of the set bit and execute the statement following. + * __mc is the temporary mask, used as a mask "counter". 
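+ * For example, with a mask of 0x05 the controlled statement runs twice,
+ * first with __lseq == 0 and then with __lseq == 2.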
+ */ +#define for_each_sequencer(__lseq_mask, __mc, __lseq) \ + for ((__mc)=(__lseq_mask),(__lseq)=0;(__mc)!=0;(__lseq++),(__mc)>>=1)\ + if (((__mc) & 1)) +#define for_each_phy(__lseq_mask, __mc, __lseq) \ + for ((__mc)=(__lseq_mask),(__lseq)=0;(__mc)!=0;(__lseq++),(__mc)>>=1)\ + if (((__mc) & 1)) + +#define PHY_ENABLED(_HA, _I) ((_HA)->hw_prof.enabled_phys & (1<<(_I))) + +/* ---------- DMA allocs ---------- */ + +static inline struct asd_dma_tok *asd_dmatok_alloc(unsigned int flags) +{ + return kmem_cache_alloc(asd_dma_token_cache, flags); +} + +static inline void asd_dmatok_free(struct asd_dma_tok *token) +{ + kmem_cache_free(asd_dma_token_cache, token); +} + +static inline struct asd_dma_tok *asd_alloc_coherent(struct asd_ha_struct * + asd_ha, size_t size, + unsigned int flags) +{ + struct asd_dma_tok *token = asd_dmatok_alloc(flags); + if (token) { + token->size = size; + token->vaddr = dma_alloc_coherent(&asd_ha->pcidev->dev, + token->size, + &token->dma_handle, + flags); + if (!token->vaddr) { + asd_dmatok_free(token); + token = NULL; + } + } + return token; +} + +static inline void asd_free_coherent(struct asd_ha_struct *asd_ha, + struct asd_dma_tok *token) +{ + if (token) { + dma_free_coherent(&asd_ha->pcidev->dev, token->size, + token->vaddr, token->dma_handle); + asd_dmatok_free(token); + } +} + +static inline void asd_init_ascb(struct asd_ha_struct *asd_ha, + struct asd_ascb *ascb) +{ + INIT_LIST_HEAD(&ascb->list); + ascb->scb = ascb->dma_scb.vaddr; + ascb->ha = asd_ha; + ascb->timer.function = NULL; + init_timer(&ascb->timer); + ascb->tc_index = -1; + init_completion(&ascb->completion); +} + +/* Must be called with the tc_index_lock held! + */ +static inline void asd_tc_index_release(struct asd_seq_data *seq, int index) +{ + seq->tc_index_array[index] = NULL; + clear_bit(index, seq->tc_index_bitmap); +} + +/* Must be called with the tc_index_lock held! + */ +static inline int asd_tc_index_get(struct asd_seq_data *seq, void *ptr) +{ + int index; + + index = find_first_zero_bit(seq->tc_index_bitmap, + seq->tc_index_bitmap_bits); + if (index == seq->tc_index_bitmap_bits) + return -1; + + seq->tc_index_array[index] = ptr; + set_bit(index, seq->tc_index_bitmap); + + return index; +} + +/* Must be called with the tc_index_lock held! + */ +static inline void *asd_tc_index_find(struct asd_seq_data *seq, int index) +{ + return seq->tc_index_array[index]; +} + +/** + * asd_ascb_free -- free a single aSCB after is has completed + * @ascb: pointer to the aSCB of interest + * + * This frees an aSCB after it has been executed/completed by + * the sequencer. + */ +static inline void asd_ascb_free(struct asd_ascb *ascb) +{ + if (ascb) { + struct asd_ha_struct *asd_ha = ascb->ha; + unsigned long flags; + + BUG_ON(!list_empty(&ascb->list)); + spin_lock_irqsave(&ascb->ha->seq.tc_index_lock, flags); + asd_tc_index_release(&ascb->ha->seq, ascb->tc_index); + spin_unlock_irqrestore(&ascb->ha->seq.tc_index_lock, flags); + dma_pool_free(asd_ha->scb_pool, ascb->dma_scb.vaddr, + ascb->dma_scb.dma_handle); + kmem_cache_free(asd_ascb_cache, ascb); + } +} + +/** + * asd_ascb_list_free -- free a list of ascbs + * @ascb_list: a list of ascbs + * + * This function will free a list of ascbs allocated by asd_ascb_alloc_list. + * It is used when say the scb queueing function returned QUEUE_FULL, + * and we do not need the ascbs any more. 
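+ *
+ * A typical caller allocates the list with asd_ascb_alloc_list(), tries to
+ * post it with asd_post_ascb_list(), and hands the whole list back here if
+ * the post fails; see asd_enable_phys() for an example of this pattern.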
+ */ +static inline void asd_ascb_free_list(struct asd_ascb *ascb_list) +{ + LIST_HEAD(list); + struct list_head *n, *pos; + + __list_add(&list, ascb_list->list.prev, &ascb_list->list); + list_for_each_safe(pos, n, &list) { + list_del_init(pos); + asd_ascb_free(list_entry(pos, struct asd_ascb, list)); + } +} + +/* ---------- Function declarations ---------- */ + +int asd_init_hw(struct asd_ha_struct *asd_ha); +irqreturn_t asd_hw_isr(int irq, void *dev_id, struct pt_regs *regs); + + +struct asd_ascb *asd_ascb_alloc_list(struct asd_ha_struct + *asd_ha, int *num, + unsigned int gfp_mask); + +int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, + int num); +int asd_post_escb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, + int num); + +int asd_init_post_escbs(struct asd_ha_struct *asd_ha); +void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc); +void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op); +void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op); +int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask); +void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id, + u8 subfunc); + +void asd_ascb_timedout(unsigned long data); +int asd_chip_hardrst(struct asd_ha_struct *asd_ha); + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c new file mode 100644 index 0000000..3ec2e46 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -0,0 +1,860 @@ +/* + * Aic94xx SAS/SATA driver initialization. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include <linux/config.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/delay.h> + +#include <scsi/scsi_host.h> + +#include "aic94xx.h" +#include "aic94xx_reg.h" +#include "aic94xx_hwi.h" +#include "aic94xx_seq.h" + +/* The format is "version.release.patchlevel" */ +#define ASD_DRIVER_VERSION "1.0.2" + +static int use_msi = 0; +module_param_named(use_msi, use_msi, int, S_IRUGO); +MODULE_PARM_DESC(use_msi, "\n" + "\tEnable(1) or disable(0) using PCI MSI.\n" + "\tDefault: 0"); + +static int lldd_max_execute_num = 0; +module_param_named(collector, lldd_max_execute_num, int, S_IRUGO); +MODULE_PARM_DESC(collector, "\n" + "\tIf greater than one, tells the SAS Layer to run in Task Collector\n" + "\tMode. 
If 1 or 0, tells the SAS Layer to run in Direct Mode.\n" + "\tThe aic94xx SAS LLDD supports both modes.\n" + "\tDefault: 0 (Direct Mode).\n"); + +char sas_addr_str[2*SAS_ADDR_SIZE + 1] = ""; + +static struct scsi_transport_template *aic94xx_transport_template; + +static struct scsi_host_template aic94xx_sht = { + .module = THIS_MODULE, + /* .name is initialized */ + .name = "aic94xx", + .queuecommand = sas_queuecommand, + .target_alloc = sas_target_alloc, + .slave_configure = sas_slave_configure, + .slave_destroy = sas_slave_destroy, + .change_queue_depth = sas_change_queue_depth, + .change_queue_type = sas_change_queue_type, + .bios_param = sas_bios_param, + .can_queue = 1, + .cmd_per_lun = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .use_clustering = ENABLE_CLUSTERING, +}; + +static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha) +{ + int err, i; + struct asd_ha_addrspace *io_handle; + + asd_ha->iospace = 0; + for (i = 0; i < 3; i += 2) { + io_handle = &asd_ha->io_handle[i==0?0:1]; + io_handle->start = pci_resource_start(asd_ha->pcidev, i); + io_handle->len = pci_resource_len(asd_ha->pcidev, i); + io_handle->flags = pci_resource_flags(asd_ha->pcidev, i); + err = -ENODEV; + if (!io_handle->start || !io_handle->len) { + asd_printk("MBAR%d start or length for %s is 0.\n", + i==0?0:1, pci_name(asd_ha->pcidev)); + goto Err; + } + err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME); + if (err) { + asd_printk("couldn't reserve memory region for %s\n", + pci_name(asd_ha->pcidev)); + goto Err; + } + if (io_handle->flags & IORESOURCE_CACHEABLE) + io_handle->addr = ioremap(io_handle->start, + io_handle->len); + else + io_handle->addr = ioremap_nocache(io_handle->start, + io_handle->len); + if (!io_handle->addr) { + asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1, + pci_name(asd_ha->pcidev)); + goto Err_unreq; + } + } + + return 0; +Err_unreq: + pci_release_region(asd_ha->pcidev, i); +Err: + if (i > 0) { + io_handle = &asd_ha->io_handle[0]; + iounmap(io_handle->addr); + pci_release_region(asd_ha->pcidev, 0); + } + return err; +} + +static void __devexit asd_unmap_memio(struct asd_ha_struct *asd_ha) +{ + struct asd_ha_addrspace *io_handle; + + io_handle = &asd_ha->io_handle[1]; + iounmap(io_handle->addr); + pci_release_region(asd_ha->pcidev, 2); + + io_handle = &asd_ha->io_handle[0]; + iounmap(io_handle->addr); + pci_release_region(asd_ha->pcidev, 0); +} + +static int __devinit asd_map_ioport(struct asd_ha_struct *asd_ha) +{ + int i = PCI_IOBAR_OFFSET, err; + struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; + + asd_ha->iospace = 1; + io_handle->start = pci_resource_start(asd_ha->pcidev, i); + io_handle->len = pci_resource_len(asd_ha->pcidev, i); + io_handle->flags = pci_resource_flags(asd_ha->pcidev, i); + io_handle->addr = (void __iomem *) io_handle->start; + if (!io_handle->start || !io_handle->len) { + asd_printk("couldn't get IO ports for %s\n", + pci_name(asd_ha->pcidev)); + return -ENODEV; + } + err = pci_request_region(asd_ha->pcidev, i, ASD_DRIVER_NAME); + if (err) { + asd_printk("couldn't reserve io space for %s\n", + pci_name(asd_ha->pcidev)); + } + + return err; +} + +static void __devexit asd_unmap_ioport(struct asd_ha_struct *asd_ha) +{ + pci_release_region(asd_ha->pcidev, PCI_IOBAR_OFFSET); +} + +static int __devinit asd_map_ha(struct asd_ha_struct *asd_ha) +{ + int err; + u16 cmd_reg; + + err = pci_read_config_word(asd_ha->pcidev, PCI_COMMAND, &cmd_reg); + if (err) { + asd_printk("couldn't read command 
register of %s\n", + pci_name(asd_ha->pcidev)); + goto Err; + } + + err = -ENODEV; + if (cmd_reg & PCI_COMMAND_MEMORY) { + if ((err = asd_map_memio(asd_ha))) + goto Err; + } else if (cmd_reg & PCI_COMMAND_IO) { + if ((err = asd_map_ioport(asd_ha))) + goto Err; + asd_printk("%s ioport mapped -- upgrade your hardware\n", + pci_name(asd_ha->pcidev)); + } else { + asd_printk("no proper device access to %s\n", + pci_name(asd_ha->pcidev)); + goto Err; + } + + return 0; +Err: + return err; +} + +static void __devexit asd_unmap_ha(struct asd_ha_struct *asd_ha) +{ + if (asd_ha->iospace) + asd_unmap_ioport(asd_ha); + else + asd_unmap_memio(asd_ha); +} + +static const char *asd_dev_rev[30] = { + [0] = "A0", + [1] = "A1", + [8] = "B0", +}; + +static int __devinit asd_common_setup(struct asd_ha_struct *asd_ha) +{ + int err, i; + + err = pci_read_config_byte(asd_ha->pcidev, PCI_REVISION_ID, + &asd_ha->revision_id); + if (err) { + asd_printk("couldn't read REVISION ID register of %s\n", + pci_name(asd_ha->pcidev)); + goto Err; + } + err = -ENODEV; + if (asd_ha->revision_id < AIC9410_DEV_REV_B0) { + asd_printk("%s is revision %s (%X), which is not supported\n", + pci_name(asd_ha->pcidev), + asd_dev_rev[asd_ha->revision_id], + asd_ha->revision_id); + goto Err; + } + /* Provide some sane default values. */ + asd_ha->hw_prof.max_scbs = 512; + asd_ha->hw_prof.max_ddbs = 128; + asd_ha->hw_prof.num_phys = ASD_MAX_PHYS; + /* All phys are enabled, by default. */ + asd_ha->hw_prof.enabled_phys = 0xFF; + for (i = 0; i < ASD_MAX_PHYS; i++) { + asd_ha->hw_prof.phy_desc[i].max_sas_lrate = PHY_LINKRATE_3; + asd_ha->hw_prof.phy_desc[i].min_sas_lrate = PHY_LINKRATE_1_5; + asd_ha->hw_prof.phy_desc[i].max_sata_lrate= PHY_LINKRATE_1_5; + asd_ha->hw_prof.phy_desc[i].min_sata_lrate= PHY_LINKRATE_1_5; + } + + return 0; +Err: + return err; +} + +static int __devinit asd_aic9410_setup(struct asd_ha_struct *asd_ha) +{ + int err = asd_common_setup(asd_ha); + + if (err) + return err; + + asd_ha->hw_prof.addr_range = 8; + asd_ha->hw_prof.port_name_base = 0; + asd_ha->hw_prof.dev_name_base = 8; + asd_ha->hw_prof.sata_name_base = 16; + + return 0; +} + +static int __devinit asd_aic9405_setup(struct asd_ha_struct *asd_ha) +{ + int err = asd_common_setup(asd_ha); + + if (err) + return err; + + asd_ha->hw_prof.addr_range = 4; + asd_ha->hw_prof.port_name_base = 0; + asd_ha->hw_prof.dev_name_base = 4; + asd_ha->hw_prof.sata_name_base = 8; + + return 0; +} + +static ssize_t asd_show_dev_rev(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); + return snprintf(buf, PAGE_SIZE, "%s\n", + asd_dev_rev[asd_ha->revision_id]); +} +static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL); + +static ssize_t asd_show_dev_bios_build(struct device *dev, + struct device_attribute *attr,char *buf) +{ + struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); + return snprintf(buf, PAGE_SIZE, "%d\n", asd_ha->hw_prof.bios.bld); +} +static DEVICE_ATTR(bios_build, S_IRUGO, asd_show_dev_bios_build, NULL); + +static ssize_t asd_show_dev_pcba_sn(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asd_ha_struct *asd_ha = dev_to_asd_ha(dev); + return snprintf(buf, PAGE_SIZE, "%s\n", asd_ha->hw_prof.pcba_sn); +} +static DEVICE_ATTR(pcba_sn, S_IRUGO, asd_show_dev_pcba_sn, NULL); + +static void asd_create_dev_attrs(struct asd_ha_struct *asd_ha) +{ + device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision); + device_create_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); + 
device_create_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); +} + +static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha) +{ + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); +} + +/* The first entry, 0, is used for dynamic ids, the rest for devices + * we know about. + */ +static struct asd_pcidev_struct { + const char * name; + int (*setup)(struct asd_ha_struct *asd_ha); +} asd_pcidev_data[] = { + /* Id 0 is used for dynamic ids. */ + { .name = "Adaptec AIC-94xx SAS/SATA Host Adapter", + .setup = asd_aic9410_setup + }, + { .name = "Adaptec AIC-9410W SAS/SATA Host Adapter", + .setup = asd_aic9410_setup + }, + { .name = "Adaptec AIC-9405W SAS/SATA Host Adapter", + .setup = asd_aic9405_setup + }, +}; + +static inline int asd_create_ha_caches(struct asd_ha_struct *asd_ha) +{ + asd_ha->scb_pool = dma_pool_create(ASD_DRIVER_NAME "_scb_pool", + &asd_ha->pcidev->dev, + sizeof(struct scb), + 8, 0); + if (!asd_ha->scb_pool) { + asd_printk("couldn't create scb pool\n"); + return -ENOMEM; + } + + return 0; +} + +/** + * asd_free_edbs -- free empty data buffers + * asd_ha: pointer to host adapter structure + */ +static inline void asd_free_edbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int i; + + for (i = 0; i < seq->num_edbs; i++) + asd_free_coherent(asd_ha, seq->edb_arr[i]); + kfree(seq->edb_arr); + seq->edb_arr = NULL; +} + +static inline void asd_free_escbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int i; + + for (i = 0; i < seq->num_escbs; i++) { + if (!list_empty(&seq->escb_arr[i]->list)) + list_del_init(&seq->escb_arr[i]->list); + + asd_ascb_free(seq->escb_arr[i]); + } + kfree(seq->escb_arr); + seq->escb_arr = NULL; +} + +static inline void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha) +{ + int i; + + if (asd_ha->hw_prof.ddb_ext) + asd_free_coherent(asd_ha, asd_ha->hw_prof.ddb_ext); + if (asd_ha->hw_prof.scb_ext) + asd_free_coherent(asd_ha, asd_ha->hw_prof.scb_ext); + + if (asd_ha->hw_prof.ddb_bitmap) + kfree(asd_ha->hw_prof.ddb_bitmap); + asd_ha->hw_prof.ddb_bitmap = NULL; + + for (i = 0; i < ASD_MAX_PHYS; i++) { + struct asd_phy *phy = &asd_ha->phys[i]; + + asd_free_coherent(asd_ha, phy->id_frm_tok); + } + if (asd_ha->seq.escb_arr) + asd_free_escbs(asd_ha); + if (asd_ha->seq.edb_arr) + asd_free_edbs(asd_ha); + if (asd_ha->hw_prof.ue.area) { + kfree(asd_ha->hw_prof.ue.area); + asd_ha->hw_prof.ue.area = NULL; + } + if (asd_ha->seq.tc_index_array) { + kfree(asd_ha->seq.tc_index_array); + kfree(asd_ha->seq.tc_index_bitmap); + asd_ha->seq.tc_index_array = NULL; + asd_ha->seq.tc_index_bitmap = NULL; + } + if (asd_ha->seq.actual_dl) { + asd_free_coherent(asd_ha, asd_ha->seq.actual_dl); + asd_ha->seq.actual_dl = NULL; + asd_ha->seq.dl = NULL; + } + if (asd_ha->seq.next_scb.vaddr) { + dma_pool_free(asd_ha->scb_pool, asd_ha->seq.next_scb.vaddr, + asd_ha->seq.next_scb.dma_handle); + asd_ha->seq.next_scb.vaddr = NULL; + } + dma_pool_destroy(asd_ha->scb_pool); + asd_ha->scb_pool = NULL; +} + +kmem_cache_t *asd_dma_token_cache; +kmem_cache_t *asd_ascb_cache; + +static int asd_create_global_caches(void) +{ + if (!asd_dma_token_cache) { + asd_dma_token_cache + = kmem_cache_create(ASD_DRIVER_NAME "_dma_token", + sizeof(struct asd_dma_tok), + 0, + SLAB_HWCACHE_ALIGN, + NULL, NULL); + if (!asd_dma_token_cache) { + asd_printk("couldn't create dma token cache\n"); + return -ENOMEM; + } + 
} + + if (!asd_ascb_cache) { + asd_ascb_cache = kmem_cache_create(ASD_DRIVER_NAME "_ascb", + sizeof(struct asd_ascb), + 0, + SLAB_HWCACHE_ALIGN, + NULL, NULL); + if (!asd_ascb_cache) { + asd_printk("couldn't create ascb cache\n"); + goto Err; + } + } + + return 0; +Err: + kmem_cache_destroy(asd_dma_token_cache); + asd_dma_token_cache = NULL; + return -ENOMEM; +} + +static void asd_destroy_global_caches(void) +{ + if (asd_dma_token_cache) + kmem_cache_destroy(asd_dma_token_cache); + asd_dma_token_cache = NULL; + + if (asd_ascb_cache) + kmem_cache_destroy(asd_ascb_cache); + asd_ascb_cache = NULL; +} + +static int asd_register_sas_ha(struct asd_ha_struct *asd_ha) +{ + int i; + struct asd_sas_phy **sas_phys = + kmalloc(ASD_MAX_PHYS * sizeof(struct asd_sas_phy), GFP_KERNEL); + struct asd_sas_port **sas_ports = + kmalloc(ASD_MAX_PHYS * sizeof(struct asd_sas_port), GFP_KERNEL); + + if (!sas_phys || !sas_ports) { + kfree(sas_phys); + kfree(sas_ports); + return -ENOMEM; + } + + asd_ha->sas_ha.sas_ha_name = (char *) asd_ha->name; + asd_ha->sas_ha.lldd_module = THIS_MODULE; + asd_ha->sas_ha.sas_addr = &asd_ha->hw_prof.sas_addr[0]; + + for (i = 0; i < ASD_MAX_PHYS; i++) { + sas_phys[i] = &asd_ha->phys[i].sas_phy; + sas_ports[i] = &asd_ha->ports[i]; + } + + asd_ha->sas_ha.sas_phy = sas_phys; + asd_ha->sas_ha.sas_port= sas_ports; + asd_ha->sas_ha.num_phys= ASD_MAX_PHYS; + + asd_ha->sas_ha.lldd_queue_size = asd_ha->seq.can_queue; + + return sas_register_ha(&asd_ha->sas_ha); +} + +static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha) +{ + int err; + + err = sas_unregister_ha(&asd_ha->sas_ha); + + sas_remove_host(asd_ha->sas_ha.core.shost); + scsi_remove_host(asd_ha->sas_ha.core.shost); + scsi_host_put(asd_ha->sas_ha.core.shost); + + kfree(asd_ha->sas_ha.sas_phy); + kfree(asd_ha->sas_ha.sas_port); + + return err; +} + +static int __devinit asd_pci_probe(struct pci_dev *dev, + const struct pci_device_id *id) +{ + struct asd_pcidev_struct *asd_dev; + unsigned asd_id = (unsigned) id->driver_data; + struct asd_ha_struct *asd_ha; + struct Scsi_Host *shost; + int err; + + if (asd_id >= ARRAY_SIZE(asd_pcidev_data)) { + asd_printk("wrong driver_data in PCI table\n"); + return -ENODEV; + } + + if ((err = pci_enable_device(dev))) { + asd_printk("couldn't enable device %s\n", pci_name(dev)); + return err; + } + + pci_set_master(dev); + + err = -ENOMEM; + + shost = scsi_host_alloc(&aic94xx_sht, sizeof(void *)); + if (!shost) + goto Err; + + asd_dev = &asd_pcidev_data[asd_id]; + + asd_ha = kzalloc(sizeof(*asd_ha), GFP_KERNEL); + if (!asd_ha) { + asd_printk("out of memory\n"); + goto Err; + } + asd_ha->pcidev = dev; + asd_ha->sas_ha.pcidev = asd_ha->pcidev; + asd_ha->sas_ha.lldd_ha = asd_ha; + + asd_ha->name = asd_dev->name; + asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev)); + + SHOST_TO_SAS_HA(shost) = &asd_ha->sas_ha; + asd_ha->sas_ha.core.shost = shost; + shost->transportt = aic94xx_transport_template; + shost->max_id = ~0; + shost->max_lun = ~0; + shost->max_cmd_len = 16; + + err = scsi_add_host(shost, &dev->dev); + if (err) { + scsi_host_put(shost); + goto Err_free; + } + + + + err = asd_dev->setup(asd_ha); + if (err) + goto Err_free; + + err = -ENODEV; + if (!pci_set_dma_mask(dev, DMA_64BIT_MASK) + && !pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK)) + ; + else if (!pci_set_dma_mask(dev, DMA_32BIT_MASK) + && !pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) + ; + else { + asd_printk("no suitable DMA mask for %s\n", pci_name(dev)); + goto Err_free; + } + + pci_set_drvdata(dev, asd_ha); + 
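+	/* Map the chip's register space, create the DMA pools, and bring up
+	 * the hardware and its interrupt before registering with libsas;
+	 * failures unwind through the Err_* labels below in reverse order.
+	 */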
+ err = asd_map_ha(asd_ha); + if (err) + goto Err_free; + + err = asd_create_ha_caches(asd_ha); + if (err) + goto Err_unmap; + + err = asd_init_hw(asd_ha); + if (err) + goto Err_free_cache; + + asd_printk("device %s: SAS addr %llx, PCBA SN %s, %d phys, %d enabled " + "phys, flash %s, BIOS %s%d\n", + pci_name(dev), SAS_ADDR(asd_ha->hw_prof.sas_addr), + asd_ha->hw_prof.pcba_sn, asd_ha->hw_prof.max_phys, + asd_ha->hw_prof.num_phys, + asd_ha->hw_prof.flash.present ? "present" : "not present", + asd_ha->hw_prof.bios.present ? "build " : "not present", + asd_ha->hw_prof.bios.bld); + + if (use_msi) + pci_enable_msi(asd_ha->pcidev); + + err = request_irq(asd_ha->pcidev->irq, asd_hw_isr, SA_SHIRQ, + ASD_DRIVER_NAME, asd_ha); + if (err) { + asd_printk("couldn't get irq %d for %s\n", + asd_ha->pcidev->irq, pci_name(asd_ha->pcidev)); + goto Err_irq; + } + asd_enable_ints(asd_ha); + + err = asd_init_post_escbs(asd_ha); + if (err) { + asd_printk("couldn't post escbs for %s\n", + pci_name(asd_ha->pcidev)); + goto Err_escbs; + } + ASD_DPRINTK("escbs posted\n"); + + asd_create_dev_attrs(asd_ha); + + err = asd_register_sas_ha(asd_ha); + if (err) + goto Err_reg_sas; + + err = asd_enable_phys(asd_ha, asd_ha->hw_prof.enabled_phys); + if (err) { + asd_printk("coudln't enable phys, err:%d\n", err); + goto Err_en_phys; + } + ASD_DPRINTK("enabled phys\n"); + /* give the phy enabling interrupt event time to come in (1s + * is empirically about all it takes) */ + ssleep(1); + /* Wait for discovery to finish */ + scsi_flush_work(asd_ha->sas_ha.core.shost); + + return 0; +Err_en_phys: + asd_unregister_sas_ha(asd_ha); +Err_reg_sas: + asd_remove_dev_attrs(asd_ha); +Err_escbs: + asd_disable_ints(asd_ha); + free_irq(dev->irq, asd_ha); +Err_irq: + if (use_msi) + pci_disable_msi(dev); + asd_chip_hardrst(asd_ha); +Err_free_cache: + asd_destroy_ha_caches(asd_ha); +Err_unmap: + asd_unmap_ha(asd_ha); +Err_free: + kfree(asd_ha); + scsi_remove_host(shost); +Err: + pci_disable_device(dev); + return err; +} + +static void asd_free_queues(struct asd_ha_struct *asd_ha) +{ + unsigned long flags; + LIST_HEAD(pending); + struct list_head *n, *pos; + + spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); + asd_ha->seq.pending = 0; + list_splice_init(&asd_ha->seq.pend_q, &pending); + spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); + + if (!list_empty(&pending)) + ASD_DPRINTK("Uh-oh! 
Pending is not empty!\n"); + + list_for_each_safe(pos, n, &pending) { + struct asd_ascb *ascb = list_entry(pos, struct asd_ascb, list); + list_del_init(pos); + ASD_DPRINTK("freeing from pending\n"); + asd_ascb_free(ascb); + } +} + +static void asd_turn_off_leds(struct asd_ha_struct *asd_ha) +{ + u8 phy_mask = asd_ha->hw_prof.enabled_phys; + u8 i; + + for_each_phy(phy_mask, phy_mask, i) { + asd_turn_led(asd_ha, i, 0); + asd_control_led(asd_ha, i, 0); + } +} + +static void __devexit asd_pci_remove(struct pci_dev *dev) +{ + struct asd_ha_struct *asd_ha = pci_get_drvdata(dev); + + if (!asd_ha) + return; + + asd_unregister_sas_ha(asd_ha); + + asd_disable_ints(asd_ha); + + asd_remove_dev_attrs(asd_ha); + + /* XXX more here as needed */ + + free_irq(dev->irq, asd_ha); + if (use_msi) + pci_disable_msi(asd_ha->pcidev); + asd_turn_off_leds(asd_ha); + asd_chip_hardrst(asd_ha); + asd_free_queues(asd_ha); + asd_destroy_ha_caches(asd_ha); + asd_unmap_ha(asd_ha); + kfree(asd_ha); + pci_disable_device(dev); + return; +} + +static ssize_t asd_version_show(struct device_driver *driver, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", ASD_DRIVER_VERSION); +} +static DRIVER_ATTR(version, S_IRUGO, asd_version_show, NULL); + +static void asd_create_driver_attrs(struct device_driver *driver) +{ + driver_create_file(driver, &driver_attr_version); +} + +static void asd_remove_driver_attrs(struct device_driver *driver) +{ + driver_remove_file(driver, &driver_attr_version); +} + +static struct sas_domain_function_template aic94xx_transport_functions = { + .lldd_port_formed = asd_update_port_links, + + .lldd_dev_found = asd_dev_found, + .lldd_dev_gone = asd_dev_gone, + + .lldd_execute_task = asd_execute_task, + + .lldd_abort_task = asd_abort_task, + .lldd_abort_task_set = asd_abort_task_set, + .lldd_clear_aca = asd_clear_aca, + .lldd_clear_task_set = asd_clear_task_set, + .lldd_I_T_nexus_reset = NULL, + .lldd_lu_reset = asd_lu_reset, + .lldd_query_task = asd_query_task, + + .lldd_clear_nexus_port = asd_clear_nexus_port, + .lldd_clear_nexus_ha = asd_clear_nexus_ha, + + .lldd_control_phy = asd_control_phy, +}; + +static const struct pci_device_id aic94xx_pci_table[] __devinitdata = { + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR10), + 0, 0, 1}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR12), + 0, 0, 1}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR1E), + 0, 0, 1}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR30), + 0, 0, 2}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR32), + 0, 0, 2}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR3E), + 0, 0, 2}, + {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_RAZOR3F), + 0, 0, 2}, + {} +}; + +MODULE_DEVICE_TABLE(pci, aic94xx_pci_table); + +static struct pci_driver aic94xx_pci_driver = { + .name = ASD_DRIVER_NAME, + .id_table = aic94xx_pci_table, + .probe = asd_pci_probe, + .remove = __devexit_p(asd_pci_remove), +}; + +static int __init aic94xx_init(void) +{ + int err; + + + asd_printk("%s version %s loaded\n", ASD_DRIVER_DESCRIPTION, + ASD_DRIVER_VERSION); + + err = asd_create_global_caches(); + if (err) + return err; + + aic94xx_transport_template = + sas_domain_attach_transport(&aic94xx_transport_functions); + if (err) + goto out_destroy_caches; + + err = pci_register_driver(&aic94xx_pci_driver); + if (err) + goto out_release_transport; + + asd_create_driver_attrs(&aic94xx_pci_driver.driver); + + return err; + + out_release_transport: + 
sas_release_transport(aic94xx_transport_template); + out_destroy_caches: + asd_destroy_global_caches(); + + return err; +} + +static void __exit aic94xx_exit(void) +{ + asd_remove_driver_attrs(&aic94xx_pci_driver.driver); + pci_unregister_driver(&aic94xx_pci_driver); + sas_release_transport(aic94xx_transport_template); + asd_destroy_global_caches(); + asd_printk("%s version %s unloaded\n", ASD_DRIVER_DESCRIPTION, + ASD_DRIVER_VERSION); +} + +module_init(aic94xx_init); +module_exit(aic94xx_exit); + +MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>"); +MODULE_DESCRIPTION(ASD_DRIVER_DESCRIPTION); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(ASD_DRIVER_VERSION); diff --git a/drivers/scsi/aic94xx/aic94xx_reg.c b/drivers/scsi/aic94xx/aic94xx_reg.c new file mode 100644 index 0000000..f210dac --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_reg.c @@ -0,0 +1,332 @@ +/* + * Aic94xx SAS/SATA driver register access. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include <linux/pci.h> +#include "aic94xx_reg.h" +#include "aic94xx.h" + +/* Writing to device address space. + * Offset comes before value to remind that the operation of + * this function is *offs = val. + */ +static inline void asd_write_byte(struct asd_ha_struct *asd_ha, + unsigned long offs, u8 val) +{ + if (unlikely(asd_ha->iospace)) + outb(val, + (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF)); + else + writeb(val, asd_ha->io_handle[0].addr + offs); + wmb(); +} + +static inline void asd_write_word(struct asd_ha_struct *asd_ha, + unsigned long offs, u16 val) +{ + if (unlikely(asd_ha->iospace)) + outw(val, + (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF)); + else + writew(val, asd_ha->io_handle[0].addr + offs); + wmb(); +} + +static inline void asd_write_dword(struct asd_ha_struct *asd_ha, + unsigned long offs, u32 val) +{ + if (unlikely(asd_ha->iospace)) + outl(val, + (unsigned long)asd_ha->io_handle[0].addr + (offs & 0xFF)); + else + writel(val, asd_ha->io_handle[0].addr + offs); + wmb(); +} + +/* Reading from device address space. 
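+ * As with the write accessors above: an I/O space mapped adapter
+ * (asd_ha->iospace set) is accessed with in[bwl](), a memory mapped one
+ * with read[bwl]() on the ioremapped region, followed by a read barrier.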
+ */ +static inline u8 asd_read_byte(struct asd_ha_struct *asd_ha, + unsigned long offs) +{ + u8 val; + if (unlikely(asd_ha->iospace)) + val = inb((unsigned long) asd_ha->io_handle[0].addr + + (offs & 0xFF)); + else + val = readb(asd_ha->io_handle[0].addr + offs); + rmb(); + return val; +} + +static inline u16 asd_read_word(struct asd_ha_struct *asd_ha, + unsigned long offs) +{ + u16 val; + if (unlikely(asd_ha->iospace)) + val = inw((unsigned long)asd_ha->io_handle[0].addr + + (offs & 0xFF)); + else + val = readw(asd_ha->io_handle[0].addr + offs); + rmb(); + return val; +} + +static inline u32 asd_read_dword(struct asd_ha_struct *asd_ha, + unsigned long offs) +{ + u32 val; + if (unlikely(asd_ha->iospace)) + val = inl((unsigned long) asd_ha->io_handle[0].addr + + (offs & 0xFF)); + else + val = readl(asd_ha->io_handle[0].addr + offs); + rmb(); + return val; +} + +static inline u32 asd_mem_offs_swa(void) +{ + return 0; +} + +static inline u32 asd_mem_offs_swc(void) +{ + return asd_mem_offs_swa() + MBAR0_SWA_SIZE; +} + +static inline u32 asd_mem_offs_swb(void) +{ + return asd_mem_offs_swc() + MBAR0_SWC_SIZE + 0x20; +} + +/* We know that the register wanted is in the range + * of the sliding window. + */ +#define ASD_READ_SW(ww, type, ord) \ +static inline type asd_read_##ww##_##ord (struct asd_ha_struct *asd_ha,\ + u32 reg) \ +{ \ + struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \ + u32 map_offs=(reg - io_handle-> ww##_base )+asd_mem_offs_##ww ();\ + return asd_read_##ord (asd_ha, (unsigned long) map_offs); \ +} + +#define ASD_WRITE_SW(ww, type, ord) \ +static inline void asd_write_##ww##_##ord (struct asd_ha_struct *asd_ha,\ + u32 reg, type val) \ +{ \ + struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0]; \ + u32 map_offs=(reg - io_handle-> ww##_base )+asd_mem_offs_##ww ();\ + asd_write_##ord (asd_ha, (unsigned long) map_offs, val); \ +} + +ASD_READ_SW(swa, u8, byte); +ASD_READ_SW(swa, u16, word); +ASD_READ_SW(swa, u32, dword); + +ASD_READ_SW(swb, u8, byte); +ASD_READ_SW(swb, u16, word); +ASD_READ_SW(swb, u32, dword); + +ASD_READ_SW(swc, u8, byte); +ASD_READ_SW(swc, u16, word); +ASD_READ_SW(swc, u32, dword); + +ASD_WRITE_SW(swa, u8, byte); +ASD_WRITE_SW(swa, u16, word); +ASD_WRITE_SW(swa, u32, dword); + +ASD_WRITE_SW(swb, u8, byte); +ASD_WRITE_SW(swb, u16, word); +ASD_WRITE_SW(swb, u32, dword); + +ASD_WRITE_SW(swc, u8, byte); +ASD_WRITE_SW(swc, u16, word); +ASD_WRITE_SW(swc, u32, dword); + +/* + * A word about sliding windows: + * MBAR0 is divided into sliding windows A, C and B, in that order. + * SWA starts at offset 0 of MBAR0, up to 0x57, with size 0x58 bytes. + * SWC starts at offset 0x58 of MBAR0, up to 0x60, with size 0x8 bytes. + * From 0x60 to 0x7F, we have a copy of PCI config space 0x60-0x7F. + * SWB starts at offset 0x80 of MBAR0 and extends to the end of MBAR0. + * See asd_init_sw() in aic94xx_hwi.c + * + * We map the most common registers we'd access of the internal 4GB + * host adapter memory space. If a register/internal memory location + * is wanted which is not mapped, we slide SWB, by paging it, + * see asd_move_swb() in aic94xx_reg.c. 
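+ *
+ * For example, an access to a register that is outside all three currently
+ * mapped windows first re-bases SWB with a single PCI config write to
+ * PCI_CONF_MBAR0_SWB and then performs the access through the new window,
+ * which is what the register accessors below do.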
+ */ + +/** + * asd_move_swb -- move sliding window B + * @asd_ha: pointer to host adapter structure + * @reg: register desired to be within range of the new window + */ +static inline void asd_move_swb(struct asd_ha_struct *asd_ha, u32 reg) +{ + u32 base = reg & ~(MBAR0_SWB_SIZE-1); + pci_write_config_dword(asd_ha->pcidev, PCI_CONF_MBAR0_SWB, base); + asd_ha->io_handle[0].swb_base = base; +} + +static void __asd_write_reg_byte(struct asd_ha_struct *asd_ha, u32 reg, u8 val) +{ + struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; + BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); + if (io_handle->swa_base <= reg + && reg < io_handle->swa_base + MBAR0_SWA_SIZE) + asd_write_swa_byte (asd_ha, reg,val); + else if (io_handle->swb_base <= reg + && reg < io_handle->swb_base + MBAR0_SWB_SIZE) + asd_write_swb_byte (asd_ha, reg, val); + else if (io_handle->swc_base <= reg + && reg < io_handle->swc_base + MBAR0_SWC_SIZE) + asd_write_swc_byte (asd_ha, reg, val); + else { + /* Ok, we have to move SWB */ + asd_move_swb(asd_ha, reg); + asd_write_swb_byte (asd_ha, reg, val); + } +} + +#define ASD_WRITE_REG(type, ord) \ +void asd_write_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg, type val)\ +{ \ + struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; \ + unsigned long flags; \ + BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); \ + spin_lock_irqsave(&asd_ha->iolock, flags); \ + if (io_handle->swa_base <= reg \ + && reg < io_handle->swa_base + MBAR0_SWA_SIZE) \ + asd_write_swa_##ord (asd_ha, reg,val); \ + else if (io_handle->swb_base <= reg \ + && reg < io_handle->swb_base + MBAR0_SWB_SIZE) \ + asd_write_swb_##ord (asd_ha, reg, val); \ + else if (io_handle->swc_base <= reg \ + && reg < io_handle->swc_base + MBAR0_SWC_SIZE) \ + asd_write_swc_##ord (asd_ha, reg, val); \ + else { \ + /* Ok, we have to move SWB */ \ + asd_move_swb(asd_ha, reg); \ + asd_write_swb_##ord (asd_ha, reg, val); \ + } \ + spin_unlock_irqrestore(&asd_ha->iolock, flags); \ +} + +ASD_WRITE_REG(u8, byte); +ASD_WRITE_REG(u16,word); +ASD_WRITE_REG(u32,dword); + +static u8 __asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg) +{ + struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; + u8 val; + BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); + if (io_handle->swa_base <= reg + && reg < io_handle->swa_base + MBAR0_SWA_SIZE) + val = asd_read_swa_byte (asd_ha, reg); + else if (io_handle->swb_base <= reg + && reg < io_handle->swb_base + MBAR0_SWB_SIZE) + val = asd_read_swb_byte (asd_ha, reg); + else if (io_handle->swc_base <= reg + && reg < io_handle->swc_base + MBAR0_SWC_SIZE) + val = asd_read_swc_byte (asd_ha, reg); + else { + /* Ok, we have to move SWB */ + asd_move_swb(asd_ha, reg); + val = asd_read_swb_byte (asd_ha, reg); + } + return val; +} + +#define ASD_READ_REG(type, ord) \ +type asd_read_reg_##ord (struct asd_ha_struct *asd_ha, u32 reg) \ +{ \ + struct asd_ha_addrspace *io_handle=&asd_ha->io_handle[0]; \ + type val; \ + unsigned long flags; \ + BUG_ON(reg >= 0xC0000000 || reg < ALL_BASE_ADDR); \ + spin_lock_irqsave(&asd_ha->iolock, flags); \ + if (io_handle->swa_base <= reg \ + && reg < io_handle->swa_base + MBAR0_SWA_SIZE) \ + val = asd_read_swa_##ord (asd_ha, reg); \ + else if (io_handle->swb_base <= reg \ + && reg < io_handle->swb_base + MBAR0_SWB_SIZE) \ + val = asd_read_swb_##ord (asd_ha, reg); \ + else if (io_handle->swc_base <= reg \ + && reg < io_handle->swc_base + MBAR0_SWC_SIZE) \ + val = asd_read_swc_##ord (asd_ha, reg); \ + else { \ + /* Ok, we have to move SWB */ \ + asd_move_swb(asd_ha, reg); \ 
+ val = asd_read_swb_##ord (asd_ha, reg); \ + } \ + spin_unlock_irqrestore(&asd_ha->iolock, flags); \ + return val; \ +} + +ASD_READ_REG(u8, byte); +ASD_READ_REG(u16,word); +ASD_READ_REG(u32,dword); + +/** + * asd_read_reg_string -- read a string of bytes from io space memory + * @asd_ha: pointer to host adapter structure + * @dst: pointer to a destination buffer where data will be written to + * @offs: start offset (register) to read from + * @count: number of bytes to read + */ +void asd_read_reg_string(struct asd_ha_struct *asd_ha, void *dst, + u32 offs, int count) +{ + u8 *p = dst; + unsigned long flags; + + spin_lock_irqsave(&asd_ha->iolock, flags); + for ( ; count > 0; count--, offs++, p++) + *p = __asd_read_reg_byte(asd_ha, offs); + spin_unlock_irqrestore(&asd_ha->iolock, flags); +} + +/** + * asd_write_reg_string -- write a string of bytes to io space memory + * @asd_ha: pointer to host adapter structure + * @src: pointer to source buffer where data will be read from + * @offs: start offset (register) to write to + * @count: number of bytes to write + */ +void asd_write_reg_string(struct asd_ha_struct *asd_ha, void *src, + u32 offs, int count) +{ + u8 *p = src; + unsigned long flags; + + spin_lock_irqsave(&asd_ha->iolock, flags); + for ( ; count > 0; count--, offs++, p++) + __asd_write_reg_byte(asd_ha, offs, *p); + spin_unlock_irqrestore(&asd_ha->iolock, flags); +} diff --git a/drivers/scsi/aic94xx/aic94xx_reg.h b/drivers/scsi/aic94xx/aic94xx_reg.h new file mode 100644 index 0000000..2279307 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_reg.h @@ -0,0 +1,302 @@ +/* + * Aic94xx SAS/SATA driver hardware registers definitions. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef _AIC94XX_REG_H_ +#define _AIC94XX_REG_H_ + +#include <asm/io.h> +#include "aic94xx_hwi.h" + +/* Values */ +#define AIC9410_DEV_REV_B0 0x8 + +/* MBAR0, SWA, SWB, SWC, internal memory space addresses */ +#define REG_BASE_ADDR 0xB8000000 +#define REG_BASE_ADDR_CSEQCIO 0xB8002000 +#define REG_BASE_ADDR_EXSI 0xB8042800 + +#define MBAR0_SWA_SIZE 0x58 +extern u32 MBAR0_SWB_SIZE; +#define MBAR0_SWC_SIZE 0x8 + +/* MBAR1, points to On Chip Memory */ +#define OCM_BASE_ADDR 0xA0000000 +#define OCM_MAX_SIZE 0x20000 + +/* Smallest address possible to reference */ +#define ALL_BASE_ADDR OCM_BASE_ADDR + +/* PCI configuration space registers */ +#define PCI_IOBAR_OFFSET 4 + +#define PCI_CONF_MBAR1 0x6C +#define PCI_CONF_MBAR0_SWA 0x70 +#define PCI_CONF_MBAR0_SWB 0x74 +#define PCI_CONF_MBAR0_SWC 0x78 +#define PCI_CONF_MBAR_KEY 0x7C +#define PCI_CONF_FLSH_BAR 0xB8 + +#include "aic94xx_reg_def.h" + +u8 asd_read_reg_byte(struct asd_ha_struct *asd_ha, u32 reg); +u16 asd_read_reg_word(struct asd_ha_struct *asd_ha, u32 reg); +u32 asd_read_reg_dword(struct asd_ha_struct *asd_ha, u32 reg); + +void asd_write_reg_byte(struct asd_ha_struct *asd_ha, u32 reg, u8 val); +void asd_write_reg_word(struct asd_ha_struct *asd_ha, u32 reg, u16 val); +void asd_write_reg_dword(struct asd_ha_struct *asd_ha, u32 reg, u32 val); + +void asd_read_reg_string(struct asd_ha_struct *asd_ha, void *dst, + u32 offs, int count); +void asd_write_reg_string(struct asd_ha_struct *asd_ha, void *src, + u32 offs, int count); + +#define ASD_READ_OCM(type, ord, S) \ +static inline type asd_read_ocm_##ord (struct asd_ha_struct *asd_ha, \ + u32 offs) \ +{ \ + struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[1]; \ + type val = read##S (io_handle->addr + (unsigned long) offs); \ + rmb(); \ + return val; \ +} + +ASD_READ_OCM(u8, byte, b); +ASD_READ_OCM(u16,word, w); +ASD_READ_OCM(u32,dword,l); + +#define ASD_WRITE_OCM(type, ord, S) \ +static inline void asd_write_ocm_##ord (struct asd_ha_struct *asd_ha, \ + u32 offs, type val) \ +{ \ + struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[1]; \ + write##S (val, io_handle->addr + (unsigned long) offs); \ + return; \ +} + +ASD_WRITE_OCM(u8, byte, b); +ASD_WRITE_OCM(u16,word, w); +ASD_WRITE_OCM(u32,dword,l); + +#define ASD_DDBSITE_READ(type, ord) \ +static inline type asd_ddbsite_read_##ord (struct asd_ha_struct *asd_ha, \ + u16 ddb_site_no, \ + u16 offs) \ +{ \ + asd_write_reg_word(asd_ha, ALTCIOADR, MnDDB_SITE + offs); \ + asd_write_reg_word(asd_ha, ADDBPTR, ddb_site_no); \ + return asd_read_reg_##ord (asd_ha, CTXACCESS); \ +} + +ASD_DDBSITE_READ(u32, dword); +ASD_DDBSITE_READ(u16, word); + +static inline u8 asd_ddbsite_read_byte(struct asd_ha_struct *asd_ha, + u16 ddb_site_no, + u16 offs) +{ + if (offs & 1) + return asd_ddbsite_read_word(asd_ha, ddb_site_no, + offs & ~1) >> 8; + else + return asd_ddbsite_read_word(asd_ha, ddb_site_no, + offs) & 0xFF; +} + + +#define ASD_DDBSITE_WRITE(type, ord) \ +static inline void asd_ddbsite_write_##ord (struct asd_ha_struct *asd_ha, \ + u16 ddb_site_no, \ + u16 offs, type val) \ +{ \ + asd_write_reg_word(asd_ha, ALTCIOADR, MnDDB_SITE + offs); \ + asd_write_reg_word(asd_ha, ADDBPTR, ddb_site_no); \ + asd_write_reg_##ord (asd_ha, CTXACCESS, val); \ +} + +ASD_DDBSITE_WRITE(u32, dword); +ASD_DDBSITE_WRITE(u16, word); + +static 
inline void asd_ddbsite_write_byte(struct asd_ha_struct *asd_ha, + u16 ddb_site_no, + u16 offs, u8 val) +{ + u16 base = offs & ~1; + u16 rval = asd_ddbsite_read_word(asd_ha, ddb_site_no, base); + if (offs & 1) + rval = (val << 8) | (rval & 0xFF); + else + rval = (rval & 0xFF00) | val; + asd_ddbsite_write_word(asd_ha, ddb_site_no, base, rval); +} + + +#define ASD_SCBSITE_READ(type, ord) \ +static inline type asd_scbsite_read_##ord (struct asd_ha_struct *asd_ha, \ + u16 scb_site_no, \ + u16 offs) \ +{ \ + asd_write_reg_word(asd_ha, ALTCIOADR, MnSCB_SITE + offs); \ + asd_write_reg_word(asd_ha, ASCBPTR, scb_site_no); \ + return asd_read_reg_##ord (asd_ha, CTXACCESS); \ +} + +ASD_SCBSITE_READ(u32, dword); +ASD_SCBSITE_READ(u16, word); + +static inline u8 asd_scbsite_read_byte(struct asd_ha_struct *asd_ha, + u16 scb_site_no, + u16 offs) +{ + if (offs & 1) + return asd_scbsite_read_word(asd_ha, scb_site_no, + offs & ~1) >> 8; + else + return asd_scbsite_read_word(asd_ha, scb_site_no, + offs) & 0xFF; +} + + +#define ASD_SCBSITE_WRITE(type, ord) \ +static inline void asd_scbsite_write_##ord (struct asd_ha_struct *asd_ha, \ + u16 scb_site_no, \ + u16 offs, type val) \ +{ \ + asd_write_reg_word(asd_ha, ALTCIOADR, MnSCB_SITE + offs); \ + asd_write_reg_word(asd_ha, ASCBPTR, scb_site_no); \ + asd_write_reg_##ord (asd_ha, CTXACCESS, val); \ +} + +ASD_SCBSITE_WRITE(u32, dword); +ASD_SCBSITE_WRITE(u16, word); + +static inline void asd_scbsite_write_byte(struct asd_ha_struct *asd_ha, + u16 scb_site_no, + u16 offs, u8 val) +{ + u16 base = offs & ~1; + u16 rval = asd_scbsite_read_word(asd_ha, scb_site_no, base); + if (offs & 1) + rval = (val << 8) | (rval & 0xFF); + else + rval = (rval & 0xFF00) | val; + asd_scbsite_write_word(asd_ha, scb_site_no, base, rval); +} + +/** + * asd_ddbsite_update_word -- atomically update a word in a ddb site + * @asd_ha: pointer to host adapter structure + * @ddb_site_no: the DDB site number + * @offs: the offset into the DDB + * @oldval: old value found in that offset + * @newval: the new value to replace it + * + * This function is used when the sequencers are running and we need to + * update a DDB site atomically without expensive pausing and upausing + * of the sequencers and accessing the DDB site through the CIO bus. + * + * Return 0 on success; -EFAULT on parity error; -EAGAIN if the old value + * is different than the current value at that offset. 
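+ *
+ * A caller that has to cope with contention would typically re-read the
+ * word and retry, along the lines of (with "bit" being whatever the caller
+ * wants to set):
+ *
+ *	do {
+ *		oldval = asd_ddbsite_read_word(asd_ha, site, offs);
+ *	} while (asd_ddbsite_update_word(asd_ha, site, offs,
+ *					 oldval, oldval | bit) == -EAGAIN);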
+ */ +static inline int asd_ddbsite_update_word(struct asd_ha_struct *asd_ha, + u16 ddb_site_no, u16 offs, + u16 oldval, u16 newval) +{ + u8 done; + u16 oval = asd_ddbsite_read_word(asd_ha, ddb_site_no, offs); + if (oval != oldval) + return -EAGAIN; + asd_write_reg_word(asd_ha, AOLDDATA, oldval); + asd_write_reg_word(asd_ha, ANEWDATA, newval); + do { + done = asd_read_reg_byte(asd_ha, ATOMICSTATCTL); + } while (!(done & ATOMICDONE)); + if (done & ATOMICERR) + return -EFAULT; /* parity error */ + else if (done & ATOMICWIN) + return 0; /* success */ + else + return -EAGAIN; /* oldval different than current value */ +} + +static inline int asd_ddbsite_update_byte(struct asd_ha_struct *asd_ha, + u16 ddb_site_no, u16 offs, + u8 _oldval, u8 _newval) +{ + u16 base = offs & ~1; + u16 oval; + u16 nval = asd_ddbsite_read_word(asd_ha, ddb_site_no, base); + if (offs & 1) { + if ((nval >> 8) != _oldval) + return -EAGAIN; + nval = (_newval << 8) | (nval & 0xFF); + oval = (_oldval << 8) | (nval & 0xFF); + } else { + if ((nval & 0xFF) != _oldval) + return -EAGAIN; + nval = (nval & 0xFF00) | _newval; + oval = (nval & 0xFF00) | _oldval; + } + return asd_ddbsite_update_word(asd_ha, ddb_site_no, base, oval, nval); +} + +static inline void asd_write_reg_addr(struct asd_ha_struct *asd_ha, u32 reg, + dma_addr_t dma_handle) +{ + asd_write_reg_dword(asd_ha, reg, ASD_BUSADDR_LO(dma_handle)); + asd_write_reg_dword(asd_ha, reg+4, ASD_BUSADDR_HI(dma_handle)); +} + +static inline u32 asd_get_cmdctx_size(struct asd_ha_struct *asd_ha) +{ + /* DCHREVISION returns 0, possibly broken */ + u32 ctxmemsize = asd_read_reg_dword(asd_ha, LmMnINT(0,0)) & CTXMEMSIZE; + return ctxmemsize ? 65536 : 32768; +} + +static inline u32 asd_get_devctx_size(struct asd_ha_struct *asd_ha) +{ + u32 ctxmemsize = asd_read_reg_dword(asd_ha, LmMnINT(0,0)) & CTXMEMSIZE; + return ctxmemsize ? 8192 : 4096; +} + +static inline void asd_disable_ints(struct asd_ha_struct *asd_ha) +{ + asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN); +} + +static inline void asd_enable_ints(struct asd_ha_struct *asd_ha) +{ + /* Enable COM SAS interrupt on errors, COMSTAT */ + asd_write_reg_dword(asd_ha, COMSTATEN, + EN_CSBUFPERR | EN_CSERR | EN_OVLYERR); + /* Enable DCH SAS CFIFTOERR */ + asd_write_reg_dword(asd_ha, DCHSTATUS, EN_CFIFTOERR); + /* Enable Host Device interrupts */ + asd_write_reg_dword(asd_ha, CHIMINTEN, SET_CHIMINTEN); +} + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h new file mode 100644 index 0000000..b79f45f --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h @@ -0,0 +1,2398 @@ +/* + * Aic94xx SAS/SATA driver hardware registers defintions. + * + * Copyright (C) 2004 Adaptec, Inc. All rights reserved. + * Copyright (C) 2004 David Chaw <david_chaw@adaptec.com> + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * Luben Tuikov: Some register value updates to make it work with the window + * agnostic register r/w functions. Some register corrections, sizes, + * etc. + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * $Id: //depot/aic94xx/aic94xx_reg_def.h#27 $ + * + */ + +#ifndef _ADP94XX_REG_DEF_H_ +#define _ADP94XX_REG_DEF_H_ + +/* + * Common definitions. + */ +#define CSEQ_MODE_PAGE_SIZE 0x200 /* CSEQ mode page size */ +#define LmSEQ_MODE_PAGE_SIZE 0x200 /* LmSEQ mode page size */ +#define LmSEQ_HOST_REG_SIZE 0x4000 /* LmSEQ Host Register size */ + +/********************* COM_SAS registers definition *************************/ + +/* The base is REG_BASE_ADDR, defined in aic94xx_reg.h. + */ + +/* + * CHIM Registers, Address Range : (0x00-0xFF) + */ +#define COMBIST (REG_BASE_ADDR + 0x00) + +/* bits 31:24 */ +#define L7BLKRST 0x80000000 +#define L6BLKRST 0x40000000 +#define L5BLKRST 0x20000000 +#define L4BLKRST 0x10000000 +#define L3BLKRST 0x08000000 +#define L2BLKRST 0x04000000 +#define L1BLKRST 0x02000000 +#define L0BLKRST 0x01000000 +#define LmBLKRST 0xFF000000 +#define LmBLKRST_COMBIST(phyid) (1 << (24 + phyid)) + +#define OCMBLKRST 0x00400000 +#define CTXMEMBLKRST 0x00200000 +#define CSEQBLKRST 0x00100000 +#define EXSIBLKRST 0x00040000 +#define DPIBLKRST 0x00020000 +#define DFIFBLKRST 0x00010000 +#define HARDRST 0x00000200 +#define COMBLKRST 0x00000100 +#define FRCDFPERR 0x00000080 +#define FRCCIOPERR 0x00000020 +#define FRCBISTERR 0x00000010 +#define COMBISTEN 0x00000004 +#define COMBISTDONE 0x00000002 /* ro */ +#define COMBISTFAIL 0x00000001 /* ro */ + +#define COMSTAT (REG_BASE_ADDR + 0x04) + +#define REQMBXREAD 0x00000040 +#define RSPMBXAVAIL 0x00000020 +#define CSBUFPERR 0x00000008 +#define OVLYERR 0x00000004 +#define CSERR 0x00000002 +#define OVLYDMADONE 0x00000001 + +#define COMSTAT_MASK (REQMBXREAD | RSPMBXAVAIL | \ + CSBUFPERR | OVLYERR | CSERR |\ + OVLYDMADONE) + +#define COMSTATEN (REG_BASE_ADDR + 0x08) + +#define EN_REQMBXREAD 0x00000040 +#define EN_RSPMBXAVAIL 0x00000020 +#define EN_CSBUFPERR 0x00000008 +#define EN_OVLYERR 0x00000004 +#define EN_CSERR 0x00000002 +#define EN_OVLYDONE 0x00000001 + +#define SCBPRO (REG_BASE_ADDR + 0x0C) + +#define SCBCONS_MASK 0xFFFF0000 +#define SCBPRO_MASK 0x0000FFFF + +#define CHIMREQMBX (REG_BASE_ADDR + 0x10) + +#define CHIMRSPMBX (REG_BASE_ADDR + 0x14) + +#define CHIMINT (REG_BASE_ADDR + 0x18) + +#define EXT_INT0 0x00000800 +#define EXT_INT1 0x00000400 +#define PORRSTDET 0x00000200 +#define HARDRSTDET 0x00000100 +#define DLAVAILQ 0x00000080 /* ro */ +#define HOSTERR 0x00000040 +#define INITERR 0x00000020 +#define DEVINT 0x00000010 +#define COMINT 0x00000008 +#define DEVTIMER2 0x00000004 +#define DEVTIMER1 0x00000002 +#define DLAVAIL 0x00000001 + +#define CHIMINT_MASK (HOSTERR | INITERR | DEVINT | COMINT |\ + DEVTIMER2 | DEVTIMER1 | DLAVAIL) + +#define DEVEXCEPT_MASK (HOSTERR | INITERR | DEVINT | COMINT) + +#define CHIMINTEN (REG_BASE_ADDR + 0x1C) + +#define RST_EN_EXT_INT1 0x01000000 +#define RST_EN_EXT_INT0 0x00800000 +#define RST_EN_HOSTERR 0x00400000 +#define RST_EN_INITERR 0x00200000 +#define RST_EN_DEVINT 0x00100000 +#define RST_EN_COMINT 0x00080000 +#define RST_EN_DEVTIMER2 0x00040000 +#define RST_EN_DEVTIMER1 0x00020000 +#define RST_EN_DLAVAIL 0x00010000 +#define SET_EN_EXT_INT1 0x00000100 +#define SET_EN_EXT_INT0 0x00000080 +#define SET_EN_HOSTERR 0x00000040 +#define SET_EN_INITERR 0x00000020 +#define SET_EN_DEVINT 0x00000010 +#define SET_EN_COMINT 0x00000008 
+#define SET_EN_DEVTIMER2 0x00000004 +#define SET_EN_DEVTIMER1 0x00000002 +#define SET_EN_DLAVAIL 0x00000001 + +#define RST_CHIMINTEN (RST_EN_HOSTERR | RST_EN_INITERR | \ + RST_EN_DEVINT | RST_EN_COMINT | \ + RST_EN_DEVTIMER2 | RST_EN_DEVTIMER1 |\ + RST_EN_DLAVAIL) + +#define SET_CHIMINTEN (SET_EN_HOSTERR | SET_EN_INITERR |\ + SET_EN_DEVINT | SET_EN_COMINT |\ + SET_EN_DLAVAIL) + +#define OVLYDMACTL (REG_BASE_ADDR + 0x20) + +#define OVLYADR_MASK 0x07FF0000 +#define OVLYLSEQ_MASK 0x0000FF00 +#define OVLYCSEQ 0x00000080 +#define OVLYHALTERR 0x00000040 +#define PIOCMODE 0x00000020 +#define RESETOVLYDMA 0x00000008 /* wo */ +#define STARTOVLYDMA 0x00000004 +#define STOPOVLYDMA 0x00000002 /* wo */ +#define OVLYDMAACT 0x00000001 /* ro */ + +#define OVLYDMACNT (REG_BASE_ADDR + 0x24) + +#define OVLYDOMAIN1 0x20000000 /* ro */ +#define OVLYDOMAIN0 0x10000000 +#define OVLYBUFADR_MASK 0x007F0000 +#define OVLYDMACNT_MASK 0x00003FFF + +#define OVLYDMAADR (REG_BASE_ADDR + 0x28) + +#define DMAERR (REG_BASE_ADDR + 0x30) + +#define OVLYERRSTAT_MASK 0x0000FF00 /* ro */ +#define CSERRSTAT_MASK 0x000000FF /* ro */ + +#define SPIODATA (REG_BASE_ADDR + 0x34) + +/* 0x38 - 0x3C are reserved */ + +#define T1CNTRLR (REG_BASE_ADDR + 0x40) + +#define T1DONE 0x00010000 /* ro */ +#define TIMER64 0x00000400 +#define T1ENABLE 0x00000200 +#define T1RELOAD 0x00000100 +#define T1PRESCALER_MASK 0x00000003 + +#define T1CMPR (REG_BASE_ADDR + 0x44) + +#define T1CNTR (REG_BASE_ADDR + 0x48) + +#define T2CNTRLR (REG_BASE_ADDR + 0x4C) + +#define T2DONE 0x00010000 /* ro */ +#define T2ENABLE 0x00000200 +#define T2RELOAD 0x00000100 +#define T2PRESCALER_MASK 0x00000003 + +#define T2CMPR (REG_BASE_ADDR + 0x50) + +#define T2CNTR (REG_BASE_ADDR + 0x54) + +/* 0x58h - 0xFCh are reserved */ + +/* + * DCH_SAS Registers, Address Range : (0x800-0xFFF) + */ +#define CMDCTXBASE (REG_BASE_ADDR + 0x800) + +#define DEVCTXBASE (REG_BASE_ADDR + 0x808) + +#define CTXDOMAIN (REG_BASE_ADDR + 0x810) + +#define DEVCTXDOMAIN1 0x00000008 /* ro */ +#define DEVCTXDOMAIN0 0x00000004 +#define CMDCTXDOMAIN1 0x00000002 /* ro */ +#define CMDCTXDOMAIN0 0x00000001 + +#define DCHCTL (REG_BASE_ADDR + 0x814) + +#define OCMBISTREPAIR 0x00080000 +#define OCMBISTEN 0x00040000 +#define OCMBISTDN 0x00020000 /* ro */ +#define OCMBISTFAIL 0x00010000 /* ro */ +#define DDBBISTEN 0x00004000 +#define DDBBISTDN 0x00002000 /* ro */ +#define DDBBISTFAIL 0x00001000 /* ro */ +#define SCBBISTEN 0x00000400 +#define SCBBISTDN 0x00000200 /* ro */ +#define SCBBISTFAIL 0x00000100 /* ro */ + +#define MEMSEL_MASK 0x000000E0 +#define MEMSEL_CCM_LSEQ 0x00000000 +#define MEMSEL_CCM_IOP 0x00000020 +#define MEMSEL_CCM_SASCTL 0x00000040 +#define MEMSEL_DCM_LSEQ 0x00000060 +#define MEMSEL_DCM_IOP 0x00000080 +#define MEMSEL_OCM 0x000000A0 + +#define FRCERR 0x00000010 +#define AUTORLS 0x00000001 + +#define DCHREVISION (REG_BASE_ADDR + 0x818) + +#define DCHREVISION_MASK 0x000000FF + +#define DCHSTATUS (REG_BASE_ADDR + 0x81C) + +#define EN_CFIFTOERR 0x00020000 +#define CFIFTOERR 0x00000200 +#define CSEQINT 0x00000100 /* ro */ +#define LSEQ7INT 0x00000080 /* ro */ +#define LSEQ6INT 0x00000040 /* ro */ +#define LSEQ5INT 0x00000020 /* ro */ +#define LSEQ4INT 0x00000010 /* ro */ +#define LSEQ3INT 0x00000008 /* ro */ +#define LSEQ2INT 0x00000004 /* ro */ +#define LSEQ1INT 0x00000002 /* ro */ +#define LSEQ0INT 0x00000001 /* ro */ + +#define LSEQINT_MASK (LSEQ7INT | LSEQ6INT | LSEQ5INT |\ + LSEQ4INT | LSEQ3INT | LSEQ2INT |\ + LSEQ1INT | LSEQ0INT) + +#define DCHDFIFDEBUG (REG_BASE_ADDR + 0x820) +#define ENFAIRMST 
0x00FF0000 +#define DISWRMST9 0x00000200 +#define DISWRMST8 0x00000100 +#define DISRDMST 0x000000FF + +#define ATOMICSTATCTL (REG_BASE_ADDR + 0x824) +/* 8 bit wide */ +#define AUTOINC 0x80 +#define ATOMICERR 0x04 +#define ATOMICWIN 0x02 +#define ATOMICDONE 0x01 + + +#define ALTCIOADR (REG_BASE_ADDR + 0x828) +/* 16 bit; bits 8:0 define CIO addr space of CSEQ */ + +#define ASCBPTR (REG_BASE_ADDR + 0x82C) +/* 16 bit wide */ + +#define ADDBPTR (REG_BASE_ADDR + 0x82E) +/* 16 bit wide */ + +#define ANEWDATA (REG_BASE_ADDR + 0x830) +/* 16 bit */ + +#define AOLDDATA (REG_BASE_ADDR + 0x834) +/* 16 bit */ + +#define CTXACCESS (REG_BASE_ADDR + 0x838) +/* 32 bit */ + +/* 0x83Ch - 0xFFCh are reserved */ + +/* + * ARP2 External Processor Registers, Address Range : (0x00-0x1F) + */ +#define ARP2CTL 0x00 + +#define FRCSCRPERR 0x00040000 +#define FRCARP2PERR 0x00020000 +#define FRCARP2ILLOPC 0x00010000 +#define ENWAITTO 0x00008000 +#define PERRORDIS 0x00004000 +#define FAILDIS 0x00002000 +#define CIOPERRDIS 0x00001000 +#define BREAKEN3 0x00000800 +#define BREAKEN2 0x00000400 +#define BREAKEN1 0x00000200 +#define BREAKEN0 0x00000100 +#define EPAUSE 0x00000008 +#define PAUSED 0x00000004 /* ro */ +#define STEP 0x00000002 +#define ARP2RESET 0x00000001 /* wo */ + +#define ARP2INT 0x04 + +#define HALTCODE_MASK 0x00FF0000 /* ro */ +#define ARP2WAITTO 0x00000100 +#define ARP2HALTC 0x00000080 +#define ARP2ILLOPC 0x00000040 +#define ARP2PERR 0x00000020 +#define ARP2CIOPERR 0x00000010 +#define ARP2BREAK3 0x00000008 +#define ARP2BREAK2 0x00000004 +#define ARP2BREAK1 0x00000002 +#define ARP2BREAK0 0x00000001 + +#define ARP2INTEN 0x08 + +#define EN_ARP2WAITTO 0x00000100 +#define EN_ARP2HALTC 0x00000080 +#define EN_ARP2ILLOPC 0x00000040 +#define EN_ARP2PERR 0x00000020 +#define EN_ARP2CIOPERR 0x00000010 +#define EN_ARP2BREAK3 0x00000008 +#define EN_ARP2BREAK2 0x00000004 +#define EN_ARP2BREAK1 0x00000002 +#define EN_ARP2BREAK0 0x00000001 + +#define ARP2BREAKADR01 0x0C + +#define BREAKADR1_MASK 0x0FFF0000 +#define BREAKADR0_MASK 0x00000FFF + +#define ARP2BREAKADR23 0x10 + +#define BREAKADR3_MASK 0x0FFF0000 +#define BREAKADR2_MASK 0x00000FFF + +/* 0x14h - 0x1Ch are reserved */ + +/* + * ARP2 Registers, Address Range : (0x00-0x1F) + * The definitions have the same address offset for CSEQ and LmSEQ + * CIO Bus Registers. + */ +#define MODEPTR 0x00 + +#define DSTMODE 0xF0 +#define SRCMODE 0x0F + +#define ALTMODE 0x01 + +#define ALTDMODE 0xF0 +#define ALTSMODE 0x0F + +#define ATOMICXCHG 0x02 + +#define FLAG 0x04 + +#define INTCODE_MASK 0xF0 +#define ALTMODEV2 0x04 +#define CARRY_INT 0x02 +#define CARRY 0x01 + +#define ARP2INTCTL 0x05 + +#define PAUSEDIS 0x80 +#define RSTINTCTL 0x40 +#define POPALTMODE 0x08 +#define ALTMODEV 0x04 +#define INTMASK 0x02 +#define IRET 0x01 + +#define STACK 0x06 + +#define FUNCTION1 0x07 + +#define PRGMCNT 0x08 + +#define ACCUM 0x0A + +#define SINDEX 0x0C + +#define DINDEX 0x0E + +#define ALLONES 0x10 + +#define ALLZEROS 0x11 + +#define SINDIR 0x12 + +#define DINDIR 0x13 + +#define JUMLDIR 0x14 + +#define ARP2HALTCODE 0x15 + +#define CURRADDR 0x16 + +#define LASTADDR 0x18 + +#define NXTLADDR 0x1A + +#define DBGPORTPTR 0x1C + +#define DBGPORT 0x1D + +/* + * CIO Registers. + * The definitions have the same address offset for CSEQ and LmSEQ + * CIO Bus Registers. 
+ */ +#define MnSCBPTR 0x20 + +#define MnDDBPTR 0x22 + +#define SCRATCHPAGE 0x24 + +#define MnSCRATCHPAGE 0x25 + +#define SCRATCHPAGESV 0x26 + +#define MnSCRATCHPAGESV 0x27 + +#define MnDMAERRS 0x46 + +#define MnSGDMAERRS 0x47 + +#define MnSGBUF 0x53 + +#define MnSGDMASTAT 0x5b + +#define MnDDMACTL 0x5c /* RAZOR.rspec.fm rev 1.5 is wrong */ + +#define MnDDMASTAT 0x5d /* RAZOR.rspec.fm rev 1.5 is wrong */ + +#define MnDDMAMODE 0x5e /* RAZOR.rspec.fm rev 1.5 is wrong */ + +#define MnDMAENG 0x60 + +#define MnPIPECTL 0x61 + +#define MnSGBADR 0x65 + +#define MnSCB_SITE 0x100 + +#define MnDDB_SITE 0x180 + +/* + * The common definitions below have the same address offset for both + * CSEQ and LmSEQ. + */ +#define BISTCTL0 0x4C + +#define BISTCTL1 0x50 + +#define MAPPEDSCR 0x800 + +/* + * CSEQ Host Register, Address Range : (0x000-0xFFC) + */ +#define CSEQ_HOST_REG_BASE_ADR 0xB8001000 + +#define CARP2CTL (CSEQ_HOST_REG_BASE_ADR + ARP2CTL) + +#define CARP2INT (CSEQ_HOST_REG_BASE_ADR + ARP2INT) + +#define CARP2INTEN (CSEQ_HOST_REG_BASE_ADR + ARP2INTEN) + +#define CARP2BREAKADR01 (CSEQ_HOST_REG_BASE_ADR+ARP2BREAKADR01) + +#define CARP2BREAKADR23 (CSEQ_HOST_REG_BASE_ADR+ARP2BREAKADR23) + +#define CBISTCTL (CSEQ_HOST_REG_BASE_ADR + BISTCTL1) + +#define CSEQRAMBISTEN 0x00000040 +#define CSEQRAMBISTDN 0x00000020 /* ro */ +#define CSEQRAMBISTFAIL 0x00000010 /* ro */ +#define CSEQSCRBISTEN 0x00000004 +#define CSEQSCRBISTDN 0x00000002 /* ro */ +#define CSEQSCRBISTFAIL 0x00000001 /* ro */ + +#define CMAPPEDSCR (CSEQ_HOST_REG_BASE_ADR + MAPPEDSCR) + +/* + * CSEQ CIO Bus Registers, Address Range : (0x0000-0x1FFC) + * 16 modes, each mode is 512 bytes. + * Unless specified, the register should valid for all modes. + */ +#define CSEQ_CIO_REG_BASE_ADR REG_BASE_ADDR_CSEQCIO + +#define CSEQm_CIO_REG(Mode, Reg) \ + (CSEQ_CIO_REG_BASE_ADR + \ + ((u32) (Mode) * CSEQ_MODE_PAGE_SIZE) + (u32) (Reg)) + +#define CMODEPTR (CSEQ_CIO_REG_BASE_ADR + MODEPTR) + +#define CALTMODE (CSEQ_CIO_REG_BASE_ADR + ALTMODE) + +#define CATOMICXCHG (CSEQ_CIO_REG_BASE_ADR + ATOMICXCHG) + +#define CFLAG (CSEQ_CIO_REG_BASE_ADR + FLAG) + +#define CARP2INTCTL (CSEQ_CIO_REG_BASE_ADR + ARP2INTCTL) + +#define CSTACK (CSEQ_CIO_REG_BASE_ADR + STACK) + +#define CFUNCTION1 (CSEQ_CIO_REG_BASE_ADR + FUNCTION1) + +#define CPRGMCNT (CSEQ_CIO_REG_BASE_ADR + PRGMCNT) + +#define CACCUM (CSEQ_CIO_REG_BASE_ADR + ACCUM) + +#define CSINDEX (CSEQ_CIO_REG_BASE_ADR + SINDEX) + +#define CDINDEX (CSEQ_CIO_REG_BASE_ADR + DINDEX) + +#define CALLONES (CSEQ_CIO_REG_BASE_ADR + ALLONES) + +#define CALLZEROS (CSEQ_CIO_REG_BASE_ADR + ALLZEROS) + +#define CSINDIR (CSEQ_CIO_REG_BASE_ADR + SINDIR) + +#define CDINDIR (CSEQ_CIO_REG_BASE_ADR + DINDIR) + +#define CJUMLDIR (CSEQ_CIO_REG_BASE_ADR + JUMLDIR) + +#define CARP2HALTCODE (CSEQ_CIO_REG_BASE_ADR + ARP2HALTCODE) + +#define CCURRADDR (CSEQ_CIO_REG_BASE_ADR + CURRADDR) + +#define CLASTADDR (CSEQ_CIO_REG_BASE_ADR + LASTADDR) + +#define CNXTLADDR (CSEQ_CIO_REG_BASE_ADR + NXTLADDR) + +#define CDBGPORTPTR (CSEQ_CIO_REG_BASE_ADR + DBGPORTPTR) + +#define CDBGPORT (CSEQ_CIO_REG_BASE_ADR + DBGPORT) + +#define CSCRATCHPAGE (CSEQ_CIO_REG_BASE_ADR + SCRATCHPAGE) + +#define CMnSCBPTR(Mode) CSEQm_CIO_REG(Mode, MnSCBPTR) + +#define CMnDDBPTR(Mode) CSEQm_CIO_REG(Mode, MnDDBPTR) + +#define CMnSCRATCHPAGE(Mode) CSEQm_CIO_REG(Mode, MnSCRATCHPAGE) + +#define CLINKCON (CSEQ_CIO_REG_BASE_ADR + 0x28) + +#define CCIOAACESS (CSEQ_CIO_REG_BASE_ADR + 0x2C) + +/* mode 0-7 */ +#define MnREQMBX 0x30 +#define CMnREQMBX(Mode) CSEQm_CIO_REG(Mode, 0x30) + +/* 
mode 8 */ +#define CSEQCON CSEQm_CIO_REG(8, 0x30) + +/* mode 0-7 */ +#define MnRSPMBX 0x34 +#define CMnRSPMBX(Mode) CSEQm_CIO_REG(Mode, 0x34) + +/* mode 8 */ +#define CSEQCOMCTL CSEQm_CIO_REG(8, 0x34) + +/* mode 8 */ +#define CSEQCOMSTAT CSEQm_CIO_REG(8, 0x35) + +/* mode 8 */ +#define CSEQCOMINTEN CSEQm_CIO_REG(8, 0x36) + +/* mode 8 */ +#define CSEQCOMDMACTL CSEQm_CIO_REG(8, 0x37) + +#define CSHALTERR 0x10 +#define RESETCSDMA 0x08 /* wo */ +#define STARTCSDMA 0x04 +#define STOPCSDMA 0x02 /* wo */ +#define CSDMAACT 0x01 /* ro */ + +/* mode 0-7 */ +#define MnINT 0x38 +#define CMnINT(Mode) CSEQm_CIO_REG(Mode, 0x38) + +#define CMnREQMBXE 0x02 +#define CMnRSPMBXF 0x01 +#define CMnINT_MASK 0x00000003 + +/* mode 8 */ +#define CSEQREQMBX CSEQm_CIO_REG(8, 0x38) + +/* mode 0-7 */ +#define MnINTEN 0x3C +#define CMnINTEN(Mode) CSEQm_CIO_REG(Mode, 0x3C) + +#define EN_CMnRSPMBXF 0x01 + +/* mode 8 */ +#define CSEQRSPMBX CSEQm_CIO_REG(8, 0x3C) + +/* mode 8 */ +#define CSDMAADR CSEQm_CIO_REG(8, 0x40) + +/* mode 8 */ +#define CSDMACNT CSEQm_CIO_REG(8, 0x48) + +/* mode 8 */ +#define CSEQDLCTL CSEQm_CIO_REG(8, 0x4D) + +#define DONELISTEND 0x10 +#define DONELISTSIZE_MASK 0x0F +#define DONELISTSIZE_8ELEM 0x01 +#define DONELISTSIZE_16ELEM 0x02 +#define DONELISTSIZE_32ELEM 0x03 +#define DONELISTSIZE_64ELEM 0x04 +#define DONELISTSIZE_128ELEM 0x05 +#define DONELISTSIZE_256ELEM 0x06 +#define DONELISTSIZE_512ELEM 0x07 +#define DONELISTSIZE_1024ELEM 0x08 +#define DONELISTSIZE_2048ELEM 0x09 +#define DONELISTSIZE_4096ELEM 0x0A +#define DONELISTSIZE_8192ELEM 0x0B +#define DONELISTSIZE_16384ELEM 0x0C + +/* mode 8 */ +#define CSEQDLOFFS CSEQm_CIO_REG(8, 0x4E) + +/* mode 11 */ +#define CM11INTVEC0 CSEQm_CIO_REG(11, 0x50) + +/* mode 11 */ +#define CM11INTVEC1 CSEQm_CIO_REG(11, 0x52) + +/* mode 11 */ +#define CM11INTVEC2 CSEQm_CIO_REG(11, 0x54) + +#define CCONMSK (CSEQ_CIO_REG_BASE_ADR + 0x60) + +#define CCONEXIST (CSEQ_CIO_REG_BASE_ADR + 0x61) + +#define CCONMODE (CSEQ_CIO_REG_BASE_ADR + 0x62) + +#define CTIMERCALC (CSEQ_CIO_REG_BASE_ADR + 0x64) + +#define CINTDIS (CSEQ_CIO_REG_BASE_ADR + 0x68) + +/* mode 8, 32x32 bits, 128 bytes of mapped buffer */ +#define CSBUFFER CSEQm_CIO_REG(8, 0x80) + +#define CSCRATCH (CSEQ_CIO_REG_BASE_ADR + 0x1C0) + +/* mode 0-8 */ +#define CMnSCRATCH(Mode) CSEQm_CIO_REG(Mode, 0x1E0) + +/* + * CSEQ Mapped Instruction RAM Page, Address Range : (0x0000-0x1FFC) + */ +#define CSEQ_RAM_REG_BASE_ADR 0xB8004000 + +/* + * The common definitions below have the same address offset for all the Link + * sequencers. 
+ */ +#define MODECTL 0x40 + +#define DBGMODE 0x44 + +#define CONTROL 0x48 +#define LEDTIMER 0x00010000 +#define LEDTIMERS_10us 0x00000000 +#define LEDTIMERS_1ms 0x00000800 +#define LEDTIMERS_100ms 0x00001000 +#define LEDMODE_TXRX 0x00000000 +#define LEDMODE_CONNECTED 0x00000200 +#define LEDPOL 0x00000100 + +#define LSEQRAM 0x1000 + +/* + * LmSEQ Host Registers, Address Range : (0x0000-0x3FFC) + */ +#define LSEQ0_HOST_REG_BASE_ADR 0xB8020000 +#define LSEQ1_HOST_REG_BASE_ADR 0xB8024000 +#define LSEQ2_HOST_REG_BASE_ADR 0xB8028000 +#define LSEQ3_HOST_REG_BASE_ADR 0xB802C000 +#define LSEQ4_HOST_REG_BASE_ADR 0xB8030000 +#define LSEQ5_HOST_REG_BASE_ADR 0xB8034000 +#define LSEQ6_HOST_REG_BASE_ADR 0xB8038000 +#define LSEQ7_HOST_REG_BASE_ADR 0xB803C000 + +#define LmARP2CTL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + ARP2CTL) + +#define LmARP2INT(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + ARP2INT) + +#define LmARP2INTEN(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + ARP2INTEN) + +#define LmDBGMODE(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + DBGMODE) + +#define LmCONTROL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + CONTROL) + +#define LmARP2BREAKADR01(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + ARP2BREAKADR01) + +#define LmARP2BREAKADR23(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + ARP2BREAKADR23) + +#define LmMODECTL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + MODECTL) + +#define LmAUTODISCI 0x08000000 +#define LmDSBLBITLT 0x04000000 +#define LmDSBLANTT 0x02000000 +#define LmDSBLCRTT 0x01000000 +#define LmDSBLCONT 0x00000100 +#define LmPRIMODE 0x00000080 +#define LmDSBLHOLD 0x00000040 +#define LmDISACK 0x00000020 +#define LmBLIND48 0x00000010 +#define LmRCVMODE_MASK 0x0000000C +#define LmRCVMODE_PLD 0x00000000 +#define LmRCVMODE_HPC 0x00000004 + +#define LmDBGMODE(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + DBGMODE) + +#define LmFRCPERR 0x80000000 +#define LmMEMSEL_MASK 0x30000000 +#define LmFRCRBPERR 0x00000000 +#define LmFRCTBPERR 0x10000000 +#define LmFRCSGBPERR 0x20000000 +#define LmFRCARBPERR 0x30000000 +#define LmRCVIDW 0x00080000 +#define LmINVDWERR 0x00040000 +#define LmRCVDISP 0x00004000 +#define LmDISPERR 0x00002000 +#define LmDSBLDSCR 0x00000800 +#define LmDSBLSCR 0x00000400 +#define LmFRCNAK 0x00000200 +#define LmFRCROFS 0x00000100 +#define LmFRCCRC 0x00000080 +#define LmFRMTYPE_MASK 0x00000070 +#define LmSG_DATA 0x00000000 +#define LmSG_COMMAND 0x00000010 +#define LmSG_TASK 0x00000020 +#define LmSG_TGTXFER 0x00000030 +#define LmSG_RESPONSE 0x00000040 +#define LmSG_IDENADDR 0x00000050 +#define LmSG_OPENADDR 0x00000060 +#define LmDISCRCGEN 0x00000008 +#define LmDISCRCCHK 0x00000004 +#define LmSSXMTFRM 0x00000002 +#define LmSSRCVFRM 0x00000001 + +#define LmCONTROL(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + CONTROL) + +#define LmSTEPXMTFRM 0x00000002 +#define LmSTEPRCVFRM 0x00000001 + +#define LmBISTCTL0(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) + \ + BISTCTL0) + +#define ARBBISTEN 0x40000000 +#define ARBBISTDN 0x20000000 /* ro */ +#define ARBBISTFAIL 0x10000000 /* ro */ +#define TBBISTEN 0x00000400 +#define TBBISTDN 0x00000200 /* ro */ +#define TBBISTFAIL 0x00000100 /* ro */ +#define RBBISTEN 0x00000040 +#define RBBISTDN 
0x00000020 /* ro */ +#define RBBISTFAIL 0x00000010 /* ro */ +#define SGBISTEN 0x00000004 +#define SGBISTDN 0x00000002 /* ro */ +#define SGBISTFAIL 0x00000001 /* ro */ + +#define LmBISTCTL1(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum)*LmSEQ_HOST_REG_SIZE) +\ + BISTCTL1) + +#define LmRAMPAGE1 0x00000200 +#define LmRAMPAGE0 0x00000100 +#define LmIMEMBISTEN 0x00000040 +#define LmIMEMBISTDN 0x00000020 /* ro */ +#define LmIMEMBISTFAIL 0x00000010 /* ro */ +#define LmSCRBISTEN 0x00000004 +#define LmSCRBISTDN 0x00000002 /* ro */ +#define LmSCRBISTFAIL 0x00000001 /* ro */ +#define LmRAMPAGE (LmRAMPAGE1 + LmRAMPAGE0) +#define LmRAMPAGE_LSHIFT 0x8 + +#define LmSCRATCH(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum) * LmSEQ_HOST_REG_SIZE) +\ + MAPPEDSCR) + +#define LmSEQRAM(LinkNum) (LSEQ0_HOST_REG_BASE_ADR + \ + ((LinkNum) * LmSEQ_HOST_REG_SIZE) +\ + LSEQRAM) + +/* + * LmSEQ CIO Bus Register, Address Range : (0x0000-0xFFC) + * 8 modes, each mode is 512 bytes. + * Unless specified, the register should valid for all modes. + */ +#define LmSEQ_CIOBUS_REG_BASE 0x2000 + +#define LmSEQ_PHY_BASE(Mode, LinkNum) \ + (LSEQ0_HOST_REG_BASE_ADR + \ + (LmSEQ_HOST_REG_SIZE * (u32) (LinkNum)) + \ + LmSEQ_CIOBUS_REG_BASE + \ + ((u32) (Mode) * LmSEQ_MODE_PAGE_SIZE)) + +#define LmSEQ_PHY_REG(Mode, LinkNum, Reg) \ + (LmSEQ_PHY_BASE(Mode, LinkNum) + (u32) (Reg)) + +#define LmMODEPTR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, MODEPTR) + +#define LmALTMODE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALTMODE) + +#define LmATOMICXCHG(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ATOMICXCHG) + +#define LmFLAG(LinkNum) LmSEQ_PHY_REG(0, LinkNum, FLAG) + +#define LmARP2INTCTL(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ARP2INTCTL) + +#define LmSTACK(LinkNum) LmSEQ_PHY_REG(0, LinkNum, STACK) + +#define LmFUNCTION1(LinkNum) LmSEQ_PHY_REG(0, LinkNum, FUNCTION1) + +#define LmPRGMCNT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, PRGMCNT) + +#define LmACCUM(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ACCUM) + +#define LmSINDEX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SINDEX) + +#define LmDINDEX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DINDEX) + +#define LmALLONES(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALLONES) + +#define LmALLZEROS(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ALLZEROS) + +#define LmSINDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SINDIR) + +#define LmDINDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DINDIR) + +#define LmJUMLDIR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, JUMLDIR) + +#define LmARP2HALTCODE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, ARP2HALTCODE) + +#define LmCURRADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, CURRADDR) + +#define LmLASTADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, LASTADDR) + +#define LmNXTLADDR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, NXTLADDR) + +#define LmDBGPORTPTR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DBGPORTPTR) + +#define LmDBGPORT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, DBGPORT) + +#define LmSCRATCHPAGE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, SCRATCHPAGE) + +#define LmMnSCRATCHPAGE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, \ + MnSCRATCHPAGE) + +#define LmTIMERCALC(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x28) + +#define LmREQMBX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x30) + +#define LmRSPMBX(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x34) + +#define LmMnINT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x38) + +#define CTXMEMSIZE 0x80000000 /* ro */ +#define LmACKREQ 0x08000000 +#define LmNAKREQ 0x04000000 +#define LmMnXMTERR 0x02000000 +#define LmM5OOBSVC 0x01000000 +#define LmHWTINT 0x00800000 +#define LmMnCTXDONE 0x00100000 +#define LmM2REQMBXF 0x00080000 +#define LmM2RSPMBXE 0x00040000 +#define LmMnDMAERR 0x00020000 +#define 
LmRCVPRIM 0x00010000 +#define LmRCVERR 0x00008000 +#define LmADDRRCV 0x00004000 +#define LmMnHDRMISS 0x00002000 +#define LmMnWAITSCB 0x00001000 +#define LmMnRLSSCB 0x00000800 +#define LmMnSAVECTX 0x00000400 +#define LmMnFETCHSG 0x00000200 +#define LmMnLOADCTX 0x00000100 +#define LmMnCFGICL 0x00000080 +#define LmMnCFGSATA 0x00000040 +#define LmMnCFGEXPSATA 0x00000020 +#define LmMnCFGCMPLT 0x00000010 +#define LmMnCFGRBUF 0x00000008 +#define LmMnSAVETTR 0x00000004 +#define LmMnCFGRDAT 0x00000002 +#define LmMnCFGHDR 0x00000001 + +#define LmMnINTEN(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x3C) + +#define EN_LmACKREQ 0x08000000 +#define EN_LmNAKREQ 0x04000000 +#define EN_LmMnXMTERR 0x02000000 +#define EN_LmM5OOBSVC 0x01000000 +#define EN_LmHWTINT 0x00800000 +#define EN_LmMnCTXDONE 0x00100000 +#define EN_LmM2REQMBXF 0x00080000 +#define EN_LmM2RSPMBXE 0x00040000 +#define EN_LmMnDMAERR 0x00020000 +#define EN_LmRCVPRIM 0x00010000 +#define EN_LmRCVERR 0x00008000 +#define EN_LmADDRRCV 0x00004000 +#define EN_LmMnHDRMISS 0x00002000 +#define EN_LmMnWAITSCB 0x00001000 +#define EN_LmMnRLSSCB 0x00000800 +#define EN_LmMnSAVECTX 0x00000400 +#define EN_LmMnFETCHSG 0x00000200 +#define EN_LmMnLOADCTX 0x00000100 +#define EN_LmMnCFGICL 0x00000080 +#define EN_LmMnCFGSATA 0x00000040 +#define EN_LmMnCFGEXPSATA 0x00000020 +#define EN_LmMnCFGCMPLT 0x00000010 +#define EN_LmMnCFGRBUF 0x00000008 +#define EN_LmMnSAVETTR 0x00000004 +#define EN_LmMnCFGRDAT 0x00000002 +#define EN_LmMnCFGHDR 0x00000001 + +#define LmM0INTEN_MASK (EN_LmMnCFGCMPLT | EN_LmMnCFGRBUF | \ + EN_LmMnSAVETTR | EN_LmMnCFGRDAT | \ + EN_LmMnCFGHDR | EN_LmRCVERR | \ + EN_LmADDRRCV | EN_LmMnHDRMISS | \ + EN_LmMnRLSSCB | EN_LmMnSAVECTX | \ + EN_LmMnFETCHSG | EN_LmMnLOADCTX | \ + EN_LmHWTINT | EN_LmMnCTXDONE | \ + EN_LmRCVPRIM | EN_LmMnCFGSATA | \ + EN_LmMnCFGEXPSATA | EN_LmMnDMAERR) + +#define LmM1INTEN_MASK (EN_LmMnCFGCMPLT | EN_LmADDRRCV | \ + EN_LmMnRLSSCB | EN_LmMnSAVECTX | \ + EN_LmMnFETCHSG | EN_LmMnLOADCTX | \ + EN_LmMnXMTERR | EN_LmHWTINT | \ + EN_LmMnCTXDONE | EN_LmRCVPRIM | \ + EN_LmRCVERR | EN_LmMnDMAERR) + +#define LmM2INTEN_MASK (EN_LmADDRRCV | EN_LmHWTINT | \ + EN_LmM2REQMBXF | EN_LmRCVPRIM | \ + EN_LmRCVERR) + +#define LmM5INTEN_MASK (EN_LmADDRRCV | EN_LmM5OOBSVC | \ + EN_LmHWTINT | EN_LmRCVPRIM | \ + EN_LmRCVERR) + +#define LmXMTPRIMD(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x40) + +#define LmXMTPRIMCS(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x44) + +#define LmCONSTAT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x45) + +#define LmMnDMAERRS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x46) + +#define LmMnSGDMAERRS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x47) + +#define LmM0EXPHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x48) + +#define LmM1SASALIGN(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x48) +#define SAS_ALIGN_DEFAULT 0xFF + +#define LmM0MSKHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x49) + +#define LmM1STPALIGN(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x49) +#define STP_ALIGN_DEFAULT 0x1F + +#define LmM0RCVHDRP(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4A) + +#define LmM1XMTHDRP(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4A) + +#define LmM0ICLADR(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4B) + +#define LmM1ALIGNMODE(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4B) + +#define LmDISALIGN 0x20 +#define LmROTSTPALIGN 0x10 +#define LmSTPALIGN 0x08 +#define LmROTNOTIFY 0x04 +#define LmDUALALIGN 0x02 +#define LmROTALIGN 0x01 + +#define LmM0EXPRCVNT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x4C) + +#define LmM1XMTCNT(LinkNum) LmSEQ_PHY_REG(1, LinkNum, 0x4C) + +#define LmMnBUFSTAT(LinkNum, Mode) 
LmSEQ_PHY_REG(Mode, LinkNum, 0x4E) + +#define LmMnBUFPERR 0x01 + +/* mode 0-1 */ +#define LmMnXFRLVL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x59) + +#define LmMnXFRLVL_128 0x05 +#define LmMnXFRLVL_256 0x04 +#define LmMnXFRLVL_512 0x03 +#define LmMnXFRLVL_1024 0x02 +#define LmMnXFRLVL_1536 0x01 +#define LmMnXFRLVL_2048 0x00 + + /* mode 0-1 */ +#define LmMnSGDMACTL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5A) + +#define LmMnRESETSG 0x04 +#define LmMnSTOPSG 0x02 +#define LmMnSTARTSG 0x01 + +/* mode 0-1 */ +#define LmMnSGDMASTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5B) + +/* mode 0-1 */ +#define LmMnDDMACTL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5C) + +#define LmMnFLUSH 0x40 /* wo */ +#define LmMnRLSRTRY 0x20 /* wo */ +#define LmMnDISCARD 0x10 /* wo */ +#define LmMnRESETDAT 0x08 /* wo */ +#define LmMnSUSDAT 0x04 /* wo */ +#define LmMnSTOPDAT 0x02 /* wo */ +#define LmMnSTARTDAT 0x01 /* wo */ + +/* mode 0-1 */ +#define LmMnDDMASTAT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5D) + +#define LmMnDPEMPTY 0x80 +#define LmMnFLUSHING 0x40 +#define LmMnDDMAREQ 0x20 +#define LmMnHDMAREQ 0x10 +#define LmMnDATFREE 0x08 +#define LmMnDATSUS 0x04 +#define LmMnDATACT 0x02 +#define LmMnDATEN 0x01 + +/* mode 0-1 */ +#define LmMnDDMAMODE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x5E) + +#define LmMnDMATYPE_NORMAL 0x0000 +#define LmMnDMATYPE_HOST_ONLY_TX 0x0001 +#define LmMnDMATYPE_DEVICE_ONLY_TX 0x0002 +#define LmMnDMATYPE_INVALID 0x0003 +#define LmMnDMATYPE_MASK 0x0003 + +#define LmMnDMAWRAP 0x0004 +#define LmMnBITBUCKET 0x0008 +#define LmMnDISHDR 0x0010 +#define LmMnSTPCRC 0x0020 +#define LmXTEST 0x0040 +#define LmMnDISCRC 0x0080 +#define LmMnENINTLK 0x0100 +#define LmMnADDRFRM 0x0400 +#define LmMnENXMTCRC 0x0800 + +/* mode 0-1 */ +#define LmMnXFRCNT(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x70) + +/* mode 0-1 */ +#define LmMnDPSEL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7B) +#define LmMnDPSEL_MASK 0x07 +#define LmMnEOLPRE 0x40 +#define LmMnEOSPRE 0x80 + +/* Registers used in conjunction with LmMnDPSEL and LmMnDPACC registers */ +/* Receive Mode n = 0 */ +#define LmMnHRADDR 0x00 +#define LmMnHBYTECNT 0x01 +#define LmMnHREWIND 0x02 +#define LmMnDWADDR 0x03 +#define LmMnDSPACECNT 0x04 +#define LmMnDFRMSIZE 0x05 + +/* Registers used in conjunction with LmMnDPSEL and LmMnDPACC registers */ +/* Transmit Mode n = 1 */ +#define LmMnHWADDR 0x00 +#define LmMnHSPACECNT 0x01 +/* #define LmMnHREWIND 0x02 */ +#define LmMnDRADDR 0x03 +#define LmMnDBYTECNT 0x04 +/* #define LmMnDFRMSIZE 0x05 */ + +/* mode 0-1 */ +#define LmMnDPACC(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x78) +#define LmMnDPACC_MASK 0x00FFFFFF + +/* mode 0-1 */ +#define LmMnHOLDLVL(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7D) + +#define LmPRMSTAT0(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x80) +#define LmPRMSTAT0BYTE0 0x80 +#define LmPRMSTAT0BYTE1 0x81 +#define LmPRMSTAT0BYTE2 0x82 +#define LmPRMSTAT0BYTE3 0x83 + +#define LmFRAMERCVD 0x80000000 +#define LmXFRRDYRCVD 0x40000000 +#define LmUNKNOWNP 0x20000000 +#define LmBREAK 0x10000000 +#define LmDONE 0x08000000 +#define LmOPENACPT 0x04000000 +#define LmOPENRJCT 0x02000000 +#define LmOPENRTRY 0x01000000 +#define LmCLOSERV1 0x00800000 +#define LmCLOSERV0 0x00400000 +#define LmCLOSENORM 0x00200000 +#define LmCLOSECLAF 0x00100000 +#define LmNOTIFYRV2 0x00080000 +#define LmNOTIFYRV1 0x00040000 +#define LmNOTIFYRV0 0x00020000 +#define LmNOTIFYSPIN 0x00010000 +#define LmBROADRV4 0x00008000 +#define LmBROADRV3 0x00004000 +#define LmBROADRV2 0x00002000 +#define LmBROADRV1 
0x00001000 +#define LmBROADSES 0x00000800 +#define LmBROADRVCH1 0x00000400 +#define LmBROADRVCH0 0x00000200 +#define LmBROADCH 0x00000100 +#define LmAIPRVWP 0x00000080 +#define LmAIPWP 0x00000040 +#define LmAIPWD 0x00000020 +#define LmAIPWC 0x00000010 +#define LmAIPRV2 0x00000008 +#define LmAIPRV1 0x00000004 +#define LmAIPRV0 0x00000002 +#define LmAIPNRML 0x00000001 + +#define LmBROADCAST_MASK (LmBROADCH | LmBROADRVCH0 | \ + LmBROADRVCH1) + +#define LmPRMSTAT1(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0x84) +#define LmPRMSTAT1BYTE0 0x84 +#define LmPRMSTAT1BYTE1 0x85 +#define LmPRMSTAT1BYTE2 0x86 +#define LmPRMSTAT1BYTE3 0x87 + +#define LmFRMRCVDSTAT 0x80000000 +#define LmBREAK_DET 0x04000000 +#define LmCLOSE_DET 0x02000000 +#define LmDONE_DET 0x01000000 +#define LmXRDY 0x00040000 +#define LmSYNCSRST 0x00020000 +#define LmSYNC 0x00010000 +#define LmXHOLD 0x00008000 +#define LmRRDY 0x00004000 +#define LmHOLD 0x00002000 +#define LmROK 0x00001000 +#define LmRIP 0x00000800 +#define LmCRBLK 0x00000400 +#define LmACK 0x00000200 +#define LmNAK 0x00000100 +#define LmHARDRST 0x00000080 +#define LmERROR 0x00000040 +#define LmRERR 0x00000020 +#define LmPMREQP 0x00000010 +#define LmPMREQS 0x00000008 +#define LmPMACK 0x00000004 +#define LmPMNAK 0x00000002 +#define LmDMAT 0x00000001 + +/* mode 1 */ +#define LmMnSATAFS(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x7E) +#define LmMnXMTSIZE(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0x93) + +/* mode 0 */ +#define LmMnFRMERR(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xB0) + +#define LmACRCERR 0x00000800 +#define LmPHYOVRN 0x00000400 +#define LmOBOVRN 0x00000200 +#define LmMnZERODATA 0x00000100 +#define LmSATAINTLK 0x00000080 +#define LmMnCRCERR 0x00000020 +#define LmRRDYOVRN 0x00000010 +#define LmMISSSOAF 0x00000008 +#define LmMISSSOF 0x00000004 +#define LmMISSEOAF 0x00000002 +#define LmMISSEOF 0x00000001 + +#define LmFRMERREN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xB4) + +#define EN_LmACRCERR 0x00000800 +#define EN_LmPHYOVRN 0x00000400 +#define EN_LmOBOVRN 0x00000200 +#define EN_LmMnZERODATA 0x00000100 +#define EN_LmSATAINTLK 0x00000080 +#define EN_LmFRMBAD 0x00000040 +#define EN_LmMnCRCERR 0x00000020 +#define EN_LmRRDYOVRN 0x00000010 +#define EN_LmMISSSOAF 0x00000008 +#define EN_LmMISSSOF 0x00000004 +#define EN_LmMISSEOAF 0x00000002 +#define EN_LmMISSEOF 0x00000001 + +#define LmFRMERREN_MASK (EN_LmSATAINTLK | EN_LmMnCRCERR | \ + EN_LmRRDYOVRN | EN_LmMISSSOF | \ + EN_LmMISSEOAF | EN_LmMISSEOF | \ + EN_LmACRCERR | LmPHYOVRN | \ + EN_LmOBOVRN | EN_LmMnZERODATA) + +#define LmHWTSTATEN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xC5) + +#define EN_LmDONETO 0x80 +#define EN_LmINVDISP 0x40 +#define EN_LmINVDW 0x20 +#define EN_LmDWSEVENT 0x08 +#define EN_LmCRTTTO 0x04 +#define EN_LmANTTTO 0x02 +#define EN_LmBITLTTO 0x01 + +#define LmHWTSTATEN_MASK (EN_LmINVDISP | EN_LmINVDW | \ + EN_LmDWSEVENT | EN_LmCRTTTO | \ + EN_LmANTTTO | EN_LmDONETO | \ + EN_LmBITLTTO) + +#define LmHWTSTAT(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xC7) + +#define LmDONETO 0x80 +#define LmINVDISP 0x40 +#define LmINVDW 0x20 +#define LmDWSEVENT 0x08 +#define LmCRTTTO 0x04 +#define LmANTTTO 0x02 +#define LmBITLTTO 0x01 + +#define LmMnDATABUFADR(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xC8) +#define LmDATABUFADR_MASK 0x0FFF + +#define LmMnDATABUF(LinkNum, Mode) LmSEQ_PHY_REG(Mode, LinkNum, 0xCA) + +#define LmPRIMSTAT0EN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE0) + +#define EN_LmUNKNOWNP 0x20000000 +#define EN_LmBREAK 0x10000000 +#define EN_LmDONE 0x08000000 +#define EN_LmOPENACPT 0x04000000 +#define EN_LmOPENRJCT 
0x02000000 +#define EN_LmOPENRTRY 0x01000000 +#define EN_LmCLOSERV1 0x00800000 +#define EN_LmCLOSERV0 0x00400000 +#define EN_LmCLOSENORM 0x00200000 +#define EN_LmCLOSECLAF 0x00100000 +#define EN_LmNOTIFYRV2 0x00080000 +#define EN_LmNOTIFYRV1 0x00040000 +#define EN_LmNOTIFYRV0 0x00020000 +#define EN_LmNOTIFYSPIN 0x00010000 +#define EN_LmBROADRV4 0x00008000 +#define EN_LmBROADRV3 0x00004000 +#define EN_LmBROADRV2 0x00002000 +#define EN_LmBROADRV1 0x00001000 +#define EN_LmBROADRV0 0x00000800 +#define EN_LmBROADRVCH1 0x00000400 +#define EN_LmBROADRVCH0 0x00000200 +#define EN_LmBROADCH 0x00000100 +#define EN_LmAIPRVWP 0x00000080 +#define EN_LmAIPWP 0x00000040 +#define EN_LmAIPWD 0x00000020 +#define EN_LmAIPWC 0x00000010 +#define EN_LmAIPRV2 0x00000008 +#define EN_LmAIPRV1 0x00000004 +#define EN_LmAIPRV0 0x00000002 +#define EN_LmAIPNRML 0x00000001 + +#define LmPRIMSTAT0EN_MASK (EN_LmBREAK | \ + EN_LmDONE | EN_LmOPENACPT | \ + EN_LmOPENRJCT | EN_LmOPENRTRY | \ + EN_LmCLOSERV1 | EN_LmCLOSERV0 | \ + EN_LmCLOSENORM | EN_LmCLOSECLAF | \ + EN_LmBROADRV4 | EN_LmBROADRV3 | \ + EN_LmBROADRV2 | EN_LmBROADRV1 | \ + EN_LmBROADRV0 | EN_LmBROADRVCH1 | \ + EN_LmBROADRVCH0 | EN_LmBROADCH | \ + EN_LmAIPRVWP | EN_LmAIPWP | \ + EN_LmAIPWD | EN_LmAIPWC | \ + EN_LmAIPRV2 | EN_LmAIPRV1 | \ + EN_LmAIPRV0 | EN_LmAIPNRML) + +#define LmPRIMSTAT1EN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE4) + +#define EN_LmXRDY 0x00040000 +#define EN_LmSYNCSRST 0x00020000 +#define EN_LmSYNC 0x00010000 +#define EN_LmXHOLD 0x00008000 +#define EN_LmRRDY 0x00004000 +#define EN_LmHOLD 0x00002000 +#define EN_LmROK 0x00001000 +#define EN_LmRIP 0x00000800 +#define EN_LmCRBLK 0x00000400 +#define EN_LmACK 0x00000200 +#define EN_LmNAK 0x00000100 +#define EN_LmHARDRST 0x00000080 +#define EN_LmERROR 0x00000040 +#define EN_LmRERR 0x00000020 +#define EN_LmPMREQP 0x00000010 +#define EN_LmPMREQS 0x00000008 +#define EN_LmPMACK 0x00000004 +#define EN_LmPMNAK 0x00000002 +#define EN_LmDMAT 0x00000001 + +#define LmPRIMSTAT1EN_MASK (EN_LmHARDRST | \ + EN_LmSYNCSRST | \ + EN_LmPMREQP | EN_LmPMREQS | \ + EN_LmPMACK | EN_LmPMNAK) + +#define LmSMSTATE(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xE8) + +#define LmSMSTATEBRK(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xEC) + +#define LmSMDBGCTL(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xF0) + + +/* + * LmSEQ CIO Bus Mode 3 Register. + * Mode 3: Configuration and Setup, IOP Context SCB. + */ +#define LmM3SATATIMER(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x48) + +#define LmM3INTVEC0(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x90) + +#define LmM3INTVEC1(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x92) + +#define LmM3INTVEC2(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x94) + +#define LmM3INTVEC3(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x96) + +#define LmM3INTVEC4(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x98) + +#define LmM3INTVEC5(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9A) + +#define LmM3INTVEC6(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9C) + +#define LmM3INTVEC7(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0x9E) + +#define LmM3INTVEC8(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xA4) + +#define LmM3INTVEC9(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xA6) + +#define LmM3INTVEC10(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xB0) + +#define LmM3FRMGAP(LinkNum) LmSEQ_PHY_REG(3, LinkNum, 0xB4) + +#define LmBITL_TIMER(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xA2) + +#define LmWWN(LinkNum) LmSEQ_PHY_REG(0, LinkNum, 0xA8) + + +/* + * LmSEQ CIO Bus Mode 5 Registers. + * Mode 5: Phy/OOB Control and Status. 
+ */ +#define LmSEQ_OOB_REG(phy_id, reg) LmSEQ_PHY_REG(5, (phy_id), (reg)) + +#define OOB_BFLTR 0x100 + +#define BFLTR_THR_MASK 0xF0 +#define BFLTR_TC_MASK 0x0F + +#define OOB_INIT_MIN 0x102 + +#define OOB_INIT_MAX 0x104 + +#define OOB_INIT_NEG 0x106 + +#define OOB_SAS_MIN 0x108 + +#define OOB_SAS_MAX 0x10A + +#define OOB_SAS_NEG 0x10C + +#define OOB_WAKE_MIN 0x10E + +#define OOB_WAKE_MAX 0x110 + +#define OOB_WAKE_NEG 0x112 + +#define OOB_IDLE_MAX 0x114 + +#define OOB_BURST_MAX 0x116 + +#define OOB_DATA_KBITS 0x126 + +#define OOB_ALIGN_0_DATA 0x12C + +#define OOB_ALIGN_1_DATA 0x130 + +#define D10_2_DATA_k 0x00 +#define SYNC_DATA_k 0x02 +#define ALIGN_1_DATA_k 0x04 +#define ALIGN_0_DATA_k 0x08 +#define BURST_DATA_k 0x10 + +#define OOB_PHY_RESET_COUNT 0x13C + +#define OOB_SIG_GEN 0x140 + +#define START_OOB 0x80 +#define START_DWS 0x40 +#define ALIGN_CNT3 0x30 +#define ALIGN_CNT2 0x20 +#define ALIGN_CNT1 0x10 +#define ALIGN_CNT4 0x00 +#define STOP_DWS 0x08 +#define SEND_COMSAS 0x04 +#define SEND_COMINIT 0x02 +#define SEND_COMWAKE 0x01 + +#define OOB_XMIT 0x141 + +#define TX_ENABLE 0x80 +#define XMIT_OOB_BURST 0x10 +#define XMIT_D10_2 0x08 +#define XMIT_SYNC 0x04 +#define XMIT_ALIGN_1 0x02 +#define XMIT_ALIGN_0 0x01 + +#define FUNCTION_MASK 0x142 + +#define SAS_MODE_DIS 0x80 +#define SATA_MODE_DIS 0x40 +#define SPINUP_HOLD_DIS 0x20 +#define HOT_PLUG_DIS 0x10 +#define SATA_PS_DIS 0x08 +#define FUNCTION_MASK_DEFAULT (SPINUP_HOLD_DIS | SATA_PS_DIS) + +#define OOB_MODE 0x143 + +#define SAS_MODE 0x80 +#define SATA_MODE 0x40 +#define SLOW_CLK 0x20 +#define FORCE_XMIT_15 0x08 +#define PHY_SPEED_60 0x04 +#define PHY_SPEED_30 0x02 +#define PHY_SPEED_15 0x01 + +#define CURRENT_STATUS 0x144 + +#define CURRENT_OOB_DONE 0x80 +#define CURRENT_LOSS_OF_SIGNAL 0x40 +#define CURRENT_SPINUP_HOLD 0x20 +#define CURRENT_HOT_PLUG_CNCT 0x10 +#define CURRENT_GTO_TIMEOUT 0x08 +#define CURRENT_OOB_TIMEOUT 0x04 +#define CURRENT_DEVICE_PRESENT 0x02 +#define CURRENT_OOB_ERROR 0x01 + +#define CURRENT_OOB1_ERROR (CURRENT_HOT_PLUG_CNCT | \ + CURRENT_GTO_TIMEOUT) + +#define CURRENT_OOB2_ERROR (CURRENT_HOT_PLUG_CNCT | \ + CURRENT_OOB_ERROR) + +#define DEVICE_ADDED_W_CNT (CURRENT_OOB_DONE | \ + CURRENT_HOT_PLUG_CNCT | \ + CURRENT_DEVICE_PRESENT) + +#define DEVICE_ADDED_WO_CNT (CURRENT_OOB_DONE | \ + CURRENT_DEVICE_PRESENT) + +#define DEVICE_REMOVED CURRENT_LOSS_OF_SIGNAL + +#define CURRENT_PHY_MASK (CURRENT_OOB_DONE | \ + CURRENT_LOSS_OF_SIGNAL | \ + CURRENT_SPINUP_HOLD | \ + CURRENT_HOT_PLUG_CNCT | \ + CURRENT_GTO_TIMEOUT | \ + CURRENT_DEVICE_PRESENT | \ + CURRENT_OOB_ERROR ) + +#define CURRENT_ERR_MASK (CURRENT_LOSS_OF_SIGNAL | \ + CURRENT_GTO_TIMEOUT | \ + CURRENT_OOB_TIMEOUT | \ + CURRENT_OOB_ERROR ) + +#define SPEED_MASK 0x145 + +#define SATA_SPEED_30_DIS 0x10 +#define SATA_SPEED_15_DIS 0x08 +#define SAS_SPEED_60_DIS 0x04 +#define SAS_SPEED_30_DIS 0x02 +#define SAS_SPEED_15_DIS 0x01 +#define SAS_SPEED_MASK_DEFAULT 0x00 + +#define OOB_TIMER_ENABLE 0x14D + +#define HOT_PLUG_EN 0x80 +#define RCD_EN 0x40 +#define COMTIMER_EN 0x20 +#define SNTT_EN 0x10 +#define SNLT_EN 0x04 +#define SNWT_EN 0x02 +#define ALIGN_EN 0x01 + +#define OOB_STATUS 0x14E + +#define OOB_DONE 0x80 +#define LOSS_OF_SIGNAL 0x40 /* ro */ +#define SPINUP_HOLD 0x20 +#define HOT_PLUG_CNCT 0x10 /* ro */ +#define GTO_TIMEOUT 0x08 /* ro */ +#define OOB_TIMEOUT 0x04 /* ro */ +#define DEVICE_PRESENT 0x02 /* ro */ +#define OOB_ERROR 0x01 /* ro */ + +#define OOB_STATUS_ERROR_MASK (LOSS_OF_SIGNAL | GTO_TIMEOUT | \ + OOB_TIMEOUT | OOB_ERROR) + +#define OOB_STATUS_CLEAR 
0x14F + +#define OOB_DONE_CLR 0x80 +#define LOSS_OF_SIGNAL_CLR 0x40 +#define SPINUP_HOLD_CLR 0x20 +#define HOT_PLUG_CNCT_CLR 0x10 +#define GTO_TIMEOUT_CLR 0x08 +#define OOB_TIMEOUT_CLR 0x04 +#define OOB_ERROR_CLR 0x01 + +#define HOT_PLUG_DELAY 0x150 +/* In 5 ms units. 20 = 100 ms. */ +#define HOTPLUG_DELAY_TIMEOUT 20 + + +#define INT_ENABLE_2 0x15A + +#define OOB_DONE_EN 0x80 +#define LOSS_OF_SIGNAL_EN 0x40 +#define SPINUP_HOLD_EN 0x20 +#define HOT_PLUG_CNCT_EN 0x10 +#define GTO_TIMEOUT_EN 0x08 +#define OOB_TIMEOUT_EN 0x04 +#define DEVICE_PRESENT_EN 0x02 +#define OOB_ERROR_EN 0x01 + +#define PHY_CONTROL_0 0x160 + +#define PHY_LOWPWREN_TX 0x80 +#define PHY_LOWPWREN_RX 0x40 +#define SPARE_REG_160_B5 0x20 +#define OFFSET_CANCEL_RX 0x10 + +/* bits 3:2 */ +#define PHY_RXCOMCENTER_60V 0x00 +#define PHY_RXCOMCENTER_70V 0x04 +#define PHY_RXCOMCENTER_80V 0x08 +#define PHY_RXCOMCENTER_90V 0x0C +#define PHY_RXCOMCENTER_MASK 0x0C + +#define PHY_RESET 0x02 +#define SAS_DEFAULT_SEL 0x01 + +#define PHY_CONTROL_1 0x161 + +/* bits 2:0 */ +#define SATA_PHY_DETLEVEL_50mv 0x00 +#define SATA_PHY_DETLEVEL_75mv 0x01 +#define SATA_PHY_DETLEVEL_100mv 0x02 +#define SATA_PHY_DETLEVEL_125mv 0x03 +#define SATA_PHY_DETLEVEL_150mv 0x04 +#define SATA_PHY_DETLEVEL_175mv 0x05 +#define SATA_PHY_DETLEVEL_200mv 0x06 +#define SATA_PHY_DETLEVEL_225mv 0x07 +#define SATA_PHY_DETLEVEL_MASK 0x07 + +/* bits 5:3 */ +#define SAS_PHY_DETLEVEL_50mv 0x00 +#define SAS_PHY_DETLEVEL_75mv 0x08 +#define SAS_PHY_DETLEVEL_100mv 0x10 +#define SAS_PHY_DETLEVEL_125mv 0x11 +#define SAS_PHY_DETLEVEL_150mv 0x20 +#define SAS_PHY_DETLEVEL_175mv 0x21 +#define SAS_PHY_DETLEVEL_200mv 0x30 +#define SAS_PHY_DETLEVEL_225mv 0x31 +#define SAS_PHY_DETLEVEL_MASK 0x38 + +#define PHY_CONTROL_2 0x162 + +/* bits 7:5 */ +#define SATA_PHY_DRV_400mv 0x00 +#define SATA_PHY_DRV_450mv 0x20 +#define SATA_PHY_DRV_500mv 0x40 +#define SATA_PHY_DRV_550mv 0x60 +#define SATA_PHY_DRV_600mv 0x80 +#define SATA_PHY_DRV_650mv 0xA0 +#define SATA_PHY_DRV_725mv 0xC0 +#define SATA_PHY_DRV_800mv 0xE0 +#define SATA_PHY_DRV_MASK 0xE0 + +/* bits 4:3 */ +#define SATA_PREEMP_0 0x00 +#define SATA_PREEMP_1 0x08 +#define SATA_PREEMP_2 0x10 +#define SATA_PREEMP_3 0x18 +#define SATA_PREEMP_MASK 0x18 + +#define SATA_CMSH1P5 0x04 + +/* bits 1:0 */ +#define SATA_SLEW_0 0x00 +#define SATA_SLEW_1 0x01 +#define SATA_SLEW_2 0x02 +#define SATA_SLEW_3 0x03 +#define SATA_SLEW_MASK 0x03 + +#define PHY_CONTROL_3 0x163 + +/* bits 7:5 */ +#define SAS_PHY_DRV_400mv 0x00 +#define SAS_PHY_DRV_450mv 0x20 +#define SAS_PHY_DRV_500mv 0x40 +#define SAS_PHY_DRV_550mv 0x60 +#define SAS_PHY_DRV_600mv 0x80 +#define SAS_PHY_DRV_650mv 0xA0 +#define SAS_PHY_DRV_725mv 0xC0 +#define SAS_PHY_DRV_800mv 0xE0 +#define SAS_PHY_DRV_MASK 0xE0 + +/* bits 4:3 */ +#define SAS_PREEMP_0 0x00 +#define SAS_PREEMP_1 0x08 +#define SAS_PREEMP_2 0x10 +#define SAS_PREEMP_3 0x18 +#define SAS_PREEMP_MASK 0x18 + +#define SAS_CMSH1P5 0x04 + +/* bits 1:0 */ +#define SAS_SLEW_0 0x00 +#define SAS_SLEW_1 0x01 +#define SAS_SLEW_2 0x02 +#define SAS_SLEW_3 0x03 +#define SAS_SLEW_MASK 0x03 + +#define PHY_CONTROL_4 0x168 + +#define PHY_DONE_CAL_TX 0x80 +#define PHY_DONE_CAL_RX 0x40 +#define RX_TERM_LOAD_DIS 0x20 +#define TX_TERM_LOAD_DIS 0x10 +#define AUTO_TERM_CAL_DIS 0x08 +#define PHY_SIGDET_FLTR_EN 0x04 +#define OSC_FREQ 0x02 +#define PHY_START_CAL 0x01 + +/* + * HST_PCIX2 Registers, Addresss Range: (0x00-0xFC) + */ +#define PCIX_REG_BASE_ADR 0xB8040000 + +#define PCIC_VENDOR_ID 0x00 + +#define PCIC_DEVICE_ID 0x02 + +#define PCIC_COMMAND 0x04 + +#define 
INT_DIS 0x0400 +#define FBB_EN 0x0200 /* ro */ +#define SERR_EN 0x0100 +#define STEP_EN 0x0080 /* ro */ +#define PERR_EN 0x0040 +#define VGA_EN 0x0020 /* ro */ +#define MWI_EN 0x0010 +#define SPC_EN 0x0008 +#define MST_EN 0x0004 +#define MEM_EN 0x0002 +#define IO_EN 0x0001 + +#define PCIC_STATUS 0x06 + +#define PERR_DET 0x8000 +#define SERR_GEN 0x4000 +#define MABT_DET 0x2000 +#define TABT_DET 0x1000 +#define TABT_GEN 0x0800 +#define DPERR_DET 0x0100 +#define CAP_LIST 0x0010 +#define INT_STAT 0x0008 + +#define PCIC_DEVREV_ID 0x08 + +#define PCIC_CLASS_CODE 0x09 + +#define PCIC_CACHELINE_SIZE 0x0C + +#define PCIC_MBAR0 0x10 + +#define PCIC_MBAR0_OFFSET 0 + +#define PCIC_MBAR1 0x18 + +#define PCIC_MBAR1_OFFSET 2 + +#define PCIC_IOBAR 0x20 + +#define PCIC_IOBAR_OFFSET 4 + +#define PCIC_SUBVENDOR_ID 0x2C + +#define PCIC_SUBSYTEM_ID 0x2E + +#define PCIX_STATUS 0x44 +#define RCV_SCE 0x20000000 +#define UNEXP_SC 0x00080000 +#define SC_DISCARD 0x00040000 + +#define ECC_CTRL_STAT 0x48 +#define UNCOR_ECCERR 0x00000008 + +#define PCIC_PM_CSR 0x5C + +#define PWR_STATE_D0 0 +#define PWR_STATE_D1 1 /* not supported */ +#define PWR_STATE_D2 2 /* not supported */ +#define PWR_STATE_D3 3 + +#define PCIC_BASE1 0x6C /* internal use only */ + +#define BASE1_RSVD 0xFFFFFFF8 + +#define PCIC_BASEA 0x70 /* internal use only */ + +#define BASEA_RSVD 0xFFFFFFC0 +#define BASEA_START 0 + +#define PCIC_BASEB 0x74 /* internal use only */ + +#define BASEB_RSVD 0xFFFFFF80 +#define BASEB_IOMAP_MASK 0x7F +#define BASEB_START 0x80 + +#define PCIC_BASEC 0x78 /* internal use only */ + +#define BASEC_RSVD 0xFFFFFFFC +#define BASEC_MASK 0x03 +#define BASEC_START 0x58 + +#define PCIC_MBAR_KEY 0x7C /* internal use only */ + +#define MBAR_KEY_MASK 0xFFFFFFFF + +#define PCIC_HSTPCIX_CNTRL 0xA0 + +#define REWIND_DIS 0x0800 +#define SC_TMR_DIS 0x04000000 + +#define PCIC_MBAR0_MASK 0xA8 +#define PCIC_MBAR0_SIZE_MASK 0x1FFFE000 +#define PCIC_MBAR0_SIZE_SHIFT 13 +#define PCIC_MBAR0_SIZE(val) \ + (((val) & PCIC_MBAR0_SIZE_MASK) >> PCIC_MBAR0_SIZE_SHIFT) + +#define PCIC_FLASH_MBAR 0xB8 + +#define PCIC_INTRPT_STAT 0xD4 + +#define PCIC_TP_CTRL 0xFC + +/* + * EXSI Registers, Addresss Range: (0x00-0xFC) + */ +#define EXSI_REG_BASE_ADR REG_BASE_ADDR_EXSI + +#define EXSICNFGR (EXSI_REG_BASE_ADR + 0x00) + +#define OCMINITIALIZED 0x80000000 +#define ASIEN 0x00400000 +#define HCMODE 0x00200000 +#define PCIDEF 0x00100000 +#define COMSTOCK 0x00080000 +#define SEEPROMEND 0x00040000 +#define MSTTIMEN 0x00020000 +#define XREGEX 0x00000200 +#define NVRAMW 0x00000100 +#define NVRAMEX 0x00000080 +#define SRAMW 0x00000040 +#define SRAMEX 0x00000020 +#define FLASHW 0x00000010 +#define FLASHEX 0x00000008 +#define SEEPROMCFG 0x00000004 +#define SEEPROMTYP 0x00000002 +#define SEEPROMEX 0x00000001 + + +#define EXSICNTRLR (EXSI_REG_BASE_ADR + 0x04) + +#define MODINT_EN 0x00000001 + + +#define PMSTATR (EXSI_REG_BASE_ADR + 0x10) + +#define FLASHRST 0x00000002 +#define FLASHRDY 0x00000001 + + +#define FLCNFGR (EXSI_REG_BASE_ADR + 0x14) + +#define FLWEH_MASK 0x30000000 +#define FLWESU_MASK 0x0C000000 +#define FLWEPW_MASK 0x03F00000 +#define FLOEH_MASK 0x000C0000 +#define FLOESU_MASK 0x00030000 +#define FLOEPW_MASK 0x0000FC00 +#define FLCSH_MASK 0x00000300 +#define FLCSSU_MASK 0x000000C0 +#define FLCSPW_MASK 0x0000003F + +#define SRCNFGR (EXSI_REG_BASE_ADR + 0x18) + +#define SRWEH_MASK 0x30000000 +#define SRWESU_MASK 0x0C000000 +#define SRWEPW_MASK 0x03F00000 + +#define SROEH_MASK 0x000C0000 +#define SROESU_MASK 0x00030000 +#define SROEPW_MASK 0x0000FC00 +#define 
SRCSH_MASK 0x00000300 +#define SRCSSU_MASK 0x000000C0 +#define SRCSPW_MASK 0x0000003F + +#define NVCNFGR (EXSI_REG_BASE_ADR + 0x1C) + +#define NVWEH_MASK 0x30000000 +#define NVWESU_MASK 0x0C000000 +#define NVWEPW_MASK 0x03F00000 +#define NVOEH_MASK 0x000C0000 +#define NVOESU_MASK 0x00030000 +#define NVOEPW_MASK 0x0000FC00 +#define NVCSH_MASK 0x00000300 +#define NVCSSU_MASK 0x000000C0 +#define NVCSPW_MASK 0x0000003F + +#define XRCNFGR (EXSI_REG_BASE_ADR + 0x20) + +#define XRWEH_MASK 0x30000000 +#define XRWESU_MASK 0x0C000000 +#define XRWEPW_MASK 0x03F00000 +#define XROEH_MASK 0x000C0000 +#define XROESU_MASK 0x00030000 +#define XROEPW_MASK 0x0000FC00 +#define XRCSH_MASK 0x00000300 +#define XRCSSU_MASK 0x000000C0 +#define XRCSPW_MASK 0x0000003F + +#define XREGADDR (EXSI_REG_BASE_ADR + 0x24) + +#define XRADDRINCEN 0x80000000 +#define XREGADD_MASK 0x007FFFFF + + +#define XREGDATAR (EXSI_REG_BASE_ADR + 0x28) + +#define XREGDATA_MASK 0x0000FFFF + +#define GPIOOER (EXSI_REG_BASE_ADR + 0x40) + +#define GPIOODENR (EXSI_REG_BASE_ADR + 0x44) + +#define GPIOINVR (EXSI_REG_BASE_ADR + 0x48) + +#define GPIODATAOR (EXSI_REG_BASE_ADR + 0x4C) + +#define GPIODATAIR (EXSI_REG_BASE_ADR + 0x50) + +#define GPIOCNFGR (EXSI_REG_BASE_ADR + 0x54) + +#define GPIO_EXTSRC 0x00000001 + +#define SCNTRLR (EXSI_REG_BASE_ADR + 0xA0) + +#define SXFERDONE 0x00000100 +#define SXFERCNT_MASK 0x000000E0 +#define SCMDTYP_MASK 0x0000001C +#define SXFERSTART 0x00000002 +#define SXFEREN 0x00000001 + +#define SRATER (EXSI_REG_BASE_ADR + 0xA4) + +#define SADDRR (EXSI_REG_BASE_ADR + 0xA8) + +#define SADDR_MASK 0x0000FFFF + +#define SDATAOR (EXSI_REG_BASE_ADR + 0xAC) + +#define SDATAOR0 (EXSI_REG_BASE_ADR + 0xAC) +#define SDATAOR1 (EXSI_REG_BASE_ADR + 0xAD) +#define SDATAOR2 (EXSI_REG_BASE_ADR + 0xAE) +#define SDATAOR3 (EXSI_REG_BASE_ADR + 0xAF) + +#define SDATAIR (EXSI_REG_BASE_ADR + 0xB0) + +#define SDATAIR0 (EXSI_REG_BASE_ADR + 0xB0) +#define SDATAIR1 (EXSI_REG_BASE_ADR + 0xB1) +#define SDATAIR2 (EXSI_REG_BASE_ADR + 0xB2) +#define SDATAIR3 (EXSI_REG_BASE_ADR + 0xB3) + +#define ASISTAT0R (EXSI_REG_BASE_ADR + 0xD0) +#define ASIFMTERR 0x00000400 +#define ASISEECHKERR 0x00000200 +#define ASIERR 0x00000100 + +#define ASISTAT1R (EXSI_REG_BASE_ADR + 0xD4) +#define CHECKSUM_MASK 0x0000FFFF + +#define ASIERRADDR (EXSI_REG_BASE_ADR + 0xD8) +#define ASIERRDATAR (EXSI_REG_BASE_ADR + 0xDC) +#define ASIERRSTATR (EXSI_REG_BASE_ADR + 0xE0) +#define CPI2ASIBYTECNT_MASK 0x00070000 +#define CPI2ASIBYTEEN_MASK 0x0000F000 +#define CPI2ASITARGERR_MASK 0x00000F00 +#define CPI2ASITARGMID_MASK 0x000000F0 +#define CPI2ASIMSTERR_MASK 0x0000000F + +/* + * XSRAM, External SRAM (DWord and any BE pattern accessible) + */ +#define XSRAM_REG_BASE_ADDR 0xB8100000 +#define XSRAM_SIZE 0x100000 + +/* + * NVRAM Registers, Address Range: (0x00000 - 0x3FFFF). + */ +#define NVRAM_REG_BASE_ADR 0xBF800000 +#define NVRAM_MAX_BASE_ADR 0x003FFFFF + +/* OCM base address */ +#define OCM_BASE_ADDR 0xA0000000 +#define OCM_MAX_SIZE 0x20000 + +/* + * Sequencers (Central and Link) Scratch RAM page definitions. + */ + +/* + * The Central Management Sequencer (CSEQ) Scratch Memory is a 1024 + * byte memory. It is dword accessible and has byte parity + * protection. The CSEQ accesses it in 32 byte windows, either as mode + * dependent or mode independent memory. Each mode has 96 bytes, + * (three 32 byte pages 0-2, not contiguous), leaving 128 bytes of + * Mode Independent memory (four 32 byte pages 3-7). 
Note that mode + * dependent scratch memory, Mode 8, page 0-3 overlaps mode + * independent scratch memory, pages 0-3. + * - 896 bytes of mode dependent scratch, 96 bytes per Modes 0-7, and + * 128 bytes in mode 8, + * - 259 bytes of mode independent scratch, common to modes 0-15. + * + * Sequencer scratch RAM is 1024 bytes. This scratch memory is + * divided into mode dependent and mode independent scratch with this + * memory further subdivided into pages of size 32 bytes. There are 5 + * pages (160 bytes) of mode independent scratch and 3 pages of + * dependent scratch memory for modes 0-7 (768 bytes). Mode 8 pages + * 0-2 dependent scratch overlap with pages 0-2 of mode independent + * scratch memory. + * + * The host accesses this scratch in a different manner from the + * central sequencer. The sequencer has to use CSEQ registers CSCRPAGE + * and CMnSCRPAGE to access the scratch memory. A flat mapping of the + * scratch memory is avaliable for software convenience and to prevent + * corruption while the sequencer is running. This memory is mapped + * onto addresses 800h - BFFh, total of 400h bytes. + * + * These addresses are mapped as follows: + * + * 800h-83Fh Mode Dependent Scratch Mode 0 Pages 0-1 + * 840h-87Fh Mode Dependent Scratch Mode 1 Pages 0-1 + * 880h-8BFh Mode Dependent Scratch Mode 2 Pages 0-1 + * 8C0h-8FFh Mode Dependent Scratch Mode 3 Pages 0-1 + * 900h-93Fh Mode Dependent Scratch Mode 4 Pages 0-1 + * 940h-97Fh Mode Dependent Scratch Mode 5 Pages 0-1 + * 980h-9BFh Mode Dependent Scratch Mode 6 Pages 0-1 + * 9C0h-9FFh Mode Dependent Scratch Mode 7 Pages 0-1 + * A00h-A5Fh Mode Dependent Scratch Mode 8 Pages 0-2 + * Mode Independent Scratch Pages 0-2 + * A60h-A7Fh Mode Dependent Scratch Mode 8 Page 3 + * Mode Independent Scratch Page 3 + * A80h-AFFh Mode Independent Scratch Pages 4-7 + * B00h-B1Fh Mode Dependent Scratch Mode 0 Page 2 + * B20h-B3Fh Mode Dependent Scratch Mode 1 Page 2 + * B40h-B5Fh Mode Dependent Scratch Mode 2 Page 2 + * B60h-B7Fh Mode Dependent Scratch Mode 3 Page 2 + * B80h-B9Fh Mode Dependent Scratch Mode 4 Page 2 + * BA0h-BBFh Mode Dependent Scratch Mode 5 Page 2 + * BC0h-BDFh Mode Dependent Scratch Mode 6 Page 2 + * BE0h-BFFh Mode Dependent Scratch Mode 7 Page 2 + */ + +/* General macros */ +#define CSEQ_PAGE_SIZE 32 /* Scratch page size (in bytes) */ + +/* All macros start with offsets from base + 0x800 (CMAPPEDSCR). + * Mode dependent scratch page 0, mode 0. + * For modes 1-7 you have to do arithmetic. */ +#define CSEQ_LRM_SAVE_SINDEX (CMAPPEDSCR + 0x0000) +#define CSEQ_LRM_SAVE_SCBPTR (CMAPPEDSCR + 0x0002) +#define CSEQ_Q_LINK_HEAD (CMAPPEDSCR + 0x0004) +#define CSEQ_Q_LINK_TAIL (CMAPPEDSCR + 0x0006) +#define CSEQ_LRM_SAVE_SCRPAGE (CMAPPEDSCR + 0x0008) + +/* Mode dependent scratch page 0 mode 8 macros. */ +#define CSEQ_RET_ADDR (CMAPPEDSCR + 0x0200) +#define CSEQ_RET_SCBPTR (CMAPPEDSCR + 0x0202) +#define CSEQ_SAVE_SCBPTR (CMAPPEDSCR + 0x0204) +#define CSEQ_EMPTY_TRANS_CTX (CMAPPEDSCR + 0x0206) +#define CSEQ_RESP_LEN (CMAPPEDSCR + 0x0208) +#define CSEQ_TMF_SCBPTR (CMAPPEDSCR + 0x020A) +#define CSEQ_GLOBAL_PREV_SCB (CMAPPEDSCR + 0x020C) +#define CSEQ_GLOBAL_HEAD (CMAPPEDSCR + 0x020E) +#define CSEQ_CLEAR_LU_HEAD (CMAPPEDSCR + 0x0210) +#define CSEQ_TMF_OPCODE (CMAPPEDSCR + 0x0212) +#define CSEQ_SCRATCH_FLAGS (CMAPPEDSCR + 0x0213) +#define CSEQ_HSB_SITE (CMAPPEDSCR + 0x021A) +#define CSEQ_FIRST_INV_SCB_SITE (CMAPPEDSCR + 0x021C) +#define CSEQ_FIRST_INV_DDB_SITE (CMAPPEDSCR + 0x021E) + +/* Mode dependent scratch page 1 mode 8 macros. 
*/ +#define CSEQ_LUN_TO_CLEAR (CMAPPEDSCR + 0x0220) +#define CSEQ_LUN_TO_CHECK (CMAPPEDSCR + 0x0228) + +/* Mode dependent scratch page 2 mode 8 macros */ +#define CSEQ_HQ_NEW_POINTER (CMAPPEDSCR + 0x0240) +#define CSEQ_HQ_DONE_BASE (CMAPPEDSCR + 0x0248) +#define CSEQ_HQ_DONE_POINTER (CMAPPEDSCR + 0x0250) +#define CSEQ_HQ_DONE_PASS (CMAPPEDSCR + 0x0254) + +/* Mode independent scratch page 4 macros. */ +#define CSEQ_Q_EXE_HEAD (CMAPPEDSCR + 0x0280) +#define CSEQ_Q_EXE_TAIL (CMAPPEDSCR + 0x0282) +#define CSEQ_Q_DONE_HEAD (CMAPPEDSCR + 0x0284) +#define CSEQ_Q_DONE_TAIL (CMAPPEDSCR + 0x0286) +#define CSEQ_Q_SEND_HEAD (CMAPPEDSCR + 0x0288) +#define CSEQ_Q_SEND_TAIL (CMAPPEDSCR + 0x028A) +#define CSEQ_Q_DMA2CHIM_HEAD (CMAPPEDSCR + 0x028C) +#define CSEQ_Q_DMA2CHIM_TAIL (CMAPPEDSCR + 0x028E) +#define CSEQ_Q_COPY_HEAD (CMAPPEDSCR + 0x0290) +#define CSEQ_Q_COPY_TAIL (CMAPPEDSCR + 0x0292) +#define CSEQ_REG0 (CMAPPEDSCR + 0x0294) +#define CSEQ_REG1 (CMAPPEDSCR + 0x0296) +#define CSEQ_REG2 (CMAPPEDSCR + 0x0298) +#define CSEQ_LINK_CTL_Q_MAP (CMAPPEDSCR + 0x029C) +#define CSEQ_MAX_CSEQ_MODE (CMAPPEDSCR + 0x029D) +#define CSEQ_FREE_LIST_HACK_COUNT (CMAPPEDSCR + 0x029E) + +/* Mode independent scratch page 5 macros. */ +#define CSEQ_EST_NEXUS_REQ_QUEUE (CMAPPEDSCR + 0x02A0) +#define CSEQ_EST_NEXUS_REQ_COUNT (CMAPPEDSCR + 0x02A8) +#define CSEQ_Q_EST_NEXUS_HEAD (CMAPPEDSCR + 0x02B0) +#define CSEQ_Q_EST_NEXUS_TAIL (CMAPPEDSCR + 0x02B2) +#define CSEQ_NEED_EST_NEXUS_SCB (CMAPPEDSCR + 0x02B4) +#define CSEQ_EST_NEXUS_REQ_HEAD (CMAPPEDSCR + 0x02B6) +#define CSEQ_EST_NEXUS_REQ_TAIL (CMAPPEDSCR + 0x02B7) +#define CSEQ_EST_NEXUS_SCB_OFFSET (CMAPPEDSCR + 0x02B8) + +/* Mode independent scratch page 6 macros. */ +#define CSEQ_INT_ROUT_RET_ADDR0 (CMAPPEDSCR + 0x02C0) +#define CSEQ_INT_ROUT_RET_ADDR1 (CMAPPEDSCR + 0x02C2) +#define CSEQ_INT_ROUT_SCBPTR (CMAPPEDSCR + 0x02C4) +#define CSEQ_INT_ROUT_MODE (CMAPPEDSCR + 0x02C6) +#define CSEQ_ISR_SCRATCH_FLAGS (CMAPPEDSCR + 0x02C7) +#define CSEQ_ISR_SAVE_SINDEX (CMAPPEDSCR + 0x02C8) +#define CSEQ_ISR_SAVE_DINDEX (CMAPPEDSCR + 0x02CA) +#define CSEQ_Q_MONIRTT_HEAD (CMAPPEDSCR + 0x02D0) +#define CSEQ_Q_MONIRTT_TAIL (CMAPPEDSCR + 0x02D2) +#define CSEQ_FREE_SCB_MASK (CMAPPEDSCR + 0x02D5) +#define CSEQ_BUILTIN_FREE_SCB_HEAD (CMAPPEDSCR + 0x02D6) +#define CSEQ_BUILTIN_FREE_SCB_TAIL (CMAPPEDSCR + 0x02D8) +#define CSEQ_EXTENDED_FREE_SCB_HEAD (CMAPPEDSCR + 0x02DA) +#define CSEQ_EXTENDED_FREE_SCB_TAIL (CMAPPEDSCR + 0x02DC) + +/* Mode independent scratch page 7 macros. */ +#define CSEQ_EMPTY_REQ_QUEUE (CMAPPEDSCR + 0x02E0) +#define CSEQ_EMPTY_REQ_COUNT (CMAPPEDSCR + 0x02E8) +#define CSEQ_Q_EMPTY_HEAD (CMAPPEDSCR + 0x02F0) +#define CSEQ_Q_EMPTY_TAIL (CMAPPEDSCR + 0x02F2) +#define CSEQ_NEED_EMPTY_SCB (CMAPPEDSCR + 0x02F4) +#define CSEQ_EMPTY_REQ_HEAD (CMAPPEDSCR + 0x02F6) +#define CSEQ_EMPTY_REQ_TAIL (CMAPPEDSCR + 0x02F7) +#define CSEQ_EMPTY_SCB_OFFSET (CMAPPEDSCR + 0x02F8) +#define CSEQ_PRIMITIVE_DATA (CMAPPEDSCR + 0x02FA) +#define CSEQ_TIMEOUT_CONST (CMAPPEDSCR + 0x02FC) + +/*************************************************************************** +* Link m Sequencer scratch RAM is 512 bytes. +* This scratch memory is divided into mode dependent and mode +* independent scratch with this memory further subdivided into +* pages of size 32 bytes. There are 4 pages (128 bytes) of +* mode independent scratch and 4 pages of dependent scratch +* memory for modes 0-2 (384 bytes). +* +* The host accesses this scratch in a different manner from the +* link sequencer. 
The sequencer has to use LSEQ registers +* LmSCRPAGE and LmMnSCRPAGE to access the scratch memory. A flat +* mapping of the scratch memory is avaliable for software +* convenience and to prevent corruption while the sequencer is +* running. This memory is mapped onto addresses 800h - 9FFh. +* +* These addresses are mapped as follows: +* +* 800h-85Fh Mode Dependent Scratch Mode 0 Pages 0-2 +* 860h-87Fh Mode Dependent Scratch Mode 0 Page 3 +* Mode Dependent Scratch Mode 5 Page 0 +* 880h-8DFh Mode Dependent Scratch Mode 1 Pages 0-2 +* 8E0h-8FFh Mode Dependent Scratch Mode 1 Page 3 +* Mode Dependent Scratch Mode 5 Page 1 +* 900h-95Fh Mode Dependent Scratch Mode 2 Pages 0-2 +* 960h-97Fh Mode Dependent Scratch Mode 2 Page 3 +* Mode Dependent Scratch Mode 5 Page 2 +* 980h-9DFh Mode Independent Scratch Pages 0-3 +* 9E0h-9FFh Mode Independent Scratch Page 3 +* Mode Dependent Scratch Mode 5 Page 3 +* +****************************************************************************/ +/* General macros */ +#define LSEQ_MODE_SCRATCH_SIZE 0x80 /* Size of scratch RAM per mode */ +#define LSEQ_PAGE_SIZE 0x20 /* Scratch page size (in bytes) */ +#define LSEQ_MODE5_PAGE0_OFFSET 0x60 + +/* Common mode dependent scratch page 0 macros for modes 0,1,2, and 5 */ +/* Indexed using LSEQ_MODE_SCRATCH_SIZE * mode, for modes 0,1,2. */ +#define LmSEQ_RET_ADDR(LinkNum) (LmSCRATCH(LinkNum) + 0x0000) +#define LmSEQ_REG0_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0002) +#define LmSEQ_MODE_FLAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x0004) + +/* Mode flag macros (byte 0) */ +#define SAS_SAVECTX_OCCURRED 0x80 +#define SAS_OOBSVC_OCCURRED 0x40 +#define SAS_OOB_DEVICE_PRESENT 0x20 +#define SAS_CFGHDR_OCCURRED 0x10 +#define SAS_RCV_INTS_ARE_DISABLED 0x08 +#define SAS_OOB_HOT_PLUG_CNCT 0x04 +#define SAS_AWAIT_OPEN_CONNECTION 0x02 +#define SAS_CFGCMPLT_OCCURRED 0x01 + +/* Mode flag macros (byte 1) */ +#define SAS_RLSSCB_OCCURRED 0x80 +#define SAS_FORCED_HEADER_MISS 0x40 + +#define LmSEQ_RET_ADDR2(LinkNum) (LmSCRATCH(LinkNum) + 0x0006) +#define LmSEQ_RET_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x0008) +#define LmSEQ_OPCODE_TO_CSEQ(LinkNum) (LmSCRATCH(LinkNum) + 0x000B) +#define LmSEQ_DATA_TO_CSEQ(LinkNum) (LmSCRATCH(LinkNum) + 0x000C) + +/* Mode dependent scratch page 0 macros for mode 0 (non-common) */ +/* Absolute offsets */ +#define LmSEQ_FIRST_INV_DDB_SITE(LinkNum) (LmSCRATCH(LinkNum) + 0x000E) +#define LmSEQ_EMPTY_TRANS_CTX(LinkNum) (LmSCRATCH(LinkNum) + 0x0010) +#define LmSEQ_RESP_LEN(LinkNum) (LmSCRATCH(LinkNum) + 0x0012) +#define LmSEQ_FIRST_INV_SCB_SITE(LinkNum) (LmSCRATCH(LinkNum) + 0x0014) +#define LmSEQ_INTEN_SAVE(LinkNum) (LmSCRATCH(LinkNum) + 0x0016) +#define LmSEQ_LINK_RST_FRM_LEN(LinkNum) (LmSCRATCH(LinkNum) + 0x001A) +#define LmSEQ_LINK_RST_PROTOCOL(LinkNum) (LmSCRATCH(LinkNum) + 0x001B) +#define LmSEQ_RESP_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x001C) +#define LmSEQ_LAST_LOADED_SGE(LinkNum) (LmSCRATCH(LinkNum) + 0x001D) +#define LmSEQ_SAVE_SCBPTR(LinkNum) (LmSCRATCH(LinkNum) + 0x001E) + +/* Mode dependent scratch page 0 macros for mode 1 (non-common) */ +/* Absolute offsets */ +#define LmSEQ_Q_XMIT_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x008E) +#define LmSEQ_M1_EMPTY_TRANS_CTX(LinkNum) (LmSCRATCH(LinkNum) + 0x0090) +#define LmSEQ_INI_CONN_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x0092) +#define LmSEQ_FAILED_OPEN_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x009A) +#define LmSEQ_XMIT_REQUEST_TYPE(LinkNum) (LmSCRATCH(LinkNum) + 0x009B) +#define LmSEQ_M1_RESP_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x009C) +#define LmSEQ_M1_LAST_LOADED_SGE(LinkNum) 
(LmSCRATCH(LinkNum) + 0x009D) +#define LmSEQ_M1_SAVE_SCBPTR(LinkNum) (LmSCRATCH(LinkNum) + 0x009E) + +/* Mode dependent scratch page 0 macros for mode 2 (non-common) */ +#define LmSEQ_PORT_COUNTER(LinkNum) (LmSCRATCH(LinkNum) + 0x010E) +#define LmSEQ_PM_TABLE_PTR(LinkNum) (LmSCRATCH(LinkNum) + 0x0110) +#define LmSEQ_SATA_INTERLOCK_TMR_SAVE(LinkNum) (LmSCRATCH(LinkNum) + 0x0112) +#define LmSEQ_IP_BITL(LinkNum) (LmSCRATCH(LinkNum) + 0x0114) +#define LmSEQ_COPY_SMP_CONN_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x0116) +#define LmSEQ_P0M2_OFFS1AH(LinkNum) (LmSCRATCH(LinkNum) + 0x011A) + +/* Mode dependent scratch page 0 macros for modes 4/5 (non-common) */ +/* Absolute offsets */ +#define LmSEQ_SAVED_OOB_STATUS(LinkNum) (LmSCRATCH(LinkNum) + 0x006E) +#define LmSEQ_SAVED_OOB_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x006F) +#define LmSEQ_Q_LINK_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x0070) +#define LmSEQ_LINK_RST_ERR(LinkNum) (LmSCRATCH(LinkNum) + 0x0072) +#define LmSEQ_SAVED_OOB_SIGNALS(LinkNum) (LmSCRATCH(LinkNum) + 0x0073) +#define LmSEQ_SAS_RESET_MODE(LinkNum) (LmSCRATCH(LinkNum) + 0x0074) +#define LmSEQ_LINK_RESET_RETRY_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0075) +#define LmSEQ_NUM_LINK_RESET_RETRIES(LinkNum) (LmSCRATCH(LinkNum) + 0x0076) +#define LmSEQ_OOB_INT_ENABLES(LinkNum) (LmSCRATCH(LinkNum) + 0x007A) +#define LmSEQ_NOTIFY_TIMER_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x007C) +#define LmSEQ_NOTIFY_TIMER_DOWN_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x007E) + +/* Mode dependent scratch page 1, mode 0 and mode 1 */ +#define LmSEQ_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x0020) +#define LmSEQ_SG_LIST_PTR_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x0030) +#define LmSEQ_M1_SG_LIST_PTR_ADDR0(LinkNum) (LmSCRATCH(LinkNum) + 0x00A0) +#define LmSEQ_M1_SG_LIST_PTR_ADDR1(LinkNum) (LmSCRATCH(LinkNum) + 0x00B0) + +/* Mode dependent scratch page 1 macros for mode 2 */ +/* Absolute offsets */ +#define LmSEQ_INVALID_DWORD_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0120) +#define LmSEQ_DISPARITY_ERROR_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0124) +#define LmSEQ_LOSS_OF_SYNC_COUNT(LinkNum) (LmSCRATCH(LinkNum) + 0x0128) + +/* Mode dependent scratch page 1 macros for mode 4/5 */ +#define LmSEQ_FRAME_TYPE_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E0) +#define LmSEQ_HASHED_DEST_ADDR_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E1) +#define LmSEQ_HASHED_SRC_ADDR_MASK_PRINT(LinkNum) (LmSCRATCH(LinkNum) + 0x00E4) +#define LmSEQ_HASHED_SRC_ADDR_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00E5) +#define LmSEQ_NUM_FILL_BYTES_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00EB) +#define LmSEQ_TAG_MASK(LinkNum) (LmSCRATCH(LinkNum) + 0x00F0) +#define LmSEQ_TARGET_PORT_XFER_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x00F2) +#define LmSEQ_DATA_OFFSET(LinkNum) (LmSCRATCH(LinkNum) + 0x00F4) + +/* Mode dependent scratch page 2 macros for mode 0 */ +/* Absolute offsets */ +#define LmSEQ_SMP_RCV_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0040) +#define LmSEQ_DEVICE_BITS(LinkNum) (LmSCRATCH(LinkNum) + 0x005B) +#define LmSEQ_SDB_DDB(LinkNum) (LmSCRATCH(LinkNum) + 0x005C) +#define LmSEQ_SDB_NUM_TAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x005E) +#define LmSEQ_SDB_CURR_TAG(LinkNum) (LmSCRATCH(LinkNum) + 0x005F) + +/* Mode dependent scratch page 2 macros for mode 1 */ +/* Absolute offsets */ +/* byte 0 bits 1-0 are domain select. 
*/ +#define LmSEQ_TX_ID_ADDR_FRAME(LinkNum) (LmSCRATCH(LinkNum) + 0x00C0) +#define LmSEQ_OPEN_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x00C8) +#define LmSEQ_SRST_AS_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x00CC) +#define LmSEQ_LAST_LOADED_SG_EL(LinkNum) (LmSCRATCH(LinkNum) + 0x00D4) + +/* Mode dependent scratch page 2 macros for mode 2 */ +/* Absolute offsets */ +#define LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0140) +#define LmSEQ_CLOSE_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0144) +#define LmSEQ_BREAK_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0148) +#define LmSEQ_DWS_RESET_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x014C) +#define LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(LinkNum) \ + (LmSCRATCH(LinkNum) + 0x0150) +#define LmSEQ_MCTL_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0154) + +/* Mode dependent scratch page 2 macros for mode 5 */ +#define LmSEQ_COMINIT_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0160) +#define LmSEQ_RCV_ID_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0164) +#define LmSEQ_RCV_FIS_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x0168) +#define LmSEQ_DEV_PRES_TIMER_TERM_TS(LinkNum) (LmSCRATCH(LinkNum) + 0x016C) + +/* Mode dependent scratch page 3 macros for modes 0 and 1 */ +/* None defined */ + +/* Mode dependent scratch page 3 macros for modes 2 and 5 */ +/* None defined */ + +/* Mode Independent Scratch page 0 macros. */ +#define LmSEQ_Q_TGTXFR_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x0180) +#define LmSEQ_Q_TGTXFR_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x0182) +#define LmSEQ_LINK_NUMBER(LinkNum) (LmSCRATCH(LinkNum) + 0x0186) +#define LmSEQ_SCRATCH_FLAGS(LinkNum) (LmSCRATCH(LinkNum) + 0x0187) +/* + * Currently only bit 0, SAS_DWSAQD, is used. + */ +#define SAS_DWSAQD 0x01 /* + * DWSSTATUS: DWSAQD + * bit last read in ISR.
+ */ +#define LmSEQ_CONNECTION_STATE(LinkNum) (LmSCRATCH(LinkNum) + 0x0188) +/* Connection states (byte 0) */ +#define SAS_WE_OPENED_CS 0x01 +#define SAS_DEVICE_OPENED_CS 0x02 +#define SAS_WE_SENT_DONE_CS 0x04 +#define SAS_DEVICE_SENT_DONE_CS 0x08 +#define SAS_WE_SENT_CLOSE_CS 0x10 +#define SAS_DEVICE_SENT_CLOSE_CS 0x20 +#define SAS_WE_SENT_BREAK_CS 0x40 +#define SAS_DEVICE_SENT_BREAK_CS 0x80 +/* Connection states (byte 1) */ +#define SAS_OPN_TIMEOUT_OR_OPN_RJCT_CS 0x01 +#define SAS_AIP_RECEIVED_CS 0x02 +#define SAS_CREDIT_TIMEOUT_OCCURRED_CS 0x04 +#define SAS_ACKNAK_TIMEOUT_OCCURRED_CS 0x08 +#define SAS_SMPRSP_TIMEOUT_OCCURRED_CS 0x10 +#define SAS_DONE_TIMEOUT_OCCURRED_CS 0x20 +/* Connection states (byte 2) */ +#define SAS_SMP_RESPONSE_RECEIVED_CS 0x01 +#define SAS_INTLK_TIMEOUT_OCCURRED_CS 0x02 +#define SAS_DEVICE_SENT_DMAT_CS 0x04 +#define SAS_DEVICE_SENT_SYNCSRST_CS 0x08 +#define SAS_CLEARING_AFFILIATION_CS 0x20 +#define SAS_RXTASK_ACTIVE_CS 0x40 +#define SAS_TXTASK_ACTIVE_CS 0x80 +/* Connection states (byte 3) */ +#define SAS_PHY_LOSS_OF_SIGNAL_CS 0x01 +#define SAS_DWS_TIMER_EXPIRED_CS 0x02 +#define SAS_LINK_RESET_NOT_COMPLETE_CS 0x04 +#define SAS_PHY_DISABLED_CS 0x08 +#define SAS_LINK_CTL_TASK_ACTIVE_CS 0x10 +#define SAS_PHY_EVENT_TASK_ACTIVE_CS 0x20 +#define SAS_DEVICE_SENT_ID_FRAME_CS 0x40 +#define SAS_DEVICE_SENT_REG_FIS_CS 0x40 +#define SAS_DEVICE_SENT_HARD_RESET_CS 0x80 +#define SAS_PHY_IS_DOWN_FLAGS (SAS_PHY_LOSS_OF_SIGNAL_CS|\ + SAS_DWS_TIMER_EXPIRED_CS |\ + SAS_LINK_RESET_NOT_COMPLETE_CS|\ + SAS_PHY_DISABLED_CS) + +#define SAS_LINK_CTL_PHY_EVENT_FLAGS (SAS_LINK_CTL_TASK_ACTIVE_CS |\ + SAS_PHY_EVENT_TASK_ACTIVE_CS |\ + SAS_DEVICE_SENT_ID_FRAME_CS |\ + SAS_DEVICE_SENT_HARD_RESET_CS) + +#define LmSEQ_CONCTL(LinkNum) (LmSCRATCH(LinkNum) + 0x018C) +#define LmSEQ_CONSTAT(LinkNum) (LmSCRATCH(LinkNum) + 0x018E) +#define LmSEQ_CONNECTION_MODES(LinkNum) (LmSCRATCH(LinkNum) + 0x018F) +#define LmSEQ_REG1_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0192) +#define LmSEQ_REG2_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0194) +#define LmSEQ_REG3_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0196) +#define LmSEQ_REG0_ISR(LinkNum) (LmSCRATCH(LinkNum) + 0x0198) + +/* Mode independent scratch page 1 macros. */ +#define LmSEQ_EST_NEXUS_SCBPTR0(LinkNum) (LmSCRATCH(LinkNum) + 0x01A0) +#define LmSEQ_EST_NEXUS_SCBPTR1(LinkNum) (LmSCRATCH(LinkNum) + 0x01A2) +#define LmSEQ_EST_NEXUS_SCBPTR2(LinkNum) (LmSCRATCH(LinkNum) + 0x01A4) +#define LmSEQ_EST_NEXUS_SCBPTR3(LinkNum) (LmSCRATCH(LinkNum) + 0x01A6) +#define LmSEQ_EST_NEXUS_SCB_OPCODE0(LinkNum) (LmSCRATCH(LinkNum) + 0x01A8) +#define LmSEQ_EST_NEXUS_SCB_OPCODE1(LinkNum) (LmSCRATCH(LinkNum) + 0x01A9) +#define LmSEQ_EST_NEXUS_SCB_OPCODE2(LinkNum) (LmSCRATCH(LinkNum) + 0x01AA) +#define LmSEQ_EST_NEXUS_SCB_OPCODE3(LinkNum) (LmSCRATCH(LinkNum) + 0x01AB) +#define LmSEQ_EST_NEXUS_SCB_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x01AC) +#define LmSEQ_EST_NEXUS_SCB_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01AD) +#define LmSEQ_EST_NEXUS_BUF_AVAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01AE) +#define LmSEQ_TIMEOUT_CONST(LinkNum) (LmSCRATCH(LinkNum) + 0x01B8) +#define LmSEQ_ISR_SAVE_SINDEX(LinkNum) (LmSCRATCH(LinkNum) + 0x01BC) +#define LmSEQ_ISR_SAVE_DINDEX(LinkNum) (LmSCRATCH(LinkNum) + 0x01BE) + +/* Mode independent scratch page 2 macros. 
*/ +#define LmSEQ_EMPTY_SCB_PTR0(LinkNum) (LmSCRATCH(LinkNum) + 0x01C0) +#define LmSEQ_EMPTY_SCB_PTR1(LinkNum) (LmSCRATCH(LinkNum) + 0x01C2) +#define LmSEQ_EMPTY_SCB_PTR2(LinkNum) (LmSCRATCH(LinkNum) + 0x01C4) +#define LmSEQ_EMPTY_SCB_PTR3(LinkNum) (LmSCRATCH(LinkNum) + 0x01C6) +#define LmSEQ_EMPTY_SCB_OPCD0(LinkNum) (LmSCRATCH(LinkNum) + 0x01C8) +#define LmSEQ_EMPTY_SCB_OPCD1(LinkNum) (LmSCRATCH(LinkNum) + 0x01C9) +#define LmSEQ_EMPTY_SCB_OPCD2(LinkNum) (LmSCRATCH(LinkNum) + 0x01CA) +#define LmSEQ_EMPTY_SCB_OPCD3(LinkNum) (LmSCRATCH(LinkNum) + 0x01CB) +#define LmSEQ_EMPTY_SCB_HEAD(LinkNum) (LmSCRATCH(LinkNum) + 0x01CC) +#define LmSEQ_EMPTY_SCB_TAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01CD) +#define LmSEQ_EMPTY_BUFS_AVAIL(LinkNum) (LmSCRATCH(LinkNum) + 0x01CE) +#define LmSEQ_ATA_SCR_REGS(LinkNum) (LmSCRATCH(LinkNum) + 0x01D4) + +/* Mode independent scratch page 3 macros. */ +#define LmSEQ_DEV_PRES_TMR_TOUT_CONST(LinkNum) (LmSCRATCH(LinkNum) + 0x01E0) +#define LmSEQ_SATA_INTERLOCK_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01E4) +#define LmSEQ_STP_SHUTDOWN_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01E8) +#define LmSEQ_SRST_ASSERT_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01EC) +#define LmSEQ_RCV_FIS_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F0) +#define LmSEQ_ONE_MILLISEC_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F4) +#define LmSEQ_TEN_MS_COMINIT_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01F8) +#define LmSEQ_SMP_RCV_TIMEOUT(LinkNum) (LmSCRATCH(LinkNum) + 0x01FC) + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_sas.h b/drivers/scsi/aic94xx/aic94xx_sas.h new file mode 100644 index 0000000..64d2317 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_sas.h @@ -0,0 +1,785 @@ +/* + * Aic94xx SAS/SATA driver SAS definitions and hardware interface header file. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef _AIC94XX_SAS_H_ +#define _AIC94XX_SAS_H_ + +#include <scsi/libsas.h> + +/* ---------- DDBs ---------- */ +/* DDBs are device descriptor blocks which describe a device in the + * domain for which this sequencer can maintain low-level + * connections. They are 64 bytes.
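+ *
+ * As a rough, illustrative cross-check (not something the hardware or
+ * this driver requires), the 64-byte expectation could be asserted at
+ * compile time against one of the DDB layouts defined below, e.g.:
+ *
+ *	BUILD_BUG_ON(sizeof(struct asd_ddb_ssp_smp_target_port) != 64);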
+ */ + +struct asd_ddb_ssp_smp_target_port { + u8 conn_type; /* byte 0 */ +#define DDB_TP_CONN_TYPE 0x81 /* Initiator port and addr frame type 0x01 */ + + u8 conn_rate; + __be16 init_conn_tag; + u8 dest_sas_addr[8]; /* bytes 4-11 */ + + __le16 send_queue_head; + u8 sq_suspended; + u8 ddb_type; /* DDB_TYPE_TARGET */ +#define DDB_TYPE_UNUSED 0xFF +#define DDB_TYPE_TARGET 0xFE +#define DDB_TYPE_INITIATOR 0xFD +#define DDB_TYPE_PM_PORT 0xFC + + __le16 _r_a; + __be16 awt_def; + + u8 compat_features; /* byte 20 */ + u8 pathway_blocked_count; + __be16 arb_wait_time; + __be32 more_compat_features; /* byte 24 */ + + u8 conn_mask; + u8 flags; /* concurrent conn:2,2 and open:0(1) */ +#define CONCURRENT_CONN_SUPP 0x04 +#define OPEN_REQUIRED 0x01 + + u16 _r_b; + __le16 exec_queue_tail; + __le16 send_queue_tail; + __le16 sister_ddb; + + __le16 _r_c; + + u8 max_concurrent_conn; + u8 num_concurrent_conn; + u8 num_contexts; + + u8 _r_d; + + __le16 active_task_count; + + u8 _r_e[9]; + + u8 itnl_reason; /* I_T nexus loss reason */ + + __le16 _r_f; + + __le16 itnl_timeout; +#define ITNL_TIMEOUT_CONST 0x7D0 /* 2 seconds */ + + __le32 itnl_timestamp; +} __attribute__ ((packed)); + +struct asd_ddb_stp_sata_target_port { + u8 conn_type; /* byte 0 */ + u8 conn_rate; + __be16 init_conn_tag; + u8 dest_sas_addr[8]; /* bytes 4-11 */ + + __le16 send_queue_head; + u8 sq_suspended; + u8 ddb_type; /* DDB_TYPE_TARGET */ + + __le16 _r_a; + + __be16 awt_def; + u8 compat_features; /* byte 20 */ + u8 pathway_blocked_count; + __be16 arb_wait_time; + __be32 more_compat_features; /* byte 24 */ + + u8 conn_mask; + u8 flags; /* concurrent conn:2,2 and open:0(1) */ +#define SATA_MULTIPORT 0x80 +#define SUPPORTS_AFFIL 0x40 +#define STP_AFFIL_POL 0x20 + + u8 _r_b; + u8 flags2; /* STP close policy:0 */ +#define STP_CL_POL_NO_TX 0x00 +#define STP_CL_POL_BTW_CMDS 0x01 + + __le16 exec_queue_tail; + __le16 send_queue_tail; + __le16 sister_ddb; + __le16 ata_cmd_scbptr; + __le32 sata_tag_alloc_mask; + __le16 active_task_count; + __le16 _r_c; + __le32 sata_sactive; + u8 num_sata_tags; + u8 sata_status; + u8 sata_ending_status; + u8 itnl_reason; /* I_T nexus loss reason */ + __le16 ncq_data_scb_ptr; + __le16 itnl_timeout; + __le32 itnl_timestamp; +} __attribute__ ((packed)); + +/* This struct asd_ddb_init_port, describes the device descriptor block + * of an initiator port (when the sequencer is operating in target mode). + * Bytes [0,11] and [20,27] are from the OPEN address frame. + * The sequencer allocates an initiator port DDB entry. 
+ */ +struct asd_ddb_init_port { + u8 conn_type; /* byte 0 */ + u8 conn_rate; + __be16 init_conn_tag; /* BE */ + u8 dest_sas_addr[8]; + __le16 send_queue_head; /* LE, byte 12 */ + u8 sq_suspended; + u8 ddb_type; /* DDB_TYPE_INITIATOR */ + __le16 _r_a; + __be16 awt_def; /* BE */ + u8 compat_features; + u8 pathway_blocked_count; + __be16 arb_wait_time; /* BE */ + __be32 more_compat_features; /* BE */ + u8 conn_mask; + u8 flags; /* == 5 */ + u16 _r_b; + __le16 exec_queue_tail; /* execution queue tail */ + __le16 send_queue_tail; + __le16 sister_ddb; + __le16 init_resp_timeout; /* initiator response timeout */ + __le32 _r_c; + __le16 active_tasks; /* active task count */ + __le16 init_list; /* initiator list link pointer */ + __le32 _r_d; + u8 max_conn_to[3]; /* from Conn-Disc mode page, in us, LE */ + u8 itnl_reason; /* I_T nexus loss reason */ + __le16 bus_inact_to; /* from Conn-Disc mode page, in 100 us, LE */ + __le16 itnl_to; /* from the Protocol Specific Port Ctrl MP */ + __le32 itnl_timestamp; +} __attribute__ ((packed)); + +/* This struct asd_ddb_sata_tag, describes a look-up table to be used + * by the sequencers. SATA II, IDENTIFY DEVICE data, word 76, bit 8: + * NCQ support. This table is used by the sequencers to find the + * corresponding SCB, given a SATA II tag value. + */ +struct asd_ddb_sata_tag { + __le16 scb_pointer[32]; +} __attribute__ ((packed)); + +/* This struct asd_ddb_sata_pm_table, describes a port number to + * connection handle look-up table. SATA targets attached to a port + * multiplier require a 4-bit port number value. There is one DDB + * entry of this type for each SATA port multiplier (sister DDB). + * Given a SATA PM port number, this table gives us the SATA PM Port + * DDB of the SATA port multiplier port (i.e. the SATA target + * discovered on the port). + */ +struct asd_ddb_sata_pm_table { + __le16 ddb_pointer[16]; + __le16 _r_a[16]; +} __attribute__ ((packed)); + +/* This struct asd_ddb_sata_pm_port, describes the SATA port multiplier + * port format DDB. + */ +struct asd_ddb_sata_pm_port { + u8 _r_a[15]; + u8 ddb_type; + u8 _r_b[13]; + u8 pm_port_flags; +#define PM_PORT_MASK 0xF0 +#define PM_PORT_SET 0x02 + u8 _r_c[6]; + __le16 sister_ddb; + __le16 ata_cmd_scbptr; + __le32 sata_tag_alloc_mask; + __le16 active_task_count; + __le16 parent_ddb; + __le32 sata_sactive; + u8 num_sata_tags; + u8 sata_status; + u8 sata_ending_status; + u8 _r_d[9]; +} __attribute__ ((packed)); + +/* This struct asd_ddb_seq_shared, describes a DDB shared by the + * central and link sequencers. port_map_by_links is indexed phy + * number [0,7]; each byte is a bit mask of all the phys that are in + * the same port as the indexed phy. + */ +struct asd_ddb_seq_shared { + __le16 q_free_ddb_head; + __le16 q_free_ddb_tail; + __le16 q_free_ddb_cnt; + __le16 q_used_ddb_head; + __le16 q_used_ddb_tail; + __le16 shared_mem_lock; + __le16 smp_conn_tag; + __le16 est_nexus_buf_cnt; + __le16 est_nexus_buf_thresh; + u32 _r_a; + u8 settable_max_contexts; + u8 _r_b[23]; + u8 conn_not_active; + u8 phy_is_up; + u8 _r_c[8]; + u8 port_map_by_links[8]; +} __attribute__ ((packed)); + +/* ---------- SG Element ---------- */ + +/* This struct sg_el, describes the hardware scatter gather buffer + * element. All entries are little endian. In an SCB, there are 2 of + * this, plus one more, called a link element of this indicating a + * sublist if needed. + * + * A link element has only the bus address set and the flags (DS) bit + * valid. The bus address points to the start of the sublist. 
+ * + * If a sublist is needed, then that sublist should also include the 2 + * sg_el embedded in the SCB, in which case next_sg_offset is 32, + * since sizeof(sg_el) = 16; EOS should be 1 and EOL 0 in this case. + */ +struct sg_el { + __le64 bus_addr; + __le32 size; + __le16 _r; + u8 next_sg_offs; + u8 flags; +#define ASD_SG_EL_DS_MASK 0x30 +#define ASD_SG_EL_DS_OCM 0x10 +#define ASD_SG_EL_DS_HM 0x00 +#define ASD_SG_EL_LIST_MASK 0xC0 +#define ASD_SG_EL_LIST_EOL 0x40 +#define ASD_SG_EL_LIST_EOS 0x80 +} __attribute__ ((packed)); + +/* ---------- SCBs ---------- */ + +/* An SCB (sequencer control block) is comprised of a common header + * and a task part, for a total of 128 bytes. All fields are in LE + * order, unless otherwise noted. + */ + +/* This struct scb_header, defines the SCB header format. + */ +struct scb_header { + __le64 next_scb; + __le16 index; /* transaction context */ + u8 opcode; +} __attribute__ ((packed)); + +/* SCB opcodes: Execution queue + */ +#define INITIATE_SSP_TASK 0x00 +#define INITIATE_LONG_SSP_TASK 0x01 +#define INITIATE_BIDIR_SSP_TASK 0x02 +#define ABORT_TASK 0x03 +#define INITIATE_SSP_TMF 0x04 +#define SSP_TARG_GET_DATA 0x05 +#define SSP_TARG_GET_DATA_GOOD 0x06 +#define SSP_TARG_SEND_RESP 0x07 +#define QUERY_SSP_TASK 0x08 +#define INITIATE_ATA_TASK 0x09 +#define INITIATE_ATAPI_TASK 0x0a +#define CONTROL_ATA_DEV 0x0b +#define INITIATE_SMP_TASK 0x0c +#define SMP_TARG_SEND_RESP 0x0f + +/* SCB opcodes: Send Queue + */ +#define SSP_TARG_SEND_DATA 0x40 +#define SSP_TARG_SEND_DATA_GOOD 0x41 + +/* SCB opcodes: Link Queue + */ +#define CONTROL_PHY 0x80 +#define SEND_PRIMITIVE 0x81 +#define INITIATE_LINK_ADM_TASK 0x82 + +/* SCB opcodes: other + */ +#define EMPTY_SCB 0xc0 +#define INITIATE_SEQ_ADM_TASK 0xc1 +#define EST_ICL_TARG_WINDOW 0xc2 +#define COPY_MEM 0xc3 +#define CLEAR_NEXUS 0xc4 +#define INITIATE_DDB_ADM_TASK 0xc6 +#define ESTABLISH_NEXUS_ESCB 0xd0 + +#define LUN_SIZE 8 + +/* See SAS spec, task IU + */ +struct ssp_task_iu { + u8 lun[LUN_SIZE]; /* BE */ + u16 _r_a; + u8 tmf; + u8 _r_b; + __be16 tag; /* BE */ + u8 _r_c[14]; +} __attribute__ ((packed)); + +/* See SAS spec, command IU + */ +struct ssp_command_iu { + u8 lun[LUN_SIZE]; + u8 _r_a; + u8 efb_prio_attr; /* enable first burst, task prio & attr */ +#define EFB_MASK 0x80 +#define TASK_PRIO_MASK 0x78 +#define TASK_ATTR_MASK 0x07 + + u8 _r_b; + u8 add_cdb_len; /* in dwords, since bit 0,1 are reserved */ + union { + u8 cdb[16]; + struct { + __le64 long_cdb_addr; /* bus address, LE */ + __le32 long_cdb_size; /* LE */ + u8 _r_c[3]; + u8 eol_ds; /* eol:6,6, ds:5,4 */ + } long_cdb; /* sequencer extension */ + }; +} __attribute__ ((packed)); + +struct xfer_rdy_iu { + __be32 requested_offset; /* BE */ + __be32 write_data_len; /* BE */ + __be32 _r_a; +} __attribute__ ((packed)); + +/* ---------- SCB tasks ---------- */ + +/* This is both ssp_task and long_ssp_task + */ +struct initiate_ssp_task { + u8 proto_conn_rate; /* proto:6,4, conn_rate:3,0 */ + __le32 total_xfer_len; + struct ssp_frame_hdr ssp_frame; + struct ssp_command_iu ssp_cmd; + __le16 sister_scb; /* 0xFFFF */ + __le16 conn_handle; /* index to DDB for the intended target */ + u8 data_dir; /* :1,0 */ +#define DATA_DIR_NONE 0x00 +#define DATA_DIR_IN 0x01 +#define DATA_DIR_OUT 0x02 +#define DATA_DIR_BYRECIPIENT 0x03 + + u8 _r_a; + u8 retry_count; + u8 _r_b[5]; + struct sg_el sg_element[3]; /* 2 real and 1 link */ +} __attribute__ ((packed)); + +/* This defines both ata_task and atapi_task. 
+ * ata: C bit of FIS should be 1, + * atapi: C bit of FIS should be 1, and command register should be 0xA0, + * to indicate a packet command. + */ +struct initiate_ata_task { + u8 proto_conn_rate; + __le32 total_xfer_len; + struct host_to_dev_fis fis; + __le32 data_offs; + u8 atapi_packet[16]; + u8 _r_a[12]; + __le16 sister_scb; + __le16 conn_handle; + u8 ata_flags; /* CSMI:6,6, DTM:4,4, QT:3,3, data dir:1,0 */ +#define CSMI_TASK 0x40 +#define DATA_XFER_MODE_DMA 0x10 +#define ATA_Q_TYPE_MASK 0x08 +#define ATA_Q_TYPE_UNTAGGED 0x00 +#define ATA_Q_TYPE_NCQ 0x08 + + u8 _r_b; + u8 retry_count; + u8 _r_c; + u8 flags; +#define STP_AFFIL_POLICY 0x20 +#define SET_AFFIL_POLICY 0x10 +#define RET_PARTIAL_SGLIST 0x02 + + u8 _r_d[3]; + struct sg_el sg_element[3]; +} __attribute__ ((packed)); + +struct initiate_smp_task { + u8 proto_conn_rate; + u8 _r_a[40]; + struct sg_el smp_req; + __le16 sister_scb; + __le16 conn_handle; + u8 _r_c[8]; + struct sg_el smp_resp; + u8 _r_d[32]; +} __attribute__ ((packed)); + +struct control_phy { + u8 phy_id; + u8 sub_func; +#define DISABLE_PHY 0x00 +#define ENABLE_PHY 0x01 +#define RELEASE_SPINUP_HOLD 0x02 +#define ENABLE_PHY_NO_SAS_OOB 0x03 +#define ENABLE_PHY_NO_SATA_OOB 0x04 +#define PHY_NO_OP 0x05 +#define EXECUTE_HARD_RESET 0x81 + + u8 func_mask; + u8 speed_mask; + u8 hot_plug_delay; + u8 port_type; + u8 flags; +#define DEV_PRES_TIMER_OVERRIDE_ENABLE 0x01 +#define DISABLE_PHY_IF_OOB_FAILS 0x02 + + __le32 timeout_override; + u8 link_reset_retries; + u8 _r_a[47]; + __le16 conn_handle; + u8 _r_b[56]; +} __attribute__ ((packed)); + +struct control_ata_dev { + u8 proto_conn_rate; + __le32 _r_a; + struct host_to_dev_fis fis; + u8 _r_b[32]; + __le16 sister_scb; + __le16 conn_handle; + u8 ata_flags; /* 0 */ + u8 _r_c[55]; +} __attribute__ ((packed)); + +struct empty_scb { + u8 num_valid; + __le32 _r_a; +#define ASD_EDBS_PER_SCB 7 +/* header+data+CRC+DMA suffix data */ +#define ASD_EDB_SIZE (24+1024+4+16) + struct sg_el eb[ASD_EDBS_PER_SCB]; +#define ELEMENT_NOT_VALID 0xC0 +} __attribute__ ((packed)); + +struct initiate_link_adm { + u8 phy_id; + u8 sub_func; +#define GET_LINK_ERROR_COUNT 0x00 +#define RESET_LINK_ERROR_COUNT 0x01 +#define ENABLE_NOTIFY_SPINUP_INTS 0x02 + + u8 _r_a[57]; + __le16 conn_handle; + u8 _r_b[56]; +} __attribute__ ((packed)); + +struct copy_memory { + u8 _r_a; + __le16 xfer_len; + __le16 _r_b; + __le64 src_busaddr; + u8 src_ds; /* See definition of sg_el */ + u8 _r_c[45]; + __le16 conn_handle; + __le64 _r_d; + __le64 dest_busaddr; + u8 dest_ds; /* See definition of sg_el */ + u8 _r_e[39]; +} __attribute__ ((packed)); + +struct abort_task { + u8 proto_conn_rate; + __le32 _r_a; + struct ssp_frame_hdr ssp_frame; + struct ssp_task_iu ssp_task; + __le16 sister_scb; + __le16 conn_handle; + u8 flags; /* ovrd_itnl_timer:3,3, suspend_data_trans:2,2 */ +#define SUSPEND_DATA_TRANS 0x04 + + u8 _r_b; + u8 retry_count; + u8 _r_c[5]; + __le16 index; /* Transaction context of task to be queried */ + __le16 itnl_to; + u8 _r_d[44]; +} __attribute__ ((packed)); + +struct clear_nexus { + u8 nexus; +#define NEXUS_ADAPTER 0x00 +#define NEXUS_PORT 0x01 +#define NEXUS_I_T 0x02 +#define NEXUS_I_T_L 0x03 +#define NEXUS_TAG 0x04 +#define NEXUS_TRANS_CX 0x05 +#define NEXUS_SATA_TAG 0x06 +#define NEXUS_T_L 0x07 +#define NEXUS_L 0x08 +#define NEXUS_T_TAG 0x09 + + __le32 _r_a; + u8 flags; +#define SUSPEND_TX 0x80 +#define RESUME_TX 0x40 +#define SEND_Q 0x04 +#define EXEC_Q 0x02 +#define NOTINQ 0x01 + + u8 _r_b[3]; + u8 conn_mask; + u8 _r_c[19]; + struct ssp_task_iu ssp_task; /* 
LUN and TAG */ + __le16 _r_d; + __le16 conn_handle; + __le64 _r_e; + __le16 index; /* Transaction context of task to be cleared */ + __le16 context; /* Clear nexus context */ + u8 _r_f[44]; +} __attribute__ ((packed)); + +struct initiate_ssp_tmf { + u8 proto_conn_rate; + __le32 _r_a; + struct ssp_frame_hdr ssp_frame; + struct ssp_task_iu ssp_task; + __le16 sister_scb; + __le16 conn_handle; + u8 flags; /* itnl override and suspend data tx */ +#define OVERRIDE_ITNL_TIMER 8 + + u8 _r_b; + u8 retry_count; + u8 _r_c[5]; + __le16 index; /* Transaction context of task to be queried */ + __le16 itnl_to; + u8 _r_d[44]; +} __attribute__ ((packed)); + +/* Transmits an arbitrary primitive on the link. + * Used for NOTIFY and BROADCAST. + */ +struct send_prim { + u8 phy_id; + u8 wait_transmit; /* :0,0 */ + u8 xmit_flags; +#define XMTPSIZE_MASK 0xF0 +#define XMTPSIZE_SINGLE 0x10 +#define XMTPSIZE_REPEATED 0x20 +#define XMTPSIZE_CONT 0x20 +#define XMTPSIZE_TRIPLE 0x30 +#define XMTPSIZE_REDUNDANT 0x60 +#define XMTPSIZE_INF 0 + +#define XMTCONTEN 0x04 +#define XMTPFRM 0x02 /* Transmit at the next frame boundary */ +#define XMTPIMM 0x01 /* Transmit immediately */ + + __le16 _r_a; + u8 prim[4]; /* K, D0, D1, D2 */ + u8 _r_b[50]; + __le16 conn_handle; + u8 _r_c[56]; +} __attribute__ ((packed)); + +/* This describes both SSP Target Get Data and SSP Target Get Data And + * Send Good Response SCBs. Used when the sequencer is operating in + * target mode... + */ +struct ssp_targ_get_data { + u8 proto_conn_rate; + __le32 total_xfer_len; + struct ssp_frame_hdr ssp_frame; + struct xfer_rdy_iu xfer_rdy; + u8 lun[LUN_SIZE]; + __le64 _r_a; + __le16 sister_scb; + __le16 conn_handle; + u8 data_dir; /* 01b */ + u8 _r_b; + u8 retry_count; + u8 _r_c[5]; + struct sg_el sg_element[3]; +} __attribute__ ((packed)); + +/* ---------- The actual SCB struct ---------- */ + +struct scb { + struct scb_header header; + union { + struct initiate_ssp_task ssp_task; + struct initiate_ata_task ata_task; + struct initiate_smp_task smp_task; + struct control_phy control_phy; + struct control_ata_dev control_ata_dev; + struct empty_scb escb; + struct initiate_link_adm link_adm; + struct copy_memory cp_mem; + struct abort_task abort_task; + struct clear_nexus clear_nexus; + struct initiate_ssp_tmf ssp_tmf; + }; +} __attribute__ ((packed)); + +/* ---------- Done List ---------- */ +/* The done list entry opcode field is defined below. + * The mnemonic encoding and meaning is as follows: + * TC - Task Complete, status was received and acknowledged + * TF - Task Failed, indicates an error prior to receiving acknowledgment + * for the command: + * - no conn, + * - NACK or R_ERR received in response to this command, + * - credit blocked or not available, or in the case of SMP request, + * - no SMP response was received. + * In these four cases it is known that the target didn't receive the + * command. + * TI - Task Interrupted, error after the command was acknowledged. It is + * known that the command was received by the target. + * TU - Task Unacked, command was transmitted but neither ACK (R_OK) nor NAK + * (R_ERR) was received due to loss of signal, broken connection, loss of + * dword sync or other reason. The application client should send the + * appropriate task query. + * TA - Task Aborted, see TF. + * _RESP - The completion includes an empty buffer containing status. + * TO - Timeout. 
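+ *
+ * As a rough sketch of how these classes might be consumed (this is
+ * illustrative only, not code from this driver), a completion routine
+ * could branch on the class of dl->opcode:
+ *
+ *	switch (dl->opcode) {
+ *	case TC_NO_ERROR:       // TC_: status received, complete the task
+ *		break;
+ *	case TU_ACK_NAK_TO:     // TU_: fate unknown, send a task query
+ *	case TU_BREAK:
+ *		break;
+ *	case TF_OPEN_TO:        // TF_: target never got the command, may retry
+ *		break;
+ *	case TI_BREAK:          // TI_: target got it, error afterwards
+ *		break;
+ *	}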
+ */ +#define TC_NO_ERROR 0x00 +#define TC_UNDERRUN 0x01 +#define TC_OVERRUN 0x02 +#define TF_OPEN_TO 0x03 +#define TF_OPEN_REJECT 0x04 +#define TI_BREAK 0x05 +#define TI_PROTO_ERR 0x06 +#define TC_SSP_RESP 0x07 +#define TI_PHY_DOWN 0x08 +#define TF_PHY_DOWN 0x09 +#define TC_LINK_ADM_RESP 0x0a +#define TC_CSMI 0x0b +#define TC_ATA_RESP 0x0c +#define TU_PHY_DOWN 0x0d +#define TU_BREAK 0x0e +#define TI_SATA_TO 0x0f +#define TI_NAK 0x10 +#define TC_CONTROL_PHY 0x11 +#define TF_BREAK 0x12 +#define TC_RESUME 0x13 +#define TI_ACK_NAK_TO 0x14 +#define TF_SMPRSP_TO 0x15 +#define TF_SMP_XMIT_RCV_ERR 0x16 +#define TC_PARTIAL_SG_LIST 0x17 +#define TU_ACK_NAK_TO 0x18 +#define TU_SATA_TO 0x19 +#define TF_NAK_RECV 0x1a +#define TA_I_T_NEXUS_LOSS 0x1b +#define TC_ATA_R_ERR_RECV 0x1c +#define TF_TMF_NO_CTX 0x1d +#define TA_ON_REQ 0x1e +#define TF_TMF_NO_TAG 0x1f +#define TF_TMF_TAG_FREE 0x20 +#define TF_TMF_TASK_DONE 0x21 +#define TF_TMF_NO_CONN_HANDLE 0x22 +#define TC_TASK_CLEARED 0x23 +#define TI_SYNCS_RECV 0x24 +#define TU_SYNCS_RECV 0x25 +#define TF_IRTT_TO 0x26 +#define TF_NO_SMP_CONN 0x27 +#define TF_IU_SHORT 0x28 +#define TF_DATA_OFFS_ERR 0x29 +#define TF_INV_CONN_HANDLE 0x2a +#define TF_REQUESTED_N_PENDING 0x2b + +/* 0xc1 - 0xc7: empty buffer received, + 0xd1 - 0xd7: establish nexus empty buffer received +*/ +/* This is the ESCB mask */ +#define ESCB_RECVD 0xC0 + + +/* This struct done_list_struct defines the done list entry. + * All fields are LE. + */ +struct done_list_struct { + __le16 index; /* aka transaction context */ + u8 opcode; + u8 status_block[4]; + u8 toggle; /* bit 0 */ +#define DL_TOGGLE_MASK 0x01 +} __attribute__ ((packed)); + +/* ---------- PHYS ---------- */ + +struct asd_phy { + struct asd_sas_phy sas_phy; + struct asd_phy_desc *phy_desc; /* hw profile */ + + struct sas_identify_frame *identify_frame; + struct asd_dma_tok *id_frm_tok; + + u8 frame_rcvd[ASD_EDB_SIZE]; +}; + + +#define ASD_SCB_SIZE sizeof(struct scb) +#define ASD_DDB_SIZE sizeof(struct asd_ddb_ssp_smp_target_port) + +/* Define this to 0 if you do not want NOTIFY (ENABLE SPINIP) sent. + * Default: 0x10 (it's a mask) + */ +#define ASD_NOTIFY_ENABLE_SPINUP 0x10 + +/* If enabled, set this to the interval between transmission + * of NOTIFY (ENABLE SPINUP). In units of 200 us. + */ +#define ASD_NOTIFY_TIMEOUT 2500 + +/* Initial delay after OOB, before we transmit NOTIFY (ENABLE SPINUP). + * If 0, transmit immediately. In milliseconds. + */ +#define ASD_NOTIFY_DOWN_COUNT 0 + +/* Device present timer timeout constant, 10 ms. */ +#define ASD_DEV_PRESENT_TIMEOUT 0x2710 + +#define ASD_SATA_INTERLOCK_TIMEOUT 0 + +/* How long to wait before shutting down an STP connection, unless + * an STP target sent frame(s). 50 usec. + * IGNORED by the sequencer (i.e. value 0 always). + */ +#define ASD_STP_SHUTDOWN_TIMEOUT 0x0 + +/* ATA soft reset timer timeout. 5 usec. */ +#define ASD_SRST_ASSERT_TIMEOUT 0x05 + +/* 31 sec */ +#define ASD_RCV_FIS_TIMEOUT 0x01D905C0 + +#define ASD_ONE_MILLISEC_TIMEOUT 0x03e8 + +/* COMINIT timer */ +#define ASD_TEN_MILLISEC_TIMEOUT 0x2710 +#define ASD_COMINIT_TIMEOUT ASD_TEN_MILLISEC_TIMEOUT + +/* 1 sec */ +#define ASD_SMP_RCV_TIMEOUT 0x000F4240 + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c new file mode 100644 index 0000000..fc1b743 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_scb.c @@ -0,0 +1,732 @@ +/* + * Aic94xx SAS/SATA driver SCB management. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. 
+ * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include <linux/pci.h> + +#include "aic94xx.h" +#include "aic94xx_reg.h" +#include "aic94xx_hwi.h" +#include "aic94xx_seq.h" + +#include "aic94xx_dump.h" + +/* ---------- EMPTY SCB ---------- */ + +#define DL_PHY_MASK 7 +#define BYTES_DMAED 0 +#define PRIMITIVE_RECVD 0x08 +#define PHY_EVENT 0x10 +#define LINK_RESET_ERROR 0x18 +#define TIMER_EVENT 0x20 +#define REQ_TASK_ABORT 0xF0 +#define REQ_DEVICE_RESET 0xF1 +#define SIGNAL_NCQ_ERROR 0xF2 +#define CLEAR_NCQ_ERROR 0xF3 + +#define PHY_EVENTS_STATUS (CURRENT_LOSS_OF_SIGNAL | CURRENT_OOB_DONE \ + | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \ + | CURRENT_OOB_ERROR) + +static inline void get_lrate_mode(struct asd_phy *phy, u8 oob_mode) +{ + switch (oob_mode & 7) { + case PHY_SPEED_60: + /* FIXME: sas transport class doesn't have this */ + phy->sas_phy.linkrate = PHY_LINKRATE_6; + phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS; + break; + case PHY_SPEED_30: + phy->sas_phy.linkrate = PHY_LINKRATE_3; + phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS; + break; + case PHY_SPEED_15: + phy->sas_phy.linkrate = PHY_LINKRATE_1_5; + phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS; + break; + } + if (oob_mode & SAS_MODE) + phy->sas_phy.oob_mode = SAS_OOB_MODE; + else if (oob_mode & SATA_MODE) + phy->sas_phy.oob_mode = SATA_OOB_MODE; +} + +static inline void asd_phy_event_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct sas_ha_struct *sas_ha = &asd_ha->sas_ha; + int phy_id = dl->status_block[0] & DL_PHY_MASK; + struct asd_phy *phy = &asd_ha->phys[phy_id]; + + u8 oob_status = dl->status_block[1] & PHY_EVENTS_STATUS; + u8 oob_mode = dl->status_block[2]; + + switch (oob_status) { + case CURRENT_LOSS_OF_SIGNAL: + /* directly attached device was removed */ + ASD_DPRINTK("phy%d: device unplugged\n", phy_id); + asd_turn_led(asd_ha, phy_id, 0); + sas_phy_disconnected(&phy->sas_phy); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL); + break; + case CURRENT_OOB_DONE: + /* hot plugged device */ + asd_turn_led(asd_ha, phy_id, 1); + get_lrate_mode(phy, oob_mode); + ASD_DPRINTK("phy%d device plugged: lrate:0x%x, proto:0x%x\n", + phy_id, phy->sas_phy.linkrate, phy->sas_phy.iproto); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE); + break; + case CURRENT_SPINUP_HOLD: + /* hot plug SATA, no COMWAKE sent */ + asd_turn_led(asd_ha, phy_id, 1); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD); + break; + case CURRENT_GTO_TIMEOUT: + case CURRENT_OOB_ERROR: + ASD_DPRINTK("phy%d error while OOB: oob status:0x%x\n", phy_id, + dl->status_block[1]); + asd_turn_led(asd_ha, phy_id, 0); + 
sas_phy_disconnected(&phy->sas_phy); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR); + break; + } +} + +/* If phys are enabled sparsely, this will do the right thing. */ +static inline unsigned ord_phy(struct asd_ha_struct *asd_ha, + struct asd_phy *phy) +{ + u8 enabled_mask = asd_ha->hw_prof.enabled_phys; + int i, k = 0; + + for_each_phy(enabled_mask, enabled_mask, i) { + if (&asd_ha->phys[i] == phy) + return k; + k++; + } + return 0; +} + +/** + * asd_get_attached_sas_addr -- extract/generate attached SAS address + * phy: pointer to asd_phy + * sas_addr: pointer to buffer where the SAS address is to be written + * + * This function extracts the SAS address from an IDENTIFY frame + * received. If OOB is SATA, then a SAS address is generated from the + * HA tables. + * + * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame + * buffer. + */ +static inline void asd_get_attached_sas_addr(struct asd_phy *phy, u8 *sas_addr) +{ + if (phy->sas_phy.frame_rcvd[0] == 0x34 + && phy->sas_phy.oob_mode == SATA_OOB_MODE) { + struct asd_ha_struct *asd_ha = phy->sas_phy.ha->lldd_ha; + /* FIS device-to-host */ + u64 addr = be64_to_cpu(*(__be64 *)phy->phy_desc->sas_addr); + + addr += asd_ha->hw_prof.sata_name_base + ord_phy(asd_ha, phy); + *(__be64 *)sas_addr = cpu_to_be64(addr); + } else { + struct sas_identify_frame *idframe = + (void *) phy->sas_phy.frame_rcvd; + memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE); + } +} + +static inline void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl, + int edb_id, int phy_id) +{ + unsigned long flags; + int edb_el = edb_id + ascb->edb_index; + struct asd_dma_tok *edb = ascb->ha->seq.edb_arr[edb_el]; + struct asd_phy *phy = &ascb->ha->phys[phy_id]; + struct sas_ha_struct *sas_ha = phy->sas_phy.ha; + u16 size = ((dl->status_block[3] & 7) << 8) | dl->status_block[2]; + + size = min(size, (u16) sizeof(phy->frame_rcvd)); + + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->sas_phy.frame_rcvd, edb->vaddr, size); + phy->sas_phy.frame_rcvd_size = size; + asd_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + asd_dump_frame_rcvd(phy, dl); + sas_ha->notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED); +} + +static inline void asd_link_reset_err_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl, + int phy_id) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct sas_ha_struct *sas_ha = &asd_ha->sas_ha; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + u8 lr_error = dl->status_block[1]; + u8 retries_left = dl->status_block[2]; + + switch (lr_error) { + case 0: + ASD_DPRINTK("phy%d: Receive ID timer expired\n", phy_id); + break; + case 1: + ASD_DPRINTK("phy%d: Loss of signal\n", phy_id); + break; + case 2: + ASD_DPRINTK("phy%d: Loss of dword sync\n", phy_id); + break; + case 3: + ASD_DPRINTK("phy%d: Receive FIS timeout\n", phy_id); + break; + default: + ASD_DPRINTK("phy%d: unknown link reset error code: 0x%x\n", + phy_id, lr_error); + break; + } + + asd_turn_led(asd_ha, phy_id, 0); + sas_phy_disconnected(sas_phy); + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + + if (retries_left == 0) { + int num = 1; + struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num, + GFP_ATOMIC); + if (!cp) { + asd_printk("%s: out of memory\n", __FUNCTION__); + goto out; + } + ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n", + phy_id); + asd_build_control_phy(cp, phy_id, ENABLE_PHY); + if 
(asd_post_ascb_list(ascb->ha, cp, 1) != 0) + asd_ascb_free(cp); + } +out: + ; +} + +static inline void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl, + int phy_id) +{ + unsigned long flags; + struct sas_ha_struct *sas_ha = &ascb->ha->sas_ha; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + u8 reg = dl->status_block[1]; + u32 cont = dl->status_block[2] << ((reg & 3)*8); + + reg &= ~3; + switch (reg) { + case LmPRMSTAT0BYTE0: + switch (cont) { + case LmBROADCH: + case LmBROADRVCH0: + case LmBROADRVCH1: + case LmBROADSES: + ASD_DPRINTK("phy%d: BROADCAST change received:%d\n", + phy_id, cont); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = ffs(cont); + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy,PORTE_BROADCAST_RCVD); + break; + + case LmUNKNOWNP: + ASD_DPRINTK("phy%d: unknown BREAK\n", phy_id); + break; + + default: + ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n", + phy_id, reg, cont); + break; + } + break; + case LmPRMSTAT1BYTE0: + switch (cont) { + case LmHARDRST: + ASD_DPRINTK("phy%d: HARD_RESET primitive rcvd\n", + phy_id); + /* The sequencer disables all phys on that port. + * We have to re-enable the phys ourselves. */ + sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET); + break; + + default: + ASD_DPRINTK("phy%d: primitive reg:0x%x, cont:0x%04x\n", + phy_id, reg, cont); + break; + } + break; + default: + ASD_DPRINTK("unknown primitive register:0x%x\n", + dl->status_block[1]); + break; + } +} + +/** + * asd_invalidate_edb -- invalidate an EDB and if necessary post the ESCB + * @ascb: pointer to Empty SCB + * @edb_id: index [0,6] to the empty data buffer which is to be invalidated + * + * After an EDB has been invalidated, if all EDBs in this ESCB have been + * invalidated, the ESCB is posted back to the sequencer. + * Context is tasklet/IRQ. + */ +void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id) +{ + struct asd_seq_data *seq = &ascb->ha->seq; + struct empty_scb *escb = &ascb->scb->escb; + struct sg_el *eb = &escb->eb[edb_id]; + struct asd_dma_tok *edb = seq->edb_arr[ascb->edb_index + edb_id]; + + memset(edb->vaddr, 0, ASD_EDB_SIZE); + eb->flags |= ELEMENT_NOT_VALID; + escb->num_valid--; + + if (escb->num_valid == 0) { + int i; + /* ASD_DPRINTK("reposting escb: vaddr: 0x%p, " + "dma_handle: 0x%08llx, next: 0x%08llx, " + "index:%d, opcode:0x%02x\n", + ascb->dma_scb.vaddr, + (u64)ascb->dma_scb.dma_handle, + le64_to_cpu(ascb->scb->header.next_scb), + le16_to_cpu(ascb->scb->header.index), + ascb->scb->header.opcode); + */ + escb->num_valid = ASD_EDBS_PER_SCB; + for (i = 0; i < ASD_EDBS_PER_SCB; i++) + escb->eb[i].flags = 0; + if (!list_empty(&ascb->list)) + list_del_init(&ascb->list); + i = asd_post_escb_list(ascb->ha, ascb, 1); + if (i) + asd_printk("couldn't post escb, err:%d\n", i); + } +} + +static void escb_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct sas_ha_struct *sas_ha = &asd_ha->sas_ha; + int edb = (dl->opcode & DL_PHY_MASK) - 1; /* [0xc1,0xc7] -> [0,6] */ + u8 sb_opcode = dl->status_block[0]; + int phy_id = sb_opcode & DL_PHY_MASK; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + + if (edb > 6 || edb < 0) { + ASD_DPRINTK("edb is 0x%x! 
dl->opcode is 0x%x\n", + edb, dl->opcode); + ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n", + sb_opcode, phy_id); + ASD_DPRINTK("escb: vaddr: 0x%p, " + "dma_handle: 0x%llx, next: 0x%llx, " + "index:%d, opcode:0x%02x\n", + ascb->dma_scb.vaddr, + (unsigned long long)ascb->dma_scb.dma_handle, + (unsigned long long) + le64_to_cpu(ascb->scb->header.next_scb), + le16_to_cpu(ascb->scb->header.index), + ascb->scb->header.opcode); + } + + sb_opcode &= ~DL_PHY_MASK; + + switch (sb_opcode) { + case BYTES_DMAED: + ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __FUNCTION__, phy_id); + asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id); + break; + case PRIMITIVE_RECVD: + ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __FUNCTION__, + phy_id); + asd_primitive_rcvd_tasklet(ascb, dl, phy_id); + break; + case PHY_EVENT: + ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __FUNCTION__, phy_id); + asd_phy_event_tasklet(ascb, dl); + break; + case LINK_RESET_ERROR: + ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __FUNCTION__, + phy_id); + asd_link_reset_err_tasklet(ascb, dl, phy_id); + break; + case TIMER_EVENT: + ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n", + __FUNCTION__, phy_id); + asd_turn_led(asd_ha, phy_id, 0); + /* the device is gone */ + sas_phy_disconnected(sas_phy); + sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT); + break; + case REQ_TASK_ABORT: + ASD_DPRINTK("%s: phy%d: REQ_TASK_ABORT\n", __FUNCTION__, + phy_id); + break; + case REQ_DEVICE_RESET: + ASD_DPRINTK("%s: phy%d: REQ_DEVICE_RESET\n", __FUNCTION__, + phy_id); + break; + case SIGNAL_NCQ_ERROR: + ASD_DPRINTK("%s: phy%d: SIGNAL_NCQ_ERROR\n", __FUNCTION__, + phy_id); + break; + case CLEAR_NCQ_ERROR: + ASD_DPRINTK("%s: phy%d: CLEAR_NCQ_ERROR\n", __FUNCTION__, + phy_id); + break; + default: + ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__, + phy_id, sb_opcode); + ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n", + edb, dl->opcode); + ASD_DPRINTK("sb_opcode : 0x%x, phy_id: 0x%x\n", + sb_opcode, phy_id); + ASD_DPRINTK("escb: vaddr: 0x%p, " + "dma_handle: 0x%llx, next: 0x%llx, " + "index:%d, opcode:0x%02x\n", + ascb->dma_scb.vaddr, + (unsigned long long)ascb->dma_scb.dma_handle, + (unsigned long long) + le64_to_cpu(ascb->scb->header.next_scb), + le16_to_cpu(ascb->scb->header.index), + ascb->scb->header.opcode); + + break; + } + + asd_invalidate_edb(ascb, edb); +} + +int asd_init_post_escbs(struct asd_ha_struct *asd_ha) +{ + struct asd_seq_data *seq = &asd_ha->seq; + int i; + + for (i = 0; i < seq->num_escbs; i++) + seq->escb_arr[i]->tasklet_complete = escb_tasklet_complete; + + ASD_DPRINTK("posting %d escbs\n", i); + return asd_post_escb_list(asd_ha, seq->escb_arr[0], seq->num_escbs); +} + +/* ---------- CONTROL PHY ---------- */ + +#define CONTROL_PHY_STATUS (CURRENT_DEVICE_PRESENT | CURRENT_OOB_DONE \ + | CURRENT_SPINUP_HOLD | CURRENT_GTO_TIMEOUT \ + | CURRENT_OOB_ERROR) + +/** + * control_phy_tasklet_complete -- tasklet complete for CONTROL PHY ascb + * @ascb: pointer to an ascb + * @dl: pointer to the done list entry + * + * This function completes a CONTROL PHY scb and frees the ascb. + * A note on LEDs: + * - an LED blinks if there is IO through it, + * - if a device is connected to the LED, it is lit, + * - if no device is connected to the LED, it is dimmed (off).
+ */ +static void control_phy_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct scb *scb = ascb->scb; + struct control_phy *control_phy = &scb->control_phy; + u8 phy_id = control_phy->phy_id; + struct asd_phy *phy = &ascb->ha->phys[phy_id]; + + u8 status = dl->status_block[0]; + u8 oob_status = dl->status_block[1]; + u8 oob_mode = dl->status_block[2]; + /* u8 oob_signals= dl->status_block[3]; */ + + if (status != 0) { + ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n", + __FUNCTION__, phy_id, status); + goto out; + } + + switch (control_phy->sub_func) { + case DISABLE_PHY: + asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id); + asd_turn_led(asd_ha, phy_id, 0); + asd_control_led(asd_ha, phy_id, 0); + ASD_DPRINTK("%s: disable phy%d\n", __FUNCTION__, phy_id); + break; + + case ENABLE_PHY: + asd_control_led(asd_ha, phy_id, 1); + if (oob_status & CURRENT_OOB_DONE) { + asd_ha->hw_prof.enabled_phys |= (1 << phy_id); + get_lrate_mode(phy, oob_mode); + asd_turn_led(asd_ha, phy_id, 1); + ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n", + __FUNCTION__, phy_id,phy->sas_phy.linkrate, + phy->sas_phy.iproto); + } else if (oob_status & CURRENT_SPINUP_HOLD) { + asd_ha->hw_prof.enabled_phys |= (1 << phy_id); + asd_turn_led(asd_ha, phy_id, 1); + ASD_DPRINTK("%s: phy%d, spinup hold\n", __FUNCTION__, + phy_id); + } else if (oob_status & CURRENT_ERR_MASK) { + asd_turn_led(asd_ha, phy_id, 0); + ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n", + __FUNCTION__, phy_id, oob_status); + } else if (oob_status & (CURRENT_HOT_PLUG_CNCT + | CURRENT_DEVICE_PRESENT)) { + asd_ha->hw_prof.enabled_phys |= (1 << phy_id); + asd_turn_led(asd_ha, phy_id, 1); + ASD_DPRINTK("%s: phy%d: hot plug or device present\n", + __FUNCTION__, phy_id); + } else { + asd_ha->hw_prof.enabled_phys |= (1 << phy_id); + asd_turn_led(asd_ha, phy_id, 0); + ASD_DPRINTK("%s: phy%d: no device present: " + "oob_status:0x%x\n", + __FUNCTION__, phy_id, oob_status); + } + break; + case RELEASE_SPINUP_HOLD: + case PHY_NO_OP: + case EXECUTE_HARD_RESET: + ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __FUNCTION__, + phy_id, control_phy->sub_func); + /* XXX finish */ + break; + default: + ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __FUNCTION__, + phy_id, control_phy->sub_func); + break; + } +out: + asd_ascb_free(ascb); +} + +static inline void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd) +{ + /* disable all speeds, then enable defaults */ + *speed_mask = SAS_SPEED_60_DIS | SAS_SPEED_30_DIS | SAS_SPEED_15_DIS + | SATA_SPEED_30_DIS | SATA_SPEED_15_DIS; + + switch (pd->max_sas_lrate) { + case PHY_LINKRATE_6: + *speed_mask &= ~SAS_SPEED_60_DIS; + default: + case PHY_LINKRATE_3: + *speed_mask &= ~SAS_SPEED_30_DIS; + case PHY_LINKRATE_1_5: + *speed_mask &= ~SAS_SPEED_15_DIS; + } + + switch (pd->min_sas_lrate) { + case PHY_LINKRATE_6: + *speed_mask |= SAS_SPEED_30_DIS; + case PHY_LINKRATE_3: + *speed_mask |= SAS_SPEED_15_DIS; + default: + case PHY_LINKRATE_1_5: + /* nothing to do */ + ; + } + + switch (pd->max_sata_lrate) { + case PHY_LINKRATE_3: + *speed_mask &= ~SATA_SPEED_30_DIS; + default: + case PHY_LINKRATE_1_5: + *speed_mask &= ~SATA_SPEED_15_DIS; + } + + switch (pd->min_sata_lrate) { + case PHY_LINKRATE_3: + *speed_mask |= SATA_SPEED_15_DIS; + default: + case PHY_LINKRATE_1_5: + /* nothing to do */ + ; + } +} + +/** + * asd_build_control_phy -- build a CONTROL PHY SCB + * @ascb: pointer to an ascb + * @phy_id: phy id to control, integer + * @subfunc: subfunction, what to 
actually do to the phy + * + * This function builds a CONTROL PHY scb. No allocation of any kind + * is performed. @ascb is allocated with the list function. + * The caller can override the ascb->tasklet_complete to point + * to its own callback function. It must call asd_ascb_free() + * at its tasklet complete function. + * See the default implementation. + */ +void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc) +{ + struct asd_phy *phy = &ascb->ha->phys[phy_id]; + struct scb *scb = ascb->scb; + struct control_phy *control_phy = &scb->control_phy; + + scb->header.opcode = CONTROL_PHY; + control_phy->phy_id = (u8) phy_id; + control_phy->sub_func = subfunc; + + switch (subfunc) { + case EXECUTE_HARD_RESET: /* 0x81 */ + case ENABLE_PHY: /* 0x01 */ + /* decide hot plug delay */ + control_phy->hot_plug_delay = HOTPLUG_DELAY_TIMEOUT; + + /* decide speed mask */ + set_speed_mask(&control_phy->speed_mask, phy->phy_desc); + + /* initiator port settings are in the hi nibble */ + if (phy->sas_phy.role == PHY_ROLE_INITIATOR) + control_phy->port_type = SAS_PROTO_ALL << 4; + else if (phy->sas_phy.role == PHY_ROLE_TARGET) + control_phy->port_type = SAS_PROTO_ALL; + else + control_phy->port_type = + (SAS_PROTO_ALL << 4) | SAS_PROTO_ALL; + + /* link reset retries, this should be nominal */ + control_phy->link_reset_retries = 10; + + case RELEASE_SPINUP_HOLD: /* 0x02 */ + /* decide the func_mask */ + control_phy->func_mask = FUNCTION_MASK_DEFAULT; + if (phy->phy_desc->flags & ASD_SATA_SPINUP_HOLD) + control_phy->func_mask &= ~SPINUP_HOLD_DIS; + else + control_phy->func_mask |= SPINUP_HOLD_DIS; + } + + control_phy->conn_handle = cpu_to_le16(0xFFFF); + + ascb->tasklet_complete = control_phy_tasklet_complete; +} + +/* ---------- INITIATE LINK ADM TASK ---------- */ + +static void link_adm_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + u8 opcode = dl->opcode; + struct initiate_link_adm *link_adm = &ascb->scb->link_adm; + u8 phy_id = link_adm->phy_id; + + if (opcode != TC_NO_ERROR) { + asd_printk("phy%d: link adm task 0x%x completed with error " + "0x%x\n", phy_id, link_adm->sub_func, opcode); + } + ASD_DPRINTK("phy%d: link adm task 0x%x: 0x%x\n", + phy_id, link_adm->sub_func, opcode); + + asd_ascb_free(ascb); +} + +void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id, + u8 subfunc) +{ + struct scb *scb = ascb->scb; + struct initiate_link_adm *link_adm = &scb->link_adm; + + scb->header.opcode = INITIATE_LINK_ADM_TASK; + + link_adm->phy_id = phy_id; + link_adm->sub_func = subfunc; + link_adm->conn_handle = cpu_to_le16(0xFFFF); + + ascb->tasklet_complete = link_adm_tasklet_complete; +} + +/* ---------- SCB timer ---------- */ + +/** + * asd_ascb_timedout -- called when a pending SCB's timer has expired + * @data: unsigned long, a pointer to the ascb in question + * + * This is the default timeout function which does the minimum necessary. + * Upper layers can implement their own timeout function, say to free + * resources they have with this SCB, and then call this one at the + * end of their timeout function. To do this, one should initialize + * the ascb->timer.{function, data, expires} prior to calling the post + * function. The timer is started by the post function.
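+ *
+ * As a sketch only (my_timedout() and my_release() are hypothetical
+ * caller-side names, not part of this driver), such a wrapper could
+ * look like:
+ *
+ *	static void my_timedout(unsigned long data)
+ *	{
+ *		struct asd_ascb *ascb = (void *) data;
+ *
+ *		my_release(ascb);         // free caller-owned resources first
+ *		asd_ascb_timedout(data);  // then the common cleanup (frees ascb)
+ *	}
+ *
+ * with ascb->timer.function set to my_timedout before posting the SCB.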
+ */ +void asd_ascb_timedout(unsigned long data) +{ + struct asd_ascb *ascb = (void *) data; + struct asd_seq_data *seq = &ascb->ha->seq; + unsigned long flags; + + ASD_DPRINTK("scb:0x%x timed out\n", ascb->scb->header.opcode); + + spin_lock_irqsave(&seq->pend_q_lock, flags); + seq->pending--; + list_del_init(&ascb->list); + spin_unlock_irqrestore(&seq->pend_q_lock, flags); + + asd_ascb_free(ascb); +} + +/* ---------- CONTROL PHY ---------- */ + +/* Given the spec value, return a driver value. */ +static const int phy_func_table[] = { + [PHY_FUNC_NOP] = PHY_NO_OP, + [PHY_FUNC_LINK_RESET] = ENABLE_PHY, + [PHY_FUNC_HARD_RESET] = EXECUTE_HARD_RESET, + [PHY_FUNC_DISABLE] = DISABLE_PHY, + [PHY_FUNC_RELEASE_SPINUP_HOLD] = RELEASE_SPINUP_HOLD, +}; + +int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func) +{ + struct asd_ha_struct *asd_ha = phy->ha->lldd_ha; + struct asd_ascb *ascb; + int res = 1; + + if (func == PHY_FUNC_CLEAR_ERROR_LOG) + return -ENOSYS; + + ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); + if (!ascb) + return -ENOMEM; + + asd_build_control_phy(ascb, phy->id, phy_func_table[func]); + res = asd_post_ascb_list(asd_ha, ascb , 1); + if (res) + asd_ascb_free(ascb); + + return res; +} diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c new file mode 100644 index 0000000..eec1e0d --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_sds.c @@ -0,0 +1,1136 @@ +/* + * Aic94xx SAS/SATA driver access to shared data structures and memory + * maps. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include <linux/pci.h> +#include <linux/delay.h> + +#include "aic94xx.h" +#include "aic94xx_reg.h" + +/* ---------- OCM stuff ---------- */ + +struct asd_ocm_dir_ent { + u8 type; + u8 offs[3]; + u8 _r1; + u8 size[3]; +} __attribute__ ((packed)); + +struct asd_ocm_dir { + char sig[2]; + u8 _r1[2]; + u8 major; /* 0 */ + u8 minor; /* 0 */ + u8 _r2; + u8 num_de; + struct asd_ocm_dir_ent entry[15]; +} __attribute__ ((packed)); + +#define OCM_DE_OCM_DIR 0x00 +#define OCM_DE_WIN_DRVR 0x01 +#define OCM_DE_BIOS_CHIM 0x02 +#define OCM_DE_RAID_ENGN 0x03 +#define OCM_DE_BIOS_INTL 0x04 +#define OCM_DE_BIOS_CHIM_OSM 0x05 +#define OCM_DE_BIOS_CHIM_DYNAMIC 0x06 +#define OCM_DE_ADDC2C_RES0 0x07 +#define OCM_DE_ADDC2C_RES1 0x08 +#define OCM_DE_ADDC2C_RES2 0x09 +#define OCM_DE_ADDC2C_RES3 0x0A + +#define OCM_INIT_DIR_ENTRIES 5 +/*************************************************************************** +* OCM directory default +***************************************************************************/ +static struct asd_ocm_dir OCMDirInit = +{ + .sig = {0x4D, 0x4F}, /* signature */ + .num_de = OCM_INIT_DIR_ENTRIES, /* no. of directory entries */ +}; + +/*************************************************************************** +* OCM directory Entries default +***************************************************************************/ +static struct asd_ocm_dir_ent OCMDirEntriesInit[OCM_INIT_DIR_ENTRIES] = +{ + { + .type = (OCM_DE_ADDC2C_RES0), /* Entry type */ + .offs = {128}, /* Offset */ + .size = {0, 4}, /* size */ + }, + { + .type = (OCM_DE_ADDC2C_RES1), /* Entry type */ + .offs = {128, 4}, /* Offset */ + .size = {0, 4}, /* size */ + }, + { + .type = (OCM_DE_ADDC2C_RES2), /* Entry type */ + .offs = {128, 8}, /* Offset */ + .size = {0, 4}, /* size */ + }, + { + .type = (OCM_DE_ADDC2C_RES3), /* Entry type */ + .offs = {128, 12}, /* Offset */ + .size = {0, 4}, /* size */ + }, + { + .type = (OCM_DE_WIN_DRVR), /* Entry type */ + .offs = {128, 16}, /* Offset */ + .size = {128, 235, 1}, /* size */ + }, +}; + +struct asd_bios_chim_struct { + char sig[4]; + u8 major; /* 1 */ + u8 minor; /* 0 */ + u8 bios_major; + u8 bios_minor; + __le32 bios_build; + u8 flags; + u8 pci_slot; + __le16 ue_num; + __le16 ue_size; + u8 _r[14]; + /* The unit element array is right here. + */ +} __attribute__ ((packed)); + +/** + * asd_read_ocm_seg - read an on chip memory (OCM) segment + * @asd_ha: pointer to the host adapter structure + * @buffer: where to write the read data + * @offs: offset into OCM where to read from + * @size: how many bytes to read + * + * Return the number of bytes not read. Return 0 on success.
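+ *
+ * Callers below treat any non-zero return as a failure; a typical use
+ * looks roughly like (compare asd_read_ocm_dir()):
+ *
+ *	err = asd_read_ocm_seg(asd_ha, dir, offs, sizeof(*dir));
+ *	if (err)
+ *		return err;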
+ */ +static int asd_read_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer, + u32 offs, int size) +{ + u8 *p = buffer; + if (unlikely(asd_ha->iospace)) + asd_read_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size); + else { + for ( ; size > 0; size--, offs++, p++) + *p = asd_read_ocm_byte(asd_ha, offs); + } + return size; +} + +static int asd_read_ocm_dir(struct asd_ha_struct *asd_ha, + struct asd_ocm_dir *dir, u32 offs) +{ + int err = asd_read_ocm_seg(asd_ha, dir, offs, sizeof(*dir)); + if (err) { + ASD_DPRINTK("couldn't read ocm segment\n"); + return err; + } + + if (dir->sig[0] != 'M' || dir->sig[1] != 'O') { + ASD_DPRINTK("no valid dir signature(%c%c) at start of OCM\n", + dir->sig[0], dir->sig[1]); + return -ENOENT; + } + if (dir->major != 0) { + asd_printk("unsupported major version of ocm dir:0x%x\n", + dir->major); + return -ENOENT; + } + dir->num_de &= 0xf; + return 0; +} + +/** + * asd_write_ocm_seg - write an on chip memory (OCM) segment + * @asd_ha: pointer to the host adapter structure + * @buffer: where to read the write data + * @offs: offset into OCM to write to + * @size: how many bytes to write + * + * Return the number of bytes not written. Return 0 on success. + */ +static void asd_write_ocm_seg(struct asd_ha_struct *asd_ha, void *buffer, + u32 offs, int size) +{ + u8 *p = buffer; + if (unlikely(asd_ha->iospace)) + asd_write_reg_string(asd_ha, buffer, offs+OCM_BASE_ADDR, size); + else { + for ( ; size > 0; size--, offs++, p++) + asd_write_ocm_byte(asd_ha, offs, *p); + } + return; +} + +#define THREE_TO_NUM(X) ((X)[0] | ((X)[1] << 8) | ((X)[2] << 16)) + +static int asd_find_dir_entry(struct asd_ocm_dir *dir, u8 type, + u32 *offs, u32 *size) +{ + int i; + struct asd_ocm_dir_ent *ent; + + for (i = 0; i < dir->num_de; i++) { + if (dir->entry[i].type == type) + break; + } + if (i >= dir->num_de) + return -ENOENT; + ent = &dir->entry[i]; + *offs = (u32) THREE_TO_NUM(ent->offs); + *size = (u32) THREE_TO_NUM(ent->size); + return 0; +} + +#define OCM_BIOS_CHIM_DE 2 +#define BC_BIOS_PRESENT 1 + +static int asd_get_bios_chim(struct asd_ha_struct *asd_ha, + struct asd_ocm_dir *dir) +{ + int err; + struct asd_bios_chim_struct *bc_struct; + u32 offs, size; + + err = asd_find_dir_entry(dir, OCM_BIOS_CHIM_DE, &offs, &size); + if (err) { + ASD_DPRINTK("couldn't find BIOS_CHIM dir ent\n"); + goto out; + } + err = -ENOMEM; + bc_struct = kmalloc(sizeof(*bc_struct), GFP_KERNEL); + if (!bc_struct) { + asd_printk("no memory for bios_chim struct\n"); + goto out; + } + err = asd_read_ocm_seg(asd_ha, (void *)bc_struct, offs, + sizeof(*bc_struct)); + if (err) { + ASD_DPRINTK("couldn't read ocm segment\n"); + goto out2; + } + if (strncmp(bc_struct->sig, "SOIB", 4) + && strncmp(bc_struct->sig, "IPSA", 4)) { + ASD_DPRINTK("BIOS_CHIM entry has no valid sig(%c%c%c%c)\n", + bc_struct->sig[0], bc_struct->sig[1], + bc_struct->sig[2], bc_struct->sig[3]); + err = -ENOENT; + goto out2; + } + if (bc_struct->major != 1) { + asd_printk("BIOS_CHIM unsupported major version:0x%x\n", + bc_struct->major); + err = -ENOENT; + goto out2; + } + if (bc_struct->flags & BC_BIOS_PRESENT) { + asd_ha->hw_prof.bios.present = 1; + asd_ha->hw_prof.bios.maj = bc_struct->bios_major; + asd_ha->hw_prof.bios.min = bc_struct->bios_minor; + asd_ha->hw_prof.bios.bld = le32_to_cpu(bc_struct->bios_build); + ASD_DPRINTK("BIOS present (%d,%d), %d\n", + asd_ha->hw_prof.bios.maj, + asd_ha->hw_prof.bios.min, + asd_ha->hw_prof.bios.bld); + } + asd_ha->hw_prof.ue.num = le16_to_cpu(bc_struct->ue_num); + asd_ha->hw_prof.ue.size= 
le16_to_cpu(bc_struct->ue_size); + ASD_DPRINTK("ue num:%d, ue size:%d\n", asd_ha->hw_prof.ue.num, + asd_ha->hw_prof.ue.size); + size = asd_ha->hw_prof.ue.num * asd_ha->hw_prof.ue.size; + if (size > 0) { + err = -ENOMEM; + asd_ha->hw_prof.ue.area = kmalloc(size, GFP_KERNEL); + if (!asd_ha->hw_prof.ue.area) + goto out2; + err = asd_read_ocm_seg(asd_ha, (void *)asd_ha->hw_prof.ue.area, + offs + sizeof(*bc_struct), size); + if (err) { + kfree(asd_ha->hw_prof.ue.area); + asd_ha->hw_prof.ue.area = NULL; + asd_ha->hw_prof.ue.num = 0; + asd_ha->hw_prof.ue.size = 0; + ASD_DPRINTK("couldn't read ue entries(%d)\n", err); + } + } +out2: + kfree(bc_struct); +out: + return err; +} + +static void +asd_hwi_initialize_ocm_dir (struct asd_ha_struct *asd_ha) +{ + int i; + + /* Zero OCM */ + for (i = 0; i < OCM_MAX_SIZE; i += 4) + asd_write_ocm_dword(asd_ha, i, 0); + + /* Write Dir */ + asd_write_ocm_seg(asd_ha, &OCMDirInit, 0, + sizeof(struct asd_ocm_dir)); + + /* Write Dir Entries */ + for (i = 0; i < OCM_INIT_DIR_ENTRIES; i++) + asd_write_ocm_seg(asd_ha, &OCMDirEntriesInit[i], + sizeof(struct asd_ocm_dir) + + (i * sizeof(struct asd_ocm_dir_ent)) + , sizeof(struct asd_ocm_dir_ent)); + +} + +static int +asd_hwi_check_ocm_access (struct asd_ha_struct *asd_ha) +{ + struct pci_dev *pcidev = asd_ha->pcidev; + u32 reg; + int err = 0; + u32 v; + + /* check if OCM has been initialized by BIOS */ + reg = asd_read_reg_dword(asd_ha, EXSICNFGR); + + if (!(reg & OCMINITIALIZED)) { + err = pci_read_config_dword(pcidev, PCIC_INTRPT_STAT, &v); + if (err) { + asd_printk("couldn't access PCIC_INTRPT_STAT of %s\n", + pci_name(pcidev)); + goto out; + } + + printk(KERN_INFO "OCM is not initialized by BIOS, " + "reinitialize it and ignore it, current IntrptStatus " + "is 0x%x\n", v); + + if (v) + err = pci_write_config_dword(pcidev, + PCIC_INTRPT_STAT, v); + if (err) { + asd_printk("couldn't write PCIC_INTRPT_STAT of %s\n", + pci_name(pcidev)); + goto out; + } + + asd_hwi_initialize_ocm_dir(asd_ha); + + } +out: + return err; +} + +/** + * asd_read_ocm - read on chip memory (OCM) + * @asd_ha: pointer to the host adapter structure + */ +int asd_read_ocm(struct asd_ha_struct *asd_ha) +{ + int err; + struct asd_ocm_dir *dir; + + if (asd_hwi_check_ocm_access(asd_ha)) + return -1; + + dir = kmalloc(sizeof(*dir), GFP_KERNEL); + if (!dir) { + asd_printk("no memory for ocm dir\n"); + return -ENOMEM; + } + + err = asd_read_ocm_dir(asd_ha, dir, 0); + if (err) + goto out; + + err = asd_get_bios_chim(asd_ha, dir); +out: + kfree(dir); + return err; +} + +/* ---------- FLASH stuff ---------- */ + +#define FLASH_RESET 0xF0 +#define FLASH_MANUF_AMD 1 + +#define FLASH_SIZE 0x200000 +#define FLASH_DIR_COOKIE "*** ADAPTEC FLASH DIRECTORY *** " +#define FLASH_NEXT_ENTRY_OFFS 0x2000 +#define FLASH_MAX_DIR_ENTRIES 32 + +#define FLASH_DE_TYPE_MASK 0x3FFFFFFF +#define FLASH_DE_MS 0x120 +#define FLASH_DE_CTRL_A_USER 0xE0 + +struct asd_flash_de { + __le32 type; + __le32 offs; + __le32 pad_size; + __le32 image_size; + __le32 chksum; + u8 _r[12]; + u8 version[32]; +} __attribute__ ((packed)); + +struct asd_flash_dir { + u8 cookie[32]; + __le32 rev; /* 2 */ + __le32 chksum; + __le32 chksum_antidote; + __le32 bld; + u8 bld_id[32]; /* build id data */ + u8 ver_data[32]; /* date and time of build */ + __le32 ae_mask; + __le32 v_mask; + __le32 oc_mask; + u8 _r[20]; + struct asd_flash_de dir_entry[FLASH_MAX_DIR_ENTRIES]; +} __attribute__ ((packed)); + +struct asd_manuf_sec { + char sig[2]; /* 'S', 'M' */ + u16 offs_next; + u8 maj; /* 0 */ + u8 min; /* 0 */ + u16 
chksum; + u16 size; + u8 _r[6]; + u8 sas_addr[SAS_ADDR_SIZE]; + u8 pcba_sn[ASD_PCBA_SN_SIZE]; + /* Here start the other segments */ + u8 linked_list[0]; +} __attribute__ ((packed)); + +struct asd_manuf_phy_desc { + u8 state; /* low 4 bits */ +#define MS_PHY_STATE_ENABLEABLE 0 +#define MS_PHY_STATE_REPORTED 1 +#define MS_PHY_STATE_HIDDEN 2 + u8 phy_id; + u16 _r; + u8 phy_control_0; /* mode 5 reg 0x160 */ + u8 phy_control_1; /* mode 5 reg 0x161 */ + u8 phy_control_2; /* mode 5 reg 0x162 */ + u8 phy_control_3; /* mode 5 reg 0x163 */ +} __attribute__ ((packed)); + +struct asd_manuf_phy_param { + char sig[2]; /* 'P', 'M' */ + u16 next; + u8 maj; /* 0 */ + u8 min; /* 2 */ + u8 num_phy_desc; /* 8 */ + u8 phy_desc_size; /* 8 */ + u8 _r[3]; + u8 usage_model_id; + u32 _r2; + struct asd_manuf_phy_desc phy_desc[ASD_MAX_PHYS]; +} __attribute__ ((packed)); + +#if 0 +static const char *asd_sb_type[] = { + "unknown", + "SGPIO", + [2 ... 0x7F] = "unknown", + [0x80] = "ADPT_I2C", + [0x81 ... 0xFF] = "VENDOR_UNIQUExx" +}; +#endif + +struct asd_ms_sb_desc { + u8 type; + u8 node_desc_index; + u8 conn_desc_index; + u8 _recvd[0]; +} __attribute__ ((packed)); + +#if 0 +static const char *asd_conn_type[] = { + [0 ... 7] = "unknown", + "SFF8470", + "SFF8482", + "SFF8484", + [0x80] = "PCIX_DAUGHTER0", + [0x81] = "SAS_DAUGHTER0", + [0x82 ... 0xFF] = "VENDOR_UNIQUExx" +}; + +static const char *asd_conn_location[] = { + "unknown", + "internal", + "external", + "board_to_board", +}; +#endif + +struct asd_ms_conn_desc { + u8 type; + u8 location; + u8 num_sideband_desc; + u8 size_sideband_desc; + u32 _resvd; + u8 name[16]; + struct asd_ms_sb_desc sb_desc[0]; +} __attribute__ ((packed)); + +struct asd_nd_phy_desc { + u8 vp_attch_type; + u8 attch_specific[0]; +} __attribute__ ((packed)); + +#if 0 +static const char *asd_node_type[] = { + "IOP", + "IO_CONTROLLER", + "EXPANDER", + "PORT_MULTIPLIER", + "PORT_MULTIPLEXER", + "MULTI_DROP_I2C_BUS", +}; +#endif + +struct asd_ms_node_desc { + u8 type; + u8 num_phy_desc; + u8 size_phy_desc; + u8 _resvd; + u8 name[16]; + struct asd_nd_phy_desc phy_desc[0]; +} __attribute__ ((packed)); + +struct asd_ms_conn_map { + char sig[2]; /* 'M', 'C' */ + __le16 next; + u8 maj; /* 0 */ + u8 min; /* 0 */ + __le16 cm_size; /* size of this struct */ + u8 num_conn; + u8 conn_size; + u8 num_nodes; + u8 usage_model_id; + u32 _resvd; + struct asd_ms_conn_desc conn_desc[0]; + struct asd_ms_node_desc node_desc[0]; +} __attribute__ ((packed)); + +struct asd_ctrla_phy_entry { + u8 sas_addr[SAS_ADDR_SIZE]; + u8 sas_link_rates; /* max in hi bits, min in low bits */ + u8 flags; + u8 sata_link_rates; + u8 _r[5]; +} __attribute__ ((packed)); + +struct asd_ctrla_phy_settings { + u8 id0; /* P'h'y */ + u8 _r; + u16 next; + u8 num_phys; /* number of PHYs in the PCI function */ + u8 _r2[3]; + struct asd_ctrla_phy_entry phy_ent[ASD_MAX_PHYS]; +} __attribute__ ((packed)); + +struct asd_ll_el { + u8 id0; + u8 id1; + __le16 next; + u8 something_here[0]; +} __attribute__ ((packed)); + +static int asd_poll_flash(struct asd_ha_struct *asd_ha) +{ + int c; + u8 d; + + for (c = 5000; c > 0; c--) { + d = asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar); + d ^= asd_read_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar); + if (!d) + return 0; + udelay(5); + } + return -ENOENT; +} + +static int asd_reset_flash(struct asd_ha_struct *asd_ha) +{ + int err; + + err = asd_poll_flash(asd_ha); + if (err) + return err; + asd_write_reg_byte(asd_ha, asd_ha->hw_prof.flash.bar, FLASH_RESET); + err = asd_poll_flash(asd_ha); + + return err; +} + 
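The manufacturing-sector sub-segments defined above (the 'S','M' sector header, the 'P','M' phy parameters, the 'M','C' connector map, the 'h' CTRL-A phy settings) are chained by 16-bit offsets taken from the start of the sector, which is how the driver later locates them by signature. As a rough illustration of that lookup, here is a minimal, self-contained user-space sketch; the names (ll_el, find_ll_by_id) and the termination convention are illustrative only and are not the driver's own code:

#include <stdint.h>
#include <stddef.h>

/* Illustrative stand-in for the asd_ll_el layout: two id bytes followed
 * by a little-endian 16-bit offset of the next element, measured from
 * the base of the sector buffer. */
struct ll_el {
	uint8_t id0;
	uint8_t id1;
	uint8_t next[2];	/* little-endian offset from the base */
};

static void *find_ll_by_id(void *base, uint8_t id0, uint8_t id1)
{
	struct ll_el *el = base;

	do {
		/* id1 == 0xFF means "match on id0 only". */
		if (el->id0 == id0 && (id1 == 0xFF || el->id1 == id1))
			return el;
		el = (struct ll_el *)((uint8_t *)base +
				      (el->next[0] | (el->next[1] << 8)));
	} while ((void *)el != base);	/* a zero offset wraps back to base */

	return NULL;	/* walked the whole chain without a match */
}

With a buffer holding the manufacturing sector, find_ll_by_id(buf, 'P', 'M') would land on the phy-parameter segment and find_ll_by_id(buf, 'h', 0xFF) on the CTRL-A phy settings, mirroring how the lookups are used further down in this file.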
+static inline int asd_read_flash_seg(struct asd_ha_struct *asd_ha, + void *buffer, u32 offs, int size) +{ + asd_read_reg_string(asd_ha, buffer, asd_ha->hw_prof.flash.bar+offs, + size); + return 0; +} + +/** + * asd_find_flash_dir - finds and reads the flash directory + * @asd_ha: pointer to the host adapter structure + * @flash_dir: pointer to flash directory structure + * + * If found, the flash directory segment will be copied to + * @flash_dir. Return 1 if found, 0 if not. + */ +static int asd_find_flash_dir(struct asd_ha_struct *asd_ha, + struct asd_flash_dir *flash_dir) +{ + u32 v; + for (v = 0; v < FLASH_SIZE; v += FLASH_NEXT_ENTRY_OFFS) { + asd_read_flash_seg(asd_ha, flash_dir, v, + sizeof(FLASH_DIR_COOKIE)-1); + if (memcmp(flash_dir->cookie, FLASH_DIR_COOKIE, + sizeof(FLASH_DIR_COOKIE)-1) == 0) { + asd_ha->hw_prof.flash.dir_offs = v; + asd_read_flash_seg(asd_ha, flash_dir, v, + sizeof(*flash_dir)); + return 1; + } + } + return 0; +} + +static int asd_flash_getid(struct asd_ha_struct *asd_ha) +{ + int err = 0; + u32 reg, inc; + + reg = asd_read_reg_dword(asd_ha, EXSICNFGR); + + if (!(reg & FLASHEX)) { + ASD_DPRINTK("flash doesn't exist\n"); + return -ENOENT; + } + if (pci_read_config_dword(asd_ha->pcidev, PCI_CONF_FLSH_BAR, + &asd_ha->hw_prof.flash.bar)) { + asd_printk("couldn't read PCI_CONF_FLSH_BAR of %s\n", + pci_name(asd_ha->pcidev)); + return -ENOENT; + } + asd_ha->hw_prof.flash.present = 1; + asd_ha->hw_prof.flash.wide = reg & FLASHW ? 1 : 0; + err = asd_reset_flash(asd_ha); + if (err) { + ASD_DPRINTK("couldn't reset flash(%d)\n", err); + return err; + } + /* Get flash info. This would most likely be AMD Am29LV family flash. + * First try the sequence for word mode. It is the same as for + * 008B (byte mode only), 160B (word mode) and 800D (word mode). + */ + reg = asd_ha->hw_prof.flash.bar; + inc = asd_ha->hw_prof.flash.wide ? 2 : 1; + asd_write_reg_byte(asd_ha, reg + 0x555, 0xAA); + asd_write_reg_byte(asd_ha, reg + 0x2AA, 0x55); + asd_write_reg_byte(asd_ha, reg + 0x555, 0x90); + asd_ha->hw_prof.flash.manuf = asd_read_reg_byte(asd_ha, reg); + asd_ha->hw_prof.flash.dev_id= asd_read_reg_byte(asd_ha,reg+inc); + asd_ha->hw_prof.flash.sec_prot = asd_read_reg_byte(asd_ha,reg+inc+inc); + /* Get out of autoselect mode. */ + err = asd_reset_flash(asd_ha); + + if (asd_ha->hw_prof.flash.manuf == FLASH_MANUF_AMD) { + ASD_DPRINTK("0Found FLASH(%d) manuf:%d, dev_id:0x%x, " + "sec_prot:%d\n", + asd_ha->hw_prof.flash.wide ? 16 : 8, + asd_ha->hw_prof.flash.manuf, + asd_ha->hw_prof.flash.dev_id, + asd_ha->hw_prof.flash.sec_prot); + return 0; + } + + /* Ok, try the sequence for byte mode of 160B and 800D. + * We may actually never need this. + */ + asd_write_reg_byte(asd_ha, reg + 0xAAA, 0xAA); + asd_write_reg_byte(asd_ha, reg + 0x555, 0x55); + asd_write_reg_byte(asd_ha, reg + 0xAAA, 0x90); + asd_ha->hw_prof.flash.manuf = asd_read_reg_byte(asd_ha, reg); + asd_ha->hw_prof.flash.dev_id = asd_read_reg_byte(asd_ha, reg + 2); + asd_ha->hw_prof.flash.sec_prot = asd_read_reg_byte(asd_ha, reg + 4); + err = asd_reset_flash(asd_ha); + + if (asd_ha->hw_prof.flash.manuf == FLASH_MANUF_AMD) { + ASD_DPRINTK("1Found FLASH(%d) manuf:%d, dev_id:0x%x, " + "sec_prot:%d\n", + asd_ha->hw_prof.flash.wide ? 
16 : 8, + asd_ha->hw_prof.flash.manuf, + asd_ha->hw_prof.flash.dev_id, + asd_ha->hw_prof.flash.sec_prot); + return 0; + } + + return -ENOENT; +} + +static u16 asd_calc_flash_chksum(u16 *p, int size) +{ + u16 chksum = 0; + + while (size-- > 0) + chksum += *p++; + + return chksum; +} + + +static int asd_find_flash_de(struct asd_flash_dir *flash_dir, u32 entry_type, + u32 *offs, u32 *size) +{ + int i; + struct asd_flash_de *de; + + for (i = 0; i < FLASH_MAX_DIR_ENTRIES; i++) { + u32 type = le32_to_cpu(flash_dir->dir_entry[i].type); + + type &= FLASH_DE_TYPE_MASK; + if (type == entry_type) + break; + } + if (i >= FLASH_MAX_DIR_ENTRIES) + return -ENOENT; + de = &flash_dir->dir_entry[i]; + *offs = le32_to_cpu(de->offs); + *size = le32_to_cpu(de->pad_size); + return 0; +} + +static int asd_validate_ms(struct asd_manuf_sec *ms) +{ + if (ms->sig[0] != 'S' || ms->sig[1] != 'M') { + ASD_DPRINTK("manuf sec: no valid sig(%c%c)\n", + ms->sig[0], ms->sig[1]); + return -ENOENT; + } + if (ms->maj != 0) { + asd_printk("unsupported manuf. sector major version:%x\n", + ms->maj); + return -ENOENT; + } + ms->offs_next = le16_to_cpu((__force __le16) ms->offs_next); + ms->chksum = le16_to_cpu((__force __le16) ms->chksum); + ms->size = le16_to_cpu((__force __le16) ms->size); + + if (asd_calc_flash_chksum((u16 *)ms, ms->size/2)) { + asd_printk("failed manuf sector checksum\n"); + } + + return 0; +} + +static int asd_ms_get_sas_addr(struct asd_ha_struct *asd_ha, + struct asd_manuf_sec *ms) +{ + memcpy(asd_ha->hw_prof.sas_addr, ms->sas_addr, SAS_ADDR_SIZE); + return 0; +} + +static int asd_ms_get_pcba_sn(struct asd_ha_struct *asd_ha, + struct asd_manuf_sec *ms) +{ + memcpy(asd_ha->hw_prof.pcba_sn, ms->pcba_sn, ASD_PCBA_SN_SIZE); + asd_ha->hw_prof.pcba_sn[ASD_PCBA_SN_SIZE] = '\0'; + return 0; +} + +/** + * asd_find_ll_by_id - find a linked list entry by its id + * @start: void pointer to the first element in the linked list + * @id0: the first byte of the id (offs 0) + * @id1: the second byte of the id (offs 1) + * + * @start has to be the _base_ element start, since the + * linked list entries' offsets are from this pointer. + * Some linked list entries use only the first id, in which case + * you can pass 0xFF for the second. + */ +static void *asd_find_ll_by_id(void * const start, const u8 id0, const u8 id1) +{ + struct asd_ll_el *el = start; + + do { + switch (id1) { + default: + if (el->id1 == id1) + case 0xFF: + if (el->id0 == id0) + return el; + } + el = start + le16_to_cpu(el->next); + } while (el != start); + + return NULL; +} + +/** + * asd_ms_get_phy_params - get phy parameters from the manufacturing sector + * @asd_ha: pointer to the host adapter structure + * @manuf_sec: pointer to the manufacturing sector + * + * The manufacturing sector also contains the linked list of sub-segments, + * since, when it was read, its size was taken from the flash directory, + * not from the structure size. + * + * HIDDEN phys do not count in the total count. REPORTED phys cannot + * be enabled but are reported and counted towards the total. + * ENABLEABLE phys are enabled by default and count towards the total. + * The absolute total phy number is ASD_MAX_PHYS. hw_prof->num_phys + * merely specifies the number of phys the host adapter decided to + * report. E.g., it is possible for phys 0, 1 and 2 to be HIDDEN, + * phys 3, 4 and 5 to be REPORTED and phys 6 and 7 to be ENABLEABLE. 
+ * In this case ASD_MAX_PHYS is 8, hw_prof->num_phys is 5, and only 2 + * are actually enabled (enabled by default, max number of phys + * enableable in this case). + */ +static int asd_ms_get_phy_params(struct asd_ha_struct *asd_ha, + struct asd_manuf_sec *manuf_sec) +{ + int i; + int en_phys = 0; + int rep_phys = 0; + struct asd_manuf_phy_param *phy_param; + struct asd_manuf_phy_param dflt_phy_param; + + phy_param = asd_find_ll_by_id(manuf_sec, 'P', 'M'); + if (!phy_param) { + ASD_DPRINTK("ms: no phy parameters found\n"); + ASD_DPRINTK("ms: Creating default phy parameters\n"); + dflt_phy_param.sig[0] = 'P'; + dflt_phy_param.sig[1] = 'M'; + dflt_phy_param.maj = 0; + dflt_phy_param.min = 2; + dflt_phy_param.num_phy_desc = 8; + dflt_phy_param.phy_desc_size = sizeof(struct asd_manuf_phy_desc); + for (i =0; i < ASD_MAX_PHYS; i++) { + dflt_phy_param.phy_desc[i].state = 0; + dflt_phy_param.phy_desc[i].phy_id = i; + dflt_phy_param.phy_desc[i].phy_control_0 = 0xf6; + dflt_phy_param.phy_desc[i].phy_control_1 = 0x10; + dflt_phy_param.phy_desc[i].phy_control_2 = 0x43; + dflt_phy_param.phy_desc[i].phy_control_3 = 0xeb; + } + + phy_param = &dflt_phy_param; + + } + + if (phy_param->maj != 0) { + asd_printk("unsupported manuf. phy param major version:0x%x\n", + phy_param->maj); + return -ENOENT; + } + + ASD_DPRINTK("ms: num_phy_desc: %d\n", phy_param->num_phy_desc); + asd_ha->hw_prof.enabled_phys = 0; + for (i = 0; i < phy_param->num_phy_desc; i++) { + struct asd_manuf_phy_desc *pd = &phy_param->phy_desc[i]; + switch (pd->state & 0xF) { + case MS_PHY_STATE_HIDDEN: + ASD_DPRINTK("ms: phy%d: HIDDEN\n", i); + continue; + case MS_PHY_STATE_REPORTED: + ASD_DPRINTK("ms: phy%d: REPORTED\n", i); + asd_ha->hw_prof.enabled_phys &= ~(1 << i); + rep_phys++; + continue; + case MS_PHY_STATE_ENABLEABLE: + ASD_DPRINTK("ms: phy%d: ENEBLEABLE\n", i); + asd_ha->hw_prof.enabled_phys |= (1 << i); + en_phys++; + break; + } + asd_ha->hw_prof.phy_desc[i].phy_control_0 = pd->phy_control_0; + asd_ha->hw_prof.phy_desc[i].phy_control_1 = pd->phy_control_1; + asd_ha->hw_prof.phy_desc[i].phy_control_2 = pd->phy_control_2; + asd_ha->hw_prof.phy_desc[i].phy_control_3 = pd->phy_control_3; + } + asd_ha->hw_prof.max_phys = rep_phys + en_phys; + asd_ha->hw_prof.num_phys = en_phys; + ASD_DPRINTK("ms: max_phys:0x%x, num_phys:0x%x\n", + asd_ha->hw_prof.max_phys, asd_ha->hw_prof.num_phys); + ASD_DPRINTK("ms: enabled_phys:0x%x\n", asd_ha->hw_prof.enabled_phys); + return 0; +} + +static int asd_ms_get_connector_map(struct asd_ha_struct *asd_ha, + struct asd_manuf_sec *manuf_sec) +{ + struct asd_ms_conn_map *cm; + + cm = asd_find_ll_by_id(manuf_sec, 'M', 'C'); + if (!cm) { + ASD_DPRINTK("ms: no connector map found\n"); + return 0; + } + + if (cm->maj != 0) { + ASD_DPRINTK("ms: unsupported: connector map major version 0x%x" + "\n", cm->maj); + return -ENOENT; + } + + /* XXX */ + + return 0; +} + + +/** + * asd_process_ms - find and extract information from the manufacturing sector + * @asd_ha: pointer to the host adapter structure + * @flash_dir: pointer to the flash directory + */ +static int asd_process_ms(struct asd_ha_struct *asd_ha, + struct asd_flash_dir *flash_dir) +{ + int err; + struct asd_manuf_sec *manuf_sec; + u32 offs, size; + + err = asd_find_flash_de(flash_dir, FLASH_DE_MS, &offs, &size); + if (err) { + ASD_DPRINTK("Couldn't find the manuf. 
sector\n"); + goto out; + } + + if (size == 0) + goto out; + + err = -ENOMEM; + manuf_sec = kmalloc(size, GFP_KERNEL); + if (!manuf_sec) { + ASD_DPRINTK("no mem for manuf sector\n"); + goto out; + } + + err = asd_read_flash_seg(asd_ha, (void *)manuf_sec, offs, size); + if (err) { + ASD_DPRINTK("couldn't read manuf sector at 0x%x, size 0x%x\n", + offs, size); + goto out2; + } + + err = asd_validate_ms(manuf_sec); + if (err) { + ASD_DPRINTK("couldn't validate manuf sector\n"); + goto out2; + } + + err = asd_ms_get_sas_addr(asd_ha, manuf_sec); + if (err) { + ASD_DPRINTK("couldn't read the SAS_ADDR\n"); + goto out2; + } + ASD_DPRINTK("manuf sect SAS_ADDR %llx\n", + SAS_ADDR(asd_ha->hw_prof.sas_addr)); + + err = asd_ms_get_pcba_sn(asd_ha, manuf_sec); + if (err) { + ASD_DPRINTK("couldn't read the PCBA SN\n"); + goto out2; + } + ASD_DPRINTK("manuf sect PCBA SN %s\n", asd_ha->hw_prof.pcba_sn); + + err = asd_ms_get_phy_params(asd_ha, manuf_sec); + if (err) { + ASD_DPRINTK("ms: couldn't get phy parameters\n"); + goto out2; + } + + err = asd_ms_get_connector_map(asd_ha, manuf_sec); + if (err) { + ASD_DPRINTK("ms: couldn't get connector map\n"); + goto out2; + } + +out2: + kfree(manuf_sec); +out: + return err; +} + +static int asd_process_ctrla_phy_settings(struct asd_ha_struct *asd_ha, + struct asd_ctrla_phy_settings *ps) +{ + int i; + for (i = 0; i < ps->num_phys; i++) { + struct asd_ctrla_phy_entry *pe = &ps->phy_ent[i]; + + if (!PHY_ENABLED(asd_ha, i)) + continue; + if (*(u64 *)pe->sas_addr == 0) { + asd_ha->hw_prof.enabled_phys &= ~(1 << i); + continue; + } + /* This is the SAS address which should be sent in IDENTIFY. */ + memcpy(asd_ha->hw_prof.phy_desc[i].sas_addr, pe->sas_addr, + SAS_ADDR_SIZE); + asd_ha->hw_prof.phy_desc[i].max_sas_lrate = + (pe->sas_link_rates & 0xF0) >> 4; + asd_ha->hw_prof.phy_desc[i].min_sas_lrate = + (pe->sas_link_rates & 0x0F); + asd_ha->hw_prof.phy_desc[i].max_sata_lrate = + (pe->sata_link_rates & 0xF0) >> 4; + asd_ha->hw_prof.phy_desc[i].min_sata_lrate = + (pe->sata_link_rates & 0x0F); + asd_ha->hw_prof.phy_desc[i].flags = pe->flags; + ASD_DPRINTK("ctrla: phy%d: sas_addr: %llx, sas rate:0x%x-0x%x," + " sata rate:0x%x-0x%x, flags:0x%x\n", + i, + SAS_ADDR(asd_ha->hw_prof.phy_desc[i].sas_addr), + asd_ha->hw_prof.phy_desc[i].max_sas_lrate, + asd_ha->hw_prof.phy_desc[i].min_sas_lrate, + asd_ha->hw_prof.phy_desc[i].max_sata_lrate, + asd_ha->hw_prof.phy_desc[i].min_sata_lrate, + asd_ha->hw_prof.phy_desc[i].flags); + } + + return 0; +} + +/** + * asd_process_ctrl_a_user - process CTRL-A user settings + * @asd_ha: pointer to the host adapter structure + * @flash_dir: pointer to the flash directory + */ +static int asd_process_ctrl_a_user(struct asd_ha_struct *asd_ha, + struct asd_flash_dir *flash_dir) +{ + int err, i; + u32 offs, size; + struct asd_ll_el *el; + struct asd_ctrla_phy_settings *ps; + struct asd_ctrla_phy_settings dflt_ps; + + err = asd_find_flash_de(flash_dir, FLASH_DE_CTRL_A_USER, &offs, &size); + if (err) { + ASD_DPRINTK("couldn't find CTRL-A user settings section\n"); + ASD_DPRINTK("Creating default CTRL-A user settings section\n"); + + dflt_ps.id0 = 'h'; + dflt_ps.num_phys = 8; + for (i =0; i < ASD_MAX_PHYS; i++) { + memcpy(dflt_ps.phy_ent[i].sas_addr, + asd_ha->hw_prof.sas_addr, SAS_ADDR_SIZE); + dflt_ps.phy_ent[i].sas_link_rates = 0x98; + dflt_ps.phy_ent[i].flags = 0x0; + dflt_ps.phy_ent[i].sata_link_rates = 0x0; + } + + size = sizeof(struct asd_ctrla_phy_settings); + ps = &dflt_ps; + } + + if (size == 0) + goto out; + + err = -ENOMEM; + el = kmalloc(size, 
GFP_KERNEL); + if (!el) { + ASD_DPRINTK("no mem for ctrla user settings section\n"); + goto out; + } + + err = asd_read_flash_seg(asd_ha, (void *)el, offs, size); + if (err) { + ASD_DPRINTK("couldn't read ctrla phy settings section\n"); + goto out2; + } + + err = -ENOENT; + ps = asd_find_ll_by_id(el, 'h', 0xFF); + if (!ps) { + ASD_DPRINTK("couldn't find ctrla phy settings struct\n"); + goto out2; + } + + err = asd_process_ctrla_phy_settings(asd_ha, ps); + if (err) { + ASD_DPRINTK("couldn't process ctrla phy settings\n"); + goto out2; + } +out2: + kfree(el); +out: + return err; +} + +/** + * asd_read_flash - read flash memory + * @asd_ha: pointer to the host adapter structure + */ +int asd_read_flash(struct asd_ha_struct *asd_ha) +{ + int err; + struct asd_flash_dir *flash_dir; + + err = asd_flash_getid(asd_ha); + if (err) + return err; + + flash_dir = kmalloc(sizeof(*flash_dir), GFP_KERNEL); + if (!flash_dir) + return -ENOMEM; + + err = -ENOENT; + if (!asd_find_flash_dir(asd_ha, flash_dir)) { + ASD_DPRINTK("couldn't find flash directory\n"); + goto out; + } + + if (le32_to_cpu(flash_dir->rev) != 2) { + asd_printk("unsupported flash dir version:0x%x\n", + le32_to_cpu(flash_dir->rev)); + goto out; + } + + err = asd_process_ms(asd_ha, flash_dir); + if (err) { + ASD_DPRINTK("couldn't process manuf sector settings\n"); + goto out; + } + + err = asd_process_ctrl_a_user(asd_ha, flash_dir); + if (err) { + ASD_DPRINTK("couldn't process CTRL-A user settings\n"); + goto out; + } + +out: + kfree(flash_dir); + return err; +} diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c new file mode 100644 index 0000000..9050c6f3f --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_seq.c @@ -0,0 +1,1401 @@ +/* + * Aic94xx SAS/SATA driver sequencer interface. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * Parts of this code adapted from David Chaw's adp94xx_seq.c. + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/firmware.h> +#include "aic94xx_reg.h" +#include "aic94xx_hwi.h" + +#include "aic94xx_seq.h" +#include "aic94xx_dump.h" + +/* It takes no more than 0.05 us for an instruction + * to complete. So waiting for 1 us should be more than + * plenty. 
+ */ +#define PAUSE_DELAY 1 +#define PAUSE_TRIES 1000 + +static const struct firmware *sequencer_fw; +static const char *sequencer_version; +static u16 cseq_vecs[CSEQ_NUM_VECS], lseq_vecs[LSEQ_NUM_VECS], mode2_task, + cseq_idle_loop, lseq_idle_loop; +static u8 *cseq_code, *lseq_code; +static u32 cseq_code_size, lseq_code_size; + +static u16 first_scb_site_no = 0xFFFF; +static u16 last_scb_site_no; + +/* ---------- Pause/Unpause CSEQ/LSEQ ---------- */ + +/** + * asd_pause_cseq - pause the central sequencer + * @asd_ha: pointer to host adapter structure + * + * Return 0 on success, negative on failure. + */ +int asd_pause_cseq(struct asd_ha_struct *asd_ha) +{ + int count = PAUSE_TRIES; + u32 arp2ctl; + + arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); + if (arp2ctl & PAUSED) + return 0; + + asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl | EPAUSE); + do { + arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); + if (arp2ctl & PAUSED) + return 0; + udelay(PAUSE_DELAY); + } while (--count > 0); + + ASD_DPRINTK("couldn't pause CSEQ\n"); + return -1; +} + +/** + * asd_unpause_cseq - unpause the central sequencer. + * @asd_ha: pointer to host adapter structure. + * + * Return 0 on success, negative on error. + */ +int asd_unpause_cseq(struct asd_ha_struct *asd_ha) +{ + u32 arp2ctl; + int count = PAUSE_TRIES; + + arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); + if (!(arp2ctl & PAUSED)) + return 0; + + asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl & ~EPAUSE); + do { + arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL); + if (!(arp2ctl & PAUSED)) + return 0; + udelay(PAUSE_DELAY); + } while (--count > 0); + + ASD_DPRINTK("couldn't unpause the CSEQ\n"); + return -1; +} + +/** + * asd_seq_pause_lseq - pause a link sequencer + * @asd_ha: pointer to a host adapter structure + * @lseq: link sequencer of interest + * + * Return 0 on success, negative on error. + */ +static inline int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq) +{ + u32 arp2ctl; + int count = PAUSE_TRIES; + + arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); + if (arp2ctl & PAUSED) + return 0; + + asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl | EPAUSE); + do { + arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); + if (arp2ctl & PAUSED) + return 0; + udelay(PAUSE_DELAY); + } while (--count > 0); + + ASD_DPRINTK("couldn't pause LSEQ %d\n", lseq); + return -1; +} + +/** + * asd_pause_lseq - pause the link sequencer(s) + * @asd_ha: pointer to host adapter structure + * @lseq_mask: mask of link sequencers of interest + * + * Return 0 on success, negative on failure. + */ +int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask) +{ + int lseq; + int err = 0; + + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + err = asd_seq_pause_lseq(asd_ha, lseq); + if (err) + return err; + } + + return err; +} + +/** + * asd_seq_unpause_lseq - unpause a link sequencer + * @asd_ha: pointer to host adapter structure + * @lseq: link sequencer of interest + * + * Return 0 on success, negative on error. 
+ */ +static inline int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq) +{ + u32 arp2ctl; + int count = PAUSE_TRIES; + + arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); + if (!(arp2ctl & PAUSED)) + return 0; + + asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl & ~EPAUSE); + do { + arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq)); + if (!(arp2ctl & PAUSED)) + return 0; + udelay(PAUSE_DELAY); + } while (--count > 0); + + ASD_DPRINTK("couldn't unpause LSEQ %d\n", lseq); + return 0; +} + + +/** + * asd_unpause_lseq - unpause the link sequencer(s) + * @asd_ha: pointer to host adapter structure + * @lseq_mask: mask of link sequencers of interest + * + * Return 0 on success, negative on failure. + */ +int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask) +{ + int lseq; + int err = 0; + + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + err = asd_seq_unpause_lseq(asd_ha, lseq); + if (err) + return err; + } + + return err; +} + +/* ---------- Downloading CSEQ/LSEQ microcode ---------- */ + +static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog, + u32 size) +{ + u32 addr = CSEQ_RAM_REG_BASE_ADR; + const u32 *prog = (u32 *) _prog; + u32 i; + + for (i = 0; i < size; i += 4, prog++, addr += 4) { + u32 val = asd_read_reg_dword(asd_ha, addr); + + if (le32_to_cpu(*prog) != val) { + asd_printk("%s: cseq verify failed at %u " + "read:0x%x, wanted:0x%x\n", + pci_name(asd_ha->pcidev), + i, val, le32_to_cpu(*prog)); + return -1; + } + } + ASD_DPRINTK("verified %d bytes, passed\n", size); + return 0; +} + +/** + * asd_verify_lseq - verify the microcode of a link sequencer + * @asd_ha: pointer to host adapter structure + * @_prog: pointer to the microcode + * @size: size of the microcode in bytes + * @lseq: link sequencer of interest + * + * The link sequencer code is accessed in 4 KB pages, which are selected + * by setting LmRAMPAGE (bits 8 and 9) of the LmBISTCTL1 register. + * The 10 KB LSEQm instruction code is mapped, page at a time, at + * LmSEQRAM address. + */ +static int asd_verify_lseq(struct asd_ha_struct *asd_ha, const u8 *_prog, + u32 size, int lseq) +{ +#define LSEQ_CODEPAGE_SIZE 4096 + int pages = (size + LSEQ_CODEPAGE_SIZE - 1) / LSEQ_CODEPAGE_SIZE; + u32 page; + const u32 *prog = (u32 *) _prog; + + for (page = 0; page < pages; page++) { + u32 i; + + asd_write_reg_dword(asd_ha, LmBISTCTL1(lseq), + page << LmRAMPAGE_LSHIFT); + for (i = 0; size > 0 && i < LSEQ_CODEPAGE_SIZE; + i += 4, prog++, size-=4) { + + u32 val = asd_read_reg_dword(asd_ha, LmSEQRAM(lseq)+i); + + if (le32_to_cpu(*prog) != val) { + asd_printk("%s: LSEQ%d verify failed " + "page:%d, offs:%d\n", + pci_name(asd_ha->pcidev), + lseq, page, i); + return -1; + } + } + } + ASD_DPRINTK("LSEQ%d verified %d bytes, passed\n", lseq, + (int)((u8 *)prog-_prog)); + return 0; +} + +/** + * asd_verify_seq -- verify CSEQ/LSEQ microcode + * @asd_ha: pointer to host adapter structure + * @prog: pointer to microcode + * @size: size of the microcode + * @lseq_mask: if 0, verify CSEQ microcode, else mask of LSEQs of interest + * + * Return 0 if microcode is correct, negative on mismatch. 
+ */ +static int asd_verify_seq(struct asd_ha_struct *asd_ha, const u8 *prog, + u32 size, u8 lseq_mask) +{ + if (lseq_mask == 0) + return asd_verify_cseq(asd_ha, prog, size); + else { + int lseq, err; + + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + err = asd_verify_lseq(asd_ha, prog, size, lseq); + if (err) + return err; + } + } + + return 0; +} +#define ASD_DMA_MODE_DOWNLOAD +#ifdef ASD_DMA_MODE_DOWNLOAD +/* This is the size of the CSEQ Mapped instruction page */ +#define MAX_DMA_OVLY_COUNT ((1U << 14)-1) +static int asd_download_seq(struct asd_ha_struct *asd_ha, + const u8 * const prog, u32 size, u8 lseq_mask) +{ + u32 comstaten; + u32 reg; + int page; + const int pages = (size + MAX_DMA_OVLY_COUNT - 1) / MAX_DMA_OVLY_COUNT; + struct asd_dma_tok *token; + int err = 0; + + if (size % 4) { + asd_printk("sequencer program not multiple of 4\n"); + return -1; + } + + asd_pause_cseq(asd_ha); + asd_pause_lseq(asd_ha, 0xFF); + + /* save, disable and clear interrupts */ + comstaten = asd_read_reg_dword(asd_ha, COMSTATEN); + asd_write_reg_dword(asd_ha, COMSTATEN, 0); + asd_write_reg_dword(asd_ha, COMSTAT, COMSTAT_MASK); + + asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN); + asd_write_reg_dword(asd_ha, CHIMINT, CHIMINT_MASK); + + token = asd_alloc_coherent(asd_ha, MAX_DMA_OVLY_COUNT, GFP_KERNEL); + if (!token) { + asd_printk("out of memory for dma SEQ download\n"); + err = -ENOMEM; + goto out; + } + ASD_DPRINTK("dma-ing %d bytes\n", size); + + for (page = 0; page < pages; page++) { + int i; + u32 left = min(size-page*MAX_DMA_OVLY_COUNT, + (u32)MAX_DMA_OVLY_COUNT); + + memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left); + asd_write_reg_addr(asd_ha, OVLYDMAADR, token->dma_handle); + asd_write_reg_dword(asd_ha, OVLYDMACNT, left); + reg = !page ? RESETOVLYDMA : 0; + reg |= (STARTOVLYDMA | OVLYHALTERR); + reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ); + /* Start DMA. */ + asd_write_reg_dword(asd_ha, OVLYDMACTL, reg); + + for (i = PAUSE_TRIES*100; i > 0; i--) { + u32 dmadone = asd_read_reg_dword(asd_ha, OVLYDMACTL); + if (!(dmadone & OVLYDMAACT)) + break; + udelay(PAUSE_DELAY); + } + } + + reg = asd_read_reg_dword(asd_ha, COMSTAT); + if (!(reg & OVLYDMADONE) || (reg & OVLYERR) + || (asd_read_reg_dword(asd_ha, CHIMINT) & DEVEXCEPT_MASK)){ + asd_printk("%s: error DMA-ing sequencer code\n", + pci_name(asd_ha->pcidev)); + err = -ENODEV; + } + + asd_free_coherent(asd_ha, token); + out: + asd_write_reg_dword(asd_ha, COMSTATEN, comstaten); + + return err ? : asd_verify_seq(asd_ha, prog, size, lseq_mask); +} +#else /* ASD_DMA_MODE_DOWNLOAD */ +static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 *_prog, + u32 size, u8 lseq_mask) +{ + int i; + u32 reg = 0; + const u32 *prog = (u32 *) _prog; + + if (size % 4) { + asd_printk("sequencer program not multiple of 4\n"); + return -1; + } + + asd_pause_cseq(asd_ha); + asd_pause_lseq(asd_ha, 0xFF); + + reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ); + reg |= PIOCMODE; + + asd_write_reg_dword(asd_ha, OVLYDMACNT, size); + asd_write_reg_dword(asd_ha, OVLYDMACTL, reg); + + ASD_DPRINTK("downloading %s sequencer%s in PIO mode...\n", + lseq_mask ? "LSEQ" : "CSEQ", lseq_mask ? 
"s" : ""); + + for (i = 0; i < size; i += 4, prog++) + asd_write_reg_dword(asd_ha, SPIODATA, *prog); + + reg = (reg & ~PIOCMODE) | OVLYHALTERR; + asd_write_reg_dword(asd_ha, OVLYDMACTL, reg); + + return asd_verify_seq(asd_ha, _prog, size, lseq_mask); +} +#endif /* ASD_DMA_MODE_DOWNLOAD */ + +/** + * asd_seq_download_seqs - download the sequencer microcode + * @asd_ha: pointer to host adapter structure + * + * Download the central and link sequencer microcode. + */ +static int asd_seq_download_seqs(struct asd_ha_struct *asd_ha) +{ + int err; + + if (!asd_ha->hw_prof.enabled_phys) { + asd_printk("%s: no enabled phys!\n", pci_name(asd_ha->pcidev)); + return -ENODEV; + } + + /* Download the CSEQ */ + ASD_DPRINTK("downloading CSEQ...\n"); + err = asd_download_seq(asd_ha, cseq_code, cseq_code_size, 0); + if (err) { + asd_printk("CSEQ download failed:%d\n", err); + return err; + } + + /* Download the Link Sequencers code. All of the Link Sequencers + * microcode can be downloaded at the same time. + */ + ASD_DPRINTK("downloading LSEQs...\n"); + err = asd_download_seq(asd_ha, lseq_code, lseq_code_size, + asd_ha->hw_prof.enabled_phys); + if (err) { + /* Try it one at a time */ + u8 lseq; + u8 lseq_mask = asd_ha->hw_prof.enabled_phys; + + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + err = asd_download_seq(asd_ha, lseq_code, + lseq_code_size, 1<<lseq); + if (err) + break; + } + } + if (err) + asd_printk("LSEQs download failed:%d\n", err); + + return err; +} + +/* ---------- Initializing the chip, chip memory, etc. ---------- */ + +/** + * asd_init_cseq_mip - initialize CSEQ mode independent pages 4-7 + * @asd_ha: pointer to host adapter structure + */ +static void asd_init_cseq_mip(struct asd_ha_struct *asd_ha) +{ + /* CSEQ Mode Independent, page 4 setup. */ + asd_write_reg_word(asd_ha, CSEQ_Q_EXE_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_Q_EXE_TAIL, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_Q_DONE_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_Q_DONE_TAIL, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_Q_SEND_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_Q_SEND_TAIL, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_TAIL, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_Q_COPY_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_Q_COPY_TAIL, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_REG0, 0); + asd_write_reg_word(asd_ha, CSEQ_REG1, 0); + asd_write_reg_dword(asd_ha, CSEQ_REG2, 0); + asd_write_reg_byte(asd_ha, CSEQ_LINK_CTL_Q_MAP, 0); + { + u8 con = asd_read_reg_byte(asd_ha, CCONEXIST); + u8 val = hweight8(con); + asd_write_reg_byte(asd_ha, CSEQ_MAX_CSEQ_MODE, (val<<4)|val); + } + asd_write_reg_word(asd_ha, CSEQ_FREE_LIST_HACK_COUNT, 0); + + /* CSEQ Mode independent, page 5 setup. */ + asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE, 0); + asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE+4, 0); + asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT, 0); + asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT+4, 0); + asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_TAIL, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_NEED_EST_NEXUS_SCB, 0); + asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_HEAD, 0); + asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_TAIL, 0); + asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_SCB_OFFSET, 0); + + /* CSEQ Mode independent, page 6 setup. 
*/ + asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR0, 0); + asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR1, 0); + asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_SCBPTR, 0); + asd_write_reg_byte(asd_ha, CSEQ_INT_ROUT_MODE, 0); + asd_write_reg_byte(asd_ha, CSEQ_ISR_SCRATCH_FLAGS, 0); + asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_SINDEX, 0); + asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_DINDEX, 0); + asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_TAIL, 0xFFFF); + /* Calculate the free scb mask. */ + { + u16 cmdctx = asd_get_cmdctx_size(asd_ha); + cmdctx = (~((cmdctx/128)-1)) >> 8; + asd_write_reg_byte(asd_ha, CSEQ_FREE_SCB_MASK, (u8)cmdctx); + } + asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_HEAD, + first_scb_site_no); + asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_TAIL, + last_scb_site_no); + asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_TAIL, 0xFFFF); + + /* CSEQ Mode independent, page 7 setup. */ + asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE, 0); + asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE+4, 0); + asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT, 0); + asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT+4, 0); + asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_TAIL, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_NEED_EMPTY_SCB, 0); + asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_HEAD, 0); + asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_TAIL, 0); + asd_write_reg_byte(asd_ha, CSEQ_EMPTY_SCB_OFFSET, 0); + asd_write_reg_word(asd_ha, CSEQ_PRIMITIVE_DATA, 0); + asd_write_reg_dword(asd_ha, CSEQ_TIMEOUT_CONST, 0); +} + +/** + * asd_init_cseq_mdp - initialize CSEQ Mode dependent pages + * @asd_ha: pointer to host adapter structure + */ +static void asd_init_cseq_mdp(struct asd_ha_struct *asd_ha) +{ + int i; + int moffs; + + moffs = CSEQ_PAGE_SIZE * 2; + + /* CSEQ Mode dependent, modes 0-7, page 0 setup. */ + for (i = 0; i < 8; i++) { + asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SINDEX, 0); + asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCBPTR, 0); + asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_HEAD, 0xFFFF); + asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_TAIL, 0xFFFF); + asd_write_reg_byte(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCRPAGE, 0); + } + + /* CSEQ Mode dependent, mode 0-7, page 1 and 2 shall be ignored. */ + + /* CSEQ Mode dependent, mode 8, page 0 setup. */ + asd_write_reg_word(asd_ha, CSEQ_RET_ADDR, 0xFFFF); + asd_write_reg_word(asd_ha, CSEQ_RET_SCBPTR, 0); + asd_write_reg_word(asd_ha, CSEQ_SAVE_SCBPTR, 0); + asd_write_reg_word(asd_ha, CSEQ_EMPTY_TRANS_CTX, 0); + asd_write_reg_word(asd_ha, CSEQ_RESP_LEN, 0); + asd_write_reg_word(asd_ha, CSEQ_TMF_SCBPTR, 0); + asd_write_reg_word(asd_ha, CSEQ_GLOBAL_PREV_SCB, 0); + asd_write_reg_word(asd_ha, CSEQ_GLOBAL_HEAD, 0); + asd_write_reg_word(asd_ha, CSEQ_CLEAR_LU_HEAD, 0); + asd_write_reg_byte(asd_ha, CSEQ_TMF_OPCODE, 0); + asd_write_reg_byte(asd_ha, CSEQ_SCRATCH_FLAGS, 0); + asd_write_reg_word(asd_ha, CSEQ_HSB_SITE, 0); + asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_SCB_SITE, + (u16)last_scb_site_no+1); + asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_DDB_SITE, + (u16)asd_ha->hw_prof.max_ddbs); + + /* CSEQ Mode dependent, mode 8, page 1 setup. 
*/ + asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR, 0); + asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR + 4, 0); + asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK, 0); + asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK + 4, 0); + + /* CSEQ Mode dependent, mode 8, page 2 setup. */ + /* Tell the sequencer the bus address of the first SCB. */ + asd_write_reg_addr(asd_ha, CSEQ_HQ_NEW_POINTER, + asd_ha->seq.next_scb.dma_handle); + ASD_DPRINTK("First SCB dma_handle: 0x%llx\n", + (unsigned long long)asd_ha->seq.next_scb.dma_handle); + + /* Tell the sequencer the first Done List entry address. */ + asd_write_reg_addr(asd_ha, CSEQ_HQ_DONE_BASE, + asd_ha->seq.actual_dl->dma_handle); + + /* Initialize the Q_DONE_POINTER with the least significant + * 4 bytes of the first Done List address. */ + asd_write_reg_dword(asd_ha, CSEQ_HQ_DONE_POINTER, + ASD_BUSADDR_LO(asd_ha->seq.actual_dl->dma_handle)); + + asd_write_reg_byte(asd_ha, CSEQ_HQ_DONE_PASS, ASD_DEF_DL_TOGGLE); + + /* CSEQ Mode dependent, mode 8, page 3 shall be ignored. */ +} + +/** + * asd_init_cseq_scratch -- setup and init CSEQ + * @asd_ha: pointer to host adapter structure + * + * Setup and initialize Central sequencers. Initialiaze the mode + * independent and dependent scratch page to the default settings. + */ +static void asd_init_cseq_scratch(struct asd_ha_struct *asd_ha) +{ + asd_init_cseq_mip(asd_ha); + asd_init_cseq_mdp(asd_ha); +} + +/** + * asd_init_lseq_mip -- initialize LSEQ Mode independent pages 0-3 + * @asd_ha: pointer to host adapter structure + */ +static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq) +{ + int i; + + /* LSEQ Mode independent page 0 setup. */ + asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_HEAD(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_TAIL(lseq), 0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_LINK_NUMBER(lseq), lseq); + asd_write_reg_byte(asd_ha, LmSEQ_SCRATCH_FLAGS(lseq), + ASD_NOTIFY_ENABLE_SPINUP); + asd_write_reg_dword(asd_ha, LmSEQ_CONNECTION_STATE(lseq),0x08000000); + asd_write_reg_word(asd_ha, LmSEQ_CONCTL(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_CONSTAT(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_CONNECTION_MODES(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_REG1_ISR(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_REG2_ISR(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_REG3_ISR(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq)+4, 0); + + /* LSEQ Mode independent page 1 setup. */ + asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR0(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR1(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR2(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR3(lseq), 0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE0(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE1(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE2(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE3(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_HEAD(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_TAIL(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_BUF_AVAIL(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_TIMEOUT_CONST(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_SINDEX(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_DINDEX(lseq), 0); + + /* LSEQ Mode Independent page 2 setup. 
*/ + asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR0(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR1(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR2(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR3(lseq), 0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD0(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD1(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD2(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD3(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_HEAD(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_TAIL(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_BUFS_AVAIL(lseq), 0); + for (i = 0; i < 12; i += 4) + asd_write_reg_dword(asd_ha, LmSEQ_ATA_SCR_REGS(lseq) + i, 0); + + /* LSEQ Mode Independent page 3 setup. */ + + /* Device present timer timeout */ + asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TMR_TOUT_CONST(lseq), + ASD_DEV_PRESENT_TIMEOUT); + + /* SATA interlock timer disabled */ + asd_write_reg_dword(asd_ha, LmSEQ_SATA_INTERLOCK_TIMEOUT(lseq), + ASD_SATA_INTERLOCK_TIMEOUT); + + /* STP shutdown timer timeout constant, IGNORED by the sequencer, + * always 0. */ + asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMEOUT(lseq), + ASD_STP_SHUTDOWN_TIMEOUT); + + asd_write_reg_dword(asd_ha, LmSEQ_SRST_ASSERT_TIMEOUT(lseq), + ASD_SRST_ASSERT_TIMEOUT); + + asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMEOUT(lseq), + ASD_RCV_FIS_TIMEOUT); + + asd_write_reg_dword(asd_ha, LmSEQ_ONE_MILLISEC_TIMEOUT(lseq), + ASD_ONE_MILLISEC_TIMEOUT); + + /* COM_INIT timer */ + asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(lseq), + ASD_TEN_MILLISEC_TIMEOUT); + + asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMEOUT(lseq), + ASD_SMP_RCV_TIMEOUT); +} + +/** + * asd_init_lseq_mdp -- initialize LSEQ mode dependent pages. + * @asd_ha: pointer to host adapter structure + */ +static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq) +{ + int i; + u32 moffs; + u16 ret_addr[] = { + 0xFFFF, /* mode 0 */ + 0xFFFF, /* mode 1 */ + mode2_task, /* mode 2 */ + 0, + 0xFFFF, /* mode 4/5 */ + 0xFFFF, /* mode 4/5 */ + }; + + /* + * Mode 0,1,2 and 4/5 have common field on page 0 for the first + * 14 bytes. + */ + for (i = 0; i < 3; i++) { + moffs = i * LSEQ_MODE_SCRATCH_SIZE; + asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+moffs, + ret_addr[i]); + asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+moffs, 0); + asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+moffs, 0); + asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+moffs,0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+moffs,0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+moffs,0); + asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+moffs,0); + } + /* + * Mode 5 page 0 overlaps the same scratch page with Mode 0 page 3. + */ + asd_write_reg_word(asd_ha, + LmSEQ_RET_ADDR(lseq)+LSEQ_MODE5_PAGE0_OFFSET, + ret_addr[5]); + asd_write_reg_word(asd_ha, + LmSEQ_REG0_MODE(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0); + asd_write_reg_word(asd_ha, + LmSEQ_MODE_FLAGS(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0); + asd_write_reg_word(asd_ha, + LmSEQ_RET_ADDR2(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF); + asd_write_reg_word(asd_ha, + LmSEQ_RET_ADDR1(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF); + asd_write_reg_byte(asd_ha, + LmSEQ_OPCODE_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0); + asd_write_reg_word(asd_ha, + LmSEQ_DATA_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0); + + /* LSEQ Mode dependent 0, page 0 setup. 
*/ + asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_DDB_SITE(lseq), + (u16)asd_ha->hw_prof.max_ddbs); + asd_write_reg_word(asd_ha, LmSEQ_EMPTY_TRANS_CTX(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_RESP_LEN(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_SCB_SITE(lseq), + (u16)last_scb_site_no+1); + asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq), + (u16) LmM0INTEN_MASK & 0xFFFF0000 >> 16); + asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq) + 2, + (u16) LmM0INTEN_MASK & 0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_FRM_LEN(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_PROTOCOL(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_RESP_STATUS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_LAST_LOADED_SGE(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_SAVE_SCBPTR(lseq), 0); + + /* LSEQ mode dependent, mode 1, page 0 setup. */ + asd_write_reg_word(asd_ha, LmSEQ_Q_XMIT_HEAD(lseq), 0xFFFF); + asd_write_reg_word(asd_ha, LmSEQ_M1_EMPTY_TRANS_CTX(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_INI_CONN_TAG(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_FAILED_OPEN_STATUS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_XMIT_REQUEST_TYPE(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_M1_RESP_STATUS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_M1_LAST_LOADED_SGE(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_M1_SAVE_SCBPTR(lseq), 0); + + /* LSEQ Mode dependent mode 2, page 0 setup */ + asd_write_reg_word(asd_ha, LmSEQ_PORT_COUNTER(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_PM_TABLE_PTR(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_SATA_INTERLOCK_TMR_SAVE(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_IP_BITL(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_COPY_SMP_CONN_TAG(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_P0M2_OFFS1AH(lseq), 0); + + /* LSEQ Mode dependent, mode 4/5, page 0 setup. */ + asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_STATUS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_MODE(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_Q_LINK_HEAD(lseq), 0xFFFF); + asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_ERR(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_SIGNALS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_SAS_RESET_MODE(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_LINK_RESET_RETRY_COUNT(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_NUM_LINK_RESET_RETRIES(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_OOB_INT_ENABLES(lseq), 0); + /* + * Set the desired interval between transmissions of the NOTIFY + * (ENABLE SPINUP) primitive. Must be initilized to val - 1. + */ + asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_TIMEOUT(lseq), + ASD_NOTIFY_TIMEOUT - 1); + /* No delay for the first NOTIFY to be sent to the attached target. */ + asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq), + ASD_NOTIFY_DOWN_COUNT); + + /* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */ + for (i = 0; i < 2; i++) { + int j; + /* Start from Page 1 of Mode 0 and 1. */ + moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE; + /* All the fields of page 1 can be intialized to 0. */ + for (j = 0; j < LSEQ_PAGE_SIZE; j += 4) + asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0); + } + + /* LSEQ Mode dependent, mode 2, page 1 setup. */ + asd_write_reg_dword(asd_ha, LmSEQ_INVALID_DWORD_COUNT(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_DISPARITY_ERROR_COUNT(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_LOSS_OF_SYNC_COUNT(lseq), 0); + + /* LSEQ Mode dependent, mode 4/5, page 1. 
*/ + for (i = 0; i < LSEQ_PAGE_SIZE; i+=4) + asd_write_reg_dword(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq)+i, 0); + asd_write_reg_byte(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq), 0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq), 0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+1,0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+2,0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq), 0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+1, 0xFF); + asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+2, 0xFF); + asd_write_reg_dword(asd_ha, LmSEQ_DATA_OFFSET(lseq), 0xFFFFFFFF); + + /* LSEQ Mode dependent, mode 0, page 2 setup. */ + asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMER_TERM_TS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_DEVICE_BITS(lseq), 0); + asd_write_reg_word(asd_ha, LmSEQ_SDB_DDB(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_SDB_NUM_TAGS(lseq), 0); + asd_write_reg_byte(asd_ha, LmSEQ_SDB_CURR_TAG(lseq), 0); + + /* LSEQ Mode Dependent 1, page 2 setup. */ + asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq)+4, 0); + asd_write_reg_dword(asd_ha, LmSEQ_OPEN_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_SRST_AS_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_LAST_LOADED_SG_EL(lseq), 0); + + /* LSEQ Mode Dependent 2, page 2 setup. */ + /* The LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS is IGNORED by the sequencer, + * i.e. always 0. */ + asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(lseq),0); + asd_write_reg_dword(asd_ha, LmSEQ_CLOSE_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_BREAK_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_DWS_RESET_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha,LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(lseq),0); + asd_write_reg_dword(asd_ha, LmSEQ_MCTL_TIMER_TERM_TS(lseq), 0); + + /* LSEQ Mode Dependent 4/5, page 2 setup. */ + asd_write_reg_dword(asd_ha, LmSEQ_COMINIT_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_RCV_ID_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMER_TERM_TS(lseq), 0); + asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TIMER_TERM_TS(lseq), 0); +} + +/** + * asd_init_lseq_scratch -- setup and init link sequencers + * @asd_ha: pointer to host adapter struct + */ +static void asd_init_lseq_scratch(struct asd_ha_struct *asd_ha) +{ + u8 lseq; + u8 lseq_mask; + + lseq_mask = asd_ha->hw_prof.enabled_phys; + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + asd_init_lseq_mip(asd_ha, lseq); + asd_init_lseq_mdp(asd_ha, lseq); + } +} + +/** + * asd_init_scb_sites -- initialize sequencer SCB sites (memory). + * @asd_ha: pointer to host adapter structure + * + * This should be done before initializing common CSEQ and LSEQ + * scratch since those areas depend on some computed values here, + * last_scb_site_no, etc. + */ +static void asd_init_scb_sites(struct asd_ha_struct *asd_ha) +{ + u16 site_no; + u16 max_scbs = 0; + + for (site_no = asd_ha->hw_prof.max_scbs-1; + site_no != (u16) -1; + site_no--) { + u16 i; + + /* Initialize all fields in the SCB site to 0. */ + for (i = 0; i < ASD_SCB_SIZE; i += 4) + asd_scbsite_write_dword(asd_ha, site_no, i, 0); + + /* Workaround needed by SEQ to fix a SATA issue is to exclude + * certain SCB sites from the free list. 
*/ + if (!SCB_SITE_VALID(site_no)) + continue; + + if (last_scb_site_no == 0) + last_scb_site_no = site_no; + + /* For every SCB site, we need to initialize the + * following fields: Q_NEXT, SCB_OPCODE, SCB_FLAGS, + * and SG Element Flag. */ + + /* Q_NEXT field of the last SCB is invalidated. */ + asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no); + + /* Initialize SCB Site Opcode field to invalid. */ + asd_scbsite_write_byte(asd_ha, site_no, + offsetof(struct scb_header, opcode), + 0xFF); + + /* Initialize SCB Site Flags field to mean a response + * frame has been received. This causes inadvertently + * received frames to be dropped. */ + asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01); + + first_scb_site_no = site_no; + max_scbs++; + } + asd_ha->hw_prof.max_scbs = max_scbs; + ASD_DPRINTK("max_scbs:%d\n", asd_ha->hw_prof.max_scbs); + ASD_DPRINTK("first_scb_site_no:0x%x\n", first_scb_site_no); + ASD_DPRINTK("last_scb_site_no:0x%x\n", last_scb_site_no); +} + +/** + * asd_init_cseq_cio - initialize CSEQ CIO registers + * @asd_ha: pointer to host adapter structure + */ +static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha) +{ + int i; + + asd_write_reg_byte(asd_ha, CSEQCOMINTEN, 0); + asd_write_reg_byte(asd_ha, CSEQDLCTL, ASD_DL_SIZE_BITS); + asd_write_reg_byte(asd_ha, CSEQDLOFFS, 0); + asd_write_reg_byte(asd_ha, CSEQDLOFFS+1, 0); + asd_ha->seq.scbpro = 0; + asd_write_reg_dword(asd_ha, SCBPRO, 0); + asd_write_reg_dword(asd_ha, CSEQCON, 0); + + /* Initialize CSEQ Mode 11 Interrupt Vectors. + * The addresses are 16 bit wide and in dword units. + * The values of their macros are in byte units. + * Thus we have to divide by 4. */ + asd_write_reg_word(asd_ha, CM11INTVEC0, cseq_vecs[0]); + asd_write_reg_word(asd_ha, CM11INTVEC1, cseq_vecs[1]); + asd_write_reg_word(asd_ha, CM11INTVEC2, cseq_vecs[2]); + + /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */ + asd_write_reg_byte(asd_ha, CARP2INTEN, EN_ARP2HALTC); + + /* Initialize CSEQ Scratch Page to 0x04. */ + asd_write_reg_byte(asd_ha, CSCRATCHPAGE, 0x04); + + /* Initialize CSEQ Mode[0-8] Dependent registers. */ + /* Initialize Scratch Page to 0. */ + for (i = 0; i < 9; i++) + asd_write_reg_byte(asd_ha, CMnSCRATCHPAGE(i), 0); + + /* Reset the ARP2 Program Count. */ + asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop); + + for (i = 0; i < 8; i++) { + /* Initialize Mode n Link m Interrupt Enable. */ + asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF); + /* Initialize Mode n Request Mailbox. */ + asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0); + } +} + +/** + * asd_init_lseq_cio -- initialize LmSEQ CIO registers + * @asd_ha: pointer to host adapter structure + * @lseq: the link sequencer of interest + */ +static void asd_init_lseq_cio(struct asd_ha_struct *asd_ha, int lseq) +{ + u8 *sas_addr; + int i; + + /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */ + asd_write_reg_dword(asd_ha, LmARP2INTEN(lseq), EN_ARP2HALTC); + + asd_write_reg_byte(asd_ha, LmSCRATCHPAGE(lseq), 0); + + /* Initialize Mode 0,1, and 2 SCRATCHPAGE to 0. */ + for (i = 0; i < 3; i++) + asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, i), 0); + + /* Initialize Mode 5 SCRATCHPAGE to 0. */ + asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, 5), 0); + + asd_write_reg_dword(asd_ha, LmRSPMBX(lseq), 0); + /* Initialize Mode 0,1,2 and 5 Interrupt Enable and + * Interrupt registers. 
*/ + asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 0), LmM0INTEN_MASK); + asd_write_reg_dword(asd_ha, LmMnINT(lseq, 0), 0xFFFFFFFF); + /* Mode 1 */ + asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 1), LmM1INTEN_MASK); + asd_write_reg_dword(asd_ha, LmMnINT(lseq, 1), 0xFFFFFFFF); + /* Mode 2 */ + asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 2), LmM2INTEN_MASK); + asd_write_reg_dword(asd_ha, LmMnINT(lseq, 2), 0xFFFFFFFF); + /* Mode 5 */ + asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 5), LmM5INTEN_MASK); + asd_write_reg_dword(asd_ha, LmMnINT(lseq, 5), 0xFFFFFFFF); + + /* Enable HW Timer status. */ + asd_write_reg_byte(asd_ha, LmHWTSTATEN(lseq), LmHWTSTATEN_MASK); + + /* Enable Primitive Status 0 and 1. */ + asd_write_reg_dword(asd_ha, LmPRIMSTAT0EN(lseq), LmPRIMSTAT0EN_MASK); + asd_write_reg_dword(asd_ha, LmPRIMSTAT1EN(lseq), LmPRIMSTAT1EN_MASK); + + /* Enable Frame Error. */ + asd_write_reg_dword(asd_ha, LmFRMERREN(lseq), LmFRMERREN_MASK); + asd_write_reg_byte(asd_ha, LmMnHOLDLVL(lseq, 0), 0x50); + + /* Initialize Mode 0 Transfer Level to 512. */ + asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 0), LmMnXFRLVL_512); + /* Initialize Mode 1 Transfer Level to 256. */ + asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 1), LmMnXFRLVL_256); + + /* Initialize Program Count. */ + asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop); + + /* Enable Blind SG Move. */ + asd_write_reg_dword(asd_ha, LmMODECTL(lseq), LmBLIND48); + asd_write_reg_word(asd_ha, LmM3SATATIMER(lseq), + ASD_SATA_INTERLOCK_TIMEOUT); + + (void) asd_read_reg_dword(asd_ha, LmREQMBX(lseq)); + + /* Clear Primitive Status 0 and 1. */ + asd_write_reg_dword(asd_ha, LmPRMSTAT0(lseq), 0xFFFFFFFF); + asd_write_reg_dword(asd_ha, LmPRMSTAT1(lseq), 0xFFFFFFFF); + + /* Clear HW Timer status. */ + asd_write_reg_byte(asd_ha, LmHWTSTAT(lseq), 0xFF); + + /* Clear DMA Errors for Mode 0 and 1. */ + asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 0), 0xFF); + asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 1), 0xFF); + + /* Clear SG DMA Errors for Mode 0 and 1. */ + asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 0), 0xFF); + asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 1), 0xFF); + + /* Clear Mode 0 Buffer Parity Error. */ + asd_write_reg_byte(asd_ha, LmMnBUFSTAT(lseq, 0), LmMnBUFPERR); + + /* Clear Mode 0 Frame Error register. */ + asd_write_reg_dword(asd_ha, LmMnFRMERR(lseq, 0), 0xFFFFFFFF); + + /* Reset LSEQ external interrupt arbiter. */ + asd_write_reg_byte(asd_ha, LmARP2INTCTL(lseq), RSTINTCTL); + + /* Set the Phy SAS for the LmSEQ WWN. */ + sas_addr = asd_ha->phys[lseq].phy_desc->sas_addr; + for (i = 0; i < SAS_ADDR_SIZE; i++) + asd_write_reg_byte(asd_ha, LmWWN(lseq) + i, sas_addr[i]); + + /* Set the Transmit Size to 1024 bytes, 0 = 256 Dwords. */ + asd_write_reg_byte(asd_ha, LmMnXMTSIZE(lseq, 1), 0); + + /* Set the Bus Inactivity Time Limit Timer. */ + asd_write_reg_word(asd_ha, LmBITL_TIMER(lseq), 9); + + /* Enable SATA Port Multiplier. */ + asd_write_reg_byte(asd_ha, LmMnSATAFS(lseq, 1), 0x80); + + /* Initialize Interrupt Vector[0-10] address in Mode 3. 
+ * See the comment on CSEQ_INT_* */ + asd_write_reg_word(asd_ha, LmM3INTVEC0(lseq), lseq_vecs[0]); + asd_write_reg_word(asd_ha, LmM3INTVEC1(lseq), lseq_vecs[1]); + asd_write_reg_word(asd_ha, LmM3INTVEC2(lseq), lseq_vecs[2]); + asd_write_reg_word(asd_ha, LmM3INTVEC3(lseq), lseq_vecs[3]); + asd_write_reg_word(asd_ha, LmM3INTVEC4(lseq), lseq_vecs[4]); + asd_write_reg_word(asd_ha, LmM3INTVEC5(lseq), lseq_vecs[5]); + asd_write_reg_word(asd_ha, LmM3INTVEC6(lseq), lseq_vecs[6]); + asd_write_reg_word(asd_ha, LmM3INTVEC7(lseq), lseq_vecs[7]); + asd_write_reg_word(asd_ha, LmM3INTVEC8(lseq), lseq_vecs[8]); + asd_write_reg_word(asd_ha, LmM3INTVEC9(lseq), lseq_vecs[9]); + asd_write_reg_word(asd_ha, LmM3INTVEC10(lseq), lseq_vecs[10]); + /* + * Program the Link LED control, applicable only for + * Chip Rev. B or later. + */ + asd_write_reg_dword(asd_ha, LmCONTROL(lseq), + (LEDTIMER | LEDMODE_TXRX | LEDTIMERS_100ms)); + + /* Set the Align Rate for SAS and STP mode. */ + asd_write_reg_byte(asd_ha, LmM1SASALIGN(lseq), SAS_ALIGN_DEFAULT); + asd_write_reg_byte(asd_ha, LmM1STPALIGN(lseq), STP_ALIGN_DEFAULT); +} + + +/** + * asd_post_init_cseq -- clear CSEQ Mode n Int. status and Response mailbox + * @asd_ha: pointer to host adapter struct + */ +static void asd_post_init_cseq(struct asd_ha_struct *asd_ha) +{ + int i; + + for (i = 0; i < 8; i++) + asd_write_reg_dword(asd_ha, CMnINT(i), 0xFFFFFFFF); + for (i = 0; i < 8; i++) + asd_read_reg_dword(asd_ha, CMnRSPMBX(i)); + /* Reset the external interrupt arbiter. */ + asd_write_reg_byte(asd_ha, CARP2INTCTL, RSTINTCTL); +} + +/** + * asd_init_ddb_0 -- initialize DDB 0 + * @asd_ha: pointer to host adapter structure + * + * Initialize DDB site 0 which is used internally by the sequencer. + */ +static void asd_init_ddb_0(struct asd_ha_struct *asd_ha) +{ + int i; + + /* Zero out the DDB explicitly */ + for (i = 0; i < sizeof(struct asd_ddb_seq_shared); i+=4) + asd_ddbsite_write_dword(asd_ha, 0, i, 0); + + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, q_free_ddb_head), 0); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, q_free_ddb_tail), + asd_ha->hw_prof.max_ddbs-1); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, q_free_ddb_cnt), 0); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, q_used_ddb_head), 0xFFFF); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, q_used_ddb_tail), 0xFFFF); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, shared_mem_lock), 0); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, smp_conn_tag), 0); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, est_nexus_buf_cnt), 0); + asd_ddbsite_write_word(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, est_nexus_buf_thresh), + asd_ha->hw_prof.num_phys * 2); + asd_ddbsite_write_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, settable_max_contexts),0); + asd_ddbsite_write_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, conn_not_active), 0xFF); + asd_ddbsite_write_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, phy_is_up), 0x00); + /* DDB 0 is reserved */ + set_bit(0, asd_ha->hw_prof.ddb_bitmap); +} + +/** + * asd_seq_setup_seqs -- setup and initialize central and link sequencers + * @asd_ha: pointer to host adapter structure + */ +static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha) +{ + int lseq; + u8 lseq_mask; + + /* Initialize SCB sites. 
Done first to compute some values which + * the rest of the init code depends on. */ + asd_init_scb_sites(asd_ha); + + /* Initialize CSEQ Scratch RAM registers. */ + asd_init_cseq_scratch(asd_ha); + + /* Initialize LmSEQ Scratch RAM registers. */ + asd_init_lseq_scratch(asd_ha); + + /* Initialize CSEQ CIO registers. */ + asd_init_cseq_cio(asd_ha); + + asd_init_ddb_0(asd_ha); + + /* Initialize LmSEQ CIO registers. */ + lseq_mask = asd_ha->hw_prof.enabled_phys; + for_each_sequencer(lseq_mask, lseq_mask, lseq) + asd_init_lseq_cio(asd_ha, lseq); + asd_post_init_cseq(asd_ha); +} + + +/** + * asd_seq_start_cseq -- start the central sequencer, CSEQ + * @asd_ha: pointer to host adapter structure + */ +static int asd_seq_start_cseq(struct asd_ha_struct *asd_ha) +{ + /* Reset the ARP2 instruction to location zero. */ + asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop); + + /* Unpause the CSEQ */ + return asd_unpause_cseq(asd_ha); +} + +/** + * asd_seq_start_lseq -- start a link sequencer + * @asd_ha: pointer to host adapter structure + * @lseq: the link sequencer of interest + */ +static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq) +{ + /* Reset the ARP2 instruction to location zero. */ + asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop); + + /* Unpause the LmSEQ */ + return asd_seq_unpause_lseq(asd_ha, lseq); +} + +static int asd_request_firmware(struct asd_ha_struct *asd_ha) +{ + int err, i; + struct sequencer_file_header header, *hdr_ptr; + u32 csum = 0; + u16 *ptr_cseq_vecs, *ptr_lseq_vecs; + + if (sequencer_fw) + /* already loaded */ + return 0; + + err = request_firmware(&sequencer_fw, + SAS_RAZOR_SEQUENCER_FW_FILE, + &asd_ha->pcidev->dev); + if (err) + return err; + + hdr_ptr = (struct sequencer_file_header *)sequencer_fw->data; + + header.csum = le32_to_cpu(hdr_ptr->csum); + header.major = le32_to_cpu(hdr_ptr->major); + header.minor = le32_to_cpu(hdr_ptr->minor); + sequencer_version = hdr_ptr->version; + header.cseq_table_offset = le32_to_cpu(hdr_ptr->cseq_table_offset); + header.cseq_table_size = le32_to_cpu(hdr_ptr->cseq_table_size); + header.lseq_table_offset = le32_to_cpu(hdr_ptr->lseq_table_offset); + header.lseq_table_size = le32_to_cpu(hdr_ptr->lseq_table_size); + header.cseq_code_offset = le32_to_cpu(hdr_ptr->cseq_code_offset); + header.cseq_code_size = le32_to_cpu(hdr_ptr->cseq_code_size); + header.lseq_code_offset = le32_to_cpu(hdr_ptr->lseq_code_offset); + header.lseq_code_size = le32_to_cpu(hdr_ptr->lseq_code_size); + header.mode2_task = le16_to_cpu(hdr_ptr->mode2_task); + header.cseq_idle_loop = le16_to_cpu(hdr_ptr->cseq_idle_loop); + header.lseq_idle_loop = le16_to_cpu(hdr_ptr->lseq_idle_loop); + + for (i = sizeof(header.csum); i < sequencer_fw->size; i++) + csum += sequencer_fw->data[i]; + + if (csum != header.csum) { + asd_printk("Firmware file checksum mismatch\n"); + return -EINVAL; + } + + if (header.cseq_table_size != CSEQ_NUM_VECS || + header.lseq_table_size != LSEQ_NUM_VECS) { + asd_printk("Firmware file table size mismatch\n"); + return -EINVAL; + } + + ptr_cseq_vecs = (u16 *)&sequencer_fw->data[header.cseq_table_offset]; + ptr_lseq_vecs = (u16 *)&sequencer_fw->data[header.lseq_table_offset]; + mode2_task = header.mode2_task; + cseq_idle_loop = header.cseq_idle_loop; + lseq_idle_loop = header.lseq_idle_loop; + + for (i = 0; i < CSEQ_NUM_VECS; i++) + cseq_vecs[i] = le16_to_cpu(ptr_cseq_vecs[i]); + + for (i = 0; i < LSEQ_NUM_VECS; i++) + lseq_vecs[i] = le16_to_cpu(ptr_lseq_vecs[i]); + + cseq_code = 
&sequencer_fw->data[header.cseq_code_offset]; + cseq_code_size = header.cseq_code_size; + lseq_code = &sequencer_fw->data[header.lseq_code_offset]; + lseq_code_size = header.lseq_code_size; + + return 0; +} + +int asd_init_seqs(struct asd_ha_struct *asd_ha) +{ + int err; + + err = asd_request_firmware(asd_ha); + + if (err) { + asd_printk("Failed to load sequencer firmware file %s, error %d\n", + SAS_RAZOR_SEQUENCER_FW_FILE, err); + return err; + } + + asd_printk("using sequencer %s\n", sequencer_version); + err = asd_seq_download_seqs(asd_ha); + if (err) { + asd_printk("couldn't download sequencers for %s\n", + pci_name(asd_ha->pcidev)); + return err; + } + + asd_seq_setup_seqs(asd_ha); + + return 0; +} + +int asd_start_seqs(struct asd_ha_struct *asd_ha) +{ + int err; + u8 lseq_mask; + int lseq; + + err = asd_seq_start_cseq(asd_ha); + if (err) { + asd_printk("couldn't start CSEQ for %s\n", + pci_name(asd_ha->pcidev)); + return err; + } + + lseq_mask = asd_ha->hw_prof.enabled_phys; + for_each_sequencer(lseq_mask, lseq_mask, lseq) { + err = asd_seq_start_lseq(asd_ha, lseq); + if (err) { + asd_printk("coudln't start LSEQ %d for %s\n", lseq, + pci_name(asd_ha->pcidev)); + return err; + } + } + + return 0; +} + +/** + * asd_update_port_links -- update port_map_by_links and phy_is_up + * @sas_phy: pointer to the phy which has been added to a port + * + * 1) When a link reset has completed and we got BYTES DMAED with a + * valid frame we call this function for that phy, to indicate that + * the phy is up, i.e. we update the phy_is_up in DDB 0. The + * sequencer checks phy_is_up when pending SCBs are to be sent, and + * when an open address frame has been received. + * + * 2) When we know of ports, we call this function to update the map + * of phys participaing in that port, i.e. we update the + * port_map_by_links in DDB 0. When a HARD_RESET primitive has been + * received, the sequencer disables all phys in that port. + * port_map_by_links is also used as the conn_mask byte in the + * initiator/target port DDB. + */ +void asd_update_port_links(struct asd_sas_phy *sas_phy) +{ + struct asd_ha_struct *asd_ha = sas_phy->ha->lldd_ha; + const u8 phy_mask = (u8) sas_phy->port->phy_mask; + u8 phy_is_up; + u8 mask; + int i, err; + + for_each_phy(phy_mask, mask, i) + asd_ddbsite_write_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, + port_map_by_links)+i,phy_mask); + + for (i = 0; i < 12; i++) { + phy_is_up = asd_ddbsite_read_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, phy_is_up)); + err = asd_ddbsite_update_byte(asd_ha, 0, + offsetof(struct asd_ddb_seq_shared, phy_is_up), + phy_is_up, + phy_is_up | phy_mask); + if (!err) + break; + else if (err == -EFAULT) { + asd_printk("phy_is_up: parity error in DDB 0\n"); + break; + } + } + + if (err) + asd_printk("couldn't update DDB 0:error:%d\n", err); +} diff --git a/drivers/scsi/aic94xx/aic94xx_seq.h b/drivers/scsi/aic94xx/aic94xx_seq.h new file mode 100644 index 0000000..42281c3 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_seq.h @@ -0,0 +1,70 @@ +/* + * Aic94xx SAS/SATA driver sequencer interface header file. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. 
+ * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef _AIC94XX_SEQ_H_ +#define _AIC94XX_SEQ_H_ + +#define CSEQ_NUM_VECS 3 +#define LSEQ_NUM_VECS 11 + +#define SAS_RAZOR_SEQUENCER_FW_FILE "aic94xx-seq.fw" + +/* Note: All quantites in the sequencer file are little endian */ +struct sequencer_file_header { + /* Checksum of the entire contents of the sequencer excluding + * these four bytes */ + u32 csum; + /* numeric major version */ + u32 major; + /* numeric minor version */ + u32 minor; + /* version string printed by driver */ + char version[16]; + u32 cseq_table_offset; + u32 cseq_table_size; + u32 lseq_table_offset; + u32 lseq_table_size; + u32 cseq_code_offset; + u32 cseq_code_size; + u32 lseq_code_offset; + u32 lseq_code_size; + u16 mode2_task; + u16 cseq_idle_loop; + u16 lseq_idle_loop; +} __attribute__((packed)); + +#ifdef __KERNEL__ +int asd_pause_cseq(struct asd_ha_struct *asd_ha); +int asd_unpause_cseq(struct asd_ha_struct *asd_ha); +int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask); +int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask); +int asd_init_seqs(struct asd_ha_struct *asd_ha); +int asd_start_seqs(struct asd_ha_struct *asd_ha); + +void asd_update_port_links(struct asd_sas_phy *phy); +#endif + +#endif diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c new file mode 100644 index 0000000..285e70d --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_task.c @@ -0,0 +1,642 @@ +/* + * Aic94xx SAS/SATA Tasks + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include <linux/spinlock.h> +#include "aic94xx.h" +#include "aic94xx_sas.h" +#include "aic94xx_hwi.h" + +static void asd_unbuild_ata_ascb(struct asd_ascb *a); +static void asd_unbuild_smp_ascb(struct asd_ascb *a); +static void asd_unbuild_ssp_ascb(struct asd_ascb *a); + +static inline void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num) +{ + unsigned long flags; + + spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); + asd_ha->seq.can_queue += num; + spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); +} + +/* PCI_DMA_... to our direction translation. 
+ */ +static const u8 data_dir_flags[] = { + [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */ + [PCI_DMA_TODEVICE] = DATA_DIR_OUT, /* OUTBOUND */ + [PCI_DMA_FROMDEVICE] = DATA_DIR_IN, /* INBOUND */ + [PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */ +}; + +static inline int asd_map_scatterlist(struct sas_task *task, + struct sg_el *sg_arr, + unsigned long gfp_flags) +{ + struct asd_ascb *ascb = task->lldd_task; + struct asd_ha_struct *asd_ha = ascb->ha; + struct scatterlist *sc; + int num_sg, res; + + if (task->data_dir == PCI_DMA_NONE) + return 0; + + if (task->num_scatter == 0) { + void *p = task->scatter; + dma_addr_t dma = pci_map_single(asd_ha->pcidev, p, + task->total_xfer_len, + task->data_dir); + sg_arr[0].bus_addr = cpu_to_le64((u64)dma); + sg_arr[0].size = cpu_to_le32(task->total_xfer_len); + sg_arr[0].flags |= ASD_SG_EL_LIST_EOL; + return 0; + } + + num_sg = pci_map_sg(asd_ha->pcidev, task->scatter, task->num_scatter, + task->data_dir); + if (num_sg == 0) + return -ENOMEM; + + if (num_sg > 3) { + int i; + + ascb->sg_arr = asd_alloc_coherent(asd_ha, + num_sg*sizeof(struct sg_el), + gfp_flags); + if (!ascb->sg_arr) { + res = -ENOMEM; + goto err_unmap; + } + for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) { + struct sg_el *sg = + &((struct sg_el *)ascb->sg_arr->vaddr)[i]; + sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc)); + sg->size = cpu_to_le32((u32)sg_dma_len(sc)); + if (i == num_sg-1) + sg->flags |= ASD_SG_EL_LIST_EOL; + } + + for (sc = task->scatter, i = 0; i < 2; i++, sc++) { + sg_arr[i].bus_addr = + cpu_to_le64((u64)sg_dma_address(sc)); + sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc)); + } + sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr); + sg_arr[1].flags |= ASD_SG_EL_LIST_EOS; + + memset(&sg_arr[2], 0, sizeof(*sg_arr)); + sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle); + } else { + int i; + for (sc = task->scatter, i = 0; i < num_sg; i++, sc++) { + sg_arr[i].bus_addr = + cpu_to_le64((u64)sg_dma_address(sc)); + sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc)); + } + sg_arr[i-1].flags |= ASD_SG_EL_LIST_EOL; + } + + return 0; +err_unmap: + pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, + task->data_dir); + return res; +} + +static inline void asd_unmap_scatterlist(struct asd_ascb *ascb) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct sas_task *task = ascb->uldd_task; + + if (task->data_dir == PCI_DMA_NONE) + return; + + if (task->num_scatter == 0) { + dma_addr_t dma = (dma_addr_t) + le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr); + pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len, + task->data_dir); + return; + } + + asd_free_coherent(asd_ha, ascb->sg_arr); + pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, + task->data_dir); +} + +/* ---------- Task complete tasklet ---------- */ + +static void asd_get_response_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct sas_task *task = ascb->uldd_task; + struct task_status_struct *ts = &task->task_status; + unsigned long flags; + struct tc_resp_sb_struct { + __le16 index_escb; + u8 len_lsb; + u8 flags; + } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block; + +/* int size = ((resp_sb->flags & 7) << 8) | resp_sb->len_lsb; */ + int edb_id = ((resp_sb->flags & 0x70) >> 4)-1; + struct asd_ascb *escb; + struct asd_dma_tok *edb; + void *r; + + spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags); + escb = asd_tc_index_find(&asd_ha->seq, + 
(int)le16_to_cpu(resp_sb->index_escb)); + spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags); + + if (!escb) { + ASD_DPRINTK("Uh-oh! No escb for this dl?!\n"); + return; + } + + ts->buf_valid_size = 0; + edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index]; + r = edb->vaddr; + if (task->task_proto == SAS_PROTO_SSP) { + struct ssp_response_iu *iu = + r + 16 + sizeof(struct ssp_frame_hdr); + + ts->residual = le32_to_cpu(*(__le32 *)r); + ts->resp = SAS_TASK_COMPLETE; + if (iu->datapres == 0) + ts->stat = iu->status; + else if (iu->datapres == 1) + ts->stat = iu->resp_data[3]; + else if (iu->datapres == 2) { + ts->stat = SAM_CHECK_COND; + ts->buf_valid_size = min((u32) SAS_STATUS_BUF_SIZE, + be32_to_cpu(iu->sense_data_len)); + memcpy(ts->buf, iu->sense_data, ts->buf_valid_size); + if (iu->status != SAM_CHECK_COND) { + ASD_DPRINTK("device %llx sent sense data, but " + "stat(0x%x) is not CHECK_CONDITION" + "\n", + SAS_ADDR(task->dev->sas_addr), + ts->stat); + } + } + } else { + struct ata_task_resp *resp = (void *) &ts->buf[0]; + + ts->residual = le32_to_cpu(*(__le32 *)r); + + if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { + resp->frame_len = le16_to_cpu(*(__le16 *)(r+6)); + memcpy(&resp->ending_fis[0], r+16, 24); + ts->buf_valid_size = sizeof(*resp); + } + } + + asd_invalidate_edb(escb, edb_id); +} + +static void asd_task_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct sas_task *task = ascb->uldd_task; + struct task_status_struct *ts = &task->task_status; + unsigned long flags; + u8 opcode = dl->opcode; + + asd_can_dequeue(ascb->ha, 1); + +Again: + switch (opcode) { + case TC_NO_ERROR: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_GOOD; + break; + case TC_UNDERRUN: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = le32_to_cpu(*(__le32 *)dl->status_block); + break; + case TC_OVERRUN: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + break; + case TC_SSP_RESP: + case TC_ATA_RESP: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + asd_get_response_tasklet(ascb, dl); + break; + case TF_OPEN_REJECT: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + if (dl->status_block[1] & 2) + ts->open_rej_reason = 1 + dl->status_block[2]; + else if (dl->status_block[1] & 1) + ts->open_rej_reason = (dl->status_block[2] >> 4)+10; + else + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case TF_OPEN_TO: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_TO; + break; + case TF_PHY_DOWN: + case TU_PHY_DOWN: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case TI_PHY_DOWN: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PHY_DOWN; + break; + case TI_BREAK: + case TI_PROTO_ERR: + case TI_NAK: + case TI_ACK_NAK_TO: + case TF_SMP_XMIT_RCV_ERR: + case TC_ATA_R_ERR_RECV: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case TF_BREAK: + case TU_BREAK: + case TU_ACK_NAK_TO: + case TF_SMPRSP_TO: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case TF_NAK_RECV: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case TA_I_T_NEXUS_LOSS: + opcode = dl->status_block[0]; + goto Again; + break; + case TF_INV_CONN_HANDLE: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEVICE_UNKNOWN; + break; + case TF_REQUESTED_N_PENDING: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PENDING; + break; + case TC_TASK_CLEARED: + case TA_ON_REQ: + ts->resp = SAS_TASK_COMPLETE; + ts->stat = 
SAS_ABORTED_TASK; + break; + + case TF_NO_SMP_CONN: + case TF_TMF_NO_CTX: + case TF_TMF_NO_TAG: + case TF_TMF_TAG_FREE: + case TF_TMF_TASK_DONE: + case TF_TMF_NO_CONN_HANDLE: + case TF_IRTT_TO: + case TF_IU_SHORT: + case TF_DATA_OFFS_ERR: + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + + case TC_LINK_ADM_RESP: + case TC_CONTROL_PHY: + case TC_RESUME: + case TC_PARTIAL_SG_LIST: + default: + ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __FUNCTION__, opcode); + break; + } + + switch (task->task_proto) { + case SATA_PROTO: + case SAS_PROTO_STP: + asd_unbuild_ata_ascb(ascb); + break; + case SAS_PROTO_SMP: + asd_unbuild_smp_ascb(ascb); + break; + case SAS_PROTO_SSP: + asd_unbuild_ssp_ascb(ascb); + default: + break; + } + + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + task, opcode, ts->resp, ts->stat); + complete(&ascb->completion); + } else { + spin_unlock_irqrestore(&task->task_state_lock, flags); + task->lldd_task = NULL; + asd_ascb_free(ascb); + mb(); + task->task_done(task); + } +} + +/* ---------- ATA ---------- */ + +static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task, + unsigned long gfp_flags) +{ + struct domain_device *dev = task->dev; + struct scb *scb; + u8 flags; + int res = 0; + + scb = ascb->scb; + + if (unlikely(task->ata_task.device_control_reg_update)) + scb->header.opcode = CONTROL_ATA_DEV; + else if (dev->sata_dev.command_set == ATA_COMMAND_SET) + scb->header.opcode = INITIATE_ATA_TASK; + else + scb->header.opcode = INITIATE_ATAPI_TASK; + + scb->ata_task.proto_conn_rate = (1 << 5); /* STP */ + if (dev->port->oob_mode == SAS_OOB_MODE) + scb->ata_task.proto_conn_rate |= dev->linkrate; + + scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len); + scb->ata_task.fis = task->ata_task.fis; + scb->ata_task.fis.fis_type = 0x27; + if (likely(!task->ata_task.device_control_reg_update)) + scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ + scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */ + if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) + memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet, + 16); + scb->ata_task.sister_scb = cpu_to_le16(0xFFFF); + scb->ata_task.conn_handle = cpu_to_le16( + (u16)(unsigned long)dev->lldd_dev); + + if (likely(!task->ata_task.device_control_reg_update)) { + flags = 0; + if (task->ata_task.dma_xfer) + flags |= DATA_XFER_MODE_DMA; + if (task->ata_task.use_ncq && + dev->sata_dev.command_set != ATAPI_COMMAND_SET) + flags |= ATA_Q_TYPE_NCQ; + flags |= data_dir_flags[task->data_dir]; + scb->ata_task.ata_flags = flags; + + scb->ata_task.retry_count = task->ata_task.retry_count; + + flags = 0; + if (task->ata_task.set_affil_pol) + flags |= SET_AFFIL_POLICY; + if (task->ata_task.stp_affil_pol) + flags |= STP_AFFIL_POLICY; + scb->ata_task.flags = flags; + } + ascb->tasklet_complete = asd_task_tasklet_complete; + + if (likely(!task->ata_task.device_control_reg_update)) + res = asd_map_scatterlist(task, scb->ata_task.sg_element, + gfp_flags); + + return res; +} + +static void asd_unbuild_ata_ascb(struct asd_ascb *a) +{ + asd_unmap_scatterlist(a); +} + +/* ---------- SMP ---------- */ + +static int asd_build_smp_ascb(struct asd_ascb *ascb, 
struct sas_task *task, + unsigned long gfp_flags) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + struct domain_device *dev = task->dev; + struct scb *scb; + + pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1, + PCI_DMA_FROMDEVICE); + pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); + + scb = ascb->scb; + + scb->header.opcode = INITIATE_SMP_TASK; + + scb->smp_task.proto_conn_rate = dev->linkrate; + + scb->smp_task.smp_req.bus_addr = + cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); + scb->smp_task.smp_req.size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); + + scb->smp_task.smp_resp.bus_addr = + cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp)); + scb->smp_task.smp_resp.size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); + + scb->smp_task.sister_scb = cpu_to_le16(0xFFFF); + scb->smp_task.conn_handle = cpu_to_le16((u16) + (unsigned long)dev->lldd_dev); + + ascb->tasklet_complete = asd_task_tasklet_complete; + + return 0; +} + +static void asd_unbuild_smp_ascb(struct asd_ascb *a) +{ + struct sas_task *task = a->uldd_task; + + BUG_ON(!task); + pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1, + PCI_DMA_FROMDEVICE); + pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); +} + +/* ---------- SSP ---------- */ + +static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task, + unsigned long gfp_flags) +{ + struct domain_device *dev = task->dev; + struct scb *scb; + int res = 0; + + scb = ascb->scb; + + scb->header.opcode = INITIATE_SSP_TASK; + + scb->ssp_task.proto_conn_rate = (1 << 4); /* SSP */ + scb->ssp_task.proto_conn_rate |= dev->linkrate; + scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len); + scb->ssp_task.ssp_frame.frame_type = SSP_DATA; + memcpy(scb->ssp_task.ssp_frame.hashed_dest_addr, dev->hashed_sas_addr, + HASHED_SAS_ADDR_SIZE); + memcpy(scb->ssp_task.ssp_frame.hashed_src_addr, + dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF); + + memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8); + if (task->ssp_task.enable_first_burst) + scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK; + scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3); + scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7); + memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cdb, 16); + + scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF); + scb->ssp_task.conn_handle = cpu_to_le16( + (u16)(unsigned long)dev->lldd_dev); + scb->ssp_task.data_dir = data_dir_flags[task->data_dir]; + scb->ssp_task.retry_count = scb->ssp_task.retry_count; + + ascb->tasklet_complete = asd_task_tasklet_complete; + + res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags); + + return res; +} + +static void asd_unbuild_ssp_ascb(struct asd_ascb *a) +{ + asd_unmap_scatterlist(a); +} + +/* ---------- Execute Task ---------- */ + +static inline int asd_can_queue(struct asd_ha_struct *asd_ha, int num) +{ + int res = 0; + unsigned long flags; + + spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); + if ((asd_ha->seq.can_queue - num) < 0) + res = -SAS_QUEUE_FULL; + else + asd_ha->seq.can_queue -= num; + spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); + + return res; +} + +int asd_execute_task(struct sas_task *task, const int num, + unsigned long gfp_flags) +{ + int res = 0; + LIST_HEAD(alist); + struct sas_task *t = task; + struct asd_ascb *ascb = NULL, *a; + struct asd_ha_struct *asd_ha = 
task->dev->port->ha->lldd_ha; + + res = asd_can_queue(asd_ha, num); + if (res) + return res; + + res = num; + ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags); + if (res) { + res = -ENOMEM; + goto out_err; + } + + __list_add(&alist, ascb->list.prev, &ascb->list); + list_for_each_entry(a, &alist, list) { + a->uldd_task = t; + t->lldd_task = a; + t = list_entry(t->list.next, struct sas_task, list); + } + list_for_each_entry(a, &alist, list) { + t = a->uldd_task; + a->uldd_timer = 1; + if (t->task_proto & SAS_PROTO_STP) + t->task_proto = SAS_PROTO_STP; + switch (t->task_proto) { + case SATA_PROTO: + case SAS_PROTO_STP: + res = asd_build_ata_ascb(a, t, gfp_flags); + break; + case SAS_PROTO_SMP: + res = asd_build_smp_ascb(a, t, gfp_flags); + break; + case SAS_PROTO_SSP: + res = asd_build_ssp_ascb(a, t, gfp_flags); + break; + default: + asd_printk("unknown sas_task proto: 0x%x\n", + t->task_proto); + res = -ENOMEM; + break; + } + if (res) + goto out_err_unmap; + } + list_del_init(&alist); + + res = asd_post_ascb_list(asd_ha, ascb, num); + if (unlikely(res)) { + a = NULL; + __list_add(&alist, ascb->list.prev, &ascb->list); + goto out_err_unmap; + } + + return 0; +out_err_unmap: + { + struct asd_ascb *b = a; + list_for_each_entry(a, &alist, list) { + if (a == b) + break; + t = a->uldd_task; + switch (t->task_proto) { + case SATA_PROTO: + case SAS_PROTO_STP: + asd_unbuild_ata_ascb(a); + break; + case SAS_PROTO_SMP: + asd_unbuild_smp_ascb(a); + break; + case SAS_PROTO_SSP: + asd_unbuild_ssp_ascb(a); + default: + break; + } + t->lldd_task = NULL; + } + } + list_del_init(&alist); +out_err: + if (ascb) + asd_ascb_free_list(ascb); + asd_can_dequeue(asd_ha, num); + return res; +} diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c new file mode 100644 index 0000000..6123438 --- /dev/null +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c @@ -0,0 +1,636 @@ +/* + * Aic94xx Task Management Functions + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This file is part of the aic94xx driver. + * + * The aic94xx driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * The aic94xx driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with the aic94xx driver; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include <linux/spinlock.h> +#include "aic94xx.h" +#include "aic94xx_sas.h" +#include "aic94xx_hwi.h" + +/* ---------- Internal enqueue ---------- */ + +static int asd_enqueue_internal(struct asd_ascb *ascb, + void (*tasklet_complete)(struct asd_ascb *, + struct done_list_struct *), + void (*timed_out)(unsigned long)) +{ + int res; + + ascb->tasklet_complete = tasklet_complete; + ascb->uldd_timer = 1; + + ascb->timer.data = (unsigned long) ascb; + ascb->timer.function = timed_out; + ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT; + + add_timer(&ascb->timer); + + res = asd_post_ascb_list(ascb->ha, ascb, 1); + if (unlikely(res)) + del_timer(&ascb->timer); + return res; +} + +static inline void asd_timedout_common(unsigned long data) +{ + struct asd_ascb *ascb = (void *) data; + struct asd_seq_data *seq = &ascb->ha->seq; + unsigned long flags; + + spin_lock_irqsave(&seq->pend_q_lock, flags); + seq->pending--; + list_del_init(&ascb->list); + spin_unlock_irqrestore(&seq->pend_q_lock, flags); +} + +/* ---------- CLEAR NEXUS ---------- */ + +static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + ASD_DPRINTK("%s: here\n", __FUNCTION__); + if (!del_timer(&ascb->timer)) { + ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__); + return; + } + ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode); + ascb->uldd_task = (void *) (unsigned long) dl->opcode; + complete(&ascb->completion); +} + +static void asd_clear_nexus_timedout(unsigned long data) +{ + struct asd_ascb *ascb = (void *) data; + + ASD_DPRINTK("%s: here\n", __FUNCTION__); + asd_timedout_common(data); + ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED; + complete(&ascb->completion); +} + +#define CLEAR_NEXUS_PRE \ + ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \ + res = 1; \ + ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \ + if (!ascb) \ + return -ENOMEM; \ + \ + scb = ascb->scb; \ + scb->header.opcode = CLEAR_NEXUS + +#define CLEAR_NEXUS_POST \ + ASD_DPRINTK("%s: POST\n", __FUNCTION__); \ + res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \ + asd_clear_nexus_timedout); \ + if (res) \ + goto out_err; \ + ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \ + wait_for_completion(&ascb->completion); \ + res = (int) (unsigned long) ascb->uldd_task; \ + if (res == TC_NO_ERROR) \ + res = TMF_RESP_FUNC_COMPLETE; \ +out_err: \ + asd_ascb_free(ascb); \ + return res + +int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha) +{ + struct asd_ha_struct *asd_ha = sas_ha->lldd_ha; + struct asd_ascb *ascb; + struct scb *scb; + int res; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_ADAPTER; + CLEAR_NEXUS_POST; +} + +int asd_clear_nexus_port(struct asd_sas_port *port) +{ + struct asd_ha_struct *asd_ha = port->ha->lldd_ha; + struct asd_ascb *ascb; + struct scb *scb; + int res; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_PORT; + scb->clear_nexus.conn_mask = port->phy_mask; + CLEAR_NEXUS_POST; +} + +#if 0 +static int asd_clear_nexus_I_T(struct domain_device *dev) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + struct asd_ascb *ascb; + struct scb *scb; + int res; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_I_T; + scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; + if (dev->tproto) + 
scb->clear_nexus.flags |= SUSPEND_TX; + scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) + dev->lldd_dev); + CLEAR_NEXUS_POST; +} +#endif + +static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + struct asd_ascb *ascb; + struct scb *scb; + int res; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_I_T_L; + scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; + if (dev->tproto) + scb->clear_nexus.flags |= SUSPEND_TX; + memcpy(scb->clear_nexus.ssp_task.lun, lun, 8); + scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) + dev->lldd_dev); + CLEAR_NEXUS_POST; +} + +static int asd_clear_nexus_tag(struct sas_task *task) +{ + struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; + struct asd_ascb *tascb = task->lldd_task; + struct asd_ascb *ascb; + struct scb *scb; + int res; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_TAG; + memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8); + scb->clear_nexus.ssp_task.tag = tascb->tag; + if (task->dev->tproto) + scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) + task->dev->lldd_dev); + CLEAR_NEXUS_POST; +} + +static int asd_clear_nexus_index(struct sas_task *task) +{ + struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; + struct asd_ascb *tascb = task->lldd_task; + struct asd_ascb *ascb; + struct scb *scb; + int res; + + CLEAR_NEXUS_PRE; + scb->clear_nexus.nexus = NEXUS_TRANS_CX; + if (task->dev->tproto) + scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) + task->dev->lldd_dev); + scb->clear_nexus.index = cpu_to_le16(tascb->tc_index); + CLEAR_NEXUS_POST; +} + +/* ---------- TMFs ---------- */ + +static void asd_tmf_timedout(unsigned long data) +{ + struct asd_ascb *ascb = (void *) data; + + ASD_DPRINTK("tmf timed out\n"); + asd_timedout_common(data); + ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED; + complete(&ascb->completion); +} + +static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + struct asd_ha_struct *asd_ha = ascb->ha; + unsigned long flags; + struct tc_resp_sb_struct { + __le16 index_escb; + u8 len_lsb; + u8 flags; + } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block; + + int edb_id = ((resp_sb->flags & 0x70) >> 4)-1; + struct asd_ascb *escb; + struct asd_dma_tok *edb; + struct ssp_frame_hdr *fh; + struct ssp_response_iu *ru; + int res = TMF_RESP_FUNC_FAILED; + + ASD_DPRINTK("tmf resp tasklet\n"); + + spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags); + escb = asd_tc_index_find(&asd_ha->seq, + (int)le16_to_cpu(resp_sb->index_escb)); + spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags); + + if (!escb) { + ASD_DPRINTK("Uh-oh! 
No escb for this dl?!\n"); + return res; + } + + edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index]; + ascb->tag = *(__be16 *)(edb->vaddr+4); + fh = edb->vaddr + 16; + ru = edb->vaddr + 16 + sizeof(*fh); + res = ru->status; + if (ru->datapres == 1) /* Response data present */ + res = ru->resp_data[3]; +#if 0 + ascb->tag = fh->tag; +#endif + ascb->tag_valid = 1; + + asd_invalidate_edb(escb, edb_id); + return res; +} + +static void asd_tmf_tasklet_complete(struct asd_ascb *ascb, + struct done_list_struct *dl) +{ + if (!del_timer(&ascb->timer)) + return; + + ASD_DPRINTK("tmf tasklet complete\n"); + + if (dl->opcode == TC_SSP_RESP) + ascb->uldd_task = (void *) (unsigned long) + asd_get_tmf_resp_tasklet(ascb, dl); + else + ascb->uldd_task = (void *) 0xFF00 + (unsigned long) dl->opcode; + + complete(&ascb->completion); +} + +static inline int asd_clear_nexus(struct sas_task *task) +{ + int res = TMF_RESP_FUNC_FAILED; + struct asd_ascb *tascb = task->lldd_task; + unsigned long flags; + + ASD_DPRINTK("task not done, clearing nexus\n"); + if (tascb->tag_valid) + res = asd_clear_nexus_tag(task); + else + res = asd_clear_nexus_index(task); + wait_for_completion_timeout(&tascb->completion, + AIC94XX_SCB_TIMEOUT); + ASD_DPRINTK("came back from clear nexus\n"); + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) + res = TMF_RESP_FUNC_COMPLETE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + return res; +} + +/** + * asd_abort_task -- ABORT TASK TMF + * @task: the task to be aborted + * + * Before calling ABORT TASK the task state flags should be ORed with + * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under + * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called. + * + * Implements the ABORT TASK TMF, I_T_L_Q nexus. + * Returns: SAS TMF responses (see sas_task.h), + * -ENOMEM, + * -SAS_QUEUE_FULL. + * + * When ABORT TASK returns, the caller of ABORT TASK checks first the + * task->task_state_flags, and then the return value of ABORT TASK. + * + * If the task has task state bit SAS_TASK_STATE_DONE set, then the + * task was completed successfully prior to it being aborted. The + * caller of ABORT TASK has responsibility to call task->task_done() + * xor free the task, depending on their framework. The return code + * is TMF_RESP_FUNC_FAILED in this case. + * + * Else the SAS_TASK_STATE_DONE bit is not set, + * If the return code is TMF_RESP_FUNC_COMPLETE, then + * the task was aborted successfully. The caller of + * ABORT TASK has responsibility to call task->task_done() + * to finish the task, xor free the task depending on their + * framework. + * else + * the ABORT TASK returned some kind of error. The task + * was _not_ cancelled. Nothing can be assumed. + * The caller of ABORT TASK may wish to retry. 
+ */ +int asd_abort_task(struct sas_task *task) +{ + struct asd_ascb *tascb = task->lldd_task; + struct asd_ha_struct *asd_ha = tascb->ha; + int res = 1; + unsigned long flags; + struct asd_ascb *ascb = NULL; + struct scb *scb; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + res = TMF_RESP_FUNC_COMPLETE; + ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task); + goto out_done; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + + ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); + if (!ascb) + return -ENOMEM; + scb = ascb->scb; + + scb->header.opcode = ABORT_TASK; + + switch (task->task_proto) { + case SATA_PROTO: + case SAS_PROTO_STP: + scb->abort_task.proto_conn_rate = (1 << 5); /* STP */ + break; + case SAS_PROTO_SSP: + scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */ + scb->abort_task.proto_conn_rate |= task->dev->linkrate; + break; + case SAS_PROTO_SMP: + break; + default: + break; + } + + if (task->task_proto == SAS_PROTO_SSP) { + scb->abort_task.ssp_frame.frame_type = SSP_TASK; + memcpy(scb->abort_task.ssp_frame.hashed_dest_addr, + task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + memcpy(scb->abort_task.ssp_frame.hashed_src_addr, + task->dev->port->ha->hashed_sas_addr, + HASHED_SAS_ADDR_SIZE); + scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF); + + memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8); + scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK; + scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF); + } + + scb->abort_task.sister_scb = cpu_to_le16(0xFFFF); + scb->abort_task.conn_handle = cpu_to_le16( + (u16)(unsigned long)task->dev->lldd_dev); + scb->abort_task.retry_count = 1; + scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index); + scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST); + + res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, + asd_tmf_timedout); + if (res) + goto out; + wait_for_completion(&ascb->completion); + ASD_DPRINTK("tmf came back\n"); + + res = (int) (unsigned long) ascb->uldd_task; + tascb->tag = ascb->tag; + tascb->tag_valid = ascb->tag_valid; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + res = TMF_RESP_FUNC_COMPLETE; + ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task); + goto out_done; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + + switch (res) { + /* The task to be aborted has been sent to the device. + * We got a Response IU for the ABORT TASK TMF. */ + case TC_NO_ERROR + 0xFF00: + case TMF_RESP_FUNC_COMPLETE: + case TMF_RESP_FUNC_FAILED: + res = asd_clear_nexus(task); + break; + case TMF_RESP_INVALID_FRAME: + case TMF_RESP_OVERLAPPED_TAG: + case TMF_RESP_FUNC_ESUPP: + case TMF_RESP_NO_LUN: + goto out_done; break; + } + /* In the following we assume that the managing layer + * will _never_ make a mistake, when issuing ABORT TASK. + */ + switch (res) { + default: + res = asd_clear_nexus(task); + /* fallthrough */ + case TC_NO_ERROR + 0xFF00: + case TMF_RESP_FUNC_COMPLETE: + break; + /* The task hasn't been sent to the device xor we never got + * a (sane) Response IU for the ABORT TASK TMF. 
+ */ + case TF_NAK_RECV + 0xFF00: + res = TMF_RESP_INVALID_FRAME; + break; + case TF_TMF_TASK_DONE + 0xFF00: /* done but not reported yet */ + res = TMF_RESP_FUNC_FAILED; + wait_for_completion_timeout(&tascb->completion, + AIC94XX_SCB_TIMEOUT); + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) + res = TMF_RESP_FUNC_COMPLETE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + goto out_done; + case TF_TMF_NO_TAG + 0xFF00: + case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */ + case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */ + res = TMF_RESP_FUNC_COMPLETE; + goto out_done; + case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */ + res = TMF_RESP_FUNC_ESUPP; + goto out; + } +out_done: + if (res == TMF_RESP_FUNC_COMPLETE) { + task->lldd_task = NULL; + mb(); + asd_ascb_free(tascb); + } +out: + asd_ascb_free(ascb); + ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); + return res; +} + +/** + * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus + * @dev: pointer to struct domain_device of interest + * @lun: pointer to u8[8] which is the LUN + * @tmf: the TMF to be performed (see sas_task.h or the SAS spec) + * @index: the transaction context of the task to be queried if QT TMF + * + * This function is used to send ABORT TASK SET, CLEAR ACA, + * CLEAR TASK SET, LU RESET and QUERY TASK TMFs. + * + * No SCBs should be queued to the I_T_L nexus when this SCB is + * pending. + * + * Returns: TMF response code (see sas_task.h or the SAS spec) + */ +static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun, + int tmf, int index) +{ + struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; + struct asd_ascb *ascb; + int res = 1; + struct scb *scb; + + if (!(dev->tproto & SAS_PROTO_SSP)) + return TMF_RESP_FUNC_ESUPP; + + ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); + if (!ascb) + return -ENOMEM; + scb = ascb->scb; + + if (tmf == TMF_QUERY_TASK) + scb->header.opcode = QUERY_SSP_TASK; + else + scb->header.opcode = INITIATE_SSP_TMF; + + scb->ssp_tmf.proto_conn_rate = (1 << 4); /* SSP */ + scb->ssp_tmf.proto_conn_rate |= dev->linkrate; + /* SSP frame header */ + scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK; + memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr, + dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr, + dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF); + /* SSP Task IU */ + memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8); + scb->ssp_tmf.ssp_task.tmf = tmf; + + scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF); + scb->ssp_tmf.conn_handle= cpu_to_le16((u16)(unsigned long) + dev->lldd_dev); + scb->ssp_tmf.retry_count = 1; + scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST); + if (tmf == TMF_QUERY_TASK) + scb->ssp_tmf.index = cpu_to_le16(index); + + res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, + asd_tmf_timedout); + if (res) + goto out_err; + wait_for_completion(&ascb->completion); + res = (int) (unsigned long) ascb->uldd_task; + + switch (res) { + case TC_NO_ERROR + 0xFF00: + res = TMF_RESP_FUNC_COMPLETE; + break; + case TF_NAK_RECV + 0xFF00: + res = TMF_RESP_INVALID_FRAME; + break; + case TF_TMF_TASK_DONE + 0xFF00: + res = TMF_RESP_FUNC_FAILED; + break; + case TF_TMF_NO_TAG + 0xFF00: + case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */ + case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */ + res = TMF_RESP_FUNC_COMPLETE; + break; + case TF_TMF_NO_CTX 
+ 0xFF00: /* not in seq, or proto != SSP */ + res = TMF_RESP_FUNC_ESUPP; + break; + default: + ASD_DPRINTK("%s: converting result 0x%x to TMF_RESP_FUNC_FAILED\n", + __FUNCTION__, res); + res = TMF_RESP_FUNC_FAILED; + break; + } +out_err: + asd_ascb_free(ascb); + return res; +} + +int asd_abort_task_set(struct domain_device *dev, u8 *lun) +{ + int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0); + + if (res == TMF_RESP_FUNC_COMPLETE) + asd_clear_nexus_I_T_L(dev, lun); + return res; +} + +int asd_clear_aca(struct domain_device *dev, u8 *lun) +{ + int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0); + + if (res == TMF_RESP_FUNC_COMPLETE) + asd_clear_nexus_I_T_L(dev, lun); + return res; +} + +int asd_clear_task_set(struct domain_device *dev, u8 *lun) +{ + int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0); + + if (res == TMF_RESP_FUNC_COMPLETE) + asd_clear_nexus_I_T_L(dev, lun); + return res; +} + +int asd_lu_reset(struct domain_device *dev, u8 *lun) +{ + int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0); + + if (res == TMF_RESP_FUNC_COMPLETE) + asd_clear_nexus_I_T_L(dev, lun); + return res; +} + +/** + * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus + * task: pointer to sas_task struct of interest + * + * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set, + * or TMF_RESP_FUNC_SUCC if the task is in the task set. + * + * Normally the management layer sets the task to aborted state, + * and then calls query task and then abort task. + */ +int asd_query_task(struct sas_task *task) +{ + struct asd_ascb *ascb = task->lldd_task; + int index; + + if (ascb) { + index = ascb->tc_index; + return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN, + TMF_QUERY_TASK, index); + } + return TMF_RESP_FUNC_COMPLETE; +} diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig new file mode 100644 index 0000000..aafdc92f --- /dev/null +++ b/drivers/scsi/libsas/Kconfig @@ -0,0 +1,39 @@ +# +# Kernel configuration file for the SAS Class +# +# Copyright (C) 2005 Adaptec, Inc. All rights reserved. +# Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> +# +# This file is licensed under GPLv2. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; version 2 of the +# License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +# USA +# + +config SCSI_SAS_LIBSAS + tristate "SAS Domain Transport Attributes" + depends on SCSI + select SCSI_SAS_ATTRS + help + This provides transport specific helpers for SAS drivers which + use the domain device construct (like the aic94xxx). + +config SCSI_SAS_LIBSAS_DEBUG + bool "Compile the SAS Domain Transport Attributes in debug mode" + default y + depends on SCSI_SAS_LIBSAS + help + Compiles the SAS Layer in debug mode. In debug mode, the + SAS Layer prints diagnostic and debug messages. 
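The CONFIG_SCSI_SAS_LIBSAS_DEBUG option above works by adding -DSAS_DEBUG to the compiler flags (see the libsas Makefile hunk that follows), which switches on the SAS_DPRINTK() diagnostics used throughout libsas, for example in sas_discover.c further below. The macro itself is presumably defined in sas_internal.h, which is not among the hunks shown here, so the following sketch is only an illustration of how such a compile-time-gated debug macro is typically wired up; the "sas: " prefix and KERN_DEBUG log level are assumptions, not the driver's actual definition.

#include <linux/kernel.h>

#ifdef SAS_DEBUG
/* Debug build: forward the message to the kernel log with a subsystem prefix. */
#define SAS_DPRINTK(fmt, ...) printk(KERN_DEBUG "sas: " fmt, ##__VA_ARGS__)
#else
/* Non-debug build: the call site compiles away to nothing. */
#define SAS_DPRINTK(fmt, ...) do { } while (0)
#endif

With CONFIG_SCSI_SAS_LIBSAS_DEBUG unset, calls such as SAS_DPRINTK("task aborted, flags:0x%x\n", ...) therefore cost nothing in a production build, which is why the messages can stay in the source unconditionally.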
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile new file mode 100644 index 0000000..44d972a --- /dev/null +++ b/drivers/scsi/libsas/Makefile @@ -0,0 +1,36 @@ +# +# Kernel Makefile for the libsas helpers +# +# Copyright (C) 2005 Adaptec, Inc. All rights reserved. +# Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> +# +# This file is licensed under GPLv2. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; version 2 of the +# License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +# USA + +ifeq ($(CONFIG_SCSI_SAS_LIBSAS_DEBUG),y) + EXTRA_CFLAGS += -DSAS_DEBUG +endif + +obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o +libsas-y += sas_init.o \ + sas_phy.o \ + sas_port.o \ + sas_event.o \ + sas_dump.o \ + sas_discover.o \ + sas_expander.o \ + sas_scsi_host.o diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c new file mode 100644 index 0000000..d977bd4 --- /dev/null +++ b/drivers/scsi/libsas/sas_discover.c @@ -0,0 +1,749 @@ +/* + * Serial Attached SCSI (SAS) Discover process + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include <linux/pci.h> +#include <linux/scatterlist.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_eh.h> +#include "sas_internal.h" + +#include <scsi/scsi_transport.h> +#include <scsi/scsi_transport_sas.h> +#include "../scsi_sas_internal.h" + +/* ---------- Basic task processing for discovery purposes ---------- */ + +void sas_init_dev(struct domain_device *dev) +{ + INIT_LIST_HEAD(&dev->siblings); + INIT_LIST_HEAD(&dev->dev_list_node); + switch (dev->dev_type) { + case SAS_END_DEV: + break; + case EDGE_DEV: + case FANOUT_DEV: + INIT_LIST_HEAD(&dev->ex_dev.children); + break; + case SATA_DEV: + case SATA_PM: + case SATA_PM_PORT: + INIT_LIST_HEAD(&dev->sata_dev.children); + break; + default: + break; + } +} + +static void sas_task_timedout(unsigned long _task) +{ + struct sas_task *task = (void *) _task; + unsigned long flags; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + complete(&task->completion); +} + +static void sas_disc_task_done(struct sas_task *task) +{ + if (!del_timer(&task->timer)) + return; + complete(&task->completion); +} + +#define SAS_DEV_TIMEOUT 10 + +/** + * sas_execute_task -- Basic task processing for discovery + * @task: the task to be executed + * @buffer: pointer to buffer to do I/O + * @size: size of @buffer + * @pci_dma_dir: PCI_DMA_... + */ +static int sas_execute_task(struct sas_task *task, void *buffer, int size, + int pci_dma_dir) +{ + int res = 0; + struct scatterlist *scatter = NULL; + struct task_status_struct *ts = &task->task_status; + int num_scatter = 0; + int retries = 0; + struct sas_internal *i = + to_sas_internal(task->dev->port->ha->core.shost->transportt); + + if (pci_dma_dir != PCI_DMA_NONE) { + scatter = kzalloc(sizeof(*scatter), GFP_KERNEL); + if (!scatter) + goto out; + + sg_init_one(scatter, buffer, size); + num_scatter = 1; + } + + task->task_proto = task->dev->tproto; + task->scatter = scatter; + task->num_scatter = num_scatter; + task->total_xfer_len = size; + task->data_dir = pci_dma_dir; + task->task_done = sas_disc_task_done; + + for (retries = 0; retries < 5; retries++) { + task->task_state_flags = SAS_TASK_STATE_PENDING; + init_completion(&task->completion); + + task->timer.data = (unsigned long) task; + task->timer.function = sas_task_timedout; + task->timer.expires = jiffies + SAS_DEV_TIMEOUT*HZ; + add_timer(&task->timer); + + res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL); + if (res) { + del_timer(&task->timer); + SAS_DPRINTK("executing SAS discovery task failed:%d\n", + res); + goto ex_err; + } + wait_for_completion(&task->completion); + res = -ETASK; + if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + int res2; + SAS_DPRINTK("task aborted, flags:0x%x\n", + task->task_state_flags); + res2 = i->dft->lldd_abort_task(task); + SAS_DPRINTK("came back from abort task\n"); + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { + if (res2 == TMF_RESP_FUNC_COMPLETE) + continue; /* Retry the task */ + else + goto ex_err; + } + } + if (task->task_status.stat == SAM_BUSY || + task->task_status.stat == SAM_TASK_SET_FULL || + task->task_status.stat == SAS_QUEUE_FULL) { + SAS_DPRINTK("task: q busy, sleeping...\n"); + 
schedule_timeout_interruptible(HZ); + } else if (task->task_status.stat == SAM_CHECK_COND) { + struct scsi_sense_hdr shdr; + + if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size, + &shdr)) { + SAS_DPRINTK("couldn't normalize sense\n"); + continue; + } + if ((shdr.sense_key == 6 && shdr.asc == 0x29) || + (shdr.sense_key == 2 && shdr.asc == 4 && + shdr.ascq == 1)) { + SAS_DPRINTK("device %016llx LUN: %016llx " + "powering up or not ready yet, " + "sleeping...\n", + SAS_ADDR(task->dev->sas_addr), + SAS_ADDR(task->ssp_task.LUN)); + + schedule_timeout_interruptible(5*HZ); + } else if (shdr.sense_key == 1) { + res = 0; + break; + } else if (shdr.sense_key == 5) { + break; + } else { + SAS_DPRINTK("dev %016llx LUN: %016llx " + "sense key:0x%x ASC:0x%x ASCQ:0x%x" + "\n", + SAS_ADDR(task->dev->sas_addr), + SAS_ADDR(task->ssp_task.LUN), + shdr.sense_key, + shdr.asc, shdr.ascq); + } + } else if (task->task_status.resp != SAS_TASK_COMPLETE || + task->task_status.stat != SAM_GOOD) { + SAS_DPRINTK("task finished with resp:0x%x, " + "stat:0x%x\n", + task->task_status.resp, + task->task_status.stat); + goto ex_err; + } else { + res = 0; + break; + } + } +ex_err: + if (pci_dma_dir != PCI_DMA_NONE) + kfree(scatter); +out: + return res; +} + +/* ---------- Domain device discovery ---------- */ + +/** + * sas_get_port_device -- Discover devices which caused port creation + * @port: pointer to struct sas_port of interest + * + * Devices directly attached to a HA port, have no parent. This is + * how we know they are (domain) "root" devices. All other devices + * do, and should have their "parent" pointer set appropriately as + * soon as a child device is discovered. + */ +static int sas_get_port_device(struct asd_sas_port *port) +{ + unsigned long flags; + struct asd_sas_phy *phy; + struct sas_rphy *rphy; + struct domain_device *dev; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + spin_lock_irqsave(&port->phy_list_lock, flags); + if (list_empty(&port->phy_list)) { + spin_unlock_irqrestore(&port->phy_list_lock, flags); + kfree(dev); + return -ENODEV; + } + phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el); + spin_lock(&phy->frame_rcvd_lock); + memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd), + (size_t)phy->frame_rcvd_size)); + spin_unlock(&phy->frame_rcvd_lock); + spin_unlock_irqrestore(&port->phy_list_lock, flags); + + if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) { + struct dev_to_host_fis *fis = + (struct dev_to_host_fis *) dev->frame_rcvd; + if (fis->interrupt_reason == 1 && fis->lbal == 1 && + fis->byte_count_low==0x69 && fis->byte_count_high == 0x96 + && (fis->device & ~0x10) == 0) + dev->dev_type = SATA_PM; + else + dev->dev_type = SATA_DEV; + dev->tproto = SATA_PROTO; + } else { + struct sas_identify_frame *id = + (struct sas_identify_frame *) dev->frame_rcvd; + dev->dev_type = id->dev_type; + dev->iproto = id->initiator_bits; + dev->tproto = id->target_bits; + } + + sas_init_dev(dev); + + switch (dev->dev_type) { + case SAS_END_DEV: + rphy = sas_end_device_alloc(port->port); + break; + case EDGE_DEV: + rphy = sas_expander_alloc(port->port, + SAS_EDGE_EXPANDER_DEVICE); + break; + case FANOUT_DEV: + rphy = sas_expander_alloc(port->port, + SAS_FANOUT_EXPANDER_DEVICE); + break; + case SATA_DEV: + default: + printk("ERROR: Unidentified device type %d\n", dev->dev_type); + rphy = NULL; + break; + } + + if (!rphy) { + kfree(dev); + return -ENODEV; + } + rphy->identify.phy_identifier = 
phy->phy->identify.phy_identifier; + memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE); + sas_fill_in_rphy(dev, rphy); + sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); + port->port_dev = dev; + dev->port = port; + dev->linkrate = port->linkrate; + dev->min_linkrate = port->linkrate; + dev->max_linkrate = port->linkrate; + dev->pathways = port->num_phys; + memset(port->disc.fanout_sas_addr, 0, SAS_ADDR_SIZE); + memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE); + memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE); + port->disc.max_level = 0; + + dev->rphy = rphy; + spin_lock(&port->dev_list_lock); + list_add_tail(&dev->dev_list_node, &port->dev_list); + spin_unlock(&port->dev_list_lock); + + return 0; +} + +/* ---------- Discover and Revalidate ---------- */ + +/* ---------- SATA ---------- */ + +static void sas_get_ata_command_set(struct domain_device *dev) +{ + struct dev_to_host_fis *fis = + (struct dev_to_host_fis *) dev->frame_rcvd; + + if ((fis->sector_count == 1 && /* ATA */ + fis->lbal == 1 && + fis->lbam == 0 && + fis->lbah == 0 && + fis->device == 0) + || + (fis->sector_count == 0 && /* CE-ATA (mATA) */ + fis->lbal == 0 && + fis->lbam == 0xCE && + fis->lbah == 0xAA && + (fis->device & ~0x10) == 0)) + + dev->sata_dev.command_set = ATA_COMMAND_SET; + + else if ((fis->interrupt_reason == 1 && /* ATAPI */ + fis->lbal == 1 && + fis->byte_count_low == 0x14 && + fis->byte_count_high == 0xEB && + (fis->device & ~0x10) == 0)) + + dev->sata_dev.command_set = ATAPI_COMMAND_SET; + + else if ((fis->sector_count == 1 && /* SEMB */ + fis->lbal == 1 && + fis->lbam == 0x3C && + fis->lbah == 0xC3 && + fis->device == 0) + || + (fis->interrupt_reason == 1 && /* SATA PM */ + fis->lbal == 1 && + fis->byte_count_low == 0x69 && + fis->byte_count_high == 0x96 && + (fis->device & ~0x10) == 0)) + + /* Treat it as a superset? */ + dev->sata_dev.command_set = ATAPI_COMMAND_SET; +} + +/** + * sas_issue_ata_cmd -- Basic SATA command processing for discovery + * @dev: the device to send the command to + * @command: the command register + * @features: the features register + * @buffer: pointer to buffer to do I/O + * @size: size of @buffer + * @pci_dma_dir: PCI_DMA_... 
+ */ +static int sas_issue_ata_cmd(struct domain_device *dev, u8 command, + u8 features, void *buffer, int size, + int pci_dma_dir) +{ + int res = 0; + struct sas_task *task; + struct dev_to_host_fis *d2h_fis = (struct dev_to_host_fis *) + &dev->frame_rcvd[0]; + + res = -ENOMEM; + task = sas_alloc_task(GFP_KERNEL); + if (!task) + goto out; + + task->dev = dev; + + task->ata_task.fis.command = command; + task->ata_task.fis.features = features; + task->ata_task.fis.device = d2h_fis->device; + task->ata_task.retry_count = 1; + + res = sas_execute_task(task, buffer, size, pci_dma_dir); + + sas_free_task(task); +out: + return res; +} + +static void sas_sata_propagate_sas_addr(struct domain_device *dev) +{ + unsigned long flags; + struct asd_sas_port *port = dev->port; + struct asd_sas_phy *phy; + + BUG_ON(dev->parent); + + memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE); + spin_lock_irqsave(&port->phy_list_lock, flags); + list_for_each_entry(phy, &port->phy_list, port_phy_el) + memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE); + spin_unlock_irqrestore(&port->phy_list_lock, flags); +} + +#define ATA_IDENTIFY_DEV 0xEC +#define ATA_IDENTIFY_PACKET_DEV 0xA1 +#define ATA_SET_FEATURES 0xEF +#define ATA_FEATURE_PUP_STBY_SPIN_UP 0x07 + +/** + * sas_discover_sata_dev -- discover a STP/SATA device (SATA_DEV) + * @dev: STP/SATA device of interest (ATA/ATAPI) + * + * The LLDD has already been notified of this device, so that we can + * send FISes to it. Here we try to get IDENTIFY DEVICE or IDENTIFY + * PACKET DEVICE, if ATAPI device, so that the LLDD can fine-tune its + * performance for this device. + */ +static int sas_discover_sata_dev(struct domain_device *dev) +{ + int res; + __le16 *identify_x; + u8 command; + + identify_x = kzalloc(512, GFP_KERNEL); + if (!identify_x) + return -ENOMEM; + + if (dev->sata_dev.command_set == ATA_COMMAND_SET) { + dev->sata_dev.identify_device = identify_x; + command = ATA_IDENTIFY_DEV; + } else { + dev->sata_dev.identify_packet_device = identify_x; + command = ATA_IDENTIFY_PACKET_DEV; + } + + res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512, + PCI_DMA_FROMDEVICE); + if (res) + goto out_err; + + /* lives on the media? */ + if (le16_to_cpu(identify_x[0]) & 4) { + /* incomplete response */ + SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to " + "dev %llx\n", SAS_ADDR(dev->sas_addr)); + if (!le16_to_cpu(identify_x[83] & (1<<6))) + goto cont1; + res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES, + ATA_FEATURE_PUP_STBY_SPIN_UP, + NULL, 0, PCI_DMA_NONE); + if (res) + goto cont1; + + schedule_timeout_interruptible(5*HZ); /* More time? */ + res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512, + PCI_DMA_FROMDEVICE); + if (res) + goto out_err; + } +cont1: + /* Get WWN */ + if (dev->port->oob_mode != SATA_OOB_MODE) { + memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr, + SAS_ADDR_SIZE); + } else if (dev->sata_dev.command_set == ATA_COMMAND_SET && + (le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000) + == 0x5000) { + int i; + + for (i = 0; i < 4; i++) { + dev->sas_addr[2*i] = + (le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8; + dev->sas_addr[2*i+1] = + le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF; + } + } + sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); + if (!dev->parent) + sas_sata_propagate_sas_addr(dev); + + /* XXX Hint: register this SATA device with SATL. + When this returns, dev->sata_dev->lu is alive and + present. 
+ sas_satl_register_dev(dev); + */ + return 0; +out_err: + dev->sata_dev.identify_packet_device = NULL; + dev->sata_dev.identify_device = NULL; + kfree(identify_x); + return res; +} + +static int sas_discover_sata_pm(struct domain_device *dev) +{ + return -ENODEV; +} + +int sas_notify_lldd_dev_found(struct domain_device *dev) +{ + int res = 0; + struct sas_ha_struct *sas_ha = dev->port->ha; + struct Scsi_Host *shost = sas_ha->core.shost; + struct sas_internal *i = to_sas_internal(shost->transportt); + + if (i->dft->lldd_dev_found) { + res = i->dft->lldd_dev_found(dev); + if (res) { + printk("sas: driver on pcidev %s cannot handle " + "device %llx, error:%d\n", + pci_name(sas_ha->pcidev), + SAS_ADDR(dev->sas_addr), res); + } + } + return res; +} + + +void sas_notify_lldd_dev_gone(struct domain_device *dev) +{ + struct sas_ha_struct *sas_ha = dev->port->ha; + struct Scsi_Host *shost = sas_ha->core.shost; + struct sas_internal *i = to_sas_internal(shost->transportt); + + if (i->dft->lldd_dev_gone) + i->dft->lldd_dev_gone(dev); +} + +/* ---------- Common/dispatchers ---------- */ + +/** + * sas_discover_sata -- discover an STP/SATA domain device + * @dev: pointer to struct domain_device of interest + * + * First we notify the LLDD of this device, so we can send frames to + * it. Then depending on the type of device we call the appropriate + * discover functions. Once device discover is done, we notify the + * LLDD so that it can fine-tune its parameters for the device, by + * removing it and then adding it. That is, the second time around, + * the driver would have certain fields, that it is looking at, set. + * Finally we initialize the kobj so that the device can be added to + * the system at registration time. Devices directly attached to a HA + * port, have no parents. All other devices do, and should have their + * "parent" pointer set appropriately before calling this function. + */ +int sas_discover_sata(struct domain_device *dev) +{ + int res; + + sas_get_ata_command_set(dev); + + res = sas_notify_lldd_dev_found(dev); + if (res) + return res; + + switch (dev->dev_type) { + case SATA_DEV: + res = sas_discover_sata_dev(dev); + break; + case SATA_PM: + res = sas_discover_sata_pm(dev); + break; + default: + break; + } + + sas_notify_lldd_dev_gone(dev); + if (!res) { + sas_notify_lldd_dev_found(dev); + } + return res; +} + +/** + * sas_discover_end_dev -- discover an end device (SSP, etc) + * @end: pointer to domain device of interest + * + * See comment in sas_discover_sata(). 
+ */ +int sas_discover_end_dev(struct domain_device *dev) +{ + int res; + + res = sas_notify_lldd_dev_found(dev); + if (res) + return res; + + res = sas_rphy_add(dev->rphy); + if (res) + goto out_err; + + /* do this to get the end device port attributes which will have + * been scanned in sas_rphy_add */ + sas_notify_lldd_dev_gone(dev); + sas_notify_lldd_dev_found(dev); + + return 0; + +out_err: + sas_notify_lldd_dev_gone(dev); + return res; +} + +/* ---------- Device registration and unregistration ---------- */ + +static inline void sas_unregister_common_dev(struct domain_device *dev) +{ + sas_notify_lldd_dev_gone(dev); + if (!dev->parent) + dev->port->port_dev = NULL; + else + list_del_init(&dev->siblings); + list_del_init(&dev->dev_list_node); +} + +void sas_unregister_dev(struct domain_device *dev) +{ + if (dev->rphy) { + sas_remove_children(&dev->rphy->dev); + sas_rphy_delete(dev->rphy); + dev->rphy = NULL; + } + if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) { + /* remove the phys and ports, everything else should be gone */ + kfree(dev->ex_dev.ex_phy); + dev->ex_dev.ex_phy = NULL; + } + sas_unregister_common_dev(dev); +} + +void sas_unregister_domain_devices(struct asd_sas_port *port) +{ + struct domain_device *dev, *n; + + list_for_each_entry_safe_reverse(dev,n,&port->dev_list,dev_list_node) + sas_unregister_dev(dev); + + port->port->rphy = NULL; + +} + +/* ---------- Discovery and Revalidation ---------- */ + +/** + * sas_discover_domain -- discover the domain + * @port: port to the domain of interest + * + * NOTE: this process _must_ quit (return) as soon as any connection + * errors are encountered. Connection recovery is done elsewhere. + * Discover process only interrogates devices in order to discover the + * domain. + */ +static void sas_discover_domain(void *data) +{ + int error = 0; + struct asd_sas_port *port = data; + + sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock, + &port->disc.pending); + + if (port->port_dev) + return ; + else { + error = sas_get_port_device(port); + if (error) + return; + } + + SAS_DPRINTK("DOING DISCOVERY on port %d, pid:%d\n", port->id, + current->pid); + + switch (port->port_dev->dev_type) { + case SAS_END_DEV: + error = sas_discover_end_dev(port->port_dev); + break; + case EDGE_DEV: + case FANOUT_DEV: + error = sas_discover_root_expander(port->port_dev); + break; + case SATA_DEV: + case SATA_PM: + error = sas_discover_sata(port->port_dev); + break; + default: + SAS_DPRINTK("unhandled device %d\n", port->port_dev->dev_type); + break; + } + + if (error) { + kfree(port->port_dev); /* not kobject_register-ed yet */ + port->port_dev = NULL; + } + + SAS_DPRINTK("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id, + current->pid, error); +} + +static void sas_revalidate_domain(void *data) +{ + int res = 0; + struct asd_sas_port *port = data; + + sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock, + &port->disc.pending); + + SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, + current->pid); + if (port->port_dev) + res = sas_ex_revalidate_domain(port->port_dev); + + SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", + port->id, current->pid, res); +} + +/* ---------- Events ---------- */ + +int sas_discover_event(struct asd_sas_port *port, enum discover_event ev) +{ + struct sas_discovery *disc; + + if (!port) + return 0; + disc = &port->disc; + + BUG_ON(ev >= DISC_NUM_EVENTS); + + sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, + 
&disc->disc_work[ev], port->ha->core.shost); + + return 0; +} + +/** + * sas_init_disc -- initialize the discovery struct in the port + * @port: pointer to struct port + * + * Called when the ports are being initialized. + */ +void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port) +{ + int i; + + static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = { + [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, + [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, + }; + + spin_lock_init(&disc->disc_event_lock); + disc->pending = 0; + for (i = 0; i < DISC_NUM_EVENTS; i++) + INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port); +} diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c new file mode 100644 index 0000000..f1246d2 --- /dev/null +++ b/drivers/scsi/libsas/sas_dump.c @@ -0,0 +1,76 @@ +/* + * Serial Attached SCSI (SAS) Dump/Debugging routines + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include "sas_dump.h" + +#ifdef SAS_DEBUG + +static const char *sas_hae_str[] = { + [0] = "HAE_RESET", +}; + +static const char *sas_porte_str[] = { + [0] = "PORTE_BYTES_DMAED", + [1] = "PORTE_BROADCAST_RCVD", + [2] = "PORTE_LINK_RESET_ERR", + [3] = "PORTE_TIMER_EVENT", + [4] = "PORTE_HARD_RESET", +}; + +static const char *sas_phye_str[] = { + [0] = "PHYE_LOSS_OF_SIGNAL", + [1] = "PHYE_OOB_DONE", + [2] = "PHYE_OOB_ERROR", + [3] = "PHYE_SPINUP_HOLD", +}; + +void sas_dprint_porte(int phyid, enum port_event pe) +{ + SAS_DPRINTK("phy%d: port event: %s\n", phyid, sas_porte_str[pe]); +} +void sas_dprint_phye(int phyid, enum phy_event pe) +{ + SAS_DPRINTK("phy%d: phy event: %s\n", phyid, sas_phye_str[pe]); +} + +void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he) +{ + SAS_DPRINTK("ha %s: %s event\n", pci_name(sas_ha->pcidev), + sas_hae_str[he]); +} + +void sas_dump_port(struct asd_sas_port *port) +{ + SAS_DPRINTK("port%d: class:0x%x\n", port->id, port->class); + SAS_DPRINTK("port%d: sas_addr:%llx\n", port->id, + SAS_ADDR(port->sas_addr)); + SAS_DPRINTK("port%d: attached_sas_addr:%llx\n", port->id, + SAS_ADDR(port->attached_sas_addr)); + SAS_DPRINTK("port%d: iproto:0x%x\n", port->id, port->iproto); + SAS_DPRINTK("port%d: tproto:0x%x\n", port->id, port->tproto); + SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode); + SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys); +} + +#endif /* SAS_DEBUG */ diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h new file mode 100644 index 0000000..47b45d4 --- /dev/null +++ b/drivers/scsi/libsas/sas_dump.h @@ -0,0 +1,42 @@ +/* + * Serial Attached SCSI (SAS) Dump/Debugging routines header file + * + * Copyright (C) 2005 Adaptec, 
Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include "sas_internal.h" + +#ifdef SAS_DEBUG + +void sas_dprint_porte(int phyid, enum port_event pe); +void sas_dprint_phye(int phyid, enum phy_event pe); +void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he); +void sas_dump_port(struct asd_sas_port *port); + +#else /* SAS_DEBUG */ + +static inline void sas_dprint_porte(int phyid, enum port_event pe) { } +static inline void sas_dprint_phye(int phyid, enum phy_event pe) { } +static inline void sas_dprint_hae(struct sas_ha_struct *sas_ha, + enum ha_event he) { } +static inline void sas_dump_port(struct asd_sas_port *port) { } + +#endif /* SAS_DEBUG */ diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c new file mode 100644 index 0000000..19110ed --- /dev/null +++ b/drivers/scsi/libsas/sas_event.c @@ -0,0 +1,75 @@ +/* + * Serial Attached SCSI (SAS) Event processing + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include <scsi/scsi_host.h> +#include "sas_internal.h" +#include "sas_dump.h" + +static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event) +{ + BUG_ON(event >= HA_NUM_EVENTS); + + sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending, + &sas_ha->ha_events[event], sas_ha->core.shost); +} + +static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) +{ + struct sas_ha_struct *ha = phy->ha; + + BUG_ON(event >= PORT_NUM_EVENTS); + + sas_queue_event(event, &ha->event_lock, &phy->port_events_pending, + &phy->port_events[event], ha->core.shost); +} + +static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) +{ + struct sas_ha_struct *ha = phy->ha; + + BUG_ON(event >= PHY_NUM_EVENTS); + + sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending, + &phy->phy_events[event], ha->core.shost); +} + +int sas_init_events(struct sas_ha_struct *sas_ha) +{ + static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = { + [HAE_RESET] = sas_hae_reset, + }; + + int i; + + spin_lock_init(&sas_ha->event_lock); + + for (i = 0; i < HA_NUM_EVENTS; i++) + INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha); + + sas_ha->notify_ha_event = notify_ha_event; + sas_ha->notify_port_event = notify_port_event; + sas_ha->notify_phy_event = notify_phy_event; + + return 0; +} diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c new file mode 100644 index 0000000..b653a26 --- /dev/null +++ b/drivers/scsi/libsas/sas_expander.c @@ -0,0 +1,1862 @@ +/* + * Serial Attached SCSI (SAS) Expander discovery and configuration + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include <linux/pci.h> +#include <linux/scatterlist.h> + +#include "sas_internal.h" + +#include <scsi/scsi_transport.h> +#include <scsi/scsi_transport_sas.h> +#include "../scsi_sas_internal.h" + +static int sas_discover_expander(struct domain_device *dev); +static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr); +static int sas_configure_phy(struct domain_device *dev, int phy_id, + u8 *sas_addr, int include); +static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr); + +#if 0 +/* FIXME: smp needs to migrate into the sas class */ +static ssize_t smp_portal_read(struct kobject *, char *, loff_t, size_t); +static ssize_t smp_portal_write(struct kobject *, char *, loff_t, size_t); +#endif + +/* ---------- SMP task management ---------- */ + +static void smp_task_timedout(unsigned long _task) +{ + struct sas_task *task = (void *) _task; + unsigned long flags; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + complete(&task->completion); +} + +static void smp_task_done(struct sas_task *task) +{ + if (!del_timer(&task->timer)) + return; + complete(&task->completion); +} + +/* Give it some long enough timeout. In seconds. */ +#define SMP_TIMEOUT 10 + +static int smp_execute_task(struct domain_device *dev, void *req, int req_size, + void *resp, int resp_size) +{ + int res; + struct sas_task *task = sas_alloc_task(GFP_KERNEL); + struct sas_internal *i = + to_sas_internal(dev->port->ha->core.shost->transportt); + + if (!task) + return -ENOMEM; + + task->dev = dev; + task->task_proto = dev->tproto; + sg_init_one(&task->smp_task.smp_req, req, req_size); + sg_init_one(&task->smp_task.smp_resp, resp, resp_size); + + task->task_done = smp_task_done; + + task->timer.data = (unsigned long) task; + task->timer.function = smp_task_timedout; + task->timer.expires = jiffies + SMP_TIMEOUT*HZ; + add_timer(&task->timer); + + res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL); + + if (res) { + del_timer(&task->timer); + SAS_DPRINTK("executing SMP task failed:%d\n", res); + goto ex_err; + } + + wait_for_completion(&task->completion); + res = -ETASK; + if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + SAS_DPRINTK("smp task timed out or aborted\n"); + i->dft->lldd_abort_task(task); + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { + SAS_DPRINTK("SMP task aborted and not done\n"); + goto ex_err; + } + } + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAM_GOOD) + res = 0; + else + SAS_DPRINTK("%s: task to dev %016llx response: 0x%x " + "status 0x%x\n", __FUNCTION__, + SAS_ADDR(dev->sas_addr), + task->task_status.resp, + task->task_status.stat); +ex_err: + sas_free_task(task); + return res; +} + +/* ---------- Allocations ---------- */ + +static inline void *alloc_smp_req(int size) +{ + u8 *p = kzalloc(size, GFP_KERNEL); + if (p) + p[0] = SMP_REQUEST; + return p; +} + +static inline void *alloc_smp_resp(int size) +{ + return kzalloc(size, GFP_KERNEL); +} + +/* ---------- Expander configuration ---------- */ + +static void sas_set_ex_phy(struct domain_device *dev, int phy_id, + void *disc_resp) +{ + struct expander_device *ex = &dev->ex_dev; + 
struct ex_phy *phy = &ex->ex_phy[phy_id]; + struct smp_resp *resp = disc_resp; + struct discover_resp *dr = &resp->disc; + struct sas_rphy *rphy = dev->rphy; + int rediscover = (phy->phy != NULL); + + if (!rediscover) { + phy->phy = sas_phy_alloc(&rphy->dev, phy_id); + + /* FIXME: error_handling */ + BUG_ON(!phy->phy); + } + + switch (resp->result) { + case SMP_RESP_PHY_VACANT: + phy->phy_state = PHY_VACANT; + return; + default: + phy->phy_state = PHY_NOT_PRESENT; + return; + case SMP_RESP_FUNC_ACC: + phy->phy_state = PHY_EMPTY; /* do not know yet */ + break; + } + + phy->phy_id = phy_id; + phy->attached_dev_type = dr->attached_dev_type; + phy->linkrate = dr->linkrate; + phy->attached_sata_host = dr->attached_sata_host; + phy->attached_sata_dev = dr->attached_sata_dev; + phy->attached_sata_ps = dr->attached_sata_ps; + phy->attached_iproto = dr->iproto << 1; + phy->attached_tproto = dr->tproto << 1; + memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); + phy->attached_phy_id = dr->attached_phy_id; + phy->phy_change_count = dr->change_count; + phy->routing_attr = dr->routing_attr; + phy->virtual = dr->virtual; + phy->last_da_index = -1; + + phy->phy->identify.initiator_port_protocols = phy->attached_iproto; + phy->phy->identify.target_port_protocols = phy->attached_tproto; + phy->phy->identify.phy_identifier = phy_id; + phy->phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; + phy->phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; + phy->phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; + phy->phy->maximum_linkrate = SAS_LINK_RATE_3_0_GBPS; + switch (phy->linkrate) { + case PHY_LINKRATE_1_5: + phy->phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS; + break; + case PHY_LINKRATE_3: + phy->phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS; + break; + case PHY_LINKRATE_6: + phy->phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS; + break; + default: + phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; + break; + } + + if (!rediscover) + sas_phy_add(phy->phy); + + SAS_DPRINTK("ex %016llx phy%02d:%c attached: %016llx\n", + SAS_ADDR(dev->sas_addr), phy->phy_id, + phy->routing_attr == TABLE_ROUTING ? 'T' : + phy->routing_attr == DIRECT_ROUTING ? 'D' : + phy->routing_attr == SUBTRACTIVE_ROUTING ? 
'S' : '?', + SAS_ADDR(phy->attached_sas_addr)); + + return; +} + +#define DISCOVER_REQ_SIZE 16 +#define DISCOVER_RESP_SIZE 56 + +static int sas_ex_phy_discover(struct domain_device *dev, int single) +{ + struct expander_device *ex = &dev->ex_dev; + int res = 0; + u8 *disc_req; + u8 *disc_resp; + + disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); + if (!disc_req) + return -ENOMEM; + + disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE); + if (!disc_resp) { + kfree(disc_req); + return -ENOMEM; + } + + disc_req[1] = SMP_DISCOVER; + + if (0 <= single && single < ex->num_phys) { + disc_req[9] = single; + res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, + disc_resp, DISCOVER_RESP_SIZE); + if (res) + goto out_err; + sas_set_ex_phy(dev, single, disc_resp); + } else { + int i; + + for (i = 0; i < ex->num_phys; i++) { + disc_req[9] = i; + res = smp_execute_task(dev, disc_req, + DISCOVER_REQ_SIZE, disc_resp, + DISCOVER_RESP_SIZE); + if (res) + goto out_err; + sas_set_ex_phy(dev, i, disc_resp); + } + } +out_err: + kfree(disc_resp); + kfree(disc_req); + return res; +} + +static int sas_expander_discover(struct domain_device *dev) +{ + struct expander_device *ex = &dev->ex_dev; + int res = -ENOMEM; + + ex->ex_phy = kzalloc(sizeof(*ex->ex_phy)*ex->num_phys, GFP_KERNEL); + if (!ex->ex_phy) + return -ENOMEM; + + res = sas_ex_phy_discover(dev, -1); + if (res) + goto out_err; + + return 0; + out_err: + kfree(ex->ex_phy); + ex->ex_phy = NULL; + return res; +} + +#define MAX_EXPANDER_PHYS 128 + +static void ex_assign_report_general(struct domain_device *dev, + struct smp_resp *resp) +{ + struct report_general_resp *rg = &resp->rg; + + dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count); + dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes); + dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS); + dev->ex_dev.conf_route_table = rg->conf_route_table; + dev->ex_dev.configuring = rg->configuring; + memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8); +} + +#define RG_REQ_SIZE 8 +#define RG_RESP_SIZE 32 + +static int sas_ex_general(struct domain_device *dev) +{ + u8 *rg_req; + struct smp_resp *rg_resp; + int res; + int i; + + rg_req = alloc_smp_req(RG_REQ_SIZE); + if (!rg_req) + return -ENOMEM; + + rg_resp = alloc_smp_resp(RG_RESP_SIZE); + if (!rg_resp) { + kfree(rg_req); + return -ENOMEM; + } + + rg_req[1] = SMP_REPORT_GENERAL; + + for (i = 0; i < 5; i++) { + res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, + RG_RESP_SIZE); + + if (res) { + SAS_DPRINTK("RG to ex %016llx failed:0x%x\n", + SAS_ADDR(dev->sas_addr), res); + goto out; + } else if (rg_resp->result != SMP_RESP_FUNC_ACC) { + SAS_DPRINTK("RG:ex %016llx returned SMP result:0x%x\n", + SAS_ADDR(dev->sas_addr), rg_resp->result); + res = rg_resp->result; + goto out; + } + + ex_assign_report_general(dev, rg_resp); + + if (dev->ex_dev.configuring) { + SAS_DPRINTK("RG: ex %llx self-configuring...\n", + SAS_ADDR(dev->sas_addr)); + schedule_timeout_interruptible(5*HZ); + } else + break; + } +out: + kfree(rg_req); + kfree(rg_resp); + return res; +} + +static void ex_assign_manuf_info(struct domain_device *dev, void + *_mi_resp) +{ + u8 *mi_resp = _mi_resp; + struct sas_rphy *rphy = dev->rphy; + struct sas_expander_device *edev = rphy_to_expander_device(rphy); + + memcpy(edev->vendor_id, mi_resp + 12, SAS_EXPANDER_VENDOR_ID_LEN); + memcpy(edev->product_id, mi_resp + 20, SAS_EXPANDER_PRODUCT_ID_LEN); + memcpy(edev->product_rev, mi_resp + 36, + SAS_EXPANDER_PRODUCT_REV_LEN); + + if (mi_resp[8] & 1) { + 
memcpy(edev->component_vendor_id, mi_resp + 40, + SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); + edev->component_id = mi_resp[48] << 8 | mi_resp[49]; + edev->component_revision_id = mi_resp[50]; + } +} + +#define MI_REQ_SIZE 8 +#define MI_RESP_SIZE 64 + +static int sas_ex_manuf_info(struct domain_device *dev) +{ + u8 *mi_req; + u8 *mi_resp; + int res; + + mi_req = alloc_smp_req(MI_REQ_SIZE); + if (!mi_req) + return -ENOMEM; + + mi_resp = alloc_smp_resp(MI_RESP_SIZE); + if (!mi_resp) { + kfree(mi_req); + return -ENOMEM; + } + + mi_req[1] = SMP_REPORT_MANUF_INFO; + + res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp,MI_RESP_SIZE); + if (res) { + SAS_DPRINTK("MI: ex %016llx failed:0x%x\n", + SAS_ADDR(dev->sas_addr), res); + goto out; + } else if (mi_resp[2] != SMP_RESP_FUNC_ACC) { + SAS_DPRINTK("MI ex %016llx returned SMP result:0x%x\n", + SAS_ADDR(dev->sas_addr), mi_resp[2]); + goto out; + } + + ex_assign_manuf_info(dev, mi_resp); +out: + kfree(mi_req); + kfree(mi_resp); + return res; +} + +#define PC_REQ_SIZE 44 +#define PC_RESP_SIZE 8 + +int sas_smp_phy_control(struct domain_device *dev, int phy_id, + enum phy_func phy_func) +{ + u8 *pc_req; + u8 *pc_resp; + int res; + + pc_req = alloc_smp_req(PC_REQ_SIZE); + if (!pc_req) + return -ENOMEM; + + pc_resp = alloc_smp_resp(PC_RESP_SIZE); + if (!pc_resp) { + kfree(pc_req); + return -ENOMEM; + } + + pc_req[1] = SMP_PHY_CONTROL; + pc_req[9] = phy_id; + pc_req[10]= phy_func; + + res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE); + + kfree(pc_resp); + kfree(pc_req); + return res; +} + +static void sas_ex_disable_phy(struct domain_device *dev, int phy_id) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *phy = &ex->ex_phy[phy_id]; + + sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE); + phy->linkrate = PHY_DISABLED; +} + +static void sas_ex_disable_port(struct domain_device *dev, u8 *sas_addr) +{ + struct expander_device *ex = &dev->ex_dev; + int i; + + for (i = 0; i < ex->num_phys; i++) { + struct ex_phy *phy = &ex->ex_phy[i]; + + if (phy->phy_state == PHY_VACANT || + phy->phy_state == PHY_NOT_PRESENT) + continue; + + if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(sas_addr)) + sas_ex_disable_phy(dev, i); + } +} + +static int sas_dev_present_in_domain(struct asd_sas_port *port, + u8 *sas_addr) +{ + struct domain_device *dev; + + if (SAS_ADDR(port->sas_addr) == SAS_ADDR(sas_addr)) + return 1; + list_for_each_entry(dev, &port->dev_list, dev_list_node) { + if (SAS_ADDR(dev->sas_addr) == SAS_ADDR(sas_addr)) + return 1; + } + return 0; +} + +#define RPEL_REQ_SIZE 16 +#define RPEL_RESP_SIZE 32 +int sas_smp_get_phy_events(struct sas_phy *phy) +{ + int res; + struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); + struct domain_device *dev = sas_find_dev_by_rphy(rphy); + u8 *req = alloc_smp_req(RPEL_REQ_SIZE); + u8 *resp = kzalloc(RPEL_RESP_SIZE, GFP_KERNEL); + + if (!resp) + return -ENOMEM; + + req[1] = SMP_REPORT_PHY_ERR_LOG; + req[9] = phy->number; + + res = smp_execute_task(dev, req, RPEL_REQ_SIZE, + resp, RPEL_RESP_SIZE); + + if (!res) + goto out; + + phy->invalid_dword_count = scsi_to_u32(&resp[12]); + phy->running_disparity_error_count = scsi_to_u32(&resp[16]); + phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]); + phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); + + out: + kfree(resp); + return res; + +} + +#define RPS_REQ_SIZE 16 +#define RPS_RESP_SIZE 60 + +static int sas_get_report_phy_sata(struct domain_device *dev, + int phy_id, + struct smp_resp *rps_resp) +{ + int res; + u8 *rps_req = 
alloc_smp_req(RPS_REQ_SIZE); + + if (!rps_req) + return -ENOMEM; + + rps_req[1] = SMP_REPORT_PHY_SATA; + rps_req[9] = phy_id; + + res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE, + rps_resp, RPS_RESP_SIZE); + + kfree(rps_req); + return 0; +} + +static void sas_ex_get_linkrate(struct domain_device *parent, + struct domain_device *child, + struct ex_phy *parent_phy) +{ + struct expander_device *parent_ex = &parent->ex_dev; + struct sas_port *port; + int i; + + child->pathways = 0; + + port = parent_phy->port; + + for (i = 0; i < parent_ex->num_phys; i++) { + struct ex_phy *phy = &parent_ex->ex_phy[i]; + + if (phy->phy_state == PHY_VACANT || + phy->phy_state == PHY_NOT_PRESENT) + continue; + + if (SAS_ADDR(phy->attached_sas_addr) == + SAS_ADDR(child->sas_addr)) { + + child->min_linkrate = min(parent->min_linkrate, + phy->linkrate); + child->max_linkrate = max(parent->max_linkrate, + phy->linkrate); + child->pathways++; + sas_port_add_phy(port, phy->phy); + } + } + child->linkrate = min(parent_phy->linkrate, child->max_linkrate); + child->pathways = min(child->pathways, parent->pathways); +} + +static struct domain_device *sas_ex_discover_end_dev( + struct domain_device *parent, int phy_id) +{ + struct expander_device *parent_ex = &parent->ex_dev; + struct ex_phy *phy = &parent_ex->ex_phy[phy_id]; + struct domain_device *child = NULL; + struct sas_rphy *rphy; + int res; + + if (phy->attached_sata_host || phy->attached_sata_ps) + return NULL; + + child = kzalloc(sizeof(*child), GFP_KERNEL); + if (!child) + return NULL; + + child->parent = parent; + child->port = parent->port; + child->iproto = phy->attached_iproto; + memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); + sas_hash_addr(child->hashed_sas_addr, child->sas_addr); + phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); + BUG_ON(!phy->port); + /* FIXME: better error handling*/ + BUG_ON(sas_port_add(phy->port) != 0); + sas_ex_get_linkrate(parent, child, phy); + + if ((phy->attached_tproto & SAS_PROTO_STP) || phy->attached_sata_dev) { + child->dev_type = SATA_DEV; + if (phy->attached_tproto & SAS_PROTO_STP) + child->tproto = phy->attached_tproto; + if (phy->attached_sata_dev) + child->tproto |= SATA_DEV; + res = sas_get_report_phy_sata(parent, phy_id, + &child->sata_dev.rps_resp); + if (res) { + SAS_DPRINTK("report phy sata to %016llx:0x%x returned " + "0x%x\n", SAS_ADDR(parent->sas_addr), + phy_id, res); + kfree(child); + return NULL; + } + memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis, + sizeof(struct dev_to_host_fis)); + sas_init_dev(child); + res = sas_discover_sata(child); + if (res) { + SAS_DPRINTK("sas_discover_sata() for device %16llx at " + "%016llx:0x%x returned 0x%x\n", + SAS_ADDR(child->sas_addr), + SAS_ADDR(parent->sas_addr), phy_id, res); + kfree(child); + return NULL; + } + } else if (phy->attached_tproto & SAS_PROTO_SSP) { + child->dev_type = SAS_END_DEV; + rphy = sas_end_device_alloc(phy->port); + /* FIXME: error handling */ + BUG_ON(!rphy); + child->tproto = phy->attached_tproto; + sas_init_dev(child); + + child->rphy = rphy; + sas_fill_in_rphy(child, rphy); + + spin_lock(&parent->port->dev_list_lock); + list_add_tail(&child->dev_list_node, &parent->port->dev_list); + spin_unlock(&parent->port->dev_list_lock); + + res = sas_discover_end_dev(child); + if (res) { + SAS_DPRINTK("sas_discover_end_dev() for device %16llx " + "at %016llx:0x%x returned 0x%x\n", + SAS_ADDR(child->sas_addr), + SAS_ADDR(parent->sas_addr), phy_id, res); + /* FIXME: this kfrees list elements without removing them */ + 
//kfree(child); + return NULL; + } + } else { + SAS_DPRINTK("target proto 0x%x at %016llx:0x%x not handled\n", + phy->attached_tproto, SAS_ADDR(parent->sas_addr), + phy_id); + } + + list_add_tail(&child->siblings, &parent_ex->children); + return child; +} + +static struct domain_device *sas_ex_discover_expander( + struct domain_device *parent, int phy_id) +{ + struct sas_expander_device *parent_ex = rphy_to_expander_device(parent->rphy); + struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id]; + struct domain_device *child = NULL; + struct sas_rphy *rphy; + struct sas_expander_device *edev; + struct asd_sas_port *port; + int res; + + if (phy->routing_attr == DIRECT_ROUTING) { + SAS_DPRINTK("ex %016llx:0x%x:D <--> ex %016llx:0x%x is not " + "allowed\n", + SAS_ADDR(parent->sas_addr), phy_id, + SAS_ADDR(phy->attached_sas_addr), + phy->attached_phy_id); + return NULL; + } + child = kzalloc(sizeof(*child), GFP_KERNEL); + if (!child) + return NULL; + + phy->port = sas_port_alloc(&parent->rphy->dev, phy_id); + /* FIXME: better error handling */ + BUG_ON(sas_port_add(phy->port) != 0); + + + switch (phy->attached_dev_type) { + case EDGE_DEV: + rphy = sas_expander_alloc(phy->port, + SAS_EDGE_EXPANDER_DEVICE); + break; + case FANOUT_DEV: + rphy = sas_expander_alloc(phy->port, + SAS_FANOUT_EXPANDER_DEVICE); + break; + default: + rphy = NULL; /* shut gcc up */ + BUG(); + } + port = parent->port; + child->rphy = rphy; + edev = rphy_to_expander_device(rphy); + child->dev_type = phy->attached_dev_type; + child->parent = parent; + child->port = port; + child->iproto = phy->attached_iproto; + child->tproto = phy->attached_tproto; + memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); + sas_hash_addr(child->hashed_sas_addr, child->sas_addr); + sas_ex_get_linkrate(parent, child, phy); + edev->level = parent_ex->level + 1; + parent->port->disc.max_level = max(parent->port->disc.max_level, + edev->level); + sas_init_dev(child); + sas_fill_in_rphy(child, rphy); + sas_rphy_add(rphy); + + spin_lock(&parent->port->dev_list_lock); + list_add_tail(&child->dev_list_node, &parent->port->dev_list); + spin_unlock(&parent->port->dev_list_lock); + + res = sas_discover_expander(child); + if (res) { + kfree(child); + return NULL; + } + list_add_tail(&child->siblings, &parent->ex_dev.children); + return child; +} + +static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *ex_phy = &ex->ex_phy[phy_id]; + struct domain_device *child = NULL; + int res = 0; + + /* Phy state */ + if (ex_phy->linkrate == PHY_SPINUP_HOLD) { + if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET)) + res = sas_ex_phy_discover(dev, phy_id); + if (res) + return res; + } + + /* Parent and domain coherency */ + if (!dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == + SAS_ADDR(dev->port->sas_addr))) { + sas_add_parent_port(dev, phy_id); + return 0; + } + if (dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == + SAS_ADDR(dev->parent->sas_addr))) { + sas_add_parent_port(dev, phy_id); + if (ex_phy->routing_attr == TABLE_ROUTING) + sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1); + return 0; + } + + if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) + sas_ex_disable_port(dev, ex_phy->attached_sas_addr); + + if (ex_phy->attached_dev_type == NO_DEVICE) { + if (ex_phy->routing_attr == DIRECT_ROUTING) { + memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + sas_configure_routing(dev, ex_phy->attached_sas_addr); + } + return 0; + } else if 
(ex_phy->linkrate == PHY_LINKRATE_UNKNOWN) + return 0; + + if (ex_phy->attached_dev_type != SAS_END_DEV && + ex_phy->attached_dev_type != FANOUT_DEV && + ex_phy->attached_dev_type != EDGE_DEV) { + SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " + "phy 0x%x\n", ex_phy->attached_dev_type, + SAS_ADDR(dev->sas_addr), + phy_id); + return 0; + } + + res = sas_configure_routing(dev, ex_phy->attached_sas_addr); + if (res) { + SAS_DPRINTK("configure routing for dev %016llx " + "reported 0x%x. Forgotten\n", + SAS_ADDR(ex_phy->attached_sas_addr), res); + sas_disable_routing(dev, ex_phy->attached_sas_addr); + return res; + } + + switch (ex_phy->attached_dev_type) { + case SAS_END_DEV: + child = sas_ex_discover_end_dev(dev, phy_id); + break; + case FANOUT_DEV: + if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { + SAS_DPRINTK("second fanout expander %016llx phy 0x%x " + "attached to ex %016llx phy 0x%x\n", + SAS_ADDR(ex_phy->attached_sas_addr), + ex_phy->attached_phy_id, + SAS_ADDR(dev->sas_addr), + phy_id); + sas_ex_disable_phy(dev, phy_id); + break; + } else + memcpy(dev->port->disc.fanout_sas_addr, + ex_phy->attached_sas_addr, SAS_ADDR_SIZE); + /* fallthrough */ + case EDGE_DEV: + child = sas_ex_discover_expander(dev, phy_id); + break; + default: + break; + } + + if (child) { + int i; + + for (i = 0; i < ex->num_phys; i++) { + if (ex->ex_phy[i].phy_state == PHY_VACANT || + ex->ex_phy[i].phy_state == PHY_NOT_PRESENT) + continue; + + if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) == + SAS_ADDR(child->sas_addr)) + ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED; + } + } + + return res; +} + +static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr) +{ + struct expander_device *ex = &dev->ex_dev; + int i; + + for (i = 0; i < ex->num_phys; i++) { + struct ex_phy *phy = &ex->ex_phy[i]; + + if (phy->phy_state == PHY_VACANT || + phy->phy_state == PHY_NOT_PRESENT) + continue; + + if ((phy->attached_dev_type == EDGE_DEV || + phy->attached_dev_type == FANOUT_DEV) && + phy->routing_attr == SUBTRACTIVE_ROUTING) { + + memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); + + return 1; + } + } + return 0; +} + +static int sas_check_level_subtractive_boundary(struct domain_device *dev) +{ + struct expander_device *ex = &dev->ex_dev; + struct domain_device *child; + u8 sub_addr[8] = {0, }; + + list_for_each_entry(child, &ex->children, siblings) { + if (child->dev_type != EDGE_DEV && + child->dev_type != FANOUT_DEV) + continue; + if (sub_addr[0] == 0) { + sas_find_sub_addr(child, sub_addr); + continue; + } else { + u8 s2[8]; + + if (sas_find_sub_addr(child, s2) && + (SAS_ADDR(sub_addr) != SAS_ADDR(s2))) { + + SAS_DPRINTK("ex %016llx->%016llx-?->%016llx " + "diverges from subtractive " + "boundary %016llx\n", + SAS_ADDR(dev->sas_addr), + SAS_ADDR(child->sas_addr), + SAS_ADDR(s2), + SAS_ADDR(sub_addr)); + + sas_ex_disable_port(child, s2); + } + } + } + return 0; +} +/** + * sas_ex_discover_devices -- discover devices attached to this expander + * dev: pointer to the expander domain device + * single: if you want to do a single phy, else set to -1; + * + * Configure this expander for use with its devices and register the + * devices of this expander. 
+ */ +static int sas_ex_discover_devices(struct domain_device *dev, int single) +{ + struct expander_device *ex = &dev->ex_dev; + int i = 0, end = ex->num_phys; + int res = 0; + + if (0 <= single && single < end) { + i = single; + end = i+1; + } + + for ( ; i < end; i++) { + struct ex_phy *ex_phy = &ex->ex_phy[i]; + + if (ex_phy->phy_state == PHY_VACANT || + ex_phy->phy_state == PHY_NOT_PRESENT || + ex_phy->phy_state == PHY_DEVICE_DISCOVERED) + continue; + + switch (ex_phy->linkrate) { + case PHY_DISABLED: + case PHY_RESET_PROBLEM: + case PHY_PORT_SELECTOR: + continue; + default: + res = sas_ex_discover_dev(dev, i); + if (res) + break; + continue; + } + } + + if (!res) + sas_check_level_subtractive_boundary(dev); + + return res; +} + +static int sas_check_ex_subtractive_boundary(struct domain_device *dev) +{ + struct expander_device *ex = &dev->ex_dev; + int i; + u8 *sub_sas_addr = NULL; + + if (dev->dev_type != EDGE_DEV) + return 0; + + for (i = 0; i < ex->num_phys; i++) { + struct ex_phy *phy = &ex->ex_phy[i]; + + if (phy->phy_state == PHY_VACANT || + phy->phy_state == PHY_NOT_PRESENT) + continue; + + if ((phy->attached_dev_type == FANOUT_DEV || + phy->attached_dev_type == EDGE_DEV) && + phy->routing_attr == SUBTRACTIVE_ROUTING) { + + if (!sub_sas_addr) + sub_sas_addr = &phy->attached_sas_addr[0]; + else if (SAS_ADDR(sub_sas_addr) != + SAS_ADDR(phy->attached_sas_addr)) { + + SAS_DPRINTK("ex %016llx phy 0x%x " + "diverges(%016llx) on subtractive " + "boundary(%016llx). Disabled\n", + SAS_ADDR(dev->sas_addr), i, + SAS_ADDR(phy->attached_sas_addr), + SAS_ADDR(sub_sas_addr)); + sas_ex_disable_phy(dev, i); + } + } + } + return 0; +} + +static void sas_print_parent_topology_bug(struct domain_device *child, + struct ex_phy *parent_phy, + struct ex_phy *child_phy) +{ + static const char ra_char[] = { + [DIRECT_ROUTING] = 'D', + [SUBTRACTIVE_ROUTING] = 'S', + [TABLE_ROUTING] = 'T', + }; + static const char *ex_type[] = { + [EDGE_DEV] = "edge", + [FANOUT_DEV] = "fanout", + }; + struct domain_device *parent = child->parent; + + sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx phy 0x%x " + "has %c:%c routing link!\n", + + ex_type[parent->dev_type], + SAS_ADDR(parent->sas_addr), + parent_phy->phy_id, + + ex_type[child->dev_type], + SAS_ADDR(child->sas_addr), + child_phy->phy_id, + + ra_char[parent_phy->routing_attr], + ra_char[child_phy->routing_attr]); +} + +static int sas_check_eeds(struct domain_device *child, + struct ex_phy *parent_phy, + struct ex_phy *child_phy) +{ + int res = 0; + struct domain_device *parent = child->parent; + + if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) { + res = -ENODEV; + SAS_DPRINTK("edge ex %016llx phy S:0x%x <--> edge ex %016llx " + "phy S:0x%x, while there is a fanout ex %016llx\n", + SAS_ADDR(parent->sas_addr), + parent_phy->phy_id, + SAS_ADDR(child->sas_addr), + child_phy->phy_id, + SAS_ADDR(parent->port->disc.fanout_sas_addr)); + } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) { + memcpy(parent->port->disc.eeds_a, parent->sas_addr, + SAS_ADDR_SIZE); + memcpy(parent->port->disc.eeds_b, child->sas_addr, + SAS_ADDR_SIZE); + } else if (((SAS_ADDR(parent->port->disc.eeds_a) == + SAS_ADDR(parent->sas_addr)) || + (SAS_ADDR(parent->port->disc.eeds_a) == + SAS_ADDR(child->sas_addr))) + && + ((SAS_ADDR(parent->port->disc.eeds_b) == + SAS_ADDR(parent->sas_addr)) || + (SAS_ADDR(parent->port->disc.eeds_b) == + SAS_ADDR(child->sas_addr)))) + ; + else { + res = -ENODEV; + SAS_DPRINTK("edge ex %016llx phy 0x%x <--> edge ex %016llx " + "phy 0x%x link forms a 
third EEDS!\n", + SAS_ADDR(parent->sas_addr), + parent_phy->phy_id, + SAS_ADDR(child->sas_addr), + child_phy->phy_id); + } + + return res; +} + +/* Here we spill over 80 columns. It is intentional. + */ +static int sas_check_parent_topology(struct domain_device *child) +{ + struct expander_device *child_ex = &child->ex_dev; + struct expander_device *parent_ex; + int i; + int res = 0; + + if (!child->parent) + return 0; + + if (child->parent->dev_type != EDGE_DEV && + child->parent->dev_type != FANOUT_DEV) + return 0; + + parent_ex = &child->parent->ex_dev; + + for (i = 0; i < parent_ex->num_phys; i++) { + struct ex_phy *parent_phy = &parent_ex->ex_phy[i]; + struct ex_phy *child_phy; + + if (parent_phy->phy_state == PHY_VACANT || + parent_phy->phy_state == PHY_NOT_PRESENT) + continue; + + if (SAS_ADDR(parent_phy->attached_sas_addr) != SAS_ADDR(child->sas_addr)) + continue; + + child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; + + switch (child->parent->dev_type) { + case EDGE_DEV: + if (child->dev_type == FANOUT_DEV) { + if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || + child_phy->routing_attr != TABLE_ROUTING) { + sas_print_parent_topology_bug(child, parent_phy, child_phy); + res = -ENODEV; + } + } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) { + if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) { + res = sas_check_eeds(child, parent_phy, child_phy); + } else if (child_phy->routing_attr != TABLE_ROUTING) { + sas_print_parent_topology_bug(child, parent_phy, child_phy); + res = -ENODEV; + } + } else if (parent_phy->routing_attr == TABLE_ROUTING && + child_phy->routing_attr != SUBTRACTIVE_ROUTING) { + sas_print_parent_topology_bug(child, parent_phy, child_phy); + res = -ENODEV; + } + break; + case FANOUT_DEV: + if (parent_phy->routing_attr != TABLE_ROUTING || + child_phy->routing_attr != SUBTRACTIVE_ROUTING) { + sas_print_parent_topology_bug(child, parent_phy, child_phy); + res = -ENODEV; + } + break; + default: + break; + } + } + + return res; +} + +#define RRI_REQ_SIZE 16 +#define RRI_RESP_SIZE 44 + +static int sas_configure_present(struct domain_device *dev, int phy_id, + u8 *sas_addr, int *index, int *present) +{ + int i, res = 0; + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *phy = &ex->ex_phy[phy_id]; + u8 *rri_req; + u8 *rri_resp; + + *present = 0; + *index = 0; + + rri_req = alloc_smp_req(RRI_REQ_SIZE); + if (!rri_req) + return -ENOMEM; + + rri_resp = alloc_smp_resp(RRI_RESP_SIZE); + if (!rri_resp) { + kfree(rri_req); + return -ENOMEM; + } + + rri_req[1] = SMP_REPORT_ROUTE_INFO; + rri_req[9] = phy_id; + + for (i = 0; i < ex->max_route_indexes ; i++) { + *(__be16 *)(rri_req+6) = cpu_to_be16(i); + res = smp_execute_task(dev, rri_req, RRI_REQ_SIZE, rri_resp, + RRI_RESP_SIZE); + if (res) + goto out; + res = rri_resp[2]; + if (res == SMP_RESP_NO_INDEX) { + SAS_DPRINTK("overflow of indexes: dev %016llx " + "phy 0x%x index 0x%x\n", + SAS_ADDR(dev->sas_addr), phy_id, i); + goto out; + } else if (res != SMP_RESP_FUNC_ACC) { + SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x " + "result 0x%x\n", __FUNCTION__, + SAS_ADDR(dev->sas_addr), phy_id, i, res); + goto out; + } + if (SAS_ADDR(sas_addr) != 0) { + if (SAS_ADDR(rri_resp+16) == SAS_ADDR(sas_addr)) { + *index = i; + if ((rri_resp[12] & 0x80) == 0x80) + *present = 0; + else + *present = 1; + goto out; + } else if (SAS_ADDR(rri_resp+16) == 0) { + *index = i; + *present = 0; + goto out; + } + } else if (SAS_ADDR(rri_resp+16) == 0 && + phy->last_da_index < i) { + phy->last_da_index = i; + *index = i; 
+ *present = 0; + goto out; + } + } + res = -1; +out: + kfree(rri_req); + kfree(rri_resp); + return res; +} + +#define CRI_REQ_SIZE 44 +#define CRI_RESP_SIZE 8 + +static int sas_configure_set(struct domain_device *dev, int phy_id, + u8 *sas_addr, int index, int include) +{ + int res; + u8 *cri_req; + u8 *cri_resp; + + cri_req = alloc_smp_req(CRI_REQ_SIZE); + if (!cri_req) + return -ENOMEM; + + cri_resp = alloc_smp_resp(CRI_RESP_SIZE); + if (!cri_resp) { + kfree(cri_req); + return -ENOMEM; + } + + cri_req[1] = SMP_CONF_ROUTE_INFO; + *(__be16 *)(cri_req+6) = cpu_to_be16(index); + cri_req[9] = phy_id; + if (SAS_ADDR(sas_addr) == 0 || !include) + cri_req[12] |= 0x80; + memcpy(cri_req+16, sas_addr, SAS_ADDR_SIZE); + + res = smp_execute_task(dev, cri_req, CRI_REQ_SIZE, cri_resp, + CRI_RESP_SIZE); + if (res) + goto out; + res = cri_resp[2]; + if (res == SMP_RESP_NO_INDEX) { + SAS_DPRINTK("overflow of indexes: dev %016llx phy 0x%x " + "index 0x%x\n", + SAS_ADDR(dev->sas_addr), phy_id, index); + } +out: + kfree(cri_req); + kfree(cri_resp); + return res; +} + +static int sas_configure_phy(struct domain_device *dev, int phy_id, + u8 *sas_addr, int include) +{ + int index; + int present; + int res; + + res = sas_configure_present(dev, phy_id, sas_addr, &index, &present); + if (res) + return res; + if (include ^ present) + return sas_configure_set(dev, phy_id, sas_addr, index,include); + + return res; +} + +/** + * sas_configure_parent -- configure routing table of parent + * parent: parent expander + * child: child expander + * sas_addr: SAS port identifier of device directly attached to child + */ +static int sas_configure_parent(struct domain_device *parent, + struct domain_device *child, + u8 *sas_addr, int include) +{ + struct expander_device *ex_parent = &parent->ex_dev; + int res = 0; + int i; + + if (parent->parent) { + res = sas_configure_parent(parent->parent, parent, sas_addr, + include); + if (res) + return res; + } + + if (ex_parent->conf_route_table == 0) { + SAS_DPRINTK("ex %016llx has self-configuring routing table\n", + SAS_ADDR(parent->sas_addr)); + return 0; + } + + for (i = 0; i < ex_parent->num_phys; i++) { + struct ex_phy *phy = &ex_parent->ex_phy[i]; + + if ((phy->routing_attr == TABLE_ROUTING) && + (SAS_ADDR(phy->attached_sas_addr) == + SAS_ADDR(child->sas_addr))) { + res = sas_configure_phy(parent, i, sas_addr, include); + if (res) + return res; + } + } + + return res; +} + +/** + * sas_configure_routing -- configure routing + * dev: expander device + * sas_addr: port identifier of device directly attached to the expander device + */ +static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr) +{ + if (dev->parent) + return sas_configure_parent(dev->parent, dev, sas_addr, 1); + return 0; +} + +static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr) +{ + if (dev->parent) + return sas_configure_parent(dev->parent, dev, sas_addr, 0); + return 0; +} + +#if 0 +#define SMP_BIN_ATTR_NAME "smp_portal" + +static void sas_ex_smp_hook(struct domain_device *dev) +{ + struct expander_device *ex_dev = &dev->ex_dev; + struct bin_attribute *bin_attr = &ex_dev->smp_bin_attr; + + memset(bin_attr, 0, sizeof(*bin_attr)); + + bin_attr->attr.name = SMP_BIN_ATTR_NAME; + bin_attr->attr.owner = THIS_MODULE; + bin_attr->attr.mode = 0600; + + bin_attr->size = 0; + bin_attr->private = NULL; + bin_attr->read = smp_portal_read; + bin_attr->write= smp_portal_write; + bin_attr->mmap = NULL; + + ex_dev->smp_portal_pid = -1; + init_MUTEX(&ex_dev->smp_sema); +} +#endif + +/** + * 
sas_discover_expander -- expander discovery + * @ex: pointer to expander domain device + * + * See comment in sas_discover_sata(). + */ +static int sas_discover_expander(struct domain_device *dev) +{ + int res; + + res = sas_notify_lldd_dev_found(dev); + if (res) + return res; + + res = sas_ex_general(dev); + if (res) + goto out_err; + res = sas_ex_manuf_info(dev); + if (res) + goto out_err; + + res = sas_expander_discover(dev); + if (res) { + SAS_DPRINTK("expander %016llx discovery failed(0x%x)\n", + SAS_ADDR(dev->sas_addr), res); + goto out_err; + } + + sas_check_ex_subtractive_boundary(dev); + res = sas_check_parent_topology(dev); + if (res) + goto out_err; + return 0; +out_err: + sas_notify_lldd_dev_gone(dev); + return res; +} + +static int sas_ex_level_discovery(struct asd_sas_port *port, const int level) +{ + int res = 0; + struct domain_device *dev; + + list_for_each_entry(dev, &port->dev_list, dev_list_node) { + if (dev->dev_type == EDGE_DEV || + dev->dev_type == FANOUT_DEV) { + struct sas_expander_device *ex = + rphy_to_expander_device(dev->rphy); + + if (level == ex->level) + res = sas_ex_discover_devices(dev, -1); + else if (level > 0) + res = sas_ex_discover_devices(port->port_dev, -1); + + } + } + + return res; +} + +static int sas_ex_bfs_disc(struct asd_sas_port *port) +{ + int res; + int level; + + do { + level = port->disc.max_level; + res = sas_ex_level_discovery(port, level); + mb(); + } while (level < port->disc.max_level); + + return res; +} + +int sas_discover_root_expander(struct domain_device *dev) +{ + int res; + struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); + + sas_rphy_add(dev->rphy); + + ex->level = dev->port->disc.max_level; /* 0 */ + res = sas_discover_expander(dev); + if (!res) + sas_ex_bfs_disc(dev->port); + + return res; +} + +/* ---------- Domain revalidation ---------- */ + +static int sas_get_phy_discover(struct domain_device *dev, + int phy_id, struct smp_resp *disc_resp) +{ + int res; + u8 *disc_req; + + disc_req = alloc_smp_req(DISCOVER_REQ_SIZE); + if (!disc_req) + return -ENOMEM; + + disc_req[1] = SMP_DISCOVER; + disc_req[9] = phy_id; + + res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, + disc_resp, DISCOVER_RESP_SIZE); + if (res) + goto out; + else if (disc_resp->result != SMP_RESP_FUNC_ACC) { + res = disc_resp->result; + goto out; + } +out: + kfree(disc_req); + return res; +} + +static int sas_get_phy_change_count(struct domain_device *dev, + int phy_id, int *pcc) +{ + int res; + struct smp_resp *disc_resp; + + disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); + if (!disc_resp) + return -ENOMEM; + + res = sas_get_phy_discover(dev, phy_id, disc_resp); + if (!res) + *pcc = disc_resp->disc.change_count; + + kfree(disc_resp); + return res; +} + +static int sas_get_phy_attached_sas_addr(struct domain_device *dev, + int phy_id, u8 *attached_sas_addr) +{ + int res; + struct smp_resp *disc_resp; + struct discover_resp *dr; + + disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); + if (!disc_resp) + return -ENOMEM; + dr = &disc_resp->disc; + + res = sas_get_phy_discover(dev, phy_id, disc_resp); + if (!res) { + memcpy(attached_sas_addr,disc_resp->disc.attached_sas_addr,8); + if (dr->attached_dev_type == 0) + memset(attached_sas_addr, 0, 8); + } + kfree(disc_resp); + return res; +} + +static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, + int from_phy) +{ + struct expander_device *ex = &dev->ex_dev; + int res = 0; + int i; + + for (i = from_phy; i < ex->num_phys; i++) { + int phy_change_count = 0; + + res = 
sas_get_phy_change_count(dev, i, &phy_change_count); + if (res) + goto out; + else if (phy_change_count != ex->ex_phy[i].phy_change_count) { + ex->ex_phy[i].phy_change_count = phy_change_count; + *phy_id = i; + return 0; + } + } +out: + return res; +} + +static int sas_get_ex_change_count(struct domain_device *dev, int *ecc) +{ + int res; + u8 *rg_req; + struct smp_resp *rg_resp; + + rg_req = alloc_smp_req(RG_REQ_SIZE); + if (!rg_req) + return -ENOMEM; + + rg_resp = alloc_smp_resp(RG_RESP_SIZE); + if (!rg_resp) { + kfree(rg_req); + return -ENOMEM; + } + + rg_req[1] = SMP_REPORT_GENERAL; + + res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp, + RG_RESP_SIZE); + if (res) + goto out; + if (rg_resp->result != SMP_RESP_FUNC_ACC) { + res = rg_resp->result; + goto out; + } + + *ecc = be16_to_cpu(rg_resp->rg.change_count); +out: + kfree(rg_resp); + kfree(rg_req); + return res; +} + +static int sas_find_bcast_dev(struct domain_device *dev, + struct domain_device **src_dev) +{ + struct expander_device *ex = &dev->ex_dev; + int ex_change_count = -1; + int res; + + res = sas_get_ex_change_count(dev, &ex_change_count); + if (res) + goto out; + if (ex_change_count != -1 && + ex_change_count != ex->ex_change_count) { + *src_dev = dev; + ex->ex_change_count = ex_change_count; + } else { + struct domain_device *ch; + + list_for_each_entry(ch, &ex->children, siblings) { + if (ch->dev_type == EDGE_DEV || + ch->dev_type == FANOUT_DEV) { + res = sas_find_bcast_dev(ch, src_dev); + if (src_dev) + return res; + } + } + } +out: + return res; +} + +static void sas_unregister_ex_tree(struct domain_device *dev) +{ + struct expander_device *ex = &dev->ex_dev; + struct domain_device *child, *n; + + list_for_each_entry_safe(child, n, &ex->children, siblings) { + if (child->dev_type == EDGE_DEV || + child->dev_type == FANOUT_DEV) + sas_unregister_ex_tree(child); + else + sas_unregister_dev(child); + } + sas_unregister_dev(dev); +} + +static void sas_unregister_devs_sas_addr(struct domain_device *parent, + int phy_id) +{ + struct expander_device *ex_dev = &parent->ex_dev; + struct ex_phy *phy = &ex_dev->ex_phy[phy_id]; + struct domain_device *child, *n; + + list_for_each_entry_safe(child, n, &ex_dev->children, siblings) { + if (SAS_ADDR(child->sas_addr) == + SAS_ADDR(phy->attached_sas_addr)) { + if (child->dev_type == EDGE_DEV || + child->dev_type == FANOUT_DEV) + sas_unregister_ex_tree(child); + else + sas_unregister_dev(child); + break; + } + } + sas_disable_routing(parent, phy->attached_sas_addr); + memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + sas_port_delete_phy(phy->port, phy->phy); + if (phy->port->num_phys == 0) + sas_port_delete(phy->port); + phy->port = NULL; +} + +static int sas_discover_bfs_by_root_level(struct domain_device *root, + const int level) +{ + struct expander_device *ex_root = &root->ex_dev; + struct domain_device *child; + int res = 0; + + list_for_each_entry(child, &ex_root->children, siblings) { + if (child->dev_type == EDGE_DEV || + child->dev_type == FANOUT_DEV) { + struct sas_expander_device *ex = + rphy_to_expander_device(child->rphy); + + if (level > ex->level) + res = sas_discover_bfs_by_root_level(child, + level); + else if (level == ex->level) + res = sas_ex_discover_devices(child, -1); + } + } + return res; +} + +static int sas_discover_bfs_by_root(struct domain_device *dev) +{ + int res; + struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); + int level = ex->level+1; + + res = sas_ex_discover_devices(dev, -1); + if (res) + goto out; + do { + res = 
sas_discover_bfs_by_root_level(dev, level); + mb(); + level += 1; + } while (level <= dev->port->disc.max_level); +out: + return res; +} + +static int sas_discover_new(struct domain_device *dev, int phy_id) +{ + struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id]; + struct domain_device *child; + int res; + + SAS_DPRINTK("ex %016llx phy%d new device attached\n", + SAS_ADDR(dev->sas_addr), phy_id); + res = sas_ex_phy_discover(dev, phy_id); + if (res) + goto out; + res = sas_ex_discover_devices(dev, phy_id); + if (res) + goto out; + list_for_each_entry(child, &dev->ex_dev.children, siblings) { + if (SAS_ADDR(child->sas_addr) == + SAS_ADDR(ex_phy->attached_sas_addr)) { + if (child->dev_type == EDGE_DEV || + child->dev_type == FANOUT_DEV) + res = sas_discover_bfs_by_root(child); + break; + } + } +out: + return res; +} + +static int sas_rediscover_dev(struct domain_device *dev, int phy_id) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *phy = &ex->ex_phy[phy_id]; + u8 attached_sas_addr[8]; + int res; + + res = sas_get_phy_attached_sas_addr(dev, phy_id, attached_sas_addr); + switch (res) { + case SMP_RESP_NO_PHY: + phy->phy_state = PHY_NOT_PRESENT; + sas_unregister_devs_sas_addr(dev, phy_id); + goto out; break; + case SMP_RESP_PHY_VACANT: + phy->phy_state = PHY_VACANT; + sas_unregister_devs_sas_addr(dev, phy_id); + goto out; break; + case SMP_RESP_FUNC_ACC: + break; + } + + if (SAS_ADDR(attached_sas_addr) == 0) { + phy->phy_state = PHY_EMPTY; + sas_unregister_devs_sas_addr(dev, phy_id); + } else if (SAS_ADDR(attached_sas_addr) == + SAS_ADDR(phy->attached_sas_addr)) { + SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n", + SAS_ADDR(dev->sas_addr), phy_id); + } else + res = sas_discover_new(dev, phy_id); +out: + return res; +} + +static int sas_rediscover(struct domain_device *dev, const int phy_id) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *changed_phy = &ex->ex_phy[phy_id]; + int res = 0; + int i; + + SAS_DPRINTK("ex %016llx phy%d originated BROADCAST(CHANGE)\n", + SAS_ADDR(dev->sas_addr), phy_id); + + if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) { + for (i = 0; i < ex->num_phys; i++) { + struct ex_phy *phy = &ex->ex_phy[i]; + + if (i == phy_id) + continue; + if (SAS_ADDR(phy->attached_sas_addr) == + SAS_ADDR(changed_phy->attached_sas_addr)) { + SAS_DPRINTK("phy%d part of wide port with " + "phy%d\n", phy_id, i); + goto out; + } + } + res = sas_rediscover_dev(dev, phy_id); + } else + res = sas_discover_new(dev, phy_id); +out: + return res; +} + +/** + * sas_revalidate_domain -- revalidate the domain + * @port: port to the domain of interest + * + * NOTE: this process _must_ quit (return) as soon as any connection + * errors are encountered. Connection recovery is done elsewhere. + * Discover process only interrogates devices in order to discover the + * domain. 
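+ * The code below first locates the expander whose REPORT GENERAL change
+ * count has moved (sas_find_bcast_dev), then walks that expander's phys
+ * and rediscovers only behind phys whose DISCOVER change count differs
+ * from the cached value (sas_find_bcast_phy, sas_rediscover).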
+ */ +int sas_ex_revalidate_domain(struct domain_device *port_dev) +{ + int res; + struct domain_device *dev = NULL; + + res = sas_find_bcast_dev(port_dev, &dev); + if (res) + goto out; + if (dev) { + struct expander_device *ex = &dev->ex_dev; + int i = 0, phy_id; + + do { + phy_id = -1; + res = sas_find_bcast_phy(dev, &phy_id, i); + if (phy_id == -1) + break; + res = sas_rediscover(dev, phy_id); + i = phy_id + 1; + } while (i < ex->num_phys); + } +out: + return res; +} + +#if 0 +/* ---------- SMP portal ---------- */ + +static ssize_t smp_portal_write(struct kobject *kobj, char *buf, loff_t offs, + size_t size) +{ + struct domain_device *dev = to_dom_device(kobj); + struct expander_device *ex = &dev->ex_dev; + + if (offs != 0) + return -EFBIG; + else if (size == 0) + return 0; + + down_interruptible(&ex->smp_sema); + if (ex->smp_req) + kfree(ex->smp_req); + ex->smp_req = kzalloc(size, GFP_USER); + if (!ex->smp_req) { + up(&ex->smp_sema); + return -ENOMEM; + } + memcpy(ex->smp_req, buf, size); + ex->smp_req_size = size; + ex->smp_portal_pid = current->pid; + up(&ex->smp_sema); + + return size; +} + +static ssize_t smp_portal_read(struct kobject *kobj, char *buf, loff_t offs, + size_t size) +{ + struct domain_device *dev = to_dom_device(kobj); + struct expander_device *ex = &dev->ex_dev; + u8 *smp_resp; + int res = -EINVAL; + + /* XXX: sysfs gives us an offset of 0x10 or 0x8 while in fact + * it should be 0. + */ + + down_interruptible(&ex->smp_sema); + if (!ex->smp_req || ex->smp_portal_pid != current->pid) + goto out; + + res = 0; + if (size == 0) + goto out; + + res = -ENOMEM; + smp_resp = alloc_smp_resp(size); + if (!smp_resp) + goto out; + res = smp_execute_task(dev, ex->smp_req, ex->smp_req_size, + smp_resp, size); + if (!res) { + memcpy(buf, smp_resp, size); + res = size; + } + + kfree(smp_resp); +out: + kfree(ex->smp_req); + ex->smp_req = NULL; + ex->smp_req_size = 0; + ex->smp_portal_pid = -1; + up(&ex->smp_sema); + return res; +} +#endif diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c new file mode 100644 index 0000000..b961664 --- /dev/null +++ b/drivers/scsi/libsas/sas_init.c @@ -0,0 +1,227 @@ +/* + * Serial Attached SCSI (SAS) Transport Layer initialization + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/spinlock.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_transport.h> +#include <scsi/scsi_transport_sas.h> + +#include "sas_internal.h" + +#include "../scsi_sas_internal.h" + +kmem_cache_t *sas_task_cache; + +/*------------ SAS addr hash -----------*/ +void sas_hash_addr(u8 *hashed, const u8 *sas_addr) +{ + const u32 poly = 0x00DB2777; + u32 r = 0; + int i; + + for (i = 0; i < 8; i++) { + int b; + for (b = 7; b >= 0; b--) { + r <<= 1; + if ((1 << b) & sas_addr[i]) { + if (!(r & 0x01000000)) + r ^= poly; + } else if (r & 0x01000000) + r ^= poly; + } + } + + hashed[0] = (r >> 16) & 0xFF; + hashed[1] = (r >> 8) & 0xFF ; + hashed[2] = r & 0xFF; +} + + +/* ---------- HA events ---------- */ + +void sas_hae_reset(void *data) +{ + struct sas_ha_struct *ha = data; + + sas_begin_event(HAE_RESET, &ha->event_lock, + &ha->pending); +} + +int sas_register_ha(struct sas_ha_struct *sas_ha) +{ + int error = 0; + + spin_lock_init(&sas_ha->phy_port_lock); + sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr); + + if (sas_ha->lldd_queue_size == 0) + sas_ha->lldd_queue_size = 1; + else if (sas_ha->lldd_queue_size == -1) + sas_ha->lldd_queue_size = 128; /* Sanity */ + + error = sas_register_phys(sas_ha); + if (error) { + printk(KERN_NOTICE "couldn't register sas phys:%d\n", error); + return error; + } + + error = sas_register_ports(sas_ha); + if (error) { + printk(KERN_NOTICE "couldn't register sas ports:%d\n", error); + goto Undo_phys; + } + + error = sas_init_events(sas_ha); + if (error) { + printk(KERN_NOTICE "couldn't start event thread:%d\n", error); + goto Undo_ports; + } + + if (sas_ha->lldd_max_execute_num > 1) { + error = sas_init_queue(sas_ha); + if (error) { + printk(KERN_NOTICE "couldn't start queue thread:%d, " + "running in direct mode\n", error); + sas_ha->lldd_max_execute_num = 1; + } + } + + return 0; + +Undo_ports: + sas_unregister_ports(sas_ha); +Undo_phys: + + return error; +} + +int sas_unregister_ha(struct sas_ha_struct *sas_ha) +{ + if (sas_ha->lldd_max_execute_num > 1) { + sas_shutdown_queue(sas_ha); + } + + sas_unregister_ports(sas_ha); + + return 0; +} + +static int sas_get_linkerrors(struct sas_phy *phy) +{ + if (scsi_is_sas_phy_local(phy)) + /* FIXME: we have no local phy stats + * gathering at this time */ + return -EINVAL; + + return sas_smp_get_phy_events(phy); +} + +static int sas_phy_reset(struct sas_phy *phy, int hard_reset) +{ + int ret; + enum phy_func reset_type; + + if (hard_reset) + reset_type = PHY_FUNC_HARD_RESET; + else + reset_type = PHY_FUNC_LINK_RESET; + + if (scsi_is_sas_phy_local(phy)) { + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number]; + struct sas_internal *i = + to_sas_internal(sas_ha->core.shost->transportt); + + ret = i->dft->lldd_control_phy(asd_phy, reset_type); + } else { + struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); + struct domain_device *ddev = sas_find_dev_by_rphy(rphy); + ret = sas_smp_phy_control(ddev, phy->number, reset_type); + } + return ret; +} + +static struct sas_function_template sft = { + .phy_reset = sas_phy_reset, + .get_linkerrors = 
sas_get_linkerrors, +}; + +struct scsi_transport_template * +sas_domain_attach_transport(struct sas_domain_function_template *dft) +{ + struct scsi_transport_template *stt = sas_attach_transport(&sft); + struct sas_internal *i; + + if (!stt) + return stt; + + i = to_sas_internal(stt); + i->dft = dft; + stt->create_work_queue = 1; + stt->eh_timed_out = sas_scsi_timed_out; + stt->eh_strategy_handler = sas_scsi_recover_host; + + return stt; +} +EXPORT_SYMBOL_GPL(sas_domain_attach_transport); + + +void sas_domain_release_transport(struct scsi_transport_template *stt) +{ + sas_release_transport(stt); +} +EXPORT_SYMBOL_GPL(sas_domain_release_transport); + +/* ---------- SAS Class register/unregister ---------- */ + +static int __init sas_class_init(void) +{ + sas_task_cache = kmem_cache_create("sas_task", sizeof(struct sas_task), + 0, SLAB_HWCACHE_ALIGN, NULL, NULL); + if (!sas_task_cache) + return -ENOMEM; + + return 0; +} + +static void __exit sas_class_exit(void) +{ + kmem_cache_destroy(sas_task_cache); +} + +MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>"); +MODULE_DESCRIPTION("SAS Transport Layer"); +MODULE_LICENSE("GPL v2"); + +module_init(sas_class_init); +module_exit(sas_class_exit); + +EXPORT_SYMBOL_GPL(sas_register_ha); +EXPORT_SYMBOL_GPL(sas_unregister_ha); diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h new file mode 100644 index 0000000..89c3976 --- /dev/null +++ b/drivers/scsi/libsas/sas_internal.h @@ -0,0 +1,146 @@ +/* + * Serial Attached SCSI (SAS) class internal header file + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + */ + +#ifndef _SAS_INTERNAL_H_ +#define _SAS_INTERNAL_H_ + +#include <scsi/scsi.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_transport_sas.h> +#include <scsi/libsas.h> + +#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__) + +#ifdef SAS_DEBUG +#define SAS_DPRINTK(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__) +#else +#define SAS_DPRINTK(fmt, ...) 
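+/* SAS_DPRINTK() compiles to nothing unless SAS_DEBUG is defined */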
+#endif + +void sas_scsi_recover_host(struct Scsi_Host *shost); + +int sas_show_class(enum sas_class class, char *buf); +int sas_show_proto(enum sas_proto proto, char *buf); +int sas_show_linkrate(enum sas_phy_linkrate linkrate, char *buf); +int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf); + +int sas_register_phys(struct sas_ha_struct *sas_ha); +void sas_unregister_phys(struct sas_ha_struct *sas_ha); + +int sas_register_ports(struct sas_ha_struct *sas_ha); +void sas_unregister_ports(struct sas_ha_struct *sas_ha); + +enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *); + +int sas_init_queue(struct sas_ha_struct *sas_ha); +int sas_init_events(struct sas_ha_struct *sas_ha); +void sas_shutdown_queue(struct sas_ha_struct *sas_ha); + +void sas_deform_port(struct asd_sas_phy *phy); + +void sas_porte_bytes_dmaed(void *); +void sas_porte_broadcast_rcvd(void *); +void sas_porte_link_reset_err(void *); +void sas_porte_timer_event(void *); +void sas_porte_hard_reset(void *); + +int sas_notify_lldd_dev_found(struct domain_device *); +void sas_notify_lldd_dev_gone(struct domain_device *); + +int sas_smp_phy_control(struct domain_device *dev, int phy_id, + enum phy_func phy_func); +int sas_smp_get_phy_events(struct sas_phy *phy); + +struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); + +void sas_hae_reset(void *); + +static inline void sas_queue_event(int event, spinlock_t *lock, + unsigned long *pending, + struct work_struct *work, + struct Scsi_Host *shost) +{ + unsigned long flags; + + spin_lock_irqsave(lock, flags); + if (test_bit(event, pending)) { + spin_unlock_irqrestore(lock, flags); + return; + } + __set_bit(event, pending); + spin_unlock_irqrestore(lock, flags); + scsi_queue_work(shost, work); +} + +static inline void sas_begin_event(int event, spinlock_t *lock, + unsigned long *pending) +{ + unsigned long flags; + + spin_lock_irqsave(lock, flags); + __clear_bit(event, pending); + spin_unlock_irqrestore(lock, flags); +} + +static inline void sas_fill_in_rphy(struct domain_device *dev, + struct sas_rphy *rphy) +{ + rphy->identify.sas_address = SAS_ADDR(dev->sas_addr); + rphy->identify.initiator_port_protocols = dev->iproto; + rphy->identify.target_port_protocols = dev->tproto; + switch (dev->dev_type) { + case SATA_DEV: + /* FIXME: need sata device type */ + case SAS_END_DEV: + rphy->identify.device_type = SAS_END_DEVICE; + break; + case EDGE_DEV: + rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE; + break; + case FANOUT_DEV: + rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE; + break; + default: + rphy->identify.device_type = SAS_PHY_UNUSED; + break; + } +} + +static inline void sas_add_parent_port(struct domain_device *dev, int phy_id) +{ + struct expander_device *ex = &dev->ex_dev; + struct ex_phy *ex_phy = &ex->ex_phy[phy_id]; + + if (!ex->parent_port) { + ex->parent_port = sas_port_alloc(&dev->rphy->dev, phy_id); + /* FIXME: error handling */ + BUG_ON(!ex->parent_port); + BUG_ON(sas_port_add(ex->parent_port)); + sas_port_mark_backlink(ex->parent_port); + } + sas_port_add_phy(ex->parent_port, ex_phy->phy); +} + +#endif /* _SAS_INTERNAL_H_ */ diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c new file mode 100644 index 0000000..024ab00 --- /dev/null +++ b/drivers/scsi/libsas/sas_phy.c @@ -0,0 +1,157 @@ +/* + * Serial Attached SCSI (SAS) Phy class + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include "sas_internal.h" +#include <scsi/scsi_host.h> +#include <scsi/scsi_transport.h> +#include <scsi/scsi_transport_sas.h> +#include "../scsi_sas_internal.h" + +/* ---------- Phy events ---------- */ + +static void sas_phye_loss_of_signal(void *data) +{ + struct asd_sas_phy *phy = data; + + sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock, + &phy->phy_events_pending); + phy->error = 0; + sas_deform_port(phy); +} + +static void sas_phye_oob_done(void *data) +{ + struct asd_sas_phy *phy = data; + + sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock, + &phy->phy_events_pending); + phy->error = 0; +} + +static void sas_phye_oob_error(void *data) +{ + struct asd_sas_phy *phy = data; + struct sas_ha_struct *sas_ha = phy->ha; + struct asd_sas_port *port = phy->port; + struct sas_internal *i = + to_sas_internal(sas_ha->core.shost->transportt); + + sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock, + &phy->phy_events_pending); + + sas_deform_port(phy); + + if (!port && phy->enabled && i->dft->lldd_control_phy) { + phy->error++; + switch (phy->error) { + case 1: + case 2: + i->dft->lldd_control_phy(phy, PHY_FUNC_HARD_RESET); + break; + case 3: + default: + phy->error = 0; + phy->enabled = 0; + i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE); + break; + } + } +} + +static void sas_phye_spinup_hold(void *data) +{ + struct asd_sas_phy *phy = data; + struct sas_ha_struct *sas_ha = phy->ha; + struct sas_internal *i = + to_sas_internal(sas_ha->core.shost->transportt); + + sas_begin_event(PHYE_SPINUP_HOLD, &phy->ha->event_lock, + &phy->phy_events_pending); + + phy->error = 0; + i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD); +} + +/* ---------- Phy class registration ---------- */ + +int sas_register_phys(struct sas_ha_struct *sas_ha) +{ + int i; + + static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = { + [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal, + [PHYE_OOB_DONE] = sas_phye_oob_done, + [PHYE_OOB_ERROR] = sas_phye_oob_error, + [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold, + }; + + static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = { + [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed, + [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd, + [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err, + [PORTE_TIMER_EVENT] = sas_porte_timer_event, + [PORTE_HARD_RESET] = sas_porte_hard_reset, + }; + + /* Now register the phys. 
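+	 * For each phy: initialize its libsas state, hook up the port and
+	 * phy event work functions, then allocate the transport class
+	 * sas_phy and add it to sysfs.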
*/ + for (i = 0; i < sas_ha->num_phys; i++) { + int k; + struct asd_sas_phy *phy = sas_ha->sas_phy[i]; + + phy->error = 0; + INIT_LIST_HEAD(&phy->port_phy_el); + for (k = 0; k < PORT_NUM_EVENTS; k++) + INIT_WORK(&phy->port_events[k], sas_port_event_fns[k], + phy); + + for (k = 0; k < PHY_NUM_EVENTS; k++) + INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k], + phy); + phy->port = NULL; + phy->ha = sas_ha; + spin_lock_init(&phy->frame_rcvd_lock); + spin_lock_init(&phy->sas_prim_lock); + phy->frame_rcvd_size = 0; + + phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, + i); + if (!phy->phy) + return -ENOMEM; + + phy->phy->identify.initiator_port_protocols = + phy->iproto; + phy->phy->identify.target_port_protocols = phy->tproto; + phy->phy->identify.sas_address = SAS_ADDR(sas_ha->sas_addr); + phy->phy->identify.phy_identifier = i; + phy->phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; + phy->phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; + phy->phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; + phy->phy->maximum_linkrate = SAS_LINK_RATE_3_0_GBPS; + phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; + + sas_phy_add(phy->phy); + } + + return 0; +} diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c new file mode 100644 index 0000000..253cdcf --- /dev/null +++ b/drivers/scsi/libsas/sas_port.c @@ -0,0 +1,279 @@ +/* + * Serial Attached SCSI (SAS) Port class + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include "sas_internal.h" + +#include <scsi/scsi_transport.h> +#include <scsi/scsi_transport_sas.h> +#include "../scsi_sas_internal.h" + +/** + * sas_form_port -- add this phy to a port + * @phy: the phy of interest + * + * This function adds this phy to an existing port, thus creating a wide + * port, or it creates a port and adds the phy to the port. 
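+ * A phy joins an existing port (forming a wide port) when that port's
+ * attached SAS address matches the phy's; otherwise the first unused
+ * port in the HA's table is claimed for it.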
+ */ +static void sas_form_port(struct asd_sas_phy *phy) +{ + int i; + struct sas_ha_struct *sas_ha = phy->ha; + struct asd_sas_port *port = phy->port; + struct sas_internal *si = + to_sas_internal(sas_ha->core.shost->transportt); + + if (port) { + if (memcmp(port->attached_sas_addr, phy->attached_sas_addr, + SAS_ADDR_SIZE) == 0) + sas_deform_port(phy); + else { + SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n", + __FUNCTION__, phy->id, phy->port->id, + phy->port->num_phys); + return; + } + } + + /* find a port */ + spin_lock(&sas_ha->phy_port_lock); + for (i = 0; i < sas_ha->num_phys; i++) { + port = sas_ha->sas_port[i]; + spin_lock(&port->phy_list_lock); + if (*(u64 *) port->sas_addr && + memcmp(port->attached_sas_addr, + phy->attached_sas_addr, SAS_ADDR_SIZE) == 0 && + port->num_phys > 0) { + /* wide port */ + SAS_DPRINTK("phy%d matched wide port%d\n", phy->id, + port->id); + break; + } else if (*(u64 *) port->sas_addr == 0 && port->num_phys==0) { + memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE); + break; + } + spin_unlock(&port->phy_list_lock); + } + + if (i >= sas_ha->num_phys) { + printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n", + __FUNCTION__); + spin_unlock(&sas_ha->phy_port_lock); + return; + } + + /* add the phy to the port */ + list_add_tail(&phy->port_phy_el, &port->phy_list); + phy->port = port; + port->num_phys++; + port->phy_mask |= (1U << phy->id); + + if (!port->phy) + port->phy = phy->phy; + + SAS_DPRINTK("phy%d added to port%d, phy_mask:0x%x\n", phy->id, + port->id, port->phy_mask); + + if (*(u64 *)port->attached_sas_addr == 0) { + port->class = phy->class; + memcpy(port->attached_sas_addr, phy->attached_sas_addr, + SAS_ADDR_SIZE); + port->iproto = phy->iproto; + port->tproto = phy->tproto; + port->oob_mode = phy->oob_mode; + port->linkrate = phy->linkrate; + } else + port->linkrate = max(port->linkrate, phy->linkrate); + spin_unlock(&port->phy_list_lock); + spin_unlock(&sas_ha->phy_port_lock); + + if (!port->port) { + port->port = sas_port_alloc(phy->phy->dev.parent, port->id); + BUG_ON(!port->port); + sas_port_add(port->port); + } + sas_port_add_phy(port->port, phy->phy); + + if (port->port_dev) + port->port_dev->pathways = port->num_phys; + + /* Tell the LLDD about this port formation. */ + if (si->dft->lldd_port_formed) + si->dft->lldd_port_formed(phy); + + sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN); +} + +/** + * sas_deform_port -- remove this phy from the port it belongs to + * @phy: the phy of interest + * + * This is called when the physical link to the other phy has been + * lost (on this phy), in Event thread context. We cannot delay here. 
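+ * If this was the last phy in the port, the domain devices are
+ * unregistered and the transport class port is deleted; otherwise the
+ * phy is simply removed from the (wide) port.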
+ */ +void sas_deform_port(struct asd_sas_phy *phy) +{ + struct sas_ha_struct *sas_ha = phy->ha; + struct asd_sas_port *port = phy->port; + struct sas_internal *si = + to_sas_internal(sas_ha->core.shost->transportt); + + if (!port) + return; /* done by a phy event */ + + if (port->port_dev) + port->port_dev->pathways--; + + if (port->num_phys == 1) { + sas_unregister_domain_devices(port); + sas_port_delete(port->port); + port->port = NULL; + } else + sas_port_delete_phy(port->port, phy->phy); + + + if (si->dft->lldd_port_deformed) + si->dft->lldd_port_deformed(phy); + + spin_lock(&sas_ha->phy_port_lock); + spin_lock(&port->phy_list_lock); + + list_del_init(&phy->port_phy_el); + phy->port = NULL; + port->num_phys--; + port->phy_mask &= ~(1U << phy->id); + + if (port->num_phys == 0) { + INIT_LIST_HEAD(&port->phy_list); + memset(port->sas_addr, 0, SAS_ADDR_SIZE); + memset(port->attached_sas_addr, 0, SAS_ADDR_SIZE); + port->class = 0; + port->iproto = 0; + port->tproto = 0; + port->oob_mode = 0; + port->phy_mask = 0; + } + spin_unlock(&port->phy_list_lock); + spin_unlock(&sas_ha->phy_port_lock); + + return; +} + +/* ---------- SAS port events ---------- */ + +void sas_porte_bytes_dmaed(void *data) +{ + struct asd_sas_phy *phy = data; + + sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock, + &phy->port_events_pending); + + sas_form_port(phy); +} + +void sas_porte_broadcast_rcvd(void *data) +{ + unsigned long flags; + u32 prim; + struct asd_sas_phy *phy = data; + + sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock, + &phy->port_events_pending); + + spin_lock_irqsave(&phy->sas_prim_lock, flags); + prim = phy->sas_prim; + spin_unlock_irqrestore(&phy->sas_prim_lock, flags); + + SAS_DPRINTK("broadcast received: %d\n", prim); + sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN); +} + +void sas_porte_link_reset_err(void *data) +{ + struct asd_sas_phy *phy = data; + + sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock, + &phy->port_events_pending); + + sas_deform_port(phy); +} + +void sas_porte_timer_event(void *data) +{ + struct asd_sas_phy *phy = data; + + sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock, + &phy->port_events_pending); + + sas_deform_port(phy); +} + +void sas_porte_hard_reset(void *data) +{ + struct asd_sas_phy *phy = data; + + sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock, + &phy->port_events_pending); + + sas_deform_port(phy); +} + +/* ---------- SAS port registration ---------- */ + +static void sas_init_port(struct asd_sas_port *port, + struct sas_ha_struct *sas_ha, int i) +{ + port->id = i; + INIT_LIST_HEAD(&port->dev_list); + spin_lock_init(&port->phy_list_lock); + INIT_LIST_HEAD(&port->phy_list); + port->num_phys = 0; + port->phy_mask = 0; + port->ha = sas_ha; + + spin_lock_init(&port->dev_list_lock); +} + +int sas_register_ports(struct sas_ha_struct *sas_ha) +{ + int i; + + /* initialize the ports and discovery */ + for (i = 0; i < sas_ha->num_phys; i++) { + struct asd_sas_port *port = sas_ha->sas_port[i]; + + sas_init_port(port, sas_ha, i); + sas_init_disc(&port->disc, port); + } + return 0; +} + +void sas_unregister_ports(struct sas_ha_struct *sas_ha) +{ + int i; + + for (i = 0; i < sas_ha->num_phys; i++) + if (sas_ha->sas_phy[i]->port) + sas_deform_port(sas_ha->sas_phy[i]); + +} diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c new file mode 100644 index 0000000..43e0e4e --- /dev/null +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -0,0 +1,786 @@ +/* + * Serial Attached SCSI (SAS) class SCSI Host 
glue. + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + */ + +#include "sas_internal.h" + +#include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_tcq.h> +#include <scsi/scsi.h> +#include <scsi/scsi_transport.h> +#include <scsi/scsi_transport_sas.h> +#include "../scsi_sas_internal.h" + +#include <linux/err.h> +#include <linux/blkdev.h> +#include <linux/scatterlist.h> + +/* ---------- SCSI Host glue ---------- */ + +#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble) +#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0) + +static void sas_scsi_task_done(struct sas_task *task) +{ + struct task_status_struct *ts = &task->task_status; + struct scsi_cmnd *sc = task->uldd_task; + unsigned ts_flags = task->task_state_flags; + int hs = 0, stat = 0; + + if (unlikely(!sc)) { + SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); + list_del_init(&task->list); + sas_free_task(task); + return; + } + + if (ts->resp == SAS_TASK_UNDELIVERED) { + /* transport error */ + hs = DID_NO_CONNECT; + } else { /* ts->resp == SAS_TASK_COMPLETE */ + /* task delivered, what happened afterwards? */ + switch (ts->stat) { + case SAS_DEV_NO_RESPONSE: + case SAS_INTERRUPTED: + case SAS_PHY_DOWN: + case SAS_NAK_R_ERR: + case SAS_OPEN_TO: + hs = DID_NO_CONNECT; + break; + case SAS_DATA_UNDERRUN: + sc->resid = ts->residual; + if (sc->request_bufflen - sc->resid < sc->underflow) + hs = DID_ERROR; + break; + case SAS_DATA_OVERRUN: + hs = DID_ERROR; + break; + case SAS_QUEUE_FULL: + hs = DID_SOFT_ERROR; /* retry */ + break; + case SAS_DEVICE_UNKNOWN: + hs = DID_BAD_TARGET; + break; + case SAS_SG_ERR: + hs = DID_PARITY; + break; + case SAS_OPEN_REJECT: + if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY) + hs = DID_SOFT_ERROR; /* retry */ + else + hs = DID_ERROR; + break; + case SAS_PROTO_RESPONSE: + SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP " + "task; please report this\n", + task->dev->port->ha->sas_ha_name); + break; + case SAS_ABORTED_TASK: + hs = DID_ABORT; + break; + case SAM_CHECK_COND: + memcpy(sc->sense_buffer, ts->buf, + max(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size)); + stat = SAM_CHECK_COND; + break; + default: + stat = ts->stat; + break; + } + } + ASSIGN_SAS_TASK(sc, NULL); + sc->result = (hs << 16) | stat; + list_del_init(&task->list); + sas_free_task(task); + /* This is very ugly but this is how SCSI Core works. 
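+	 * An aborted command must be completed through scsi_finish_command(),
+	 * while a normal completion goes back through ->scsi_done().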
*/ + if (ts_flags & SAS_TASK_STATE_ABORTED) + scsi_finish_command(sc); + else + sc->scsi_done(sc); +} + +static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd) +{ + enum task_attribute ta = TASK_ATTR_SIMPLE; + if (cmd->request && blk_rq_tagged(cmd->request)) { + if (cmd->device->ordered_tags && + (cmd->request->flags & REQ_HARDBARRIER)) + ta = TASK_ATTR_HOQ; + } + return ta; +} + +static struct sas_task *sas_create_task(struct scsi_cmnd *cmd, + struct domain_device *dev, + unsigned long gfp_flags) +{ + struct sas_task *task = sas_alloc_task(gfp_flags); + struct scsi_lun lun; + + if (!task) + return NULL; + + *(u32 *)cmd->sense_buffer = 0; + task->uldd_task = cmd; + ASSIGN_SAS_TASK(cmd, task); + + task->dev = dev; + task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */ + + task->ssp_task.retry_count = 1; + int_to_scsilun(cmd->device->lun, &lun); + memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8); + task->ssp_task.task_attr = sas_scsi_get_task_attr(cmd); + memcpy(task->ssp_task.cdb, cmd->cmnd, 16); + + task->scatter = cmd->request_buffer; + task->num_scatter = cmd->use_sg; + task->total_xfer_len = cmd->request_bufflen; + task->data_dir = cmd->sc_data_direction; + + task->task_done = sas_scsi_task_done; + + return task; +} + +static int sas_queue_up(struct sas_task *task) +{ + struct sas_ha_struct *sas_ha = task->dev->port->ha; + struct scsi_core *core = &sas_ha->core; + unsigned long flags; + LIST_HEAD(list); + + spin_lock_irqsave(&core->task_queue_lock, flags); + if (sas_ha->lldd_queue_size < core->task_queue_size + 1) { + spin_unlock_irqrestore(&core->task_queue_lock, flags); + return -SAS_QUEUE_FULL; + } + list_add_tail(&task->list, &core->task_queue); + core->task_queue_size += 1; + spin_unlock_irqrestore(&core->task_queue_lock, flags); + up(&core->queue_thread_sema); + + return 0; +} + +/** + * sas_queuecommand -- Enqueue a command for processing + * @parameters: See SCSI Core documentation + * + * Note: XXX: Remove the host unlock/lock pair when SCSI Core can + * call us without holding an IRQ spinlock... + */ +int sas_queuecommand(struct scsi_cmnd *cmd, + void (*scsi_done)(struct scsi_cmnd *)) +{ + int res = 0; + struct domain_device *dev = cmd_to_domain_dev(cmd); + struct Scsi_Host *host = cmd->device->host; + struct sas_internal *i = to_sas_internal(host->transportt); + + spin_unlock_irq(host->host_lock); + + { + struct sas_ha_struct *sas_ha = dev->port->ha; + struct sas_task *task; + + res = -ENOMEM; + task = sas_create_task(cmd, dev, GFP_ATOMIC); + if (!task) + goto out; + + cmd->scsi_done = scsi_done; + /* Queue up, Direct Mode or Task Collector Mode. 
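+		 * Direct Mode hands the task straight to the LLDD here;
+		 * Task Collector Mode queues it for the sas_queue_thread
+		 * to batch to the LLDD.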
*/ + if (sas_ha->lldd_max_execute_num < 2) + res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC); + else + res = sas_queue_up(task); + + /* Examine */ + if (res) { + SAS_DPRINTK("lldd_execute_task returned: %d\n", res); + ASSIGN_SAS_TASK(cmd, NULL); + sas_free_task(task); + if (res == -SAS_QUEUE_FULL) { + cmd->result = DID_SOFT_ERROR << 16; /* retry */ + res = 0; + scsi_done(cmd); + } + goto out; + } + } +out: + spin_lock_irq(host->host_lock); + return res; +} + +static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) +{ + struct scsi_cmnd *cmd, *n; + + list_for_each_entry_safe(cmd, n, error_q, eh_entry) { + if (cmd == my_cmd) + list_del_init(&cmd->eh_entry); + } +} + +static void sas_scsi_clear_queue_I_T(struct list_head *error_q, + struct domain_device *dev) +{ + struct scsi_cmnd *cmd, *n; + + list_for_each_entry_safe(cmd, n, error_q, eh_entry) { + struct domain_device *x = cmd_to_domain_dev(cmd); + + if (x == dev) + list_del_init(&cmd->eh_entry); + } +} + +static void sas_scsi_clear_queue_port(struct list_head *error_q, + struct asd_sas_port *port) +{ + struct scsi_cmnd *cmd, *n; + + list_for_each_entry_safe(cmd, n, error_q, eh_entry) { + struct domain_device *dev = cmd_to_domain_dev(cmd); + struct asd_sas_port *x = dev->port; + + if (x == port) + list_del_init(&cmd->eh_entry); + } +} + +enum task_disposition { + TASK_IS_DONE, + TASK_IS_ABORTED, + TASK_IS_AT_LU, + TASK_IS_NOT_AT_LU, +}; + +static enum task_disposition sas_scsi_find_task(struct sas_task *task) +{ + struct sas_ha_struct *ha = task->dev->port->ha; + unsigned long flags; + int i, res; + struct sas_internal *si = + to_sas_internal(task->dev->port->ha->core.shost->transportt); + + if (ha->lldd_max_execute_num > 1) { + struct scsi_core *core = &ha->core; + struct sas_task *t, *n; + + spin_lock_irqsave(&core->task_queue_lock, flags); + list_for_each_entry_safe(t, n, &core->task_queue, list) { + if (task == t) { + list_del_init(&t->list); + spin_unlock_irqrestore(&core->task_queue_lock, + flags); + SAS_DPRINTK("%s: task 0x%p aborted from " + "task_queue\n", + __FUNCTION__, task); + return TASK_IS_ABORTED; + } + } + spin_unlock_irqrestore(&core->task_queue_lock, flags); + } + + for (i = 0; i < 5; i++) { + SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task); + res = si->dft->lldd_abort_task(task); + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, + task); + return TASK_IS_DONE; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + + if (res == TMF_RESP_FUNC_COMPLETE) { + SAS_DPRINTK("%s: task 0x%p is aborted\n", + __FUNCTION__, task); + return TASK_IS_ABORTED; + } else if (si->dft->lldd_query_task) { + SAS_DPRINTK("%s: querying task 0x%p\n", + __FUNCTION__, task); + res = si->dft->lldd_query_task(task); + if (res == TMF_RESP_FUNC_SUCC) { + SAS_DPRINTK("%s: task 0x%p at LU\n", + __FUNCTION__, task); + return TASK_IS_AT_LU; + } else if (res == TMF_RESP_FUNC_COMPLETE) { + SAS_DPRINTK("%s: task 0x%p not at LU\n", + __FUNCTION__, task); + return TASK_IS_NOT_AT_LU; + } + } + } + return res; +} + +static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd) +{ + int res = TMF_RESP_FUNC_FAILED; + struct scsi_lun lun; + struct sas_internal *i = + to_sas_internal(dev->port->ha->core.shost->transportt); + + int_to_scsilun(cmd->device->lun, &lun); + + SAS_DPRINTK("eh: device %llx LUN %x has the task\n", + 
SAS_ADDR(dev->sas_addr), + cmd->device->lun); + + if (i->dft->lldd_abort_task_set) + res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun); + + if (res == TMF_RESP_FUNC_FAILED) { + if (i->dft->lldd_clear_task_set) + res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun); + } + + if (res == TMF_RESP_FUNC_FAILED) { + if (i->dft->lldd_lu_reset) + res = i->dft->lldd_lu_reset(dev, lun.scsi_lun); + } + + return res; +} + +static int sas_recover_I_T(struct domain_device *dev) +{ + int res = TMF_RESP_FUNC_FAILED; + struct sas_internal *i = + to_sas_internal(dev->port->ha->core.shost->transportt); + + SAS_DPRINTK("I_T nexus reset for dev %016llx\n", + SAS_ADDR(dev->sas_addr)); + + if (i->dft->lldd_I_T_nexus_reset) + res = i->dft->lldd_I_T_nexus_reset(dev); + + return res; +} + +void sas_scsi_recover_host(struct Scsi_Host *shost) +{ + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); + unsigned long flags; + LIST_HEAD(error_q); + struct scsi_cmnd *cmd, *n; + enum task_disposition res = TASK_IS_DONE; + int tmf_resp; + struct sas_internal *i = to_sas_internal(shost->transportt); + + spin_lock_irqsave(shost->host_lock, flags); + list_splice_init(&shost->eh_cmd_q, &error_q); + spin_unlock_irqrestore(shost->host_lock, flags); + + SAS_DPRINTK("Enter %s\n", __FUNCTION__); + + /* All tasks on this list were marked SAS_TASK_STATE_ABORTED + * by sas_scsi_timed_out() callback. + */ +Again: + SAS_DPRINTK("going over list...\n"); + list_for_each_entry_safe(cmd, n, &error_q, eh_entry) { + struct sas_task *task = TO_SAS_TASK(cmd); + + SAS_DPRINTK("trying to find task 0x%p\n", task); + list_del_init(&cmd->eh_entry); + res = sas_scsi_find_task(task); + + cmd->eh_eflags = 0; + shost->host_failed--; + + switch (res) { + case TASK_IS_DONE: + SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, + task); + task->task_done(task); + continue; + case TASK_IS_ABORTED: + SAS_DPRINTK("%s: task 0x%p is aborted\n", + __FUNCTION__, task); + task->task_done(task); + continue; + case TASK_IS_AT_LU: + SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); + tmf_resp = sas_recover_lu(task->dev, cmd); + if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { + SAS_DPRINTK("dev %016llx LU %x is " + "recovered\n", + SAS_ADDR(task->dev), + cmd->device->lun); + task->task_done(task); + sas_scsi_clear_queue_lu(&error_q, cmd); + goto Again; + } + /* fallthrough */ + case TASK_IS_NOT_AT_LU: + SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n", + task); + tmf_resp = sas_recover_I_T(task->dev); + if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { + SAS_DPRINTK("I_T %016llx recovered\n", + SAS_ADDR(task->dev->sas_addr)); + task->task_done(task); + sas_scsi_clear_queue_I_T(&error_q, task->dev); + goto Again; + } + /* Hammer time :-) */ + if (i->dft->lldd_clear_nexus_port) { + struct asd_sas_port *port = task->dev->port; + SAS_DPRINTK("clearing nexus for port:%d\n", + port->id); + res = i->dft->lldd_clear_nexus_port(port); + if (res == TMF_RESP_FUNC_COMPLETE) { + SAS_DPRINTK("clear nexus port:%d " + "succeeded\n", port->id); + task->task_done(task); + sas_scsi_clear_queue_port(&error_q, + port); + goto Again; + } + } + if (i->dft->lldd_clear_nexus_ha) { + SAS_DPRINTK("clear nexus ha\n"); + res = i->dft->lldd_clear_nexus_ha(ha); + if (res == TMF_RESP_FUNC_COMPLETE) { + SAS_DPRINTK("clear nexus ha " + "succeeded\n"); + task->task_done(task); + goto out; + } + } + /* If we are here -- this means that no amount + * of effort could recover from errors. Quite + * possibly the HA just disappeared. 
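+			 * Give the command back and flush whatever is still
+			 * sitting on the error list.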
+ */ + SAS_DPRINTK("error from device %llx, LUN %x " + "couldn't be recovered in any way\n", + SAS_ADDR(task->dev->sas_addr), + cmd->device->lun); + + task->task_done(task); + goto clear_q; + } + } +out: + SAS_DPRINTK("--- Exit %s\n", __FUNCTION__); + return; +clear_q: + SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__); + list_for_each_entry_safe(cmd, n, &error_q, eh_entry) { + struct sas_task *task = TO_SAS_TASK(cmd); + list_del_init(&cmd->eh_entry); + task->task_done(task); + } +} + +enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) +{ + struct sas_task *task = TO_SAS_TASK(cmd); + unsigned long flags; + + if (!task) { + SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", + cmd, task); + return EH_HANDLED; + } + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + spin_unlock_irqrestore(&task->task_state_lock, flags); + SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n", + cmd, task); + return EH_HANDLED; + } + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + spin_unlock_irqrestore(&task->task_state_lock, flags); + + SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n", + cmd, task); + + return EH_NOT_HANDLED; +} + +struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy) +{ + struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent); + struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); + struct domain_device *found_dev = NULL; + int i; + + spin_lock(&ha->phy_port_lock); + for (i = 0; i < ha->num_phys; i++) { + struct asd_sas_port *port = ha->sas_port[i]; + struct domain_device *dev; + + spin_lock(&port->dev_list_lock); + list_for_each_entry(dev, &port->dev_list, dev_list_node) { + if (rphy == dev->rphy) { + found_dev = dev; + spin_unlock(&port->dev_list_lock); + goto found; + } + } + spin_unlock(&port->dev_list_lock); + } + found: + spin_unlock(&ha->phy_port_lock); + + return found_dev; +} + +static inline struct domain_device *sas_find_target(struct scsi_target *starget) +{ + struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent); + + return sas_find_dev_by_rphy(rphy); +} + +int sas_target_alloc(struct scsi_target *starget) +{ + struct domain_device *found_dev = sas_find_target(starget); + + if (!found_dev) + return -ENODEV; + + starget->hostdata = found_dev; + return 0; +} + +#define SAS_DEF_QD 32 +#define SAS_MAX_QD 64 + +int sas_slave_configure(struct scsi_device *scsi_dev) +{ + struct domain_device *dev = sdev_to_domain_dev(scsi_dev); + struct sas_ha_struct *sas_ha; + + BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE); + + sas_ha = dev->port->ha; + + sas_read_port_mode_page(scsi_dev); + + if (scsi_dev->tagged_supported) { + scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG); + scsi_activate_tcq(scsi_dev, SAS_DEF_QD); + } else { + SAS_DPRINTK("device %llx, LUN %x doesn't support " + "TCQ\n", SAS_ADDR(dev->sas_addr), + scsi_dev->lun); + scsi_dev->tagged_supported = 0; + scsi_set_tag_type(scsi_dev, 0); + scsi_deactivate_tcq(scsi_dev, 1); + } + + return 0; +} + +void sas_slave_destroy(struct scsi_device *scsi_dev) +{ +} + +int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth) +{ + int res = min(new_depth, SAS_MAX_QD); + + if (scsi_dev->tagged_supported) + scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), + res); + else { + struct domain_device *dev = sdev_to_domain_dev(scsi_dev); + sas_printk("device %llx LUN %x queue depth changed to 1\n", + SAS_ADDR(dev->sas_addr), + scsi_dev->lun); + scsi_adjust_queue_depth(scsi_dev, 0, 1); 
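+		/* untagged devices are limited to a single outstanding command */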
+ res = 1; + } + + return res; +} + +int sas_change_queue_type(struct scsi_device *scsi_dev, int qt) +{ + if (!scsi_dev->tagged_supported) + return 0; + + scsi_deactivate_tcq(scsi_dev, 1); + + scsi_set_tag_type(scsi_dev, qt); + scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth); + + return qt; +} + +int sas_bios_param(struct scsi_device *scsi_dev, + struct block_device *bdev, + sector_t capacity, int *hsc) +{ + hsc[0] = 255; + hsc[1] = 63; + sector_div(capacity, 255*63); + hsc[2] = capacity; + + return 0; +} + +/* ---------- Task Collector Thread implementation ---------- */ + +static void sas_queue(struct sas_ha_struct *sas_ha) +{ + struct scsi_core *core = &sas_ha->core; + unsigned long flags; + LIST_HEAD(q); + int can_queue; + int res; + struct sas_internal *i = to_sas_internal(core->shost->transportt); + + spin_lock_irqsave(&core->task_queue_lock, flags); + while (!core->queue_thread_kill && + !list_empty(&core->task_queue)) { + + can_queue = sas_ha->lldd_queue_size - core->task_queue_size; + if (can_queue >= 0) { + can_queue = core->task_queue_size; + list_splice_init(&core->task_queue, &q); + } else { + struct list_head *a, *n; + + can_queue = sas_ha->lldd_queue_size; + list_for_each_safe(a, n, &core->task_queue) { + list_move_tail(a, &q); + if (--can_queue == 0) + break; + } + can_queue = sas_ha->lldd_queue_size; + } + core->task_queue_size -= can_queue; + spin_unlock_irqrestore(&core->task_queue_lock, flags); + { + struct sas_task *task = list_entry(q.next, + struct sas_task, + list); + list_del_init(&q); + res = i->dft->lldd_execute_task(task, can_queue, + GFP_KERNEL); + if (unlikely(res)) + __list_add(&q, task->list.prev, &task->list); + } + spin_lock_irqsave(&core->task_queue_lock, flags); + if (res) { + list_splice_init(&q, &core->task_queue); /*at head*/ + core->task_queue_size += can_queue; + } + } + spin_unlock_irqrestore(&core->task_queue_lock, flags); +} + +static DECLARE_COMPLETION(queue_th_comp); + +/** + * sas_queue_thread -- The Task Collector thread + * @_sas_ha: pointer to struct sas_ha + */ +static int sas_queue_thread(void *_sas_ha) +{ + struct sas_ha_struct *sas_ha = _sas_ha; + struct scsi_core *core = &sas_ha->core; + + daemonize("sas_queue_%d", core->shost->host_no); + current->flags |= PF_NOFREEZE; + + complete(&queue_th_comp); + + while (1) { + down_interruptible(&core->queue_thread_sema); + sas_queue(sas_ha); + if (core->queue_thread_kill) + break; + } + + complete(&queue_th_comp); + + return 0; +} + +int sas_init_queue(struct sas_ha_struct *sas_ha) +{ + int res; + struct scsi_core *core = &sas_ha->core; + + spin_lock_init(&core->task_queue_lock); + core->task_queue_size = 0; + INIT_LIST_HEAD(&core->task_queue); + init_MUTEX_LOCKED(&core->queue_thread_sema); + + res = kernel_thread(sas_queue_thread, sas_ha, 0); + if (res >= 0) + wait_for_completion(&queue_th_comp); + + return res < 0 ? 
res : 0; +} + +void sas_shutdown_queue(struct sas_ha_struct *sas_ha) +{ + unsigned long flags; + struct scsi_core *core = &sas_ha->core; + struct sas_task *task, *n; + + init_completion(&queue_th_comp); + core->queue_thread_kill = 1; + up(&core->queue_thread_sema); + wait_for_completion(&queue_th_comp); + + if (!list_empty(&core->task_queue)) + SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n", + SAS_ADDR(sas_ha->sas_addr)); + + spin_lock_irqsave(&core->task_queue_lock, flags); + list_for_each_entry_safe(task, n, &core->task_queue, list) { + struct scsi_cmnd *cmd = task->uldd_task; + + list_del_init(&task->list); + + ASSIGN_SAS_TASK(cmd, NULL); + sas_free_task(task); + cmd->result = DID_ABORT << 16; + cmd->scsi_done(cmd); + } + spin_unlock_irqrestore(&core->task_queue_lock, flags); +} + +EXPORT_SYMBOL_GPL(sas_queuecommand); +EXPORT_SYMBOL_GPL(sas_target_alloc); +EXPORT_SYMBOL_GPL(sas_slave_configure); +EXPORT_SYMBOL_GPL(sas_slave_destroy); +EXPORT_SYMBOL_GPL(sas_change_queue_depth); +EXPORT_SYMBOL_GPL(sas_change_queue_type); +EXPORT_SYMBOL_GPL(sas_bios_param); diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h new file mode 100644 index 0000000..72acdab --- /dev/null +++ b/include/scsi/libsas.h @@ -0,0 +1,627 @@ +/* + * SAS host prototypes and structures header file + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + */ + +#ifndef _LIBSAS_H_ +#define _LIBSAS_H_ + + +#include <linux/timer.h> +#include <linux/pci.h> +#include <scsi/sas.h> +#include <linux/list.h> +#include <asm/semaphore.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_transport_sas.h> + +struct block_device; + +enum sas_class { + SAS, + EXPANDER +}; + +enum sas_phy_role { + PHY_ROLE_NONE = 0, + PHY_ROLE_TARGET = 0x40, + PHY_ROLE_INITIATOR = 0x80, +}; + +enum sas_phy_type { + PHY_TYPE_PHYSICAL, + PHY_TYPE_VIRTUAL +}; + +/* The events are mnemonically described in sas_dump.c + * so when updating/adding events here, please also + * update the other file too. 
+ */ +enum ha_event { + HAE_RESET = 0U, + HA_NUM_EVENTS = 1, +}; + +enum port_event { + PORTE_BYTES_DMAED = 0U, + PORTE_BROADCAST_RCVD = 1, + PORTE_LINK_RESET_ERR = 2, + PORTE_TIMER_EVENT = 3, + PORTE_HARD_RESET = 4, + PORT_NUM_EVENTS = 5, +}; + +enum phy_event { + PHYE_LOSS_OF_SIGNAL = 0U, + PHYE_OOB_DONE = 1, + PHYE_OOB_ERROR = 2, + PHYE_SPINUP_HOLD = 3, /* hot plug SATA, no COMWAKE sent */ + PHY_NUM_EVENTS = 4, +}; + +enum discover_event { + DISCE_DISCOVER_DOMAIN = 0U, + DISCE_REVALIDATE_DOMAIN = 1, + DISCE_PORT_GONE = 2, + DISC_NUM_EVENTS = 3, +}; + +/* ---------- Expander Devices ---------- */ + +#define ETASK 0xFA + +#define to_dom_device(_obj) container_of(_obj, struct domain_device, dev_obj) +#define to_dev_attr(_attr) container_of(_attr, struct domain_dev_attribute,\ + attr) + +enum routing_attribute { + DIRECT_ROUTING, + SUBTRACTIVE_ROUTING, + TABLE_ROUTING, +}; + +enum ex_phy_state { + PHY_EMPTY, + PHY_VACANT, + PHY_NOT_PRESENT, + PHY_DEVICE_DISCOVERED +}; + +struct ex_phy { + int phy_id; + + enum ex_phy_state phy_state; + + enum sas_dev_type attached_dev_type; + enum sas_phy_linkrate linkrate; + + u8 attached_sata_host:1; + u8 attached_sata_dev:1; + u8 attached_sata_ps:1; + + enum sas_proto attached_tproto; + enum sas_proto attached_iproto; + + u8 attached_sas_addr[SAS_ADDR_SIZE]; + u8 attached_phy_id; + + u8 phy_change_count; + enum routing_attribute routing_attr; + u8 virtual:1; + + int last_da_index; + + struct sas_phy *phy; + struct sas_port *port; +}; + +struct expander_device { + struct list_head children; + + u16 ex_change_count; + u16 max_route_indexes; + u8 num_phys; + u8 configuring:1; + u8 conf_route_table:1; + u8 enclosure_logical_id[8]; + + struct ex_phy *ex_phy; + struct sas_port *parent_port; +}; + +/* ---------- SATA device ---------- */ +enum ata_command_set { + ATA_COMMAND_SET = 0, + ATAPI_COMMAND_SET = 1, +}; + +struct sata_device { + enum ata_command_set command_set; + struct smp_resp rps_resp; /* report_phy_sata_resp */ + __le16 *identify_device; + __le16 *identify_packet_device; + + u8 port_no; /* port number, if this is a PM (Port) */ + struct list_head children; /* PM Ports if this is a PM */ +}; + +/* ---------- Domain device ---------- */ +struct domain_device { + enum sas_dev_type dev_type; + + enum sas_phy_linkrate linkrate; + enum sas_phy_linkrate min_linkrate; + enum sas_phy_linkrate max_linkrate; + + int pathways; + + struct domain_device *parent; + struct list_head siblings; /* devices on the same level */ + struct asd_sas_port *port; /* shortcut to root of the tree */ + + struct list_head dev_list_node; + + enum sas_proto iproto; + enum sas_proto tproto; + + struct sas_rphy *rphy; + + u8 sas_addr[SAS_ADDR_SIZE]; + u8 hashed_sas_addr[HASHED_SAS_ADDR_SIZE]; + + u8 frame_rcvd[32]; + + union { + struct expander_device ex_dev; + struct sata_device sata_dev; /* STP & directly attached */ + }; + + void *lldd_dev; +}; + +struct sas_discovery { + spinlock_t disc_event_lock; + struct work_struct disc_work[DISC_NUM_EVENTS]; + unsigned long pending; + u8 fanout_sas_addr[8]; + u8 eeds_a[8]; + u8 eeds_b[8]; + int max_level; +}; + + +/* The port struct is Class:RW, driver:RO */ +struct asd_sas_port { +/* private: */ + struct completion port_gone_completion; + + struct sas_discovery disc; + struct domain_device *port_dev; + spinlock_t dev_list_lock; + struct list_head dev_list; + enum sas_phy_linkrate linkrate; + + struct sas_phy *phy; + struct work_struct work; + +/* public: */ + int id; + + enum sas_class class; + u8 sas_addr[SAS_ADDR_SIZE]; + u8 
attached_sas_addr[SAS_ADDR_SIZE]; + enum sas_proto iproto; + enum sas_proto tproto; + + enum sas_oob_mode oob_mode; + + spinlock_t phy_list_lock; + struct list_head phy_list; + int num_phys; + u32 phy_mask; + + struct sas_ha_struct *ha; + + struct sas_port *port; + + void *lldd_port; /* not touched by the sas class code */ +}; + +/* The phy pretty much is controlled by the LLDD. + * The class only reads those fields. + */ +struct asd_sas_phy { +/* private: */ + /* protected by ha->event_lock */ + struct work_struct port_events[PORT_NUM_EVENTS]; + struct work_struct phy_events[PHY_NUM_EVENTS]; + + unsigned long port_events_pending; + unsigned long phy_events_pending; + + int error; + + struct sas_phy *phy; + +/* public: */ + /* The following are class:RO, driver:R/W */ + int enabled; /* must be set */ + + int id; /* must be set */ + enum sas_class class; + enum sas_proto iproto; + enum sas_proto tproto; + + enum sas_phy_type type; + enum sas_phy_role role; + enum sas_oob_mode oob_mode; + enum sas_phy_linkrate linkrate; + + u8 *sas_addr; /* must be set */ + u8 attached_sas_addr[SAS_ADDR_SIZE]; /* class:RO, driver: R/W */ + + spinlock_t frame_rcvd_lock; + u8 *frame_rcvd; /* must be set */ + int frame_rcvd_size; + + spinlock_t sas_prim_lock; + u32 sas_prim; + + struct list_head port_phy_el; /* driver:RO */ + struct asd_sas_port *port; /* Class:RW, driver: RO */ + + struct sas_ha_struct *ha; /* may be set; the class sets it anyway */ + + void *lldd_phy; /* not touched by the sas_class_code */ +}; + +struct scsi_core { + struct Scsi_Host *shost; + + spinlock_t task_queue_lock; + struct list_head task_queue; + int task_queue_size; + + struct semaphore queue_thread_sema; + int queue_thread_kill; +}; + +struct sas_ha_struct { +/* private: */ + spinlock_t event_lock; + struct work_struct ha_events[HA_NUM_EVENTS]; + unsigned long pending; + + struct scsi_core core; + +/* public: */ + char *sas_ha_name; + struct pci_dev *pcidev; /* should be set */ + struct module *lldd_module; /* should be set */ + + u8 *sas_addr; /* must be set */ + u8 hashed_sas_addr[HASHED_SAS_ADDR_SIZE]; + + spinlock_t phy_port_lock; + struct asd_sas_phy **sas_phy; /* array of valid pointers, must be set */ + struct asd_sas_port **sas_port; /* array of valid pointers, must be set */ + int num_phys; /* must be set, gt 0, static */ + + /* The class calls this to send a task for execution. */ + int lldd_max_execute_num; + int lldd_queue_size; + + /* LLDD calls these to notify the class of an event. */ + void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event); + void (*notify_port_event)(struct asd_sas_phy *, enum port_event); + void (*notify_phy_event)(struct asd_sas_phy *, enum phy_event); + + void *lldd_ha; /* not touched by sas class code */ +}; + +#define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata) + +static inline struct domain_device * +starget_to_domain_dev(struct scsi_target *starget) { + return starget->hostdata; +} + +static inline struct domain_device * +sdev_to_domain_dev(struct scsi_device *sdev) { + return starget_to_domain_dev(sdev->sdev_target); +} + +static inline struct domain_device * +cmd_to_domain_dev(struct scsi_cmnd *cmd) +{ + return sdev_to_domain_dev(cmd->device); +} + +void sas_hash_addr(u8 *hashed, const u8 *sas_addr); + +/* Before calling a notify event, LLDD should use this function + * when the link is severed (possibly from its tasklet). + * The idea is that the Class only reads those, while the LLDD, + * can R/W these (thus avoiding a race). 
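+ * ("These" being the phy's oob_mode and linkrate, which the helper below resets to their not-connected values.)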
+ */ +static inline void sas_phy_disconnected(struct asd_sas_phy *phy) +{ + phy->oob_mode = OOB_NOT_CONNECTED; + phy->linkrate = PHY_LINKRATE_NONE; +} + +/* ---------- Tasks ---------- */ +/* + service_response | SAS_TASK_COMPLETE | SAS_TASK_UNDELIVERED | + exec_status | | | + ---------------------+---------------------+-----------------------+ + SAM_... | X | | + DEV_NO_RESPONSE | X | X | + INTERRUPTED | X | | + QUEUE_FULL | | X | + DEVICE_UNKNOWN | | X | + SG_ERR | | X | + ---------------------+---------------------+-----------------------+ + */ + +enum service_response { + SAS_TASK_COMPLETE, + SAS_TASK_UNDELIVERED = -1, +}; + +enum exec_status { + SAM_GOOD = 0, + SAM_CHECK_COND = 2, + SAM_COND_MET = 4, + SAM_BUSY = 8, + SAM_INTERMEDIATE = 0x10, + SAM_IM_COND_MET = 0x12, + SAM_RESV_CONFLICT= 0x14, + SAM_TASK_SET_FULL= 0x28, + SAM_ACA_ACTIVE = 0x30, + SAM_TASK_ABORTED = 0x40, + + SAS_DEV_NO_RESPONSE = 0x80, + SAS_DATA_UNDERRUN, + SAS_DATA_OVERRUN, + SAS_INTERRUPTED, + SAS_QUEUE_FULL, + SAS_DEVICE_UNKNOWN, + SAS_SG_ERR, + SAS_OPEN_REJECT, + SAS_OPEN_TO, + SAS_PROTO_RESPONSE, + SAS_PHY_DOWN, + SAS_NAK_R_ERR, + SAS_PENDING, + SAS_ABORTED_TASK, +}; + +/* When a task finishes with a response, the LLDD examines the + * response: + * - For an ATA task task_status_struct::stat is set to + * SAS_PROTO_RESPONSE, and the task_status_struct::buf is set to the + * contents of struct ata_task_resp. + * - For SSP tasks, if no data is present or status/TMF response + * is valid, task_status_struct::stat is set. If data is present + * (SENSE data), the LLDD copies up to SAS_STATUS_BUF_SIZE, sets + * task_status_struct::buf_valid_size, and task_status_struct::stat is + * set to SAM_CHECK_COND. + * + * "buf" has format SCSI Sense for SSP task, or struct ata_task_resp + * for ATA task. + * + * "frame_len" is the total frame length, which could be more or less + * than actually copied. + * + * Tasks ending with response, always set the residual field. + */ +struct ata_task_resp { + u16 frame_len; + u8 ending_fis[24]; /* dev to host or data-in */ + u32 sstatus; + u32 serror; + u32 scontrol; + u32 sactive; +}; + +#define SAS_STATUS_BUF_SIZE 96 + +struct task_status_struct { + enum service_response resp; + enum exec_status stat; + int buf_valid_size; + + u8 buf[SAS_STATUS_BUF_SIZE]; + + u32 residual; + enum sas_open_rej_reason open_rej_reason; +}; + +/* ATA and ATAPI task queuable to a SAS LLDD. + */ +struct sas_ata_task { + struct host_to_dev_fis fis; + u8 atapi_packet[16]; /* 0 if not ATAPI task */ + + u8 retry_count; /* hardware retry, should be > 0 */ + + u8 dma_xfer:1; /* PIO:0 or DMA:1 */ + u8 use_ncq:1; + u8 set_affil_pol:1; + u8 stp_affil_pol:1; + + u8 device_control_reg_update:1; +}; + +struct sas_smp_task { + struct scatterlist smp_req; + struct scatterlist smp_resp; +}; + +enum task_attribute { + TASK_ATTR_SIMPLE = 0, + TASK_ATTR_HOQ = 1, + TASK_ATTR_ORDERED= 2, + TASK_ATTR_ACA = 4, +}; + +struct sas_ssp_task { + u8 retry_count; /* hardware retry, should be > 0 */ + + u8 LUN[8]; + u8 enable_first_burst:1; + enum task_attribute task_attr; + u8 task_prio; + u8 cdb[16]; +}; + +struct sas_task { + struct domain_device *dev; + struct list_head list; + + spinlock_t task_state_lock; + unsigned task_state_flags; + + enum sas_proto task_proto; + + /* Used by the discovery code. 
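+ Discovery arms the timer and then waits on the completion when it issues its own tasks (e.g. SMP requests to an expander), so a task the LLDD never completes can be timed out.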
*/ + struct timer_list timer; + struct completion completion; + + union { + struct sas_ata_task ata_task; + struct sas_smp_task smp_task; + struct sas_ssp_task ssp_task; + }; + + struct scatterlist *scatter; + int num_scatter; + u32 total_xfer_len; + u8 data_dir:2; /* Use PCI_DMA_... */ + + struct task_status_struct task_status; + void (*task_done)(struct sas_task *); + + void *lldd_task; /* for use by LLDDs */ + void *uldd_task; +}; + + + +#define SAS_TASK_STATE_PENDING 1 +#define SAS_TASK_STATE_DONE 2 +#define SAS_TASK_STATE_ABORTED 4 + +static inline struct sas_task *sas_alloc_task(unsigned long flags) +{ + extern kmem_cache_t *sas_task_cache; + struct sas_task *task = kmem_cache_alloc(sas_task_cache, flags); + + if (task) { + memset(task, 0, sizeof(*task)); + INIT_LIST_HEAD(&task->list); + spin_lock_init(&task->task_state_lock); + task->task_state_flags = SAS_TASK_STATE_PENDING; + init_timer(&task->timer); + init_completion(&task->completion); + } + + return task; +} + +static inline void sas_free_task(struct sas_task *task) +{ + if (task) { + extern kmem_cache_t *sas_task_cache; + BUG_ON(!list_empty(&task->list)); + kmem_cache_free(sas_task_cache, task); + } +} + +struct sas_domain_function_template { + /* The class calls these to notify the LLDD of an event. */ + void (*lldd_port_formed)(struct asd_sas_phy *); + void (*lldd_port_deformed)(struct asd_sas_phy *); + + /* The class calls these when a device is found or gone. */ + int (*lldd_dev_found)(struct domain_device *); + void (*lldd_dev_gone)(struct domain_device *); + + int (*lldd_execute_task)(struct sas_task *, int num, + unsigned long gfp_flags); + + /* Task Management Functions. Must be called from process context. */ + int (*lldd_abort_task)(struct sas_task *); + int (*lldd_abort_task_set)(struct domain_device *, u8 *lun); + int (*lldd_clear_aca)(struct domain_device *, u8 *lun); + int (*lldd_clear_task_set)(struct domain_device *, u8 *lun); + int (*lldd_I_T_nexus_reset)(struct domain_device *); + int (*lldd_lu_reset)(struct domain_device *, u8 *lun); + int (*lldd_query_task)(struct sas_task *); + + /* Port and Adapter management */ + int (*lldd_clear_nexus_port)(struct asd_sas_port *); + int (*lldd_clear_nexus_ha)(struct sas_ha_struct *); + + /* Phy management */ + int (*lldd_control_phy)(struct asd_sas_phy *, enum phy_func); +}; + +extern int sas_register_ha(struct sas_ha_struct *); +extern int sas_unregister_ha(struct sas_ha_struct *); + +extern int sas_queuecommand(struct scsi_cmnd *, + void (*scsi_done)(struct scsi_cmnd *)); +extern int sas_target_alloc(struct scsi_target *); +extern int sas_slave_alloc(struct scsi_device *); +extern int sas_slave_configure(struct scsi_device *); +extern void sas_slave_destroy(struct scsi_device *); +extern int sas_change_queue_depth(struct scsi_device *, int new_depth); +extern int sas_change_queue_type(struct scsi_device *, int qt); +extern int sas_bios_param(struct scsi_device *, + struct block_device *, + sector_t capacity, int *hsc); +extern struct scsi_transport_template * +sas_domain_attach_transport(struct sas_domain_function_template *); +extern void sas_domain_release_transport(struct scsi_transport_template *); + +int sas_discover_root_expander(struct domain_device *); + +void sas_init_ex_attr(void); + +int sas_ex_revalidate_domain(struct domain_device *); + +void sas_unregister_domain_devices(struct asd_sas_port *port); +void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *); +int sas_discover_event(struct asd_sas_port *, enum discover_event ev); + +int 
sas_discover_sata(struct domain_device *); +int sas_discover_end_dev(struct domain_device *); + +void sas_unregister_dev(struct domain_device *); + +void sas_init_dev(struct domain_device *); + +#endif /* _SASLIB_H_ */ diff --git a/include/scsi/sas.h b/include/scsi/sas.h new file mode 100644 index 0000000..752853a --- /dev/null +++ b/include/scsi/sas.h @@ -0,0 +1,644 @@ +/* + * SAS structures and definitions header file + * + * Copyright (C) 2005 Adaptec, Inc. All rights reserved. + * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + */ + +#ifndef _SAS_H_ +#define _SAS_H_ + +#include <linux/types.h> +#include <asm/byteorder.h> + +#define SAS_ADDR_SIZE 8 +#define HASHED_SAS_ADDR_SIZE 3 +#define SAS_ADDR(_sa) ((unsigned long long) be64_to_cpu(*(__be64 *)(_sa))) + +#define SMP_REQUEST 0x40 +#define SMP_RESPONSE 0x41 + +#define SSP_DATA 0x01 +#define SSP_XFER_RDY 0x05 +#define SSP_COMMAND 0x06 +#define SSP_RESPONSE 0x07 +#define SSP_TASK 0x16 + +#define SMP_REPORT_GENERAL 0x00 +#define SMP_REPORT_MANUF_INFO 0x01 +#define SMP_READ_GPIO_REG 0x02 +#define SMP_DISCOVER 0x10 +#define SMP_REPORT_PHY_ERR_LOG 0x11 +#define SMP_REPORT_PHY_SATA 0x12 +#define SMP_REPORT_ROUTE_INFO 0x13 +#define SMP_WRITE_GPIO_REG 0x82 +#define SMP_CONF_ROUTE_INFO 0x90 +#define SMP_PHY_CONTROL 0x91 +#define SMP_PHY_TEST_FUNCTION 0x92 + +#define SMP_RESP_FUNC_ACC 0x00 +#define SMP_RESP_FUNC_UNK 0x01 +#define SMP_RESP_FUNC_FAILED 0x02 +#define SMP_RESP_INV_FRM_LEN 0x03 +#define SMP_RESP_NO_PHY 0x10 +#define SMP_RESP_NO_INDEX 0x11 +#define SMP_RESP_PHY_NO_SATA 0x12 +#define SMP_RESP_PHY_UNK_OP 0x13 +#define SMP_RESP_PHY_UNK_TESTF 0x14 +#define SMP_RESP_PHY_TEST_INPROG 0x15 +#define SMP_RESP_PHY_VACANT 0x16 + +/* SAM TMFs */ +#define TMF_ABORT_TASK 0x01 +#define TMF_ABORT_TASK_SET 0x02 +#define TMF_CLEAR_TASK_SET 0x04 +#define TMF_LU_RESET 0x08 +#define TMF_CLEAR_ACA 0x40 +#define TMF_QUERY_TASK 0x80 + +/* SAS TMF responses */ +#define TMF_RESP_FUNC_COMPLETE 0x00 +#define TMF_RESP_INVALID_FRAME 0x02 +#define TMF_RESP_FUNC_ESUPP 0x04 +#define TMF_RESP_FUNC_FAILED 0x05 +#define TMF_RESP_FUNC_SUCC 0x08 +#define TMF_RESP_NO_LUN 0x09 +#define TMF_RESP_OVERLAPPED_TAG 0x0A + +enum sas_oob_mode { + OOB_NOT_CONNECTED, + SATA_OOB_MODE, + SAS_OOB_MODE +}; + +/* See sas_discover.c if you plan on changing these. 
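+ * The entries marked "protocol" mirror the device type field used on the wire (IDENTIFY address frame / DISCOVER response), so their numeric values must not be changed.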
+ */ +enum sas_dev_type { + NO_DEVICE = 0, /* protocol */ + SAS_END_DEV = 1, /* protocol */ + EDGE_DEV = 2, /* protocol */ + FANOUT_DEV = 3, /* protocol */ + SAS_HA = 4, + SATA_DEV = 5, + SATA_PM = 7, + SATA_PM_PORT= 8, +}; + +enum sas_phy_linkrate { + PHY_LINKRATE_NONE = 0, + PHY_LINKRATE_UNKNOWN = 0, + PHY_DISABLED, + PHY_RESET_PROBLEM, + PHY_SPINUP_HOLD, + PHY_PORT_SELECTOR, + PHY_LINKRATE_1_5 = 0x08, + PHY_LINKRATE_G1 = PHY_LINKRATE_1_5, + PHY_LINKRATE_3 = 0x09, + PHY_LINKRATE_G2 = PHY_LINKRATE_3, + PHY_LINKRATE_6 = 0x0A, +}; + +/* Partly from IDENTIFY address frame. */ +enum sas_proto { + SATA_PROTO = 1, + SAS_PROTO_SMP = 2, /* protocol */ + SAS_PROTO_STP = 4, /* protocol */ + SAS_PROTO_SSP = 8, /* protocol */ + SAS_PROTO_ALL = 0xE, +}; + +/* From the spec; local phys only */ +enum phy_func { + PHY_FUNC_NOP, + PHY_FUNC_LINK_RESET, /* Enables the phy */ + PHY_FUNC_HARD_RESET, + PHY_FUNC_DISABLE, + PHY_FUNC_CLEAR_ERROR_LOG = 5, + PHY_FUNC_CLEAR_AFFIL, + PHY_FUNC_TX_SATA_PS_SIGNAL, + PHY_FUNC_RELEASE_SPINUP_HOLD = 0x10, /* LOCAL PORT ONLY! */ +}; + +/* SAS LLDD would need to report only _very_few_ of those, like BROADCAST. + * Most of those are here for completeness. + */ +enum sas_prim { + SAS_PRIM_AIP_NORMAL = 1, + SAS_PRIM_AIP_R0 = 2, + SAS_PRIM_AIP_R1 = 3, + SAS_PRIM_AIP_R2 = 4, + SAS_PRIM_AIP_WC = 5, + SAS_PRIM_AIP_WD = 6, + SAS_PRIM_AIP_WP = 7, + SAS_PRIM_AIP_RWP = 8, + + SAS_PRIM_BC_CH = 9, + SAS_PRIM_BC_RCH0 = 10, + SAS_PRIM_BC_RCH1 = 11, + SAS_PRIM_BC_R0 = 12, + SAS_PRIM_BC_R1 = 13, + SAS_PRIM_BC_R2 = 14, + SAS_PRIM_BC_R3 = 15, + SAS_PRIM_BC_R4 = 16, + + SAS_PRIM_NOTIFY_ENSP= 17, + SAS_PRIM_NOTIFY_R0 = 18, + SAS_PRIM_NOTIFY_R1 = 19, + SAS_PRIM_NOTIFY_R2 = 20, + + SAS_PRIM_CLOSE_CLAF = 21, + SAS_PRIM_CLOSE_NORM = 22, + SAS_PRIM_CLOSE_R0 = 23, + SAS_PRIM_CLOSE_R1 = 24, + + SAS_PRIM_OPEN_RTRY = 25, + SAS_PRIM_OPEN_RJCT = 26, + SAS_PRIM_OPEN_ACPT = 27, + + SAS_PRIM_DONE = 28, + SAS_PRIM_BREAK = 29, + + SATA_PRIM_DMAT = 33, + SATA_PRIM_PMNAK = 34, + SATA_PRIM_PMACK = 35, + SATA_PRIM_PMREQ_S = 36, + SATA_PRIM_PMREQ_P = 37, + SATA_SATA_R_ERR = 38, +}; + +enum sas_open_rej_reason { + /* Abandon open */ + SAS_OREJ_UNKNOWN = 0, + SAS_OREJ_BAD_DEST = 1, + SAS_OREJ_CONN_RATE = 2, + SAS_OREJ_EPROTO = 3, + SAS_OREJ_RESV_AB0 = 4, + SAS_OREJ_RESV_AB1 = 5, + SAS_OREJ_RESV_AB2 = 6, + SAS_OREJ_RESV_AB3 = 7, + SAS_OREJ_WRONG_DEST= 8, + SAS_OREJ_STP_NORES = 9, + + /* Retry open */ + SAS_OREJ_NO_DEST = 10, + SAS_OREJ_PATH_BLOCKED = 11, + SAS_OREJ_RSVD_CONT0 = 12, + SAS_OREJ_RSVD_CONT1 = 13, + SAS_OREJ_RSVD_INIT0 = 14, + SAS_OREJ_RSVD_INIT1 = 15, + SAS_OREJ_RSVD_STOP0 = 16, + SAS_OREJ_RSVD_STOP1 = 17, + SAS_OREJ_RSVD_RETRY = 18, +}; + +struct dev_to_host_fis { + u8 fis_type; /* 0x34 */ + u8 flags; + u8 status; + u8 error; + + u8 lbal; + union { u8 lbam; u8 byte_count_low; }; + union { u8 lbah; u8 byte_count_high; }; + u8 device; + + u8 lbal_exp; + u8 lbam_exp; + u8 lbah_exp; + u8 _r_a; + + union { u8 sector_count; u8 interrupt_reason; }; + u8 sector_count_exp; + u8 _r_b; + u8 _r_c; + + u32 _r_d; +} __attribute__ ((packed)); + +struct host_to_dev_fis { + u8 fis_type; /* 0x27 */ + u8 flags; + u8 command; + u8 features; + + u8 lbal; + union { u8 lbam; u8 byte_count_low; }; + union { u8 lbah; u8 byte_count_high; }; + u8 device; + + u8 lbal_exp; + u8 lbam_exp; + u8 lbah_exp; + u8 features_exp; + + union { u8 sector_count; u8 interrupt_reason; }; + u8 sector_count_exp; + u8 _r_a; + u8 control; + + u32 _r_b; +} __attribute__ ((packed)); + +/* Prefer to have code clarity over header file clarity. 
+ */ +#ifdef __LITTLE_ENDIAN_BITFIELD +struct sas_identify_frame { + /* Byte 0 */ + u8 frame_type:4; + u8 dev_type:3; + u8 _un0:1; + + /* Byte 1 */ + u8 _un1; + + /* Byte 2 */ + union { + struct { + u8 _un20:1; + u8 smp_iport:1; + u8 stp_iport:1; + u8 ssp_iport:1; + u8 _un247:4; + }; + u8 initiator_bits; + }; + + /* Byte 3 */ + union { + struct { + u8 _un30:1; + u8 smp_tport:1; + u8 stp_tport:1; + u8 ssp_tport:1; + u8 _un347:4; + }; + u8 target_bits; + }; + + /* Byte 4 - 11 */ + u8 _un4_11[8]; + + /* Byte 12 - 19 */ + u8 sas_addr[SAS_ADDR_SIZE]; + + /* Byte 20 */ + u8 phy_id; + + u8 _un21_27[7]; + + __be32 crc; +} __attribute__ ((packed)); + +struct ssp_frame_hdr { + u8 frame_type; + u8 hashed_dest_addr[HASHED_SAS_ADDR_SIZE]; + u8 _r_a; + u8 hashed_src_addr[HASHED_SAS_ADDR_SIZE]; + __be16 _r_b; + + u8 changing_data_ptr:1; + u8 retransmit:1; + u8 retry_data_frames:1; + u8 _r_c:5; + + u8 num_fill_bytes:2; + u8 _r_d:6; + + u32 _r_e; + __be16 tag; + __be16 tptt; + __be32 data_offs; +} __attribute__ ((packed)); + +struct ssp_response_iu { + u8 _r_a[10]; + + u8 datapres:2; + u8 _r_b:6; + + u8 status; + + u32 _r_c; + + __be32 sense_data_len; + __be32 response_data_len; + + u8 resp_data[0]; + u8 sense_data[0]; +} __attribute__ ((packed)); + +/* ---------- SMP ---------- */ + +struct report_general_resp { + __be16 change_count; + __be16 route_indexes; + u8 _r_a; + u8 num_phys; + + u8 conf_route_table:1; + u8 configuring:1; + u8 _r_b:6; + + u8 _r_c; + + u8 enclosure_logical_id[8]; + + u8 _r_d[12]; +} __attribute__ ((packed)); + +struct discover_resp { + u8 _r_a[5]; + + u8 phy_id; + __be16 _r_b; + + u8 _r_c:4; + u8 attached_dev_type:3; + u8 _r_d:1; + + u8 linkrate:4; + u8 _r_e:4; + + u8 attached_sata_host:1; + u8 iproto:3; + u8 _r_f:4; + + u8 attached_sata_dev:1; + u8 tproto:3; + u8 _r_g:3; + u8 attached_sata_ps:1; + + u8 sas_addr[8]; + u8 attached_sas_addr[8]; + u8 attached_phy_id; + + u8 _r_h[7]; + + u8 hmin_linkrate:4; + u8 pmin_linkrate:4; + u8 hmax_linkrate:4; + u8 pmax_linkrate:4; + + u8 change_count; + + u8 pptv:4; + u8 _r_i:3; + u8 virtual:1; + + u8 routing_attr:4; + u8 _r_j:4; + + u8 conn_type; + u8 conn_el_index; + u8 conn_phy_link; + + u8 _r_k[8]; +} __attribute__ ((packed)); + +struct report_phy_sata_resp { + u8 _r_a[5]; + + u8 phy_id; + u8 _r_b; + + u8 affil_valid:1; + u8 affil_supp:1; + u8 _r_c:6; + + u32 _r_d; + + u8 stp_sas_addr[8]; + + struct dev_to_host_fis fis; + + u32 _r_e; + + u8 affil_stp_ini_addr[8]; + + __be32 crc; +} __attribute__ ((packed)); + +struct smp_resp { + u8 frame_type; + u8 function; + u8 result; + u8 reserved; + union { + struct report_general_resp rg; + struct discover_resp disc; + struct report_phy_sata_resp rps; + }; +} __attribute__ ((packed)); + +#elif defined(__BIG_ENDIAN_BITFIELD) +struct sas_identify_frame { + /* Byte 0 */ + u8 _un0:1; + u8 dev_type:3; + u8 frame_type:4; + + /* Byte 1 */ + u8 _un1; + + /* Byte 2 */ + union { + struct { + u8 _un247:4; + u8 ssp_iport:1; + u8 stp_iport:1; + u8 smp_iport:1; + u8 _un20:1; + }; + u8 initiator_bits; + }; + + /* Byte 3 */ + union { + struct { + u8 _un347:4; + u8 ssp_tport:1; + u8 stp_tport:1; + u8 smp_tport:1; + u8 _un30:1; + }; + u8 target_bits; + }; + + /* Byte 4 - 11 */ + u8 _un4_11[8]; + + /* Byte 12 - 19 */ + u8 sas_addr[SAS_ADDR_SIZE]; + + /* Byte 20 */ + u8 phy_id; + + u8 _un21_27[7]; + + __be32 crc; +} __attribute__ ((packed)); + +struct ssp_frame_hdr { + u8 frame_type; + u8 hashed_dest_addr[HASHED_SAS_ADDR_SIZE]; + u8 _r_a; + u8 hashed_src_addr[HASHED_SAS_ADDR_SIZE]; + __be16 _r_b; + + u8 _r_c:5; + u8 
retry_data_frames:1; + u8 retransmit:1; + u8 changing_data_ptr:1; + + u8 _r_d:6; + u8 num_fill_bytes:2; + + u32 _r_e; + __be16 tag; + __be16 tptt; + __be32 data_offs; +} __attribute__ ((packed)); + +struct ssp_response_iu { + u8 _r_a[10]; + + u8 _r_b:6; + u8 datapres:2; + + u8 status; + + u32 _r_c; + + __be32 sense_data_len; + __be32 response_data_len; + + u8 resp_data[0]; + u8 sense_data[0]; +} __attribute__ ((packed)); + +/* ---------- SMP ---------- */ + +struct report_general_resp { + __be16 change_count; + __be16 route_indexes; + u8 _r_a; + u8 num_phys; + + u8 _r_b:6; + u8 configuring:1; + u8 conf_route_table:1; + + u8 _r_c; + + u8 enclosure_logical_id[8]; + + u8 _r_d[12]; +} __attribute__ ((packed)); + +struct discover_resp { + u8 _r_a[5]; + + u8 phy_id; + __be16 _r_b; + + u8 _r_d:1; + u8 attached_dev_type:3; + u8 _r_c:4; + + u8 _r_e:4; + u8 linkrate:4; + + u8 _r_f:4; + u8 iproto:3; + u8 attached_sata_host:1; + + u8 attached_sata_ps:1; + u8 _r_g:3; + u8 tproto:3; + u8 attached_sata_dev:1; + + u8 sas_addr[8]; + u8 attached_sas_addr[8]; + u8 attached_phy_id; + + u8 _r_h[7]; + + u8 pmin_linkrate:4; + u8 hmin_linkrate:4; + u8 pmax_linkrate:4; + u8 hmax_linkrate:4; + + u8 change_count; + + u8 virtual:1; + u8 _r_i:3; + u8 pptv:4; + + u8 _r_j:4; + u8 routing_attr:4; + + u8 conn_type; + u8 conn_el_index; + u8 conn_phy_link; + + u8 _r_k[8]; +} __attribute__ ((packed)); + +struct report_phy_sata_resp { + u8 _r_a[5]; + + u8 phy_id; + u8 _r_b; + + u8 _r_c:6; + u8 affil_supp:1; + u8 affil_valid:1; + + u32 _r_d; + + u8 stp_sas_addr[8]; + + struct dev_to_host_fis fis; + + u32 _r_e; + + u8 affil_stp_ini_addr[8]; + + __be32 crc; +} __attribute__ ((packed)); + +struct smp_resp { + u8 frame_type; + u8 function; + u8 result; + u8 reserved; + union { + struct report_general_resp rg; + struct discover_resp disc; + struct report_phy_sata_resp rps; + }; +} __attribute__ ((packed)); + +#else +#error "Bitfield order not defined!" +#endif + +#endif /* _SAS_H_ */ diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 1bc6752..84a6d5f 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -429,4 +429,10 @@ struct scsi_lun { /* Used to obtain the PCI location of a device */ #define SCSI_IOCTL_GET_PCI 0x5387 +/* Pull a u32 out of a SCSI message (using BE SCSI conventions) */ +static inline u32 scsi_to_u32(u8 *ptr) +{ + return (ptr[0]<<24) + (ptr[1]<<16) + (ptr[2]<<8) + ptr[3]; +} + #endif /* _SCSI_SCSI_H */ |
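
A note on the final hunk: scsi_to_u32() is just a byte-order accessor; it reads four bytes MSB-first, which is how multi-byte fields are laid out in SCSI CDBs, sense data and SAS frames. A minimal, illustrative sketch of the kind of use it is intended for follows; the example_sense_information() helper and the fixed-format sense offsets are assumptions for illustration, not something this patch adds:

    #include <scsi/scsi.h>

    /* Illustrative only: read the INFORMATION field (bytes 3..6) of a
     * fixed-format sense buffer.  The field is big-endian on the wire,
     * which is exactly what scsi_to_u32() decodes.
     */
    static u32 example_sense_information(u8 *sense)
    {
            return scsi_to_u32(&sense[3]);
    }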