Diffstat (limited to 'drivers/block')
73 files changed, 62883 insertions, 0 deletions
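The central allocation idiom in the patch below is the "dma loaf": init_dma_loaf() obtains one coherent DMA region and slice_dma_loaf() deals sub-buffers out of it, advancing a CPU pointer and a bus address in lockstep. What follows is a minimal userspace sketch of that arithmetic only; malloc() stands in for pci_alloc_consistent() and uintptr_t for dma_addr_t (both stand-ins are assumptions of the sketch, not the driver's API).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace stand-in for the driver's struct dma_loaf: one backing
   allocation carved into sub-buffers whose CPU pointer and bus address
   advance together.  malloc() replaces pci_alloc_consistent() and
   uintptr_t replaces dma_addr_t; the slicing arithmetic is the driver's. */
struct dma_loaf {
  void *cpu_base, *cpu_free;
  uintptr_t dma_base, dma_free;
  size_t length;
};

static int init_dma_loaf(struct dma_loaf *loaf, size_t len)
{
  void *cpu_addr = malloc(len);
  if (cpu_addr == NULL)
    return 0;
  loaf->cpu_free = loaf->cpu_base = cpu_addr;
  loaf->dma_free = loaf->dma_base = (uintptr_t)cpu_addr;
  loaf->length = len;
  memset(cpu_addr, 0, len);
  return 1;
}

static void *slice_dma_loaf(struct dma_loaf *loaf, size_t len,
                            uintptr_t *dma_handle)
{
  void *cpu_addr = loaf->cpu_free;

  /* No alignment is imposed here; callers pad their lengths, exactly
     as the comment above init_dma_loaf() in the driver requires. */
  assert((char *)cpu_addr + len <= (char *)loaf->cpu_base + loaf->length);
  *dma_handle = loaf->dma_free;
  loaf->cpu_free = (char *)cpu_addr + len;
  loaf->dma_free += len;
  return cpu_addr;
}

int main(void)
{
  struct dma_loaf loaf;
  uintptr_t dma_a, dma_b;
  void *a, *b;

  if (!init_dma_loaf(&loaf, 4096))
    return 1;
  a = slice_dma_loaf(&loaf, 64, &dma_a);   /* first structure */
  b = slice_dma_loaf(&loaf, 128, &dma_b);  /* second structure */
  printf("a=%p dma=%#lx  b=%p dma=%#lx\n",
         a, (unsigned long)dma_a, b, (unsigned long)dma_b);
  free(loaf.cpu_base);                     /* free_dma_loaf() analogue */
  return 0;
}

DAC960_V1_EnableMemoryMailboxInterface() and DAC960_V2_EnableMemoryMailboxInterface() in the patch use exactly this idiom: each sums the sizes of every structure the controller will DMA into, makes a single init_dma_loaf() call, then peels the structures off one by one with slice_dma_loaf().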
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c new file mode 100644 index 0000000..f6a337c --- /dev/null +++ b/drivers/block/DAC960.c @@ -0,0 +1,7249 @@ +/* + + Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers + + Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com> + Portions Copyright 2002 by Mylex (An IBM Business Unit) + + This program is free software; you may redistribute and/or modify it under + the terms of the GNU General Public License Version 2 as published by the + Free Software Foundation. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for complete details. + +*/ + + +#define DAC960_DriverVersion "2.5.49" +#define DAC960_DriverDate "21 Aug 2007" + + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/miscdevice.h> +#include <linux/blkdev.h> +#include <linux/bio.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/genhd.h> +#include <linux/hdreg.h> +#include <linux/blkpg.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/proc_fs.h> +#include <linux/reboot.h> +#include <linux/spinlock.h> +#include <linux/timer.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/jiffies.h> +#include <linux/random.h> +#include <linux/scatterlist.h> +#include <asm/io.h> +#include <asm/uaccess.h> +#include "DAC960.h" + +#define DAC960_GAM_MINOR 252 + + +static DAC960_Controller_T *DAC960_Controllers[DAC960_MaxControllers]; +static int DAC960_ControllerCount; +static struct proc_dir_entry *DAC960_ProcDirectoryEntry; + +static long disk_size(DAC960_Controller_T *p, int drive_nr) +{ + if (p->FirmwareType == DAC960_V1_Controller) { + if (drive_nr >= p->LogicalDriveCount) + return 0; + return p->V1.LogicalDriveInformation[drive_nr]. + LogicalDriveSize; + } else { + DAC960_V2_LogicalDeviceInfo_T *i = + p->V2.LogicalDeviceInformation[drive_nr]; + if (i == NULL) + return 0; + return i->ConfigurableDeviceSize; + } +} + +static int DAC960_open(struct block_device *bdev, fmode_t mode) +{ + struct gendisk *disk = bdev->bd_disk; + DAC960_Controller_T *p = disk->queue->queuedata; + int drive_nr = (long)disk->private_data; + + if (p->FirmwareType == DAC960_V1_Controller) { + if (p->V1.LogicalDriveInformation[drive_nr]. + LogicalDriveState == DAC960_V1_LogicalDrive_Offline) + return -ENXIO; + } else { + DAC960_V2_LogicalDeviceInfo_T *i = + p->V2.LogicalDeviceInformation[drive_nr]; + if (!i || i->LogicalDeviceState == DAC960_V2_LogicalDevice_Offline) + return -ENXIO; + } + + check_disk_change(bdev); + + if (!get_capacity(p->disks[drive_nr])) + return -ENXIO; + return 0; +} + +static int DAC960_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + struct gendisk *disk = bdev->bd_disk; + DAC960_Controller_T *p = disk->queue->queuedata; + int drive_nr = (long)disk->private_data; + + if (p->FirmwareType == DAC960_V1_Controller) { + geo->heads = p->V1.GeometryTranslationHeads; + geo->sectors = p->V1.GeometryTranslationSectors; + geo->cylinders = p->V1.LogicalDriveInformation[drive_nr]. 
+ LogicalDriveSize / (geo->heads * geo->sectors); + } else { + DAC960_V2_LogicalDeviceInfo_T *i = + p->V2.LogicalDeviceInformation[drive_nr]; + switch (i->DriveGeometry) { + case DAC960_V2_Geometry_128_32: + geo->heads = 128; + geo->sectors = 32; + break; + case DAC960_V2_Geometry_255_63: + geo->heads = 255; + geo->sectors = 63; + break; + default: + DAC960_Error("Illegal Logical Device Geometry %d\n", + p, i->DriveGeometry); + return -EINVAL; + } + + geo->cylinders = i->ConfigurableDeviceSize / + (geo->heads * geo->sectors); + } + + return 0; +} + +static int DAC960_media_changed(struct gendisk *disk) +{ + DAC960_Controller_T *p = disk->queue->queuedata; + int drive_nr = (long)disk->private_data; + + if (!p->LogicalDriveInitiallyAccessible[drive_nr]) + return 1; + return 0; +} + +static int DAC960_revalidate_disk(struct gendisk *disk) +{ + DAC960_Controller_T *p = disk->queue->queuedata; + int unit = (long)disk->private_data; + + set_capacity(disk, disk_size(p, unit)); + return 0; +} + +static struct block_device_operations DAC960_BlockDeviceOperations = { + .owner = THIS_MODULE, + .open = DAC960_open, + .getgeo = DAC960_getgeo, + .media_changed = DAC960_media_changed, + .revalidate_disk = DAC960_revalidate_disk, +}; + + +/* + DAC960_AnnounceDriver announces the Driver Version and Date, Author's Name, + Copyright Notice, and Electronic Mail Address. +*/ + +static void DAC960_AnnounceDriver(DAC960_Controller_T *Controller) +{ + DAC960_Announce("***** DAC960 RAID Driver Version " + DAC960_DriverVersion " of " + DAC960_DriverDate " *****\n", Controller); + DAC960_Announce("Copyright 1998-2001 by Leonard N. Zubkoff " + "<lnz@dandelion.com>\n", Controller); +} + + +/* + DAC960_Failure prints a standardized error message, and then returns false. +*/ + +static bool DAC960_Failure(DAC960_Controller_T *Controller, + unsigned char *ErrorMessage) +{ + DAC960_Error("While configuring DAC960 PCI RAID Controller at\n", + Controller); + if (Controller->IO_Address == 0) + DAC960_Error("PCI Bus %d Device %d Function %d I/O Address N/A " + "PCI Address 0x%X\n", Controller, + Controller->Bus, Controller->Device, + Controller->Function, Controller->PCI_Address); + else DAC960_Error("PCI Bus %d Device %d Function %d I/O Address " + "0x%X PCI Address 0x%X\n", Controller, + Controller->Bus, Controller->Device, + Controller->Function, Controller->IO_Address, + Controller->PCI_Address); + DAC960_Error("%s FAILED - DETACHING\n", Controller, ErrorMessage); + return false; +} + +/* + init_dma_loaf() and slice_dma_loaf() are helper functions for + aggregating the dma-mapped memory for a well-known collection of + data structures that are of different lengths. + + These routines don't guarantee any alignment. The caller must + include any space needed for alignment in the sizes of the structures + that are passed in. 
+ */ + +static bool init_dma_loaf(struct pci_dev *dev, struct dma_loaf *loaf, + size_t len) +{ + void *cpu_addr; + dma_addr_t dma_handle; + + cpu_addr = pci_alloc_consistent(dev, len, &dma_handle); + if (cpu_addr == NULL) + return false; + + loaf->cpu_free = loaf->cpu_base = cpu_addr; + loaf->dma_free =loaf->dma_base = dma_handle; + loaf->length = len; + memset(cpu_addr, 0, len); + return true; +} + +static void *slice_dma_loaf(struct dma_loaf *loaf, size_t len, + dma_addr_t *dma_handle) +{ + void *cpu_end = loaf->cpu_free + len; + void *cpu_addr = loaf->cpu_free; + + BUG_ON(cpu_end > loaf->cpu_base + loaf->length); + *dma_handle = loaf->dma_free; + loaf->cpu_free = cpu_end; + loaf->dma_free += len; + return cpu_addr; +} + +static void free_dma_loaf(struct pci_dev *dev, struct dma_loaf *loaf_handle) +{ + if (loaf_handle->cpu_base != NULL) + pci_free_consistent(dev, loaf_handle->length, + loaf_handle->cpu_base, loaf_handle->dma_base); +} + + +/* + DAC960_CreateAuxiliaryStructures allocates and initializes the auxiliary + data structures for Controller. It returns true on success and false on + failure. +*/ + +static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller) +{ + int CommandAllocationLength, CommandAllocationGroupSize; + int CommandsRemaining = 0, CommandIdentifier, CommandGroupByteCount; + void *AllocationPointer = NULL; + void *ScatterGatherCPU = NULL; + dma_addr_t ScatterGatherDMA; + struct pci_pool *ScatterGatherPool; + void *RequestSenseCPU = NULL; + dma_addr_t RequestSenseDMA; + struct pci_pool *RequestSensePool = NULL; + + if (Controller->FirmwareType == DAC960_V1_Controller) + { + CommandAllocationLength = offsetof(DAC960_Command_T, V1.EndMarker); + CommandAllocationGroupSize = DAC960_V1_CommandAllocationGroupSize; + ScatterGatherPool = pci_pool_create("DAC960_V1_ScatterGather", + Controller->PCIDevice, + DAC960_V1_ScatterGatherLimit * sizeof(DAC960_V1_ScatterGatherSegment_T), + sizeof(DAC960_V1_ScatterGatherSegment_T), 0); + if (ScatterGatherPool == NULL) + return DAC960_Failure(Controller, + "AUXILIARY STRUCTURE CREATION (SG)"); + Controller->ScatterGatherPool = ScatterGatherPool; + } + else + { + CommandAllocationLength = offsetof(DAC960_Command_T, V2.EndMarker); + CommandAllocationGroupSize = DAC960_V2_CommandAllocationGroupSize; + ScatterGatherPool = pci_pool_create("DAC960_V2_ScatterGather", + Controller->PCIDevice, + DAC960_V2_ScatterGatherLimit * sizeof(DAC960_V2_ScatterGatherSegment_T), + sizeof(DAC960_V2_ScatterGatherSegment_T), 0); + if (ScatterGatherPool == NULL) + return DAC960_Failure(Controller, + "AUXILIARY STRUCTURE CREATION (SG)"); + RequestSensePool = pci_pool_create("DAC960_V2_RequestSense", + Controller->PCIDevice, sizeof(DAC960_SCSI_RequestSense_T), + sizeof(int), 0); + if (RequestSensePool == NULL) { + pci_pool_destroy(ScatterGatherPool); + return DAC960_Failure(Controller, + "AUXILIARY STRUCTURE CREATION (SG)"); + } + Controller->ScatterGatherPool = ScatterGatherPool; + Controller->V2.RequestSensePool = RequestSensePool; + } + Controller->CommandAllocationGroupSize = CommandAllocationGroupSize; + Controller->FreeCommands = NULL; + for (CommandIdentifier = 1; + CommandIdentifier <= Controller->DriverQueueDepth; + CommandIdentifier++) + { + DAC960_Command_T *Command; + if (--CommandsRemaining <= 0) + { + CommandsRemaining = + Controller->DriverQueueDepth - CommandIdentifier + 1; + if (CommandsRemaining > CommandAllocationGroupSize) + CommandsRemaining = CommandAllocationGroupSize; + CommandGroupByteCount = + CommandsRemaining * 
CommandAllocationLength; + AllocationPointer = kzalloc(CommandGroupByteCount, GFP_ATOMIC); + if (AllocationPointer == NULL) + return DAC960_Failure(Controller, + "AUXILIARY STRUCTURE CREATION"); + } + Command = (DAC960_Command_T *) AllocationPointer; + AllocationPointer += CommandAllocationLength; + Command->CommandIdentifier = CommandIdentifier; + Command->Controller = Controller; + Command->Next = Controller->FreeCommands; + Controller->FreeCommands = Command; + Controller->Commands[CommandIdentifier-1] = Command; + ScatterGatherCPU = pci_pool_alloc(ScatterGatherPool, GFP_ATOMIC, + &ScatterGatherDMA); + if (ScatterGatherCPU == NULL) + return DAC960_Failure(Controller, "AUXILIARY STRUCTURE CREATION"); + + if (RequestSensePool != NULL) { + RequestSenseCPU = pci_pool_alloc(RequestSensePool, GFP_ATOMIC, + &RequestSenseDMA); + if (RequestSenseCPU == NULL) { + pci_pool_free(ScatterGatherPool, ScatterGatherCPU, + ScatterGatherDMA); + return DAC960_Failure(Controller, + "AUXILIARY STRUCTURE CREATION"); + } + } + if (Controller->FirmwareType == DAC960_V1_Controller) { + Command->cmd_sglist = Command->V1.ScatterList; + Command->V1.ScatterGatherList = + (DAC960_V1_ScatterGatherSegment_T *)ScatterGatherCPU; + Command->V1.ScatterGatherListDMA = ScatterGatherDMA; + sg_init_table(Command->cmd_sglist, DAC960_V1_ScatterGatherLimit); + } else { + Command->cmd_sglist = Command->V2.ScatterList; + Command->V2.ScatterGatherList = + (DAC960_V2_ScatterGatherSegment_T *)ScatterGatherCPU; + Command->V2.ScatterGatherListDMA = ScatterGatherDMA; + Command->V2.RequestSense = + (DAC960_SCSI_RequestSense_T *)RequestSenseCPU; + Command->V2.RequestSenseDMA = RequestSenseDMA; + sg_init_table(Command->cmd_sglist, DAC960_V2_ScatterGatherLimit); + } + } + return true; +} + + +/* + DAC960_DestroyAuxiliaryStructures deallocates the auxiliary data + structures for Controller. +*/ + +static void DAC960_DestroyAuxiliaryStructures(DAC960_Controller_T *Controller) +{ + int i; + struct pci_pool *ScatterGatherPool = Controller->ScatterGatherPool; + struct pci_pool *RequestSensePool = NULL; + void *ScatterGatherCPU; + dma_addr_t ScatterGatherDMA; + void *RequestSenseCPU; + dma_addr_t RequestSenseDMA; + DAC960_Command_T *CommandGroup = NULL; + + + if (Controller->FirmwareType == DAC960_V2_Controller) + RequestSensePool = Controller->V2.RequestSensePool; + + Controller->FreeCommands = NULL; + for (i = 0; i < Controller->DriverQueueDepth; i++) + { + DAC960_Command_T *Command = Controller->Commands[i]; + + if (Command == NULL) + continue; + + if (Controller->FirmwareType == DAC960_V1_Controller) { + ScatterGatherCPU = (void *)Command->V1.ScatterGatherList; + ScatterGatherDMA = Command->V1.ScatterGatherListDMA; + RequestSenseCPU = NULL; + RequestSenseDMA = (dma_addr_t)0; + } else { + ScatterGatherCPU = (void *)Command->V2.ScatterGatherList; + ScatterGatherDMA = Command->V2.ScatterGatherListDMA; + RequestSenseCPU = (void *)Command->V2.RequestSense; + RequestSenseDMA = Command->V2.RequestSenseDMA; + } + if (ScatterGatherCPU != NULL) + pci_pool_free(ScatterGatherPool, ScatterGatherCPU, ScatterGatherDMA); + if (RequestSenseCPU != NULL) + pci_pool_free(RequestSensePool, RequestSenseCPU, RequestSenseDMA); + + if ((Command->CommandIdentifier + % Controller->CommandAllocationGroupSize) == 1) { + /* + * We can't free the group of commands until all of the + * request sense and scatter gather dma structures are free. + * Remember the beginning of the group, but don't free it + * until we've reached the beginning of the next group. 
+ */ + kfree(CommandGroup); + CommandGroup = Command; + } + Controller->Commands[i] = NULL; + } + kfree(CommandGroup); + + if (Controller->CombinedStatusBuffer != NULL) + { + kfree(Controller->CombinedStatusBuffer); + Controller->CombinedStatusBuffer = NULL; + Controller->CurrentStatusBuffer = NULL; + } + + if (ScatterGatherPool != NULL) + pci_pool_destroy(ScatterGatherPool); + if (Controller->FirmwareType == DAC960_V1_Controller) + return; + + if (RequestSensePool != NULL) + pci_pool_destroy(RequestSensePool); + + for (i = 0; i < DAC960_MaxLogicalDrives; i++) { + kfree(Controller->V2.LogicalDeviceInformation[i]); + Controller->V2.LogicalDeviceInformation[i] = NULL; + } + + for (i = 0; i < DAC960_V2_MaxPhysicalDevices; i++) + { + kfree(Controller->V2.PhysicalDeviceInformation[i]); + Controller->V2.PhysicalDeviceInformation[i] = NULL; + kfree(Controller->V2.InquiryUnitSerialNumber[i]); + Controller->V2.InquiryUnitSerialNumber[i] = NULL; + } +} + + +/* + DAC960_V1_ClearCommand clears critical fields of Command for DAC960 V1 + Firmware Controllers. +*/ + +static inline void DAC960_V1_ClearCommand(DAC960_Command_T *Command) +{ + DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox; + memset(CommandMailbox, 0, sizeof(DAC960_V1_CommandMailbox_T)); + Command->V1.CommandStatus = 0; +} + + +/* + DAC960_V2_ClearCommand clears critical fields of Command for DAC960 V2 + Firmware Controllers. +*/ + +static inline void DAC960_V2_ClearCommand(DAC960_Command_T *Command) +{ + DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox; + memset(CommandMailbox, 0, sizeof(DAC960_V2_CommandMailbox_T)); + Command->V2.CommandStatus = 0; +} + + +/* + DAC960_AllocateCommand allocates a Command structure from Controller's + free list. During driver initialization, a special initialization command + has been placed on the free list to guarantee that command allocation can + never fail. +*/ + +static inline DAC960_Command_T *DAC960_AllocateCommand(DAC960_Controller_T + *Controller) +{ + DAC960_Command_T *Command = Controller->FreeCommands; + if (Command == NULL) return NULL; + Controller->FreeCommands = Command->Next; + Command->Next = NULL; + return Command; +} + + +/* + DAC960_DeallocateCommand deallocates Command, returning it to Controller's + free list. +*/ + +static inline void DAC960_DeallocateCommand(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + + Command->Request = NULL; + Command->Next = Controller->FreeCommands; + Controller->FreeCommands = Command; +} + + +/* + DAC960_WaitForCommand waits for a wake_up on Controller's Command Wait Queue. +*/ + +static void DAC960_WaitForCommand(DAC960_Controller_T *Controller) +{ + spin_unlock_irq(&Controller->queue_lock); + __wait_event(Controller->CommandWaitQueue, Controller->FreeCommands); + spin_lock_irq(&Controller->queue_lock); +} + +/* + DAC960_GEM_QueueCommand queues Command for DAC960 GEM Series Controllers. 
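+ + A new command becomes visible to the controller once it is copied into the next slot of the memory mailbox ring. The doorbell is rung only when the controller may have stopped scanning the ring, i.e. when at least one of the two most recently posted mailboxes has already been consumed (its first word is zero again).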
+*/ + +static void DAC960_GEM_QueueCommand(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox; + DAC960_V2_CommandMailbox_T *NextCommandMailbox = + Controller->V2.NextCommandMailbox; + + CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier; + DAC960_GEM_WriteCommandMailbox(NextCommandMailbox, CommandMailbox); + + if (Controller->V2.PreviousCommandMailbox1->Words[0] == 0 || + Controller->V2.PreviousCommandMailbox2->Words[0] == 0) + DAC960_GEM_MemoryMailboxNewCommand(ControllerBaseAddress); + + Controller->V2.PreviousCommandMailbox2 = + Controller->V2.PreviousCommandMailbox1; + Controller->V2.PreviousCommandMailbox1 = NextCommandMailbox; + + if (++NextCommandMailbox > Controller->V2.LastCommandMailbox) + NextCommandMailbox = Controller->V2.FirstCommandMailbox; + + Controller->V2.NextCommandMailbox = NextCommandMailbox; +} + +/* + DAC960_BA_QueueCommand queues Command for DAC960 BA Series Controllers. +*/ + +static void DAC960_BA_QueueCommand(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox; + DAC960_V2_CommandMailbox_T *NextCommandMailbox = + Controller->V2.NextCommandMailbox; + CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier; + DAC960_BA_WriteCommandMailbox(NextCommandMailbox, CommandMailbox); + if (Controller->V2.PreviousCommandMailbox1->Words[0] == 0 || + Controller->V2.PreviousCommandMailbox2->Words[0] == 0) + DAC960_BA_MemoryMailboxNewCommand(ControllerBaseAddress); + Controller->V2.PreviousCommandMailbox2 = + Controller->V2.PreviousCommandMailbox1; + Controller->V2.PreviousCommandMailbox1 = NextCommandMailbox; + if (++NextCommandMailbox > Controller->V2.LastCommandMailbox) + NextCommandMailbox = Controller->V2.FirstCommandMailbox; + Controller->V2.NextCommandMailbox = NextCommandMailbox; +} + + +/* + DAC960_LP_QueueCommand queues Command for DAC960 LP Series Controllers. +*/ + +static void DAC960_LP_QueueCommand(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox; + DAC960_V2_CommandMailbox_T *NextCommandMailbox = + Controller->V2.NextCommandMailbox; + CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier; + DAC960_LP_WriteCommandMailbox(NextCommandMailbox, CommandMailbox); + if (Controller->V2.PreviousCommandMailbox1->Words[0] == 0 || + Controller->V2.PreviousCommandMailbox2->Words[0] == 0) + DAC960_LP_MemoryMailboxNewCommand(ControllerBaseAddress); + Controller->V2.PreviousCommandMailbox2 = + Controller->V2.PreviousCommandMailbox1; + Controller->V2.PreviousCommandMailbox1 = NextCommandMailbox; + if (++NextCommandMailbox > Controller->V2.LastCommandMailbox) + NextCommandMailbox = Controller->V2.FirstCommandMailbox; + Controller->V2.NextCommandMailbox = NextCommandMailbox; +} + + +/* + DAC960_LA_QueueCommandDualMode queues Command for DAC960 LA Series + Controllers with Dual Mode Firmware. 
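+ + Dual Mode firmware is notified of new memory-mailbox commands via the memory mailbox doorbell; the Single Mode variant below is identical except that it must use the hardware mailbox doorbell instead.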
+*/ + +static void DAC960_LA_QueueCommandDualMode(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox; + DAC960_V1_CommandMailbox_T *NextCommandMailbox = + Controller->V1.NextCommandMailbox; + CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier; + DAC960_LA_WriteCommandMailbox(NextCommandMailbox, CommandMailbox); + if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 || + Controller->V1.PreviousCommandMailbox2->Words[0] == 0) + DAC960_LA_MemoryMailboxNewCommand(ControllerBaseAddress); + Controller->V1.PreviousCommandMailbox2 = + Controller->V1.PreviousCommandMailbox1; + Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox; + if (++NextCommandMailbox > Controller->V1.LastCommandMailbox) + NextCommandMailbox = Controller->V1.FirstCommandMailbox; + Controller->V1.NextCommandMailbox = NextCommandMailbox; +} + + +/* + DAC960_LA_QueueCommandSingleMode queues Command for DAC960 LA Series + Controllers with Single Mode Firmware. +*/ + +static void DAC960_LA_QueueCommandSingleMode(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox; + DAC960_V1_CommandMailbox_T *NextCommandMailbox = + Controller->V1.NextCommandMailbox; + CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier; + DAC960_LA_WriteCommandMailbox(NextCommandMailbox, CommandMailbox); + if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 || + Controller->V1.PreviousCommandMailbox2->Words[0] == 0) + DAC960_LA_HardwareMailboxNewCommand(ControllerBaseAddress); + Controller->V1.PreviousCommandMailbox2 = + Controller->V1.PreviousCommandMailbox1; + Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox; + if (++NextCommandMailbox > Controller->V1.LastCommandMailbox) + NextCommandMailbox = Controller->V1.FirstCommandMailbox; + Controller->V1.NextCommandMailbox = NextCommandMailbox; +} + + +/* + DAC960_PG_QueueCommandDualMode queues Command for DAC960 PG Series + Controllers with Dual Mode Firmware. +*/ + +static void DAC960_PG_QueueCommandDualMode(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox; + DAC960_V1_CommandMailbox_T *NextCommandMailbox = + Controller->V1.NextCommandMailbox; + CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier; + DAC960_PG_WriteCommandMailbox(NextCommandMailbox, CommandMailbox); + if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 || + Controller->V1.PreviousCommandMailbox2->Words[0] == 0) + DAC960_PG_MemoryMailboxNewCommand(ControllerBaseAddress); + Controller->V1.PreviousCommandMailbox2 = + Controller->V1.PreviousCommandMailbox1; + Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox; + if (++NextCommandMailbox > Controller->V1.LastCommandMailbox) + NextCommandMailbox = Controller->V1.FirstCommandMailbox; + Controller->V1.NextCommandMailbox = NextCommandMailbox; +} + + +/* + DAC960_PG_QueueCommandSingleMode queues Command for DAC960 PG Series + Controllers with Single Mode Firmware. 
+*/ + +static void DAC960_PG_QueueCommandSingleMode(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox; + DAC960_V1_CommandMailbox_T *NextCommandMailbox = + Controller->V1.NextCommandMailbox; + CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier; + DAC960_PG_WriteCommandMailbox(NextCommandMailbox, CommandMailbox); + if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 || + Controller->V1.PreviousCommandMailbox2->Words[0] == 0) + DAC960_PG_HardwareMailboxNewCommand(ControllerBaseAddress); + Controller->V1.PreviousCommandMailbox2 = + Controller->V1.PreviousCommandMailbox1; + Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox; + if (++NextCommandMailbox > Controller->V1.LastCommandMailbox) + NextCommandMailbox = Controller->V1.FirstCommandMailbox; + Controller->V1.NextCommandMailbox = NextCommandMailbox; +} + + +/* + DAC960_PD_QueueCommand queues Command for DAC960 PD Series Controllers. +*/ + +static void DAC960_PD_QueueCommand(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox; + CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier; + while (DAC960_PD_MailboxFullP(ControllerBaseAddress)) + udelay(1); + DAC960_PD_WriteCommandMailbox(ControllerBaseAddress, CommandMailbox); + DAC960_PD_NewCommand(ControllerBaseAddress); +} + + +/* + DAC960_P_QueueCommand queues Command for DAC960 P Series Controllers. +*/ + +static void DAC960_P_QueueCommand(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox; + CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier; + switch (CommandMailbox->Common.CommandOpcode) + { + case DAC960_V1_Enquiry: + CommandMailbox->Common.CommandOpcode = DAC960_V1_Enquiry_Old; + break; + case DAC960_V1_GetDeviceState: + CommandMailbox->Common.CommandOpcode = DAC960_V1_GetDeviceState_Old; + break; + case DAC960_V1_Read: + CommandMailbox->Common.CommandOpcode = DAC960_V1_Read_Old; + DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox); + break; + case DAC960_V1_Write: + CommandMailbox->Common.CommandOpcode = DAC960_V1_Write_Old; + DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox); + break; + case DAC960_V1_ReadWithScatterGather: + CommandMailbox->Common.CommandOpcode = + DAC960_V1_ReadWithScatterGather_Old; + DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox); + break; + case DAC960_V1_WriteWithScatterGather: + CommandMailbox->Common.CommandOpcode = + DAC960_V1_WriteWithScatterGather_Old; + DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox); + break; + default: + break; + } + while (DAC960_PD_MailboxFullP(ControllerBaseAddress)) + udelay(1); + DAC960_PD_WriteCommandMailbox(ControllerBaseAddress, CommandMailbox); + DAC960_PD_NewCommand(ControllerBaseAddress); +} + + +/* + DAC960_ExecuteCommand executes Command and waits for completion. 
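+ + The command is queued under the controller's queue lock; unless this routine was entered from interrupt context (where it cannot sleep), it then blocks on an on-stack completion until the command finishes.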
+*/ + +static void DAC960_ExecuteCommand(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + DECLARE_COMPLETION_ONSTACK(Completion); + unsigned long flags; + Command->Completion = &Completion; + + spin_lock_irqsave(&Controller->queue_lock, flags); + DAC960_QueueCommand(Command); + spin_unlock_irqrestore(&Controller->queue_lock, flags); + + if (in_interrupt()) + return; + wait_for_completion(&Completion); +} + + +/* + DAC960_V1_ExecuteType3 executes a DAC960 V1 Firmware Controller Type 3 + Command and waits for completion. It returns true on success and false + on failure. +*/ + +static bool DAC960_V1_ExecuteType3(DAC960_Controller_T *Controller, + DAC960_V1_CommandOpcode_T CommandOpcode, + dma_addr_t DataDMA) +{ + DAC960_Command_T *Command = DAC960_AllocateCommand(Controller); + DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox; + DAC960_V1_CommandStatus_T CommandStatus; + DAC960_V1_ClearCommand(Command); + Command->CommandType = DAC960_ImmediateCommand; + CommandMailbox->Type3.CommandOpcode = CommandOpcode; + CommandMailbox->Type3.BusAddress = DataDMA; + DAC960_ExecuteCommand(Command); + CommandStatus = Command->V1.CommandStatus; + DAC960_DeallocateCommand(Command); + return (CommandStatus == DAC960_V1_NormalCompletion); +} + + +/* + DAC960_V1_ExecuteType3B executes a DAC960 V1 Firmware Controller Type 3B + Command and waits for completion. It returns true on success and false + on failure. +*/ + +static bool DAC960_V1_ExecuteType3B(DAC960_Controller_T *Controller, + DAC960_V1_CommandOpcode_T CommandOpcode, + unsigned char CommandOpcode2, + dma_addr_t DataDMA) +{ + DAC960_Command_T *Command = DAC960_AllocateCommand(Controller); + DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox; + DAC960_V1_CommandStatus_T CommandStatus; + DAC960_V1_ClearCommand(Command); + Command->CommandType = DAC960_ImmediateCommand; + CommandMailbox->Type3B.CommandOpcode = CommandOpcode; + CommandMailbox->Type3B.CommandOpcode2 = CommandOpcode2; + CommandMailbox->Type3B.BusAddress = DataDMA; + DAC960_ExecuteCommand(Command); + CommandStatus = Command->V1.CommandStatus; + DAC960_DeallocateCommand(Command); + return (CommandStatus == DAC960_V1_NormalCompletion); +} + + +/* + DAC960_V1_ExecuteType3D executes a DAC960 V1 Firmware Controller Type 3D + Command and waits for completion. It returns true on success and false + on failure. +*/ + +static bool DAC960_V1_ExecuteType3D(DAC960_Controller_T *Controller, + DAC960_V1_CommandOpcode_T CommandOpcode, + unsigned char Channel, + unsigned char TargetID, + dma_addr_t DataDMA) +{ + DAC960_Command_T *Command = DAC960_AllocateCommand(Controller); + DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox; + DAC960_V1_CommandStatus_T CommandStatus; + DAC960_V1_ClearCommand(Command); + Command->CommandType = DAC960_ImmediateCommand; + CommandMailbox->Type3D.CommandOpcode = CommandOpcode; + CommandMailbox->Type3D.Channel = Channel; + CommandMailbox->Type3D.TargetID = TargetID; + CommandMailbox->Type3D.BusAddress = DataDMA; + DAC960_ExecuteCommand(Command); + CommandStatus = Command->V1.CommandStatus; + DAC960_DeallocateCommand(Command); + return (CommandStatus == DAC960_V1_NormalCompletion); +} + + +/* + DAC960_V2_GeneralInfo executes a DAC960 V2 Firmware General Information + Reading IOCTL Command and waits for completion. It returns true on success + and false on failure.
+ + Returns data in the controller's HealthStatusBuffer, which is dma-able memory. +*/ + +static bool DAC960_V2_GeneralInfo(DAC960_Controller_T *Controller) +{ + DAC960_Command_T *Command = DAC960_AllocateCommand(Controller); + DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox; + DAC960_V2_CommandStatus_T CommandStatus; + DAC960_V2_ClearCommand(Command); + Command->CommandType = DAC960_ImmediateCommand; + CommandMailbox->Common.CommandOpcode = DAC960_V2_IOCTL; + CommandMailbox->Common.CommandControlBits + .DataTransferControllerToHost = true; + CommandMailbox->Common.CommandControlBits + .NoAutoRequestSense = true; + CommandMailbox->Common.DataTransferSize = sizeof(DAC960_V2_HealthStatusBuffer_T); + CommandMailbox->Common.IOCTL_Opcode = DAC960_V2_GetHealthStatus; + CommandMailbox->Common.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentDataPointer = + Controller->V2.HealthStatusBufferDMA; + CommandMailbox->Common.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentByteCount = + CommandMailbox->Common.DataTransferSize; + DAC960_ExecuteCommand(Command); + CommandStatus = Command->V2.CommandStatus; + DAC960_DeallocateCommand(Command); + return (CommandStatus == DAC960_V2_NormalCompletion); +} + + +/* + DAC960_V2_NewControllerInfo executes a DAC960 V2 Firmware Controller + Information Reading IOCTL Command and waits for completion. It returns + true on success and false on failure. + + Data is returned in the controller's V2.NewControllerInformation dma-able + memory buffer. +*/ + +static bool DAC960_V2_NewControllerInfo(DAC960_Controller_T *Controller) +{ + DAC960_Command_T *Command = DAC960_AllocateCommand(Controller); + DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox; + DAC960_V2_CommandStatus_T CommandStatus; + DAC960_V2_ClearCommand(Command); + Command->CommandType = DAC960_ImmediateCommand; + CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL; + CommandMailbox->ControllerInfo.CommandControlBits + .DataTransferControllerToHost = true; + CommandMailbox->ControllerInfo.CommandControlBits + .NoAutoRequestSense = true; + CommandMailbox->ControllerInfo.DataTransferSize = sizeof(DAC960_V2_ControllerInfo_T); + CommandMailbox->ControllerInfo.ControllerNumber = 0; + CommandMailbox->ControllerInfo.IOCTL_Opcode = DAC960_V2_GetControllerInfo; + CommandMailbox->ControllerInfo.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentDataPointer = + Controller->V2.NewControllerInformationDMA; + CommandMailbox->ControllerInfo.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentByteCount = + CommandMailbox->ControllerInfo.DataTransferSize; + DAC960_ExecuteCommand(Command); + CommandStatus = Command->V2.CommandStatus; + DAC960_DeallocateCommand(Command); + return (CommandStatus == DAC960_V2_NormalCompletion); +} + + +/* + DAC960_V2_NewLogicalDeviceInfo executes a DAC960 V2 Firmware Controller Logical + Device Information Reading IOCTL Command and waits for completion. It + returns true on success and false on failure.
+ + Data is returned in the controller's V2.NewLogicalDeviceInformation dma-able + memory buffer. +*/ + +static bool DAC960_V2_NewLogicalDeviceInfo(DAC960_Controller_T *Controller, + unsigned short LogicalDeviceNumber) +{ + DAC960_Command_T *Command = DAC960_AllocateCommand(Controller); + DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox; + DAC960_V2_CommandStatus_T CommandStatus; + + DAC960_V2_ClearCommand(Command); + Command->CommandType = DAC960_ImmediateCommand; + CommandMailbox->LogicalDeviceInfo.CommandOpcode = + DAC960_V2_IOCTL; + CommandMailbox->LogicalDeviceInfo.CommandControlBits + .DataTransferControllerToHost = true; + CommandMailbox->LogicalDeviceInfo.CommandControlBits + .NoAutoRequestSense = true; + CommandMailbox->LogicalDeviceInfo.DataTransferSize = + sizeof(DAC960_V2_LogicalDeviceInfo_T); + CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber = + LogicalDeviceNumber; + CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode = DAC960_V2_GetLogicalDeviceInfoValid; + CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentDataPointer = + Controller->V2.NewLogicalDeviceInformationDMA; + CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentByteCount = + CommandMailbox->LogicalDeviceInfo.DataTransferSize; + DAC960_ExecuteCommand(Command); + CommandStatus = Command->V2.CommandStatus; + DAC960_DeallocateCommand(Command); + return (CommandStatus == DAC960_V2_NormalCompletion); +} + + +/* + DAC960_V2_NewPhysicalDeviceInfo executes a DAC960 V2 Firmware Controller "Read + Physical Device Information" IOCTL Command and waits for completion. It + returns true on success and false on failure. + + The Channel, TargetID, LogicalUnit arguments should be 0 the first time + this function is called for a given controller. This will return data + for the "first" device on that controller. The returned data includes a + Channel, TargetID, LogicalUnit that can be passed in to this routine to + get data for the NEXT device on that controller. + + Data is stored in the controller's V2.NewPhysicalDeviceInformation dma-able + memory buffer.
+ +*/ + +static bool DAC960_V2_NewPhysicalDeviceInfo(DAC960_Controller_T *Controller, + unsigned char Channel, + unsigned char TargetID, + unsigned char LogicalUnit) +{ + DAC960_Command_T *Command = DAC960_AllocateCommand(Controller); + DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox; + DAC960_V2_CommandStatus_T CommandStatus; + + DAC960_V2_ClearCommand(Command); + Command->CommandType = DAC960_ImmediateCommand; + CommandMailbox->PhysicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL; + CommandMailbox->PhysicalDeviceInfo.CommandControlBits + .DataTransferControllerToHost = true; + CommandMailbox->PhysicalDeviceInfo.CommandControlBits + .NoAutoRequestSense = true; + CommandMailbox->PhysicalDeviceInfo.DataTransferSize = + sizeof(DAC960_V2_PhysicalDeviceInfo_T); + CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.LogicalUnit = LogicalUnit; + CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.TargetID = TargetID; + CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.Channel = Channel; + CommandMailbox->PhysicalDeviceInfo.IOCTL_Opcode = + DAC960_V2_GetPhysicalDeviceInfoValid; + CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentDataPointer = + Controller->V2.NewPhysicalDeviceInformationDMA; + CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentByteCount = + CommandMailbox->PhysicalDeviceInfo.DataTransferSize; + DAC960_ExecuteCommand(Command); + CommandStatus = Command->V2.CommandStatus; + DAC960_DeallocateCommand(Command); + return (CommandStatus == DAC960_V2_NormalCompletion); +} + + +static void DAC960_V2_ConstructNewUnitSerialNumber( + DAC960_Controller_T *Controller, + DAC960_V2_CommandMailbox_T *CommandMailbox, int Channel, int TargetID, + int LogicalUnit) +{ + CommandMailbox->SCSI_10.CommandOpcode = DAC960_V2_SCSI_10_Passthru; + CommandMailbox->SCSI_10.CommandControlBits + .DataTransferControllerToHost = true; + CommandMailbox->SCSI_10.CommandControlBits + .NoAutoRequestSense = true; + CommandMailbox->SCSI_10.DataTransferSize = + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T); + CommandMailbox->SCSI_10.PhysicalDevice.LogicalUnit = LogicalUnit; + CommandMailbox->SCSI_10.PhysicalDevice.TargetID = TargetID; + CommandMailbox->SCSI_10.PhysicalDevice.Channel = Channel; + CommandMailbox->SCSI_10.CDBLength = 6; + CommandMailbox->SCSI_10.SCSI_CDB[0] = 0x12; /* INQUIRY */ + CommandMailbox->SCSI_10.SCSI_CDB[1] = 1; /* EVPD = 1 */ + CommandMailbox->SCSI_10.SCSI_CDB[2] = 0x80; /* Page Code */ + CommandMailbox->SCSI_10.SCSI_CDB[3] = 0; /* Reserved */ + CommandMailbox->SCSI_10.SCSI_CDB[4] = + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T); + CommandMailbox->SCSI_10.SCSI_CDB[5] = 0; /* Control */ + CommandMailbox->SCSI_10.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentDataPointer = + Controller->V2.NewInquiryUnitSerialNumberDMA; + CommandMailbox->SCSI_10.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentByteCount = + CommandMailbox->SCSI_10.DataTransferSize; +} + + +/* + DAC960_V2_NewInquiryUnitSerialNumber executes a SCSI pass-through + Inquiry command to a SCSI device identified by Channel number, + Target ID, and Logical Unit Number, and waits for completion + of the command. + + The return data includes Unit Serial Number information for the + specified device. + + Data is stored in the controller's V2.NewInquiryUnitSerialNumber dma-able + memory buffer.
+*/ + +static bool DAC960_V2_NewInquiryUnitSerialNumber(DAC960_Controller_T *Controller, + int Channel, int TargetID, int LogicalUnit) +{ + DAC960_Command_T *Command; + DAC960_V2_CommandMailbox_T *CommandMailbox; + DAC960_V2_CommandStatus_T CommandStatus; + + Command = DAC960_AllocateCommand(Controller); + CommandMailbox = &Command->V2.CommandMailbox; + DAC960_V2_ClearCommand(Command); + Command->CommandType = DAC960_ImmediateCommand; + + DAC960_V2_ConstructNewUnitSerialNumber(Controller, CommandMailbox, + Channel, TargetID, LogicalUnit); + + DAC960_ExecuteCommand(Command); + CommandStatus = Command->V2.CommandStatus; + DAC960_DeallocateCommand(Command); + return (CommandStatus == DAC960_V2_NormalCompletion); +} + + +/* + DAC960_V2_DeviceOperation executes a DAC960 V2 Firmware Controller Device + Operation IOCTL Command and waits for completion. It returns true on + success and false on failure. +*/ + +static bool DAC960_V2_DeviceOperation(DAC960_Controller_T *Controller, + DAC960_V2_IOCTL_Opcode_T IOCTL_Opcode, + DAC960_V2_OperationDevice_T + OperationDevice) +{ + DAC960_Command_T *Command = DAC960_AllocateCommand(Controller); + DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox; + DAC960_V2_CommandStatus_T CommandStatus; + DAC960_V2_ClearCommand(Command); + Command->CommandType = DAC960_ImmediateCommand; + CommandMailbox->DeviceOperation.CommandOpcode = DAC960_V2_IOCTL; + CommandMailbox->DeviceOperation.CommandControlBits + .DataTransferControllerToHost = true; + CommandMailbox->DeviceOperation.CommandControlBits + .NoAutoRequestSense = true; + CommandMailbox->DeviceOperation.IOCTL_Opcode = IOCTL_Opcode; + CommandMailbox->DeviceOperation.OperationDevice = OperationDevice; + DAC960_ExecuteCommand(Command); + CommandStatus = Command->V2.CommandStatus; + DAC960_DeallocateCommand(Command); + return (CommandStatus == DAC960_V2_NormalCompletion); +} + + +/* + DAC960_V1_EnableMemoryMailboxInterface enables the Memory Mailbox Interface + for DAC960 V1 Firmware Controllers. + + PD and P controller types have no memory mailbox, but still need the + other dma mapped memory. 
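+ + For those two types the mailbox arrays are not carved out of the dma loaf, and the mailbox-enable handshake at the end of this routine is skipped; the routine returns success as soon as the shared buffers have been sliced.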
+*/ + +static bool DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T + *Controller) +{ + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + DAC960_HardwareType_T hw_type = Controller->HardwareType; + struct pci_dev *PCI_Device = Controller->PCIDevice; + struct dma_loaf *DmaPages = &Controller->DmaPages; + size_t DmaPagesSize; + size_t CommandMailboxesSize; + size_t StatusMailboxesSize; + + DAC960_V1_CommandMailbox_T *CommandMailboxesMemory; + dma_addr_t CommandMailboxesMemoryDMA; + + DAC960_V1_StatusMailbox_T *StatusMailboxesMemory; + dma_addr_t StatusMailboxesMemoryDMA; + + DAC960_V1_CommandMailbox_T CommandMailbox; + DAC960_V1_CommandStatus_T CommandStatus; + int TimeoutCounter; + int i; + + + if (pci_set_dma_mask(Controller->PCIDevice, DMA_32BIT_MASK)) + return DAC960_Failure(Controller, "DMA mask out of range"); + Controller->BounceBufferLimit = DMA_32BIT_MASK; + + if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller)) { + CommandMailboxesSize = 0; + StatusMailboxesSize = 0; + } else { + CommandMailboxesSize = DAC960_V1_CommandMailboxCount * sizeof(DAC960_V1_CommandMailbox_T); + StatusMailboxesSize = DAC960_V1_StatusMailboxCount * sizeof(DAC960_V1_StatusMailbox_T); + } + DmaPagesSize = CommandMailboxesSize + StatusMailboxesSize + + sizeof(DAC960_V1_DCDB_T) + sizeof(DAC960_V1_Enquiry_T) + + sizeof(DAC960_V1_ErrorTable_T) + sizeof(DAC960_V1_EventLogEntry_T) + + sizeof(DAC960_V1_RebuildProgress_T) + + sizeof(DAC960_V1_LogicalDriveInformationArray_T) + + sizeof(DAC960_V1_BackgroundInitializationStatus_T) + + sizeof(DAC960_V1_DeviceState_T) + sizeof(DAC960_SCSI_Inquiry_T) + + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T); + + if (!init_dma_loaf(PCI_Device, DmaPages, DmaPagesSize)) + return false; + + + if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller)) + goto skip_mailboxes; + + CommandMailboxesMemory = slice_dma_loaf(DmaPages, + CommandMailboxesSize, &CommandMailboxesMemoryDMA); + + /* These are the base addresses for the command memory mailbox array */ + Controller->V1.FirstCommandMailbox = CommandMailboxesMemory; + Controller->V1.FirstCommandMailboxDMA = CommandMailboxesMemoryDMA; + + CommandMailboxesMemory += DAC960_V1_CommandMailboxCount - 1; + Controller->V1.LastCommandMailbox = CommandMailboxesMemory; + Controller->V1.NextCommandMailbox = Controller->V1.FirstCommandMailbox; + Controller->V1.PreviousCommandMailbox1 = Controller->V1.LastCommandMailbox; + Controller->V1.PreviousCommandMailbox2 = + Controller->V1.LastCommandMailbox - 1; + + /* These are the base addresses for the status memory mailbox array */ + StatusMailboxesMemory = slice_dma_loaf(DmaPages, + StatusMailboxesSize, &StatusMailboxesMemoryDMA); + + Controller->V1.FirstStatusMailbox = StatusMailboxesMemory; + Controller->V1.FirstStatusMailboxDMA = StatusMailboxesMemoryDMA; + StatusMailboxesMemory += DAC960_V1_StatusMailboxCount - 1; + Controller->V1.LastStatusMailbox = StatusMailboxesMemory; + Controller->V1.NextStatusMailbox = Controller->V1.FirstStatusMailbox; + +skip_mailboxes: + Controller->V1.MonitoringDCDB = slice_dma_loaf(DmaPages, + sizeof(DAC960_V1_DCDB_T), + &Controller->V1.MonitoringDCDB_DMA); + + Controller->V1.NewEnquiry = slice_dma_loaf(DmaPages, + sizeof(DAC960_V1_Enquiry_T), + &Controller->V1.NewEnquiryDMA); + + Controller->V1.NewErrorTable = slice_dma_loaf(DmaPages, + sizeof(DAC960_V1_ErrorTable_T), + &Controller->V1.NewErrorTableDMA); + + Controller->V1.EventLogEntry = slice_dma_loaf(DmaPages, + sizeof(DAC960_V1_EventLogEntry_T), + 
&Controller->V1.EventLogEntryDMA); + + Controller->V1.RebuildProgress = slice_dma_loaf(DmaPages, + sizeof(DAC960_V1_RebuildProgress_T), + &Controller->V1.RebuildProgressDMA); + + Controller->V1.NewLogicalDriveInformation = slice_dma_loaf(DmaPages, + sizeof(DAC960_V1_LogicalDriveInformationArray_T), + &Controller->V1.NewLogicalDriveInformationDMA); + + Controller->V1.BackgroundInitializationStatus = slice_dma_loaf(DmaPages, + sizeof(DAC960_V1_BackgroundInitializationStatus_T), + &Controller->V1.BackgroundInitializationStatusDMA); + + Controller->V1.NewDeviceState = slice_dma_loaf(DmaPages, + sizeof(DAC960_V1_DeviceState_T), + &Controller->V1.NewDeviceStateDMA); + + Controller->V1.NewInquiryStandardData = slice_dma_loaf(DmaPages, + sizeof(DAC960_SCSI_Inquiry_T), + &Controller->V1.NewInquiryStandardDataDMA); + + Controller->V1.NewInquiryUnitSerialNumber = slice_dma_loaf(DmaPages, + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T), + &Controller->V1.NewInquiryUnitSerialNumberDMA); + + if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller)) + return true; + + /* Enable the Memory Mailbox Interface. */ + Controller->V1.DualModeMemoryMailboxInterface = true; + CommandMailbox.TypeX.CommandOpcode = 0x2B; + CommandMailbox.TypeX.CommandIdentifier = 0; + CommandMailbox.TypeX.CommandOpcode2 = 0x14; + CommandMailbox.TypeX.CommandMailboxesBusAddress = + Controller->V1.FirstCommandMailboxDMA; + CommandMailbox.TypeX.StatusMailboxesBusAddress = + Controller->V1.FirstStatusMailboxDMA; +#define TIMEOUT_COUNT 1000000 + + for (i = 0; i < 2; i++) + switch (Controller->HardwareType) + { + case DAC960_LA_Controller: + TimeoutCounter = TIMEOUT_COUNT; + while (--TimeoutCounter >= 0) + { + if (!DAC960_LA_HardwareMailboxFullP(ControllerBaseAddress)) + break; + udelay(10); + } + if (TimeoutCounter < 0) return false; + DAC960_LA_WriteHardwareMailbox(ControllerBaseAddress, &CommandMailbox); + DAC960_LA_HardwareMailboxNewCommand(ControllerBaseAddress); + TimeoutCounter = TIMEOUT_COUNT; + while (--TimeoutCounter >= 0) + { + if (DAC960_LA_HardwareMailboxStatusAvailableP( + ControllerBaseAddress)) + break; + udelay(10); + } + if (TimeoutCounter < 0) return false; + CommandStatus = DAC960_LA_ReadStatusRegister(ControllerBaseAddress); + DAC960_LA_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress); + DAC960_LA_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress); + if (CommandStatus == DAC960_V1_NormalCompletion) return true; + Controller->V1.DualModeMemoryMailboxInterface = false; + CommandMailbox.TypeX.CommandOpcode2 = 0x10; + break; + case DAC960_PG_Controller: + TimeoutCounter = TIMEOUT_COUNT; + while (--TimeoutCounter >= 0) + { + if (!DAC960_PG_HardwareMailboxFullP(ControllerBaseAddress)) + break; + udelay(10); + } + if (TimeoutCounter < 0) return false; + DAC960_PG_WriteHardwareMailbox(ControllerBaseAddress, &CommandMailbox); + DAC960_PG_HardwareMailboxNewCommand(ControllerBaseAddress); + + TimeoutCounter = TIMEOUT_COUNT; + while (--TimeoutCounter >= 0) + { + if (DAC960_PG_HardwareMailboxStatusAvailableP( + ControllerBaseAddress)) + break; + udelay(10); + } + if (TimeoutCounter < 0) return false; + CommandStatus = DAC960_PG_ReadStatusRegister(ControllerBaseAddress); + DAC960_PG_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress); + DAC960_PG_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress); + if (CommandStatus == DAC960_V1_NormalCompletion) return true; + Controller->V1.DualModeMemoryMailboxInterface = false; + CommandMailbox.TypeX.CommandOpcode2 = 0x10; + break; + default: + 
DAC960_Failure(Controller, "Unknown Controller Type\n"); + break; + } + return false; +} + + +/* + DAC960_V2_EnableMemoryMailboxInterface enables the Memory Mailbox Interface + for DAC960 V2 Firmware Controllers. + + Aggregate the space needed for the controller's memory mailbox and + the other data structures that will be targets of dma transfers with + the controller. Allocate a dma-mapped region of memory to hold these + structures. Then, save CPU pointers and dma_addr_t values to reference + the structures that are contained in that region. +*/ + +static bool DAC960_V2_EnableMemoryMailboxInterface(DAC960_Controller_T + *Controller) +{ + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + struct pci_dev *PCI_Device = Controller->PCIDevice; + struct dma_loaf *DmaPages = &Controller->DmaPages; + size_t DmaPagesSize; + size_t CommandMailboxesSize; + size_t StatusMailboxesSize; + + DAC960_V2_CommandMailbox_T *CommandMailboxesMemory; + dma_addr_t CommandMailboxesMemoryDMA; + + DAC960_V2_StatusMailbox_T *StatusMailboxesMemory; + dma_addr_t StatusMailboxesMemoryDMA; + + DAC960_V2_CommandMailbox_T *CommandMailbox; + dma_addr_t CommandMailboxDMA; + DAC960_V2_CommandStatus_T CommandStatus; + + if (!pci_set_dma_mask(Controller->PCIDevice, DMA_64BIT_MASK)) + Controller->BounceBufferLimit = DMA_64BIT_MASK; + else if (!pci_set_dma_mask(Controller->PCIDevice, DMA_32BIT_MASK)) + Controller->BounceBufferLimit = DMA_32BIT_MASK; + else + return DAC960_Failure(Controller, "DMA mask out of range"); + + /* This is a temporary dma mapping, used only in the scope of this function */ + CommandMailbox = pci_alloc_consistent(PCI_Device, + sizeof(DAC960_V2_CommandMailbox_T), &CommandMailboxDMA); + if (CommandMailbox == NULL) + return false; + + CommandMailboxesSize = DAC960_V2_CommandMailboxCount * sizeof(DAC960_V2_CommandMailbox_T); + StatusMailboxesSize = DAC960_V2_StatusMailboxCount * sizeof(DAC960_V2_StatusMailbox_T); + DmaPagesSize = + CommandMailboxesSize + StatusMailboxesSize + + sizeof(DAC960_V2_HealthStatusBuffer_T) + + sizeof(DAC960_V2_ControllerInfo_T) + + sizeof(DAC960_V2_LogicalDeviceInfo_T) + + sizeof(DAC960_V2_PhysicalDeviceInfo_T) + + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T) + + sizeof(DAC960_V2_Event_T) + + sizeof(DAC960_V2_PhysicalToLogicalDevice_T); + + if (!init_dma_loaf(PCI_Device, DmaPages, DmaPagesSize)) { + pci_free_consistent(PCI_Device, sizeof(DAC960_V2_CommandMailbox_T), + CommandMailbox, CommandMailboxDMA); + return false; + } + + CommandMailboxesMemory = slice_dma_loaf(DmaPages, + CommandMailboxesSize, &CommandMailboxesMemoryDMA); + + /* These are the base addresses for the command memory mailbox array */ + Controller->V2.FirstCommandMailbox = CommandMailboxesMemory; + Controller->V2.FirstCommandMailboxDMA = CommandMailboxesMemoryDMA; + + CommandMailboxesMemory += DAC960_V2_CommandMailboxCount - 1; + Controller->V2.LastCommandMailbox = CommandMailboxesMemory; + Controller->V2.NextCommandMailbox = Controller->V2.FirstCommandMailbox; + Controller->V2.PreviousCommandMailbox1 = Controller->V2.LastCommandMailbox; + Controller->V2.PreviousCommandMailbox2 = + Controller->V2.LastCommandMailbox - 1; + + /* These are the base addresses for the status memory mailbox array */ + StatusMailboxesMemory = slice_dma_loaf(DmaPages, + StatusMailboxesSize, &StatusMailboxesMemoryDMA); + + Controller->V2.FirstStatusMailbox = StatusMailboxesMemory; + Controller->V2.FirstStatusMailboxDMA = StatusMailboxesMemoryDMA; + StatusMailboxesMemory += DAC960_V2_StatusMailboxCount - 1; + 
Controller->V2.LastStatusMailbox = StatusMailboxesMemory; + Controller->V2.NextStatusMailbox = Controller->V2.FirstStatusMailbox; + + Controller->V2.HealthStatusBuffer = slice_dma_loaf(DmaPages, + sizeof(DAC960_V2_HealthStatusBuffer_T), + &Controller->V2.HealthStatusBufferDMA); + + Controller->V2.NewControllerInformation = slice_dma_loaf(DmaPages, + sizeof(DAC960_V2_ControllerInfo_T), + &Controller->V2.NewControllerInformationDMA); + + Controller->V2.NewLogicalDeviceInformation = slice_dma_loaf(DmaPages, + sizeof(DAC960_V2_LogicalDeviceInfo_T), + &Controller->V2.NewLogicalDeviceInformationDMA); + + Controller->V2.NewPhysicalDeviceInformation = slice_dma_loaf(DmaPages, + sizeof(DAC960_V2_PhysicalDeviceInfo_T), + &Controller->V2.NewPhysicalDeviceInformationDMA); + + Controller->V2.NewInquiryUnitSerialNumber = slice_dma_loaf(DmaPages, + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T), + &Controller->V2.NewInquiryUnitSerialNumberDMA); + + Controller->V2.Event = slice_dma_loaf(DmaPages, + sizeof(DAC960_V2_Event_T), + &Controller->V2.EventDMA); + + Controller->V2.PhysicalToLogicalDevice = slice_dma_loaf(DmaPages, + sizeof(DAC960_V2_PhysicalToLogicalDevice_T), + &Controller->V2.PhysicalToLogicalDeviceDMA); + + /* + Enable the Memory Mailbox Interface. + + I don't know why we can't just use one of the memory mailboxes + we just allocated to do this, instead of using this temporary one. + Try this change later. + */ + memset(CommandMailbox, 0, sizeof(DAC960_V2_CommandMailbox_T)); + CommandMailbox->SetMemoryMailbox.CommandIdentifier = 1; + CommandMailbox->SetMemoryMailbox.CommandOpcode = DAC960_V2_IOCTL; + CommandMailbox->SetMemoryMailbox.CommandControlBits.NoAutoRequestSense = true; + CommandMailbox->SetMemoryMailbox.FirstCommandMailboxSizeKB = + (DAC960_V2_CommandMailboxCount * sizeof(DAC960_V2_CommandMailbox_T)) >> 10; + CommandMailbox->SetMemoryMailbox.FirstStatusMailboxSizeKB = + (DAC960_V2_StatusMailboxCount * sizeof(DAC960_V2_StatusMailbox_T)) >> 10; + CommandMailbox->SetMemoryMailbox.SecondCommandMailboxSizeKB = 0; + CommandMailbox->SetMemoryMailbox.SecondStatusMailboxSizeKB = 0; + CommandMailbox->SetMemoryMailbox.RequestSenseSize = 0; + CommandMailbox->SetMemoryMailbox.IOCTL_Opcode = DAC960_V2_SetMemoryMailbox; + CommandMailbox->SetMemoryMailbox.HealthStatusBufferSizeKB = 1; + CommandMailbox->SetMemoryMailbox.HealthStatusBufferBusAddress = + Controller->V2.HealthStatusBufferDMA; + CommandMailbox->SetMemoryMailbox.FirstCommandMailboxBusAddress = + Controller->V2.FirstCommandMailboxDMA; + CommandMailbox->SetMemoryMailbox.FirstStatusMailboxBusAddress = + Controller->V2.FirstStatusMailboxDMA; + switch (Controller->HardwareType) + { + case DAC960_GEM_Controller: + while (DAC960_GEM_HardwareMailboxFullP(ControllerBaseAddress)) + udelay(1); + DAC960_GEM_WriteHardwareMailbox(ControllerBaseAddress, CommandMailboxDMA); + DAC960_GEM_HardwareMailboxNewCommand(ControllerBaseAddress); + while (!DAC960_GEM_HardwareMailboxStatusAvailableP(ControllerBaseAddress)) + udelay(1); + CommandStatus = DAC960_GEM_ReadCommandStatus(ControllerBaseAddress); + DAC960_GEM_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress); + DAC960_GEM_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress); + break; + case DAC960_BA_Controller: + while (DAC960_BA_HardwareMailboxFullP(ControllerBaseAddress)) + udelay(1); + DAC960_BA_WriteHardwareMailbox(ControllerBaseAddress, CommandMailboxDMA); + DAC960_BA_HardwareMailboxNewCommand(ControllerBaseAddress); + while (!DAC960_BA_HardwareMailboxStatusAvailableP(ControllerBaseAddress)) 
+ udelay(1); + CommandStatus = DAC960_BA_ReadCommandStatus(ControllerBaseAddress); + DAC960_BA_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress); + DAC960_BA_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress); + break; + case DAC960_LP_Controller: + while (DAC960_LP_HardwareMailboxFullP(ControllerBaseAddress)) + udelay(1); + DAC960_LP_WriteHardwareMailbox(ControllerBaseAddress, CommandMailboxDMA); + DAC960_LP_HardwareMailboxNewCommand(ControllerBaseAddress); + while (!DAC960_LP_HardwareMailboxStatusAvailableP(ControllerBaseAddress)) + udelay(1); + CommandStatus = DAC960_LP_ReadCommandStatus(ControllerBaseAddress); + DAC960_LP_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress); + DAC960_LP_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress); + break; + default: + DAC960_Failure(Controller, "Unknown Controller Type\n"); + CommandStatus = DAC960_V2_AbormalCompletion; + break; + } + pci_free_consistent(PCI_Device, sizeof(DAC960_V2_CommandMailbox_T), + CommandMailbox, CommandMailboxDMA); + return (CommandStatus == DAC960_V2_NormalCompletion); +} + + +/* + DAC960_V1_ReadControllerConfiguration reads the Configuration Information + from DAC960 V1 Firmware Controllers and initializes the Controller structure. +*/ + +static bool DAC960_V1_ReadControllerConfiguration(DAC960_Controller_T + *Controller) +{ + DAC960_V1_Enquiry2_T *Enquiry2; + dma_addr_t Enquiry2DMA; + DAC960_V1_Config2_T *Config2; + dma_addr_t Config2DMA; + int LogicalDriveNumber, Channel, TargetID; + struct dma_loaf local_dma; + + if (!init_dma_loaf(Controller->PCIDevice, &local_dma, + sizeof(DAC960_V1_Enquiry2_T) + sizeof(DAC960_V1_Config2_T))) + return DAC960_Failure(Controller, "LOGICAL DEVICE ALLOCATION"); + + Enquiry2 = slice_dma_loaf(&local_dma, sizeof(DAC960_V1_Enquiry2_T), &Enquiry2DMA); + Config2 = slice_dma_loaf(&local_dma, sizeof(DAC960_V1_Config2_T), &Config2DMA); + + if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_Enquiry, + Controller->V1.NewEnquiryDMA)) { + free_dma_loaf(Controller->PCIDevice, &local_dma); + return DAC960_Failure(Controller, "ENQUIRY"); + } + memcpy(&Controller->V1.Enquiry, Controller->V1.NewEnquiry, + sizeof(DAC960_V1_Enquiry_T)); + + if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_Enquiry2, Enquiry2DMA)) { + free_dma_loaf(Controller->PCIDevice, &local_dma); + return DAC960_Failure(Controller, "ENQUIRY2"); + } + + if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_ReadConfig2, Config2DMA)) { + free_dma_loaf(Controller->PCIDevice, &local_dma); + return DAC960_Failure(Controller, "READ CONFIG2"); + } + + if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_GetLogicalDriveInformation, + Controller->V1.NewLogicalDriveInformationDMA)) { + free_dma_loaf(Controller->PCIDevice, &local_dma); + return DAC960_Failure(Controller, "GET LOGICAL DRIVE INFORMATION"); + } + memcpy(&Controller->V1.LogicalDriveInformation, + Controller->V1.NewLogicalDriveInformation, + sizeof(DAC960_V1_LogicalDriveInformationArray_T)); + + for (Channel = 0; Channel < Enquiry2->ActualChannels; Channel++) + for (TargetID = 0; TargetID < Enquiry2->MaxTargets; TargetID++) { + if (!DAC960_V1_ExecuteType3D(Controller, DAC960_V1_GetDeviceState, + Channel, TargetID, + Controller->V1.NewDeviceStateDMA)) { + free_dma_loaf(Controller->PCIDevice, &local_dma); + return DAC960_Failure(Controller, "GET DEVICE STATE"); + } + memcpy(&Controller->V1.DeviceState[Channel][TargetID], + Controller->V1.NewDeviceState, sizeof(DAC960_V1_DeviceState_T)); + } + /* + Initialize the Controller Model Name and Full Model Name fields. 
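+ The Full Model Name is simply "Mylex " prefixed to the Model Name, e.g. "Mylex DAC960PG".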
+ */ + switch (Enquiry2->HardwareID.SubModel) + { + case DAC960_V1_P_PD_PU: + if (Enquiry2->SCSICapability.BusSpeed == DAC960_V1_Ultra) + strcpy(Controller->ModelName, "DAC960PU"); + else strcpy(Controller->ModelName, "DAC960PD"); + break; + case DAC960_V1_PL: + strcpy(Controller->ModelName, "DAC960PL"); + break; + case DAC960_V1_PG: + strcpy(Controller->ModelName, "DAC960PG"); + break; + case DAC960_V1_PJ: + strcpy(Controller->ModelName, "DAC960PJ"); + break; + case DAC960_V1_PR: + strcpy(Controller->ModelName, "DAC960PR"); + break; + case DAC960_V1_PT: + strcpy(Controller->ModelName, "DAC960PT"); + break; + case DAC960_V1_PTL0: + strcpy(Controller->ModelName, "DAC960PTL0"); + break; + case DAC960_V1_PRL: + strcpy(Controller->ModelName, "DAC960PRL"); + break; + case DAC960_V1_PTL1: + strcpy(Controller->ModelName, "DAC960PTL1"); + break; + case DAC960_V1_1164P: + strcpy(Controller->ModelName, "DAC1164P"); + break; + default: + free_dma_loaf(Controller->PCIDevice, &local_dma); + return DAC960_Failure(Controller, "MODEL VERIFICATION"); + } + strcpy(Controller->FullModelName, "Mylex "); + strcat(Controller->FullModelName, Controller->ModelName); + /* + Initialize the Controller Firmware Version field and verify that it + is a supported firmware version. The supported firmware versions are: + + DAC1164P 5.06 and above + DAC960PTL/PRL/PJ/PG 4.06 and above + DAC960PU/PD/PL 3.51 and above + DAC960PU/PD/PL/P 2.73 and above + */ +#if defined(CONFIG_ALPHA) + /* + DEC Alpha machines were often equipped with DAC960 cards that were + OEMed from Mylex, and had their own custom firmware. Version 2.70, + the last custom FW revision to be released by DEC for these older + controllers, appears to work quite well with this driver. + + Cards tested successfully were several versions each of the PD and + PU, called by DEC the KZPSC and KZPAC, respectively, and having + the Manufacturer Numbers (from Mylex), usually on a sticker on the + back of the board, of: + + KZPSC: D040347 (1-channel) or D040348 (2-channel) or D040349 (3-channel) + KZPAC: D040395 (1-channel) or D040396 (2-channel) or D040397 (3-channel) + */ +# define FIRMWARE_27X "2.70" +#else +# define FIRMWARE_27X "2.73" +#endif + + if (Enquiry2->FirmwareID.MajorVersion == 0) + { + Enquiry2->FirmwareID.MajorVersion = + Controller->V1.Enquiry.MajorFirmwareVersion; + Enquiry2->FirmwareID.MinorVersion = + Controller->V1.Enquiry.MinorFirmwareVersion; + Enquiry2->FirmwareID.FirmwareType = '0'; + Enquiry2->FirmwareID.TurnID = 0; + } + sprintf(Controller->FirmwareVersion, "%d.%02d-%c-%02d", + Enquiry2->FirmwareID.MajorVersion, Enquiry2->FirmwareID.MinorVersion, + Enquiry2->FirmwareID.FirmwareType, Enquiry2->FirmwareID.TurnID); + if (!((Controller->FirmwareVersion[0] == '5' && + strcmp(Controller->FirmwareVersion, "5.06") >= 0) || + (Controller->FirmwareVersion[0] == '4' && + strcmp(Controller->FirmwareVersion, "4.06") >= 0) || + (Controller->FirmwareVersion[0] == '3' && + strcmp(Controller->FirmwareVersion, "3.51") >= 0) || + (Controller->FirmwareVersion[0] == '2' && + strcmp(Controller->FirmwareVersion, FIRMWARE_27X) >= 0))) + { + DAC960_Failure(Controller, "FIRMWARE VERSION VERIFICATION"); + DAC960_Error("Firmware Version = '%s'\n", Controller, + Controller->FirmwareVersion); + free_dma_loaf(Controller->PCIDevice, &local_dma); + return false; + } + /* + Initialize the Controller Channels, Targets, Memory Size, and SAF-TE + Enclosure Management Enabled fields. 
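+    The Memory Size is reported by Enquiry2 in bytes and converted to
+    megabytes here; for example, an Enquiry2 MemorySize of 0x4000000
+    (64MB) yields a Controller MemorySize of 64.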
+ */ + Controller->Channels = Enquiry2->ActualChannels; + Controller->Targets = Enquiry2->MaxTargets; + Controller->MemorySize = Enquiry2->MemorySize >> 20; + Controller->V1.SAFTE_EnclosureManagementEnabled = + (Enquiry2->FaultManagementType == DAC960_V1_SAFTE); + /* + Initialize the Controller Queue Depth, Driver Queue Depth, Logical Drive + Count, Maximum Blocks per Command, Controller Scatter/Gather Limit, and + Driver Scatter/Gather Limit. The Driver Queue Depth must be at most one + less than the Controller Queue Depth to allow for an automatic drive + rebuild operation. + */ + Controller->ControllerQueueDepth = Controller->V1.Enquiry.MaxCommands; + Controller->DriverQueueDepth = Controller->ControllerQueueDepth - 1; + if (Controller->DriverQueueDepth > DAC960_MaxDriverQueueDepth) + Controller->DriverQueueDepth = DAC960_MaxDriverQueueDepth; + Controller->LogicalDriveCount = + Controller->V1.Enquiry.NumberOfLogicalDrives; + Controller->MaxBlocksPerCommand = Enquiry2->MaxBlocksPerCommand; + Controller->ControllerScatterGatherLimit = Enquiry2->MaxScatterGatherEntries; + Controller->DriverScatterGatherLimit = + Controller->ControllerScatterGatherLimit; + if (Controller->DriverScatterGatherLimit > DAC960_V1_ScatterGatherLimit) + Controller->DriverScatterGatherLimit = DAC960_V1_ScatterGatherLimit; + /* + Initialize the Stripe Size, Segment Size, and Geometry Translation. + */ + Controller->V1.StripeSize = Config2->BlocksPerStripe * Config2->BlockFactor + >> (10 - DAC960_BlockSizeBits); + Controller->V1.SegmentSize = Config2->BlocksPerCacheLine * Config2->BlockFactor + >> (10 - DAC960_BlockSizeBits); + switch (Config2->DriveGeometry) + { + case DAC960_V1_Geometry_128_32: + Controller->V1.GeometryTranslationHeads = 128; + Controller->V1.GeometryTranslationSectors = 32; + break; + case DAC960_V1_Geometry_255_63: + Controller->V1.GeometryTranslationHeads = 255; + Controller->V1.GeometryTranslationSectors = 63; + break; + default: + free_dma_loaf(Controller->PCIDevice, &local_dma); + return DAC960_Failure(Controller, "CONFIG2 DRIVE GEOMETRY"); + } + /* + Initialize the Background Initialization Status. + */ + if ((Controller->FirmwareVersion[0] == '4' && + strcmp(Controller->FirmwareVersion, "4.08") >= 0) || + (Controller->FirmwareVersion[0] == '5' && + strcmp(Controller->FirmwareVersion, "5.08") >= 0)) + { + Controller->V1.BackgroundInitializationStatusSupported = true; + DAC960_V1_ExecuteType3B(Controller, + DAC960_V1_BackgroundInitializationControl, 0x20, + Controller-> + V1.BackgroundInitializationStatusDMA); + memcpy(&Controller->V1.LastBackgroundInitializationStatus, + Controller->V1.BackgroundInitializationStatus, + sizeof(DAC960_V1_BackgroundInitializationStatus_T)); + } + /* + Initialize the Logical Drive Initially Accessible flag. + */ + for (LogicalDriveNumber = 0; + LogicalDriveNumber < Controller->LogicalDriveCount; + LogicalDriveNumber++) + if (Controller->V1.LogicalDriveInformation + [LogicalDriveNumber].LogicalDriveState != + DAC960_V1_LogicalDrive_Offline) + Controller->LogicalDriveInitiallyAccessible[LogicalDriveNumber] = true; + Controller->V1.LastRebuildStatus = DAC960_V1_NoRebuildOrCheckInProgress; + free_dma_loaf(Controller->PCIDevice, &local_dma); + return true; +} + + +/* + DAC960_V2_ReadControllerConfiguration reads the Configuration Information + from DAC960 V2 Firmware Controllers and initializes the Controller structure. 
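+
+  The information is first retrieved into the dma-able buffers sliced
+  from the Controller's DmaPages and then copied into the permanent
+  fields of the Controller structure.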
+*/ + +static bool DAC960_V2_ReadControllerConfiguration(DAC960_Controller_T + *Controller) +{ + DAC960_V2_ControllerInfo_T *ControllerInfo = + &Controller->V2.ControllerInformation; + unsigned short LogicalDeviceNumber = 0; + int ModelNameLength; + + /* Get data into dma-able area, then copy into permanent location */ + if (!DAC960_V2_NewControllerInfo(Controller)) + return DAC960_Failure(Controller, "GET CONTROLLER INFO"); + memcpy(ControllerInfo, Controller->V2.NewControllerInformation, + sizeof(DAC960_V2_ControllerInfo_T)); + + + if (!DAC960_V2_GeneralInfo(Controller)) + return DAC960_Failure(Controller, "GET HEALTH STATUS"); + + /* + Initialize the Controller Model Name and Full Model Name fields. + */ + ModelNameLength = sizeof(ControllerInfo->ControllerName); + if (ModelNameLength > sizeof(Controller->ModelName)-1) + ModelNameLength = sizeof(Controller->ModelName)-1; + memcpy(Controller->ModelName, ControllerInfo->ControllerName, + ModelNameLength); + ModelNameLength--; + while (Controller->ModelName[ModelNameLength] == ' ' || + Controller->ModelName[ModelNameLength] == '\0') + ModelNameLength--; + Controller->ModelName[++ModelNameLength] = '\0'; + strcpy(Controller->FullModelName, "Mylex "); + strcat(Controller->FullModelName, Controller->ModelName); + /* + Initialize the Controller Firmware Version field. + */ + sprintf(Controller->FirmwareVersion, "%d.%02d-%02d", + ControllerInfo->FirmwareMajorVersion, + ControllerInfo->FirmwareMinorVersion, + ControllerInfo->FirmwareTurnNumber); + if (ControllerInfo->FirmwareMajorVersion == 6 && + ControllerInfo->FirmwareMinorVersion == 0 && + ControllerInfo->FirmwareTurnNumber < 1) + { + DAC960_Info("FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n", + Controller, Controller->FirmwareVersion); + DAC960_Info("STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n", + Controller); + DAC960_Info("PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n", + Controller); + } + /* + Initialize the Controller Channels, Targets, and Memory Size. + */ + Controller->Channels = ControllerInfo->NumberOfPhysicalChannelsPresent; + Controller->Targets = + ControllerInfo->MaximumTargetsPerChannel + [ControllerInfo->NumberOfPhysicalChannelsPresent-1]; + Controller->MemorySize = ControllerInfo->MemorySizeMB; + /* + Initialize the Controller Queue Depth, Driver Queue Depth, Logical Drive + Count, Maximum Blocks per Command, Controller Scatter/Gather Limit, and + Driver Scatter/Gather Limit. The Driver Queue Depth must be at most one + less than the Controller Queue Depth to allow for an automatic drive + rebuild operation. + */ + Controller->ControllerQueueDepth = ControllerInfo->MaximumParallelCommands; + Controller->DriverQueueDepth = Controller->ControllerQueueDepth - 1; + if (Controller->DriverQueueDepth > DAC960_MaxDriverQueueDepth) + Controller->DriverQueueDepth = DAC960_MaxDriverQueueDepth; + Controller->LogicalDriveCount = ControllerInfo->LogicalDevicesPresent; + Controller->MaxBlocksPerCommand = + ControllerInfo->MaximumDataTransferSizeInBlocks; + Controller->ControllerScatterGatherLimit = + ControllerInfo->MaximumScatterGatherEntries; + Controller->DriverScatterGatherLimit = + Controller->ControllerScatterGatherLimit; + if (Controller->DriverScatterGatherLimit > DAC960_V2_ScatterGatherLimit) + Controller->DriverScatterGatherLimit = DAC960_V2_ScatterGatherLimit; + /* + Initialize the Logical Device Information.
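+    The loop below requests information for the next logical device at or
+    above LogicalDeviceNumber, stopping when the request fails; each record
+    returned is copied into a permanently allocated copy.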
+ */ + while (true) + { + DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo = + Controller->V2.NewLogicalDeviceInformation; + DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo; + DAC960_V2_PhysicalDevice_T PhysicalDevice; + + if (!DAC960_V2_NewLogicalDeviceInfo(Controller, LogicalDeviceNumber)) + break; + LogicalDeviceNumber = NewLogicalDeviceInfo->LogicalDeviceNumber; + if (LogicalDeviceNumber >= DAC960_MaxLogicalDrives) { + DAC960_Error("DAC960: Logical Drive Number %d not supported\n", + Controller, LogicalDeviceNumber); + break; + } + if (NewLogicalDeviceInfo->DeviceBlockSizeInBytes != DAC960_BlockSize) { + DAC960_Error("DAC960: Logical Drive Block Size %d not supported\n", + Controller, NewLogicalDeviceInfo->DeviceBlockSizeInBytes); + LogicalDeviceNumber++; + continue; + } + PhysicalDevice.Controller = 0; + PhysicalDevice.Channel = NewLogicalDeviceInfo->Channel; + PhysicalDevice.TargetID = NewLogicalDeviceInfo->TargetID; + PhysicalDevice.LogicalUnit = NewLogicalDeviceInfo->LogicalUnit; + Controller->V2.LogicalDriveToVirtualDevice[LogicalDeviceNumber] = + PhysicalDevice; + if (NewLogicalDeviceInfo->LogicalDeviceState != + DAC960_V2_LogicalDevice_Offline) + Controller->LogicalDriveInitiallyAccessible[LogicalDeviceNumber] = true; + LogicalDeviceInfo = kmalloc(sizeof(DAC960_V2_LogicalDeviceInfo_T), + GFP_ATOMIC); + if (LogicalDeviceInfo == NULL) + return DAC960_Failure(Controller, "LOGICAL DEVICE ALLOCATION"); + Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber] = + LogicalDeviceInfo; + memcpy(LogicalDeviceInfo, NewLogicalDeviceInfo, + sizeof(DAC960_V2_LogicalDeviceInfo_T)); + LogicalDeviceNumber++; + } + return true; +} + + +/* + DAC960_ReportControllerConfiguration reports the Configuration Information + for Controller. +*/ + +static bool DAC960_ReportControllerConfiguration(DAC960_Controller_T + *Controller) +{ + DAC960_Info("Configuring Mylex %s PCI RAID Controller\n", + Controller, Controller->ModelName); + DAC960_Info(" Firmware Version: %s, Channels: %d, Memory Size: %dMB\n", + Controller, Controller->FirmwareVersion, + Controller->Channels, Controller->MemorySize); + DAC960_Info(" PCI Bus: %d, Device: %d, Function: %d, I/O Address: ", + Controller, Controller->Bus, + Controller->Device, Controller->Function); + if (Controller->IO_Address == 0) + DAC960_Info("Unassigned\n", Controller); + else DAC960_Info("0x%X\n", Controller, Controller->IO_Address); + DAC960_Info(" PCI Address: 0x%X mapped at 0x%lX, IRQ Channel: %d\n", + Controller, Controller->PCI_Address, + (unsigned long) Controller->BaseAddress, + Controller->IRQ_Channel); + DAC960_Info(" Controller Queue Depth: %d, " + "Maximum Blocks per Command: %d\n", + Controller, Controller->ControllerQueueDepth, + Controller->MaxBlocksPerCommand); + DAC960_Info(" Driver Queue Depth: %d, " + "Scatter/Gather Limit: %d of %d Segments\n", + Controller, Controller->DriverQueueDepth, + Controller->DriverScatterGatherLimit, + Controller->ControllerScatterGatherLimit); + if (Controller->FirmwareType == DAC960_V1_Controller) + { + DAC960_Info(" Stripe Size: %dKB, Segment Size: %dKB, " + "BIOS Geometry: %d/%d\n", Controller, + Controller->V1.StripeSize, + Controller->V1.SegmentSize, + Controller->V1.GeometryTranslationHeads, + Controller->V1.GeometryTranslationSectors); + if (Controller->V1.SAFTE_EnclosureManagementEnabled) + DAC960_Info(" SAF-TE Enclosure Management Enabled\n", Controller); + } + return true; +} + + +/* + DAC960_V1_ReadDeviceConfiguration reads the Device Configuration Information + for DAC960 V1 Firmware Controllers 
by requesting the SCSI Inquiry and SCSI + Inquiry Unit Serial Number information for each device connected to + Controller. +*/ + +static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T + *Controller) +{ + struct dma_loaf local_dma; + + dma_addr_t DCDBs_dma[DAC960_V1_MaxChannels]; + DAC960_V1_DCDB_T *DCDBs_cpu[DAC960_V1_MaxChannels]; + + dma_addr_t SCSI_Inquiry_dma[DAC960_V1_MaxChannels]; + DAC960_SCSI_Inquiry_T *SCSI_Inquiry_cpu[DAC960_V1_MaxChannels]; + + dma_addr_t SCSI_NewInquiryUnitSerialNumberDMA[DAC960_V1_MaxChannels]; + DAC960_SCSI_Inquiry_UnitSerialNumber_T *SCSI_NewInquiryUnitSerialNumberCPU[DAC960_V1_MaxChannels]; + + struct completion Completions[DAC960_V1_MaxChannels]; + unsigned long flags; + int Channel, TargetID; + + if (!init_dma_loaf(Controller->PCIDevice, &local_dma, + DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) + + sizeof(DAC960_SCSI_Inquiry_T) + + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T)))) + return DAC960_Failure(Controller, + "DMA ALLOCATION FAILED IN ReadDeviceConfiguration"); + + for (Channel = 0; Channel < Controller->Channels; Channel++) { + DCDBs_cpu[Channel] = slice_dma_loaf(&local_dma, + sizeof(DAC960_V1_DCDB_T), DCDBs_dma + Channel); + SCSI_Inquiry_cpu[Channel] = slice_dma_loaf(&local_dma, + sizeof(DAC960_SCSI_Inquiry_T), + SCSI_Inquiry_dma + Channel); + SCSI_NewInquiryUnitSerialNumberCPU[Channel] = slice_dma_loaf(&local_dma, + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T), + SCSI_NewInquiryUnitSerialNumberDMA + Channel); + } + + for (TargetID = 0; TargetID < Controller->Targets; TargetID++) + { + /* + * For each channel, submit a probe for a device on that channel. + * The timeout interval for a device that is present is 10 seconds. + * With this approach, the timeout periods can elapse in parallel + * on each channel. + */ + for (Channel = 0; Channel < Controller->Channels; Channel++) + { + dma_addr_t NewInquiryStandardDataDMA = SCSI_Inquiry_dma[Channel]; + DAC960_V1_DCDB_T *DCDB = DCDBs_cpu[Channel]; + dma_addr_t DCDB_dma = DCDBs_dma[Channel]; + DAC960_Command_T *Command = Controller->Commands[Channel]; + struct completion *Completion = &Completions[Channel]; + + init_completion(Completion); + DAC960_V1_ClearCommand(Command); + Command->CommandType = DAC960_ImmediateCommand; + Command->Completion = Completion; + Command->V1.CommandMailbox.Type3.CommandOpcode = DAC960_V1_DCDB; + Command->V1.CommandMailbox.Type3.BusAddress = DCDB_dma; + DCDB->Channel = Channel; + DCDB->TargetID = TargetID; + DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem; + DCDB->EarlyStatus = false; + DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds; + DCDB->NoAutomaticRequestSense = false; + DCDB->DisconnectPermitted = true; + DCDB->TransferLength = sizeof(DAC960_SCSI_Inquiry_T); + DCDB->BusAddress = NewInquiryStandardDataDMA; + DCDB->CDBLength = 6; + DCDB->TransferLengthHigh4 = 0; + DCDB->SenseLength = sizeof(DCDB->SenseData); + DCDB->CDB[0] = 0x12; /* INQUIRY */ + DCDB->CDB[1] = 0; /* EVPD = 0 */ + DCDB->CDB[2] = 0; /* Page Code */ + DCDB->CDB[3] = 0; /* Reserved */ + DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_T); + DCDB->CDB[5] = 0; /* Control */ + + spin_lock_irqsave(&Controller->queue_lock, flags); + DAC960_QueueCommand(Command); + spin_unlock_irqrestore(&Controller->queue_lock, flags); + } + /* + * Wait for the probes submitted in the previous loop + * to complete. On the probes that are successful, + * get the serial number of the device that was found.
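+       * The serial number is obtained by reusing each channel's DCDB for
+       * a second INQUIRY with EVPD = 1 and Page Code 0x80, the SCSI Unit
+       * Serial Number VPD page.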
+ */ + for (Channel = 0; Channel < Controller->Channels; Channel++) + { + DAC960_SCSI_Inquiry_T *InquiryStandardData = + &Controller->V1.InquiryStandardData[Channel][TargetID]; + DAC960_SCSI_Inquiry_T *NewInquiryStandardData = SCSI_Inquiry_cpu[Channel]; + dma_addr_t NewInquiryUnitSerialNumberDMA = + SCSI_NewInquiryUnitSerialNumberDMA[Channel]; + DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber = + SCSI_NewInquiryUnitSerialNumberCPU[Channel]; + DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber = + &Controller->V1.InquiryUnitSerialNumber[Channel][TargetID]; + DAC960_Command_T *Command = Controller->Commands[Channel]; + DAC960_V1_DCDB_T *DCDB = DCDBs_cpu[Channel]; + struct completion *Completion = &Completions[Channel]; + + wait_for_completion(Completion); + + if (Command->V1.CommandStatus != DAC960_V1_NormalCompletion) { + memset(InquiryStandardData, 0, sizeof(DAC960_SCSI_Inquiry_T)); + InquiryStandardData->PeripheralDeviceType = 0x1F; + continue; + } else + memcpy(InquiryStandardData, NewInquiryStandardData, sizeof(DAC960_SCSI_Inquiry_T)); + + /* Preserve Channel and TargetID values from the previous loop */ + Command->Completion = Completion; + DCDB->TransferLength = sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T); + DCDB->BusAddress = NewInquiryUnitSerialNumberDMA; + DCDB->SenseLength = sizeof(DCDB->SenseData); + DCDB->CDB[0] = 0x12; /* INQUIRY */ + DCDB->CDB[1] = 1; /* EVPD = 1 */ + DCDB->CDB[2] = 0x80; /* Page Code */ + DCDB->CDB[3] = 0; /* Reserved */ + DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T); + DCDB->CDB[5] = 0; /* Control */ + + spin_lock_irqsave(&Controller->queue_lock, flags); + DAC960_QueueCommand(Command); + spin_unlock_irqrestore(&Controller->queue_lock, flags); + wait_for_completion(Completion); + + if (Command->V1.CommandStatus != DAC960_V1_NormalCompletion) { + memset(InquiryUnitSerialNumber, 0, + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T)); + InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F; + } else + memcpy(InquiryUnitSerialNumber, NewInquiryUnitSerialNumber, + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T)); + } + } + free_dma_loaf(Controller->PCIDevice, &local_dma); + return true; +} + + +/* + DAC960_V2_ReadDeviceConfiguration reads the Device Configuration Information + for DAC960 V2 Firmware Controllers by requesting the Physical Device + Information and SCSI Inquiry Unit Serial Number information for each + device connected to Controller. 
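+
+  Devices are enumerated by asking the controller for the device at or
+  beyond the given Channel:TargetID:LogicalUnit address; the scan stops
+  when no further device is returned.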
+*/ + +static bool DAC960_V2_ReadDeviceConfiguration(DAC960_Controller_T + *Controller) +{ + unsigned char Channel = 0, TargetID = 0, LogicalUnit = 0; + unsigned short PhysicalDeviceIndex = 0; + + while (true) + { + DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo = + Controller->V2.NewPhysicalDeviceInformation; + DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo; + DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber = + Controller->V2.NewInquiryUnitSerialNumber; + DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber; + + if (!DAC960_V2_NewPhysicalDeviceInfo(Controller, Channel, TargetID, LogicalUnit)) + break; + + PhysicalDeviceInfo = kmalloc(sizeof(DAC960_V2_PhysicalDeviceInfo_T), + GFP_ATOMIC); + if (PhysicalDeviceInfo == NULL) + return DAC960_Failure(Controller, "PHYSICAL DEVICE ALLOCATION"); + Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex] = + PhysicalDeviceInfo; + memcpy(PhysicalDeviceInfo, NewPhysicalDeviceInfo, + sizeof(DAC960_V2_PhysicalDeviceInfo_T)); + + InquiryUnitSerialNumber = kmalloc( + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T), GFP_ATOMIC); + if (InquiryUnitSerialNumber == NULL) { + kfree(PhysicalDeviceInfo); + return DAC960_Failure(Controller, "SERIAL NUMBER ALLOCATION"); + } + Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex] = + InquiryUnitSerialNumber; + + Channel = NewPhysicalDeviceInfo->Channel; + TargetID = NewPhysicalDeviceInfo->TargetID; + LogicalUnit = NewPhysicalDeviceInfo->LogicalUnit; + + /* + Some devices do NOT have Unit Serial Numbers. + This command fails for them. But, we still want to + remember those devices are there. Construct a + UnitSerialNumber structure for the failure case. + */ + if (!DAC960_V2_NewInquiryUnitSerialNumber(Controller, Channel, TargetID, LogicalUnit)) { + memset(InquiryUnitSerialNumber, 0, + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T)); + InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F; + } else + memcpy(InquiryUnitSerialNumber, NewInquiryUnitSerialNumber, + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T)); + + PhysicalDeviceIndex++; + LogicalUnit++; + } + return true; +} + + +/* + DAC960_SanitizeInquiryData sanitizes the Vendor, Model, Revision, and + Product Serial Number fields of the Inquiry Standard Data and Inquiry + Unit Serial Number structures. +*/ + +static void DAC960_SanitizeInquiryData(DAC960_SCSI_Inquiry_T + *InquiryStandardData, + DAC960_SCSI_Inquiry_UnitSerialNumber_T + *InquiryUnitSerialNumber, + unsigned char *Vendor, + unsigned char *Model, + unsigned char *Revision, + unsigned char *SerialNumber) +{ + int SerialNumberLength, i; + if (InquiryStandardData->PeripheralDeviceType == 0x1F) return; + for (i = 0; i < sizeof(InquiryStandardData->VendorIdentification); i++) + { + unsigned char VendorCharacter = + InquiryStandardData->VendorIdentification[i]; + Vendor[i] = (VendorCharacter >= ' ' && VendorCharacter <= '~' + ? VendorCharacter : ' '); + } + Vendor[sizeof(InquiryStandardData->VendorIdentification)] = '\0'; + for (i = 0; i < sizeof(InquiryStandardData->ProductIdentification); i++) + { + unsigned char ModelCharacter = + InquiryStandardData->ProductIdentification[i]; + Model[i] = (ModelCharacter >= ' ' && ModelCharacter <= '~' + ? 
ModelCharacter : ' '); + } + Model[sizeof(InquiryStandardData->ProductIdentification)] = '\0'; + for (i = 0; i < sizeof(InquiryStandardData->ProductRevisionLevel); i++) + { + unsigned char RevisionCharacter = + InquiryStandardData->ProductRevisionLevel[i]; + Revision[i] = (RevisionCharacter >= ' ' && RevisionCharacter <= '~' + ? RevisionCharacter : ' '); + } + Revision[sizeof(InquiryStandardData->ProductRevisionLevel)] = '\0'; + if (InquiryUnitSerialNumber->PeripheralDeviceType == 0x1F) return; + SerialNumberLength = InquiryUnitSerialNumber->PageLength; + if (SerialNumberLength > + sizeof(InquiryUnitSerialNumber->ProductSerialNumber)) + SerialNumberLength = sizeof(InquiryUnitSerialNumber->ProductSerialNumber); + for (i = 0; i < SerialNumberLength; i++) + { + unsigned char SerialNumberCharacter = + InquiryUnitSerialNumber->ProductSerialNumber[i]; + SerialNumber[i] = + (SerialNumberCharacter >= ' ' && SerialNumberCharacter <= '~' + ? SerialNumberCharacter : ' '); + } + SerialNumber[SerialNumberLength] = '\0'; +} + + +/* + DAC960_V1_ReportDeviceConfiguration reports the Device Configuration + Information for DAC960 V1 Firmware Controllers. +*/ + +static bool DAC960_V1_ReportDeviceConfiguration(DAC960_Controller_T + *Controller) +{ + int LogicalDriveNumber, Channel, TargetID; + DAC960_Info(" Physical Devices:\n", Controller); + for (Channel = 0; Channel < Controller->Channels; Channel++) + for (TargetID = 0; TargetID < Controller->Targets; TargetID++) + { + DAC960_SCSI_Inquiry_T *InquiryStandardData = + &Controller->V1.InquiryStandardData[Channel][TargetID]; + DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber = + &Controller->V1.InquiryUnitSerialNumber[Channel][TargetID]; + DAC960_V1_DeviceState_T *DeviceState = + &Controller->V1.DeviceState[Channel][TargetID]; + DAC960_V1_ErrorTableEntry_T *ErrorEntry = + &Controller->V1.ErrorTable.ErrorTableEntries[Channel][TargetID]; + char Vendor[1+sizeof(InquiryStandardData->VendorIdentification)]; + char Model[1+sizeof(InquiryStandardData->ProductIdentification)]; + char Revision[1+sizeof(InquiryStandardData->ProductRevisionLevel)]; + char SerialNumber[1+sizeof(InquiryUnitSerialNumber + ->ProductSerialNumber)]; + if (InquiryStandardData->PeripheralDeviceType == 0x1F) continue; + DAC960_SanitizeInquiryData(InquiryStandardData, InquiryUnitSerialNumber, + Vendor, Model, Revision, SerialNumber); + DAC960_Info(" %d:%d%s Vendor: %s Model: %s Revision: %s\n", + Controller, Channel, TargetID, (TargetID < 10 ? " " : ""), + Vendor, Model, Revision); + if (InquiryUnitSerialNumber->PeripheralDeviceType != 0x1F) + DAC960_Info(" Serial Number: %s\n", Controller, SerialNumber); + if (DeviceState->Present && + DeviceState->DeviceType == DAC960_V1_DiskType) + { + if (Controller->V1.DeviceResetCount[Channel][TargetID] > 0) + DAC960_Info(" Disk Status: %s, %u blocks, %d resets\n", + Controller, + (DeviceState->DeviceState == DAC960_V1_Device_Dead + ? "Dead" + : DeviceState->DeviceState + == DAC960_V1_Device_WriteOnly + ? "Write-Only" + : DeviceState->DeviceState + == DAC960_V1_Device_Online + ? "Online" : "Standby"), + DeviceState->DiskSize, + Controller->V1.DeviceResetCount[Channel][TargetID]); + else + DAC960_Info(" Disk Status: %s, %u blocks\n", Controller, + (DeviceState->DeviceState == DAC960_V1_Device_Dead + ? "Dead" + : DeviceState->DeviceState + == DAC960_V1_Device_WriteOnly + ? "Write-Only" + : DeviceState->DeviceState + == DAC960_V1_Device_Online + ? 
"Online" : "Standby"), + DeviceState->DiskSize); + } + if (ErrorEntry->ParityErrorCount > 0 || + ErrorEntry->SoftErrorCount > 0 || + ErrorEntry->HardErrorCount > 0 || + ErrorEntry->MiscErrorCount > 0) + DAC960_Info(" Errors - Parity: %d, Soft: %d, " + "Hard: %d, Misc: %d\n", Controller, + ErrorEntry->ParityErrorCount, + ErrorEntry->SoftErrorCount, + ErrorEntry->HardErrorCount, + ErrorEntry->MiscErrorCount); + } + DAC960_Info(" Logical Drives:\n", Controller); + for (LogicalDriveNumber = 0; + LogicalDriveNumber < Controller->LogicalDriveCount; + LogicalDriveNumber++) + { + DAC960_V1_LogicalDriveInformation_T *LogicalDriveInformation = + &Controller->V1.LogicalDriveInformation[LogicalDriveNumber]; + DAC960_Info(" /dev/rd/c%dd%d: RAID-%d, %s, %u blocks, %s\n", + Controller, Controller->ControllerNumber, LogicalDriveNumber, + LogicalDriveInformation->RAIDLevel, + (LogicalDriveInformation->LogicalDriveState + == DAC960_V1_LogicalDrive_Online + ? "Online" + : LogicalDriveInformation->LogicalDriveState + == DAC960_V1_LogicalDrive_Critical + ? "Critical" : "Offline"), + LogicalDriveInformation->LogicalDriveSize, + (LogicalDriveInformation->WriteBack + ? "Write Back" : "Write Thru")); + } + return true; +} + + +/* + DAC960_V2_ReportDeviceConfiguration reports the Device Configuration + Information for DAC960 V2 Firmware Controllers. +*/ + +static bool DAC960_V2_ReportDeviceConfiguration(DAC960_Controller_T + *Controller) +{ + int PhysicalDeviceIndex, LogicalDriveNumber; + DAC960_Info(" Physical Devices:\n", Controller); + for (PhysicalDeviceIndex = 0; + PhysicalDeviceIndex < DAC960_V2_MaxPhysicalDevices; + PhysicalDeviceIndex++) + { + DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo = + Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex]; + DAC960_SCSI_Inquiry_T *InquiryStandardData = + (DAC960_SCSI_Inquiry_T *) &PhysicalDeviceInfo->SCSI_InquiryData; + DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber = + Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex]; + char Vendor[1+sizeof(InquiryStandardData->VendorIdentification)]; + char Model[1+sizeof(InquiryStandardData->ProductIdentification)]; + char Revision[1+sizeof(InquiryStandardData->ProductRevisionLevel)]; + char SerialNumber[1+sizeof(InquiryUnitSerialNumber->ProductSerialNumber)]; + if (PhysicalDeviceInfo == NULL) break; + DAC960_SanitizeInquiryData(InquiryStandardData, InquiryUnitSerialNumber, + Vendor, Model, Revision, SerialNumber); + DAC960_Info(" %d:%d%s Vendor: %s Model: %s Revision: %s\n", + Controller, + PhysicalDeviceInfo->Channel, + PhysicalDeviceInfo->TargetID, + (PhysicalDeviceInfo->TargetID < 10 ? " " : ""), + Vendor, Model, Revision); + if (PhysicalDeviceInfo->NegotiatedSynchronousMegaTransfers == 0) + DAC960_Info(" %sAsynchronous\n", Controller, + (PhysicalDeviceInfo->NegotiatedDataWidthBits == 16 + ? "Wide " :"")); + else + DAC960_Info(" %sSynchronous at %d MB/sec\n", Controller, + (PhysicalDeviceInfo->NegotiatedDataWidthBits == 16 + ? "Wide " :""), + (PhysicalDeviceInfo->NegotiatedSynchronousMegaTransfers + * PhysicalDeviceInfo->NegotiatedDataWidthBits/8)); + if (InquiryUnitSerialNumber->PeripheralDeviceType != 0x1F) + DAC960_Info(" Serial Number: %s\n", Controller, SerialNumber); + if (PhysicalDeviceInfo->PhysicalDeviceState == + DAC960_V2_Device_Unconfigured) + continue; + DAC960_Info(" Disk Status: %s, %u blocks\n", Controller, + (PhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_Online + ? "Online" + : PhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_Rebuild + ? 
"Rebuild" + : PhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_Missing + ? "Missing" + : PhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_Critical + ? "Critical" + : PhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_Dead + ? "Dead" + : PhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_SuspectedDead + ? "Suspected-Dead" + : PhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_CommandedOffline + ? "Commanded-Offline" + : PhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_Standby + ? "Standby" : "Unknown"), + PhysicalDeviceInfo->ConfigurableDeviceSize); + if (PhysicalDeviceInfo->ParityErrors == 0 && + PhysicalDeviceInfo->SoftErrors == 0 && + PhysicalDeviceInfo->HardErrors == 0 && + PhysicalDeviceInfo->MiscellaneousErrors == 0 && + PhysicalDeviceInfo->CommandTimeouts == 0 && + PhysicalDeviceInfo->Retries == 0 && + PhysicalDeviceInfo->Aborts == 0 && + PhysicalDeviceInfo->PredictedFailuresDetected == 0) + continue; + DAC960_Info(" Errors - Parity: %d, Soft: %d, " + "Hard: %d, Misc: %d\n", Controller, + PhysicalDeviceInfo->ParityErrors, + PhysicalDeviceInfo->SoftErrors, + PhysicalDeviceInfo->HardErrors, + PhysicalDeviceInfo->MiscellaneousErrors); + DAC960_Info(" Timeouts: %d, Retries: %d, " + "Aborts: %d, Predicted: %d\n", Controller, + PhysicalDeviceInfo->CommandTimeouts, + PhysicalDeviceInfo->Retries, + PhysicalDeviceInfo->Aborts, + PhysicalDeviceInfo->PredictedFailuresDetected); + } + DAC960_Info(" Logical Drives:\n", Controller); + for (LogicalDriveNumber = 0; + LogicalDriveNumber < DAC960_MaxLogicalDrives; + LogicalDriveNumber++) + { + DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo = + Controller->V2.LogicalDeviceInformation[LogicalDriveNumber]; + unsigned char *ReadCacheStatus[] = { "Read Cache Disabled", + "Read Cache Enabled", + "Read Ahead Enabled", + "Intelligent Read Ahead Enabled", + "-", "-", "-", "-" }; + unsigned char *WriteCacheStatus[] = { "Write Cache Disabled", + "Logical Device Read Only", + "Write Cache Enabled", + "Intelligent Write Cache Enabled", + "-", "-", "-", "-" }; + unsigned char *GeometryTranslation; + if (LogicalDeviceInfo == NULL) continue; + switch (LogicalDeviceInfo->DriveGeometry) + { + case DAC960_V2_Geometry_128_32: + GeometryTranslation = "128/32"; + break; + case DAC960_V2_Geometry_255_63: + GeometryTranslation = "255/63"; + break; + default: + GeometryTranslation = "Invalid"; + DAC960_Error("Illegal Logical Device Geometry %d\n", + Controller, LogicalDeviceInfo->DriveGeometry); + break; + } + DAC960_Info(" /dev/rd/c%dd%d: RAID-%d, %s, %u blocks\n", + Controller, Controller->ControllerNumber, LogicalDriveNumber, + LogicalDeviceInfo->RAIDLevel, + (LogicalDeviceInfo->LogicalDeviceState + == DAC960_V2_LogicalDevice_Online + ? "Online" + : LogicalDeviceInfo->LogicalDeviceState + == DAC960_V2_LogicalDevice_Critical + ? "Critical" : "Offline"), + LogicalDeviceInfo->ConfigurableDeviceSize); + DAC960_Info(" Logical Device %s, BIOS Geometry: %s\n", + Controller, + (LogicalDeviceInfo->LogicalDeviceControl + .LogicalDeviceInitialized + ? 
"Initialized" : "Uninitialized"), + GeometryTranslation); + if (LogicalDeviceInfo->StripeSize == 0) + { + if (LogicalDeviceInfo->CacheLineSize == 0) + DAC960_Info(" Stripe Size: N/A, " + "Segment Size: N/A\n", Controller); + else + DAC960_Info(" Stripe Size: N/A, " + "Segment Size: %dKB\n", Controller, + 1 << (LogicalDeviceInfo->CacheLineSize - 2)); + } + else + { + if (LogicalDeviceInfo->CacheLineSize == 0) + DAC960_Info(" Stripe Size: %dKB, " + "Segment Size: N/A\n", Controller, + 1 << (LogicalDeviceInfo->StripeSize - 2)); + else + DAC960_Info(" Stripe Size: %dKB, " + "Segment Size: %dKB\n", Controller, + 1 << (LogicalDeviceInfo->StripeSize - 2), + 1 << (LogicalDeviceInfo->CacheLineSize - 2)); + } + DAC960_Info(" %s, %s\n", Controller, + ReadCacheStatus[ + LogicalDeviceInfo->LogicalDeviceControl.ReadCache], + WriteCacheStatus[ + LogicalDeviceInfo->LogicalDeviceControl.WriteCache]); + if (LogicalDeviceInfo->SoftErrors > 0 || + LogicalDeviceInfo->CommandsFailed > 0 || + LogicalDeviceInfo->DeferredWriteErrors) + DAC960_Info(" Errors - Soft: %d, Failed: %d, " + "Deferred Write: %d\n", Controller, + LogicalDeviceInfo->SoftErrors, + LogicalDeviceInfo->CommandsFailed, + LogicalDeviceInfo->DeferredWriteErrors); + + } + return true; +} + +/* + DAC960_RegisterBlockDevice registers the Block Device structures + associated with Controller. +*/ + +static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller) +{ + int MajorNumber = DAC960_MAJOR + Controller->ControllerNumber; + int n; + + /* + Register the Block Device Major Number for this DAC960 Controller. + */ + if (register_blkdev(MajorNumber, "dac960") < 0) + return false; + + for (n = 0; n < DAC960_MaxLogicalDrives; n++) { + struct gendisk *disk = Controller->disks[n]; + struct request_queue *RequestQueue; + + /* for now, let all request queues share controller's lock */ + RequestQueue = blk_init_queue(DAC960_RequestFunction,&Controller->queue_lock); + if (!RequestQueue) { + printk("DAC960: failure to allocate request queue\n"); + continue; + } + Controller->RequestQueue[n] = RequestQueue; + blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit); + RequestQueue->queuedata = Controller; + blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit); + blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit); + blk_queue_max_sectors(RequestQueue, Controller->MaxBlocksPerCommand); + disk->queue = RequestQueue; + sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n); + disk->major = MajorNumber; + disk->first_minor = n << DAC960_MaxPartitionsBits; + disk->fops = &DAC960_BlockDeviceOperations; + } + /* + Indicate the Block Device Registration completed successfully, + */ + return true; +} + + +/* + DAC960_UnregisterBlockDevice unregisters the Block Device structures + associated with Controller. +*/ + +static void DAC960_UnregisterBlockDevice(DAC960_Controller_T *Controller) +{ + int MajorNumber = DAC960_MAJOR + Controller->ControllerNumber; + int disk; + + /* does order matter when deleting gendisk and cleanup in request queue? */ + for (disk = 0; disk < DAC960_MaxLogicalDrives; disk++) { + del_gendisk(Controller->disks[disk]); + blk_cleanup_queue(Controller->RequestQueue[disk]); + Controller->RequestQueue[disk] = NULL; + } + + /* + Unregister the Block Device Major Number for this DAC960 Controller. 
+ */ + unregister_blkdev(MajorNumber, "dac960"); +} + +/* + DAC960_ComputeGenericDiskInfo computes the values for the Generic Disk + Information Partition Sector Counts and Block Sizes. +*/ + +static void DAC960_ComputeGenericDiskInfo(DAC960_Controller_T *Controller) +{ + int disk; + for (disk = 0; disk < DAC960_MaxLogicalDrives; disk++) + set_capacity(Controller->disks[disk], disk_size(Controller, disk)); +} + +/* + DAC960_ReportErrorStatus reports Controller BIOS Messages passed through + the Error Status Register when the driver performs the BIOS handshaking. + It returns true for fatal errors and false otherwise. +*/ + +static bool DAC960_ReportErrorStatus(DAC960_Controller_T *Controller, + unsigned char ErrorStatus, + unsigned char Parameter0, + unsigned char Parameter1) +{ + switch (ErrorStatus) + { + case 0x00: + DAC960_Notice("Physical Device %d:%d Not Responding\n", + Controller, Parameter1, Parameter0); + break; + case 0x08: + if (Controller->DriveSpinUpMessageDisplayed) break; + DAC960_Notice("Spinning Up Drives\n", Controller); + Controller->DriveSpinUpMessageDisplayed = true; + break; + case 0x30: + DAC960_Notice("Configuration Checksum Error\n", Controller); + break; + case 0x60: + DAC960_Notice("Mirror Race Recovery Failed\n", Controller); + break; + case 0x70: + DAC960_Notice("Mirror Race Recovery In Progress\n", Controller); + break; + case 0x90: + DAC960_Notice("Physical Device %d:%d COD Mismatch\n", + Controller, Parameter1, Parameter0); + break; + case 0xA0: + DAC960_Notice("Logical Drive Installation Aborted\n", Controller); + break; + case 0xB0: + DAC960_Notice("Mirror Race On A Critical Logical Drive\n", Controller); + break; + case 0xD0: + DAC960_Notice("New Controller Configuration Found\n", Controller); + break; + case 0xF0: + DAC960_Error("Fatal Memory Parity Error for Controller at\n", Controller); + return true; + default: + DAC960_Error("Unknown Initialization Error %02X for Controller at\n", + Controller, ErrorStatus); + return true; + } + return false; +} + + +/* + * DAC960_DetectCleanup releases the resources that were allocated + * during DAC960_DetectController(). DAC960_DetectController can + * have several internal failure points, so not ALL resources may + * have been allocated. It's important to free only + * resources that HAVE been allocated. The code below always + * tests that the resource has been allocated before attempting to + * free it.
+ */ +static void DAC960_DetectCleanup(DAC960_Controller_T *Controller) +{ + int i; + + /* Free the memory mailbox, status, and related structures */ + free_dma_loaf(Controller->PCIDevice, &Controller->DmaPages); + if (Controller->MemoryMappedAddress) { + switch(Controller->HardwareType) + { + case DAC960_GEM_Controller: + DAC960_GEM_DisableInterrupts(Controller->BaseAddress); + break; + case DAC960_BA_Controller: + DAC960_BA_DisableInterrupts(Controller->BaseAddress); + break; + case DAC960_LP_Controller: + DAC960_LP_DisableInterrupts(Controller->BaseAddress); + break; + case DAC960_LA_Controller: + DAC960_LA_DisableInterrupts(Controller->BaseAddress); + break; + case DAC960_PG_Controller: + DAC960_PG_DisableInterrupts(Controller->BaseAddress); + break; + case DAC960_PD_Controller: + DAC960_PD_DisableInterrupts(Controller->BaseAddress); + break; + case DAC960_P_Controller: + DAC960_PD_DisableInterrupts(Controller->BaseAddress); + break; + } + iounmap(Controller->MemoryMappedAddress); + } + if (Controller->IRQ_Channel) + free_irq(Controller->IRQ_Channel, Controller); + if (Controller->IO_Address) + release_region(Controller->IO_Address, 0x80); + pci_disable_device(Controller->PCIDevice); + for (i = 0; (i < DAC960_MaxLogicalDrives) && Controller->disks[i]; i++) + put_disk(Controller->disks[i]); + DAC960_Controllers[Controller->ControllerNumber] = NULL; + kfree(Controller); +} + + +/* + DAC960_DetectController detects Mylex DAC960/AcceleRAID/eXtremeRAID + PCI RAID Controllers by interrogating the PCI Configuration Space for + Controller Type. +*/ + +static DAC960_Controller_T * +DAC960_DetectController(struct pci_dev *PCI_Device, + const struct pci_device_id *entry) +{ + struct DAC960_privdata *privdata = + (struct DAC960_privdata *)entry->driver_data; + irq_handler_t InterruptHandler = privdata->InterruptHandler; + unsigned int MemoryWindowSize = privdata->MemoryWindowSize; + DAC960_Controller_T *Controller = NULL; + unsigned char DeviceFunction = PCI_Device->devfn; + unsigned char ErrorStatus, Parameter0, Parameter1; + unsigned int IRQ_Channel; + void __iomem *BaseAddress; + int i; + + Controller = kzalloc(sizeof(DAC960_Controller_T), GFP_ATOMIC); + if (Controller == NULL) { + DAC960_Error("Unable to allocate Controller structure for " + "Controller at\n", NULL); + return NULL; + } + Controller->ControllerNumber = DAC960_ControllerCount; + DAC960_Controllers[DAC960_ControllerCount++] = Controller; + Controller->Bus = PCI_Device->bus->number; + Controller->FirmwareType = privdata->FirmwareType; + Controller->HardwareType = privdata->HardwareType; + Controller->Device = DeviceFunction >> 3; + Controller->Function = DeviceFunction & 0x7; + Controller->PCIDevice = PCI_Device; + strcpy(Controller->FullModelName, "DAC960"); + + if (pci_enable_device(PCI_Device)) + goto Failure; + + switch (Controller->HardwareType) + { + case DAC960_GEM_Controller: + Controller->PCI_Address = pci_resource_start(PCI_Device, 0); + break; + case DAC960_BA_Controller: + Controller->PCI_Address = pci_resource_start(PCI_Device, 0); + break; + case DAC960_LP_Controller: + Controller->PCI_Address = pci_resource_start(PCI_Device, 0); + break; + case DAC960_LA_Controller: + Controller->PCI_Address = pci_resource_start(PCI_Device, 0); + break; + case DAC960_PG_Controller: + Controller->PCI_Address = pci_resource_start(PCI_Device, 0); + break; + case DAC960_PD_Controller: + Controller->IO_Address = pci_resource_start(PCI_Device, 0); + Controller->PCI_Address = pci_resource_start(PCI_Device, 1); + break; + case 
DAC960_P_Controller: + Controller->IO_Address = pci_resource_start(PCI_Device, 0); + Controller->PCI_Address = pci_resource_start(PCI_Device, 1); + break; + } + + pci_set_drvdata(PCI_Device, (void *)((long)Controller->ControllerNumber)); + for (i = 0; i < DAC960_MaxLogicalDrives; i++) { + Controller->disks[i] = alloc_disk(1<<DAC960_MaxPartitionsBits); + if (!Controller->disks[i]) + goto Failure; + Controller->disks[i]->private_data = (void *)((long)i); + } + init_waitqueue_head(&Controller->CommandWaitQueue); + init_waitqueue_head(&Controller->HealthStatusWaitQueue); + spin_lock_init(&Controller->queue_lock); + DAC960_AnnounceDriver(Controller); + /* + Map the Controller Register Window. + */ + if (MemoryWindowSize < PAGE_SIZE) + MemoryWindowSize = PAGE_SIZE; + Controller->MemoryMappedAddress = + ioremap_nocache(Controller->PCI_Address & PAGE_MASK, MemoryWindowSize); + Controller->BaseAddress = + Controller->MemoryMappedAddress + (Controller->PCI_Address & ~PAGE_MASK); + if (Controller->MemoryMappedAddress == NULL) + { + DAC960_Error("Unable to map Controller Register Window for " + "Controller at\n", Controller); + goto Failure; + } + BaseAddress = Controller->BaseAddress; + switch (Controller->HardwareType) + { + case DAC960_GEM_Controller: + DAC960_GEM_DisableInterrupts(BaseAddress); + DAC960_GEM_AcknowledgeHardwareMailboxStatus(BaseAddress); + udelay(1000); + while (DAC960_GEM_InitializationInProgressP(BaseAddress)) + { + if (DAC960_GEM_ReadErrorStatus(BaseAddress, &ErrorStatus, + &Parameter0, &Parameter1) && + DAC960_ReportErrorStatus(Controller, ErrorStatus, + Parameter0, Parameter1)) + goto Failure; + udelay(10); + } + if (!DAC960_V2_EnableMemoryMailboxInterface(Controller)) + { + DAC960_Error("Unable to Enable Memory Mailbox Interface " + "for Controller at\n", Controller); + goto Failure; + } + DAC960_GEM_EnableInterrupts(BaseAddress); + Controller->QueueCommand = DAC960_GEM_QueueCommand; + Controller->ReadControllerConfiguration = + DAC960_V2_ReadControllerConfiguration; + Controller->ReadDeviceConfiguration = + DAC960_V2_ReadDeviceConfiguration; + Controller->ReportDeviceConfiguration = + DAC960_V2_ReportDeviceConfiguration; + Controller->QueueReadWriteCommand = + DAC960_V2_QueueReadWriteCommand; + break; + case DAC960_BA_Controller: + DAC960_BA_DisableInterrupts(BaseAddress); + DAC960_BA_AcknowledgeHardwareMailboxStatus(BaseAddress); + udelay(1000); + while (DAC960_BA_InitializationInProgressP(BaseAddress)) + { + if (DAC960_BA_ReadErrorStatus(BaseAddress, &ErrorStatus, + &Parameter0, &Parameter1) && + DAC960_ReportErrorStatus(Controller, ErrorStatus, + Parameter0, Parameter1)) + goto Failure; + udelay(10); + } + if (!DAC960_V2_EnableMemoryMailboxInterface(Controller)) + { + DAC960_Error("Unable to Enable Memory Mailbox Interface " + "for Controller at\n", Controller); + goto Failure; + } + DAC960_BA_EnableInterrupts(BaseAddress); + Controller->QueueCommand = DAC960_BA_QueueCommand; + Controller->ReadControllerConfiguration = + DAC960_V2_ReadControllerConfiguration; + Controller->ReadDeviceConfiguration = + DAC960_V2_ReadDeviceConfiguration; + Controller->ReportDeviceConfiguration = + DAC960_V2_ReportDeviceConfiguration; + Controller->QueueReadWriteCommand = + DAC960_V2_QueueReadWriteCommand; + break; + case DAC960_LP_Controller: + DAC960_LP_DisableInterrupts(BaseAddress); + DAC960_LP_AcknowledgeHardwareMailboxStatus(BaseAddress); + udelay(1000); + while (DAC960_LP_InitializationInProgressP(BaseAddress)) + { + if (DAC960_LP_ReadErrorStatus(BaseAddress, &ErrorStatus, + 
&Parameter0, &Parameter1) && + DAC960_ReportErrorStatus(Controller, ErrorStatus, + Parameter0, Parameter1)) + goto Failure; + udelay(10); + } + if (!DAC960_V2_EnableMemoryMailboxInterface(Controller)) + { + DAC960_Error("Unable to Enable Memory Mailbox Interface " + "for Controller at\n", Controller); + goto Failure; + } + DAC960_LP_EnableInterrupts(BaseAddress); + Controller->QueueCommand = DAC960_LP_QueueCommand; + Controller->ReadControllerConfiguration = + DAC960_V2_ReadControllerConfiguration; + Controller->ReadDeviceConfiguration = + DAC960_V2_ReadDeviceConfiguration; + Controller->ReportDeviceConfiguration = + DAC960_V2_ReportDeviceConfiguration; + Controller->QueueReadWriteCommand = + DAC960_V2_QueueReadWriteCommand; + break; + case DAC960_LA_Controller: + DAC960_LA_DisableInterrupts(BaseAddress); + DAC960_LA_AcknowledgeHardwareMailboxStatus(BaseAddress); + udelay(1000); + while (DAC960_LA_InitializationInProgressP(BaseAddress)) + { + if (DAC960_LA_ReadErrorStatus(BaseAddress, &ErrorStatus, + &Parameter0, &Parameter1) && + DAC960_ReportErrorStatus(Controller, ErrorStatus, + Parameter0, Parameter1)) + goto Failure; + udelay(10); + } + if (!DAC960_V1_EnableMemoryMailboxInterface(Controller)) + { + DAC960_Error("Unable to Enable Memory Mailbox Interface " + "for Controller at\n", Controller); + goto Failure; + } + DAC960_LA_EnableInterrupts(BaseAddress); + if (Controller->V1.DualModeMemoryMailboxInterface) + Controller->QueueCommand = DAC960_LA_QueueCommandDualMode; + else Controller->QueueCommand = DAC960_LA_QueueCommandSingleMode; + Controller->ReadControllerConfiguration = + DAC960_V1_ReadControllerConfiguration; + Controller->ReadDeviceConfiguration = + DAC960_V1_ReadDeviceConfiguration; + Controller->ReportDeviceConfiguration = + DAC960_V1_ReportDeviceConfiguration; + Controller->QueueReadWriteCommand = + DAC960_V1_QueueReadWriteCommand; + break; + case DAC960_PG_Controller: + DAC960_PG_DisableInterrupts(BaseAddress); + DAC960_PG_AcknowledgeHardwareMailboxStatus(BaseAddress); + udelay(1000); + while (DAC960_PG_InitializationInProgressP(BaseAddress)) + { + if (DAC960_PG_ReadErrorStatus(BaseAddress, &ErrorStatus, + &Parameter0, &Parameter1) && + DAC960_ReportErrorStatus(Controller, ErrorStatus, + Parameter0, Parameter1)) + goto Failure; + udelay(10); + } + if (!DAC960_V1_EnableMemoryMailboxInterface(Controller)) + { + DAC960_Error("Unable to Enable Memory Mailbox Interface " + "for Controller at\n", Controller); + goto Failure; + } + DAC960_PG_EnableInterrupts(BaseAddress); + if (Controller->V1.DualModeMemoryMailboxInterface) + Controller->QueueCommand = DAC960_PG_QueueCommandDualMode; + else Controller->QueueCommand = DAC960_PG_QueueCommandSingleMode; + Controller->ReadControllerConfiguration = + DAC960_V1_ReadControllerConfiguration; + Controller->ReadDeviceConfiguration = + DAC960_V1_ReadDeviceConfiguration; + Controller->ReportDeviceConfiguration = + DAC960_V1_ReportDeviceConfiguration; + Controller->QueueReadWriteCommand = + DAC960_V1_QueueReadWriteCommand; + break; + case DAC960_PD_Controller: + if (!request_region(Controller->IO_Address, 0x80, + Controller->FullModelName)) { + DAC960_Error("IO port 0x%d busy for Controller at\n", + Controller, Controller->IO_Address); + goto Failure; + } + DAC960_PD_DisableInterrupts(BaseAddress); + DAC960_PD_AcknowledgeStatus(BaseAddress); + udelay(1000); + while (DAC960_PD_InitializationInProgressP(BaseAddress)) + { + if (DAC960_PD_ReadErrorStatus(BaseAddress, &ErrorStatus, + &Parameter0, &Parameter1) && + 
DAC960_ReportErrorStatus(Controller, ErrorStatus, + Parameter0, Parameter1)) + goto Failure; + udelay(10); + } + if (!DAC960_V1_EnableMemoryMailboxInterface(Controller)) + { + DAC960_Error("Unable to allocate DMA mapped memory " + "for Controller at\n", Controller); + goto Failure; + } + DAC960_PD_EnableInterrupts(BaseAddress); + Controller->QueueCommand = DAC960_PD_QueueCommand; + Controller->ReadControllerConfiguration = + DAC960_V1_ReadControllerConfiguration; + Controller->ReadDeviceConfiguration = + DAC960_V1_ReadDeviceConfiguration; + Controller->ReportDeviceConfiguration = + DAC960_V1_ReportDeviceConfiguration; + Controller->QueueReadWriteCommand = + DAC960_V1_QueueReadWriteCommand; + break; + case DAC960_P_Controller: + if (!request_region(Controller->IO_Address, 0x80, + Controller->FullModelName)){ + DAC960_Error("IO port 0x%d busy for Controller at\n", + Controller, Controller->IO_Address); + goto Failure; + } + DAC960_PD_DisableInterrupts(BaseAddress); + DAC960_PD_AcknowledgeStatus(BaseAddress); + udelay(1000); + while (DAC960_PD_InitializationInProgressP(BaseAddress)) + { + if (DAC960_PD_ReadErrorStatus(BaseAddress, &ErrorStatus, + &Parameter0, &Parameter1) && + DAC960_ReportErrorStatus(Controller, ErrorStatus, + Parameter0, Parameter1)) + goto Failure; + udelay(10); + } + if (!DAC960_V1_EnableMemoryMailboxInterface(Controller)) + { + DAC960_Error("Unable to allocate DMA mapped memory" + "for Controller at\n", Controller); + goto Failure; + } + DAC960_PD_EnableInterrupts(BaseAddress); + Controller->QueueCommand = DAC960_P_QueueCommand; + Controller->ReadControllerConfiguration = + DAC960_V1_ReadControllerConfiguration; + Controller->ReadDeviceConfiguration = + DAC960_V1_ReadDeviceConfiguration; + Controller->ReportDeviceConfiguration = + DAC960_V1_ReportDeviceConfiguration; + Controller->QueueReadWriteCommand = + DAC960_V1_QueueReadWriteCommand; + break; + } + /* + Acquire shared access to the IRQ Channel. + */ + IRQ_Channel = PCI_Device->irq; + if (request_irq(IRQ_Channel, InterruptHandler, IRQF_SHARED, + Controller->FullModelName, Controller) < 0) + { + DAC960_Error("Unable to acquire IRQ Channel %d for Controller at\n", + Controller, Controller->IRQ_Channel); + goto Failure; + } + Controller->IRQ_Channel = IRQ_Channel; + Controller->InitialCommand.CommandIdentifier = 1; + Controller->InitialCommand.Controller = Controller; + Controller->Commands[0] = &Controller->InitialCommand; + Controller->FreeCommands = &Controller->InitialCommand; + return Controller; + +Failure: + if (Controller->IO_Address == 0) + DAC960_Error("PCI Bus %d Device %d Function %d I/O Address N/A " + "PCI Address 0x%X\n", Controller, + Controller->Bus, Controller->Device, + Controller->Function, Controller->PCI_Address); + else + DAC960_Error("PCI Bus %d Device %d Function %d I/O Address " + "0x%X PCI Address 0x%X\n", Controller, + Controller->Bus, Controller->Device, + Controller->Function, Controller->IO_Address, + Controller->PCI_Address); + DAC960_DetectCleanup(Controller); + DAC960_ControllerCount--; + return NULL; +} + +/* + DAC960_InitializeController initializes Controller. +*/ + +static bool +DAC960_InitializeController(DAC960_Controller_T *Controller) +{ + if (DAC960_ReadControllerConfiguration(Controller) && + DAC960_ReportControllerConfiguration(Controller) && + DAC960_CreateAuxiliaryStructures(Controller) && + DAC960_ReadDeviceConfiguration(Controller) && + DAC960_ReportDeviceConfiguration(Controller) && + DAC960_RegisterBlockDevice(Controller)) + { + /* + Initialize the Monitoring Timer. 
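+      The timer fires DAC960_MonitoringTimerInterval jiffies from now and
+      passes the Controller to DAC960_MonitoringTimerFunction through the
+      timer's data field.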
+ */ + init_timer(&Controller->MonitoringTimer); + Controller->MonitoringTimer.expires = + jiffies + DAC960_MonitoringTimerInterval; + Controller->MonitoringTimer.data = (unsigned long) Controller; + Controller->MonitoringTimer.function = DAC960_MonitoringTimerFunction; + add_timer(&Controller->MonitoringTimer); + Controller->ControllerInitialized = true; + return true; + } + return false; +} + + +/* + DAC960_FinalizeController finalizes Controller. +*/ + +static void DAC960_FinalizeController(DAC960_Controller_T *Controller) +{ + if (Controller->ControllerInitialized) + { + unsigned long flags; + + /* + * Acquiring and releasing lock here eliminates + * a very low probability race. + * + * The code below allocates controller command structures + * from the free list without holding the controller lock. + * This is safe assuming there is no other activity on + * the controller at the time. + * + * But, there might be a monitoring command still + * in progress. Setting the Shutdown flag while holding + * the lock ensures that there is no monitoring command + * in the interrupt handler currently, and any monitoring + * commands that complete from this time on will NOT return + * their command structure to the free list. + */ + + spin_lock_irqsave(&Controller->queue_lock, flags); + Controller->ShutdownMonitoringTimer = 1; + spin_unlock_irqrestore(&Controller->queue_lock, flags); + + del_timer_sync(&Controller->MonitoringTimer); + if (Controller->FirmwareType == DAC960_V1_Controller) + { + DAC960_Notice("Flushing Cache...", Controller); + DAC960_V1_ExecuteType3(Controller, DAC960_V1_Flush, 0); + DAC960_Notice("done\n", Controller); + + if (Controller->HardwareType == DAC960_PD_Controller) + release_region(Controller->IO_Address, 0x80); + } + else + { + DAC960_Notice("Flushing Cache...", Controller); + DAC960_V2_DeviceOperation(Controller, DAC960_V2_PauseDevice, + DAC960_V2_RAID_Controller); + DAC960_Notice("done\n", Controller); + } + } + DAC960_UnregisterBlockDevice(Controller); + DAC960_DestroyAuxiliaryStructures(Controller); + DAC960_DestroyProcEntries(Controller); + DAC960_DetectCleanup(Controller); +} + + +/* + DAC960_Probe verifies the controller's existence and + initializes the DAC960 Driver for that controller. +*/ + +static int +DAC960_Probe(struct pci_dev *dev, const struct pci_device_id *entry) +{ + int disk; + DAC960_Controller_T *Controller; + + if (DAC960_ControllerCount == DAC960_MaxControllers) + { + DAC960_Error("More than %d DAC960 Controllers detected - " + "ignoring from Controller at\n", + NULL, DAC960_MaxControllers); + return -ENODEV; + } + + Controller = DAC960_DetectController(dev, entry); + if (!Controller) + return -ENODEV; + + if (!DAC960_InitializeController(Controller)) { + DAC960_FinalizeController(Controller); + return -ENODEV; + } + + for (disk = 0; disk < DAC960_MaxLogicalDrives; disk++) { + set_capacity(Controller->disks[disk], disk_size(Controller, disk)); + add_disk(Controller->disks[disk]); + } + DAC960_CreateProcEntries(Controller); + return 0; +} + + +/* + DAC960_Remove removes the Controller associated with PCI_Device from + the DAC960 Driver. +*/ + +static void DAC960_Remove(struct pci_dev *PCI_Device) +{ + int Controller_Number = (long)pci_get_drvdata(PCI_Device); + DAC960_Controller_T *Controller = DAC960_Controllers[Controller_Number]; + if (Controller != NULL) + DAC960_FinalizeController(Controller); +} + + +/* + DAC960_V1_QueueReadWriteCommand prepares and queues a Read/Write Command for + DAC960 V1 Firmware Controllers.
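+
+  A single-segment transfer uses the plain Read/Write opcodes with the
+  segment's bus address placed directly in the Command Mailbox, while
+  multi-segment transfers use the Scatter/Gather opcode variants and
+  point the Mailbox at the Command's Scatter/Gather List.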
+*/ + +static void DAC960_V1_QueueReadWriteCommand(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox; + DAC960_V1_ScatterGatherSegment_T *ScatterGatherList = + Command->V1.ScatterGatherList; + struct scatterlist *ScatterList = Command->V1.ScatterList; + + DAC960_V1_ClearCommand(Command); + + if (Command->SegmentCount == 1) + { + if (Command->DmaDirection == PCI_DMA_FROMDEVICE) + CommandMailbox->Type5.CommandOpcode = DAC960_V1_Read; + else + CommandMailbox->Type5.CommandOpcode = DAC960_V1_Write; + + CommandMailbox->Type5.LD.TransferLength = Command->BlockCount; + CommandMailbox->Type5.LD.LogicalDriveNumber = Command->LogicalDriveNumber; + CommandMailbox->Type5.LogicalBlockAddress = Command->BlockNumber; + CommandMailbox->Type5.BusAddress = + (DAC960_BusAddress32_T)sg_dma_address(ScatterList); + } + else + { + int i; + + if (Command->DmaDirection == PCI_DMA_FROMDEVICE) + CommandMailbox->Type5.CommandOpcode = DAC960_V1_ReadWithScatterGather; + else + CommandMailbox->Type5.CommandOpcode = DAC960_V1_WriteWithScatterGather; + + CommandMailbox->Type5.LD.TransferLength = Command->BlockCount; + CommandMailbox->Type5.LD.LogicalDriveNumber = Command->LogicalDriveNumber; + CommandMailbox->Type5.LogicalBlockAddress = Command->BlockNumber; + CommandMailbox->Type5.BusAddress = Command->V1.ScatterGatherListDMA; + + CommandMailbox->Type5.ScatterGatherCount = Command->SegmentCount; + + for (i = 0; i < Command->SegmentCount; i++, ScatterList++, ScatterGatherList++) { + ScatterGatherList->SegmentDataPointer = + (DAC960_BusAddress32_T)sg_dma_address(ScatterList); + ScatterGatherList->SegmentByteCount = + (DAC960_ByteCount32_T)sg_dma_len(ScatterList); + } + } + DAC960_QueueCommand(Command); +} + + +/* + DAC960_V2_QueueReadWriteCommand prepares and queues a Read/Write Command for + DAC960 V2 Firmware Controllers. +*/ + +static void DAC960_V2_QueueReadWriteCommand(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox; + struct scatterlist *ScatterList = Command->V2.ScatterList; + + DAC960_V2_ClearCommand(Command); + + CommandMailbox->SCSI_10.CommandOpcode = DAC960_V2_SCSI_10; + CommandMailbox->SCSI_10.CommandControlBits.DataTransferControllerToHost = + (Command->DmaDirection == PCI_DMA_FROMDEVICE); + CommandMailbox->SCSI_10.DataTransferSize = + Command->BlockCount << DAC960_BlockSizeBits; + CommandMailbox->SCSI_10.RequestSenseBusAddress = Command->V2.RequestSenseDMA; + CommandMailbox->SCSI_10.PhysicalDevice = + Controller->V2.LogicalDriveToVirtualDevice[Command->LogicalDriveNumber]; + CommandMailbox->SCSI_10.RequestSenseSize = sizeof(DAC960_SCSI_RequestSense_T); + CommandMailbox->SCSI_10.CDBLength = 10; + CommandMailbox->SCSI_10.SCSI_CDB[0] = + (Command->DmaDirection == PCI_DMA_FROMDEVICE ? 
0x28 : 0x2A);
+  CommandMailbox->SCSI_10.SCSI_CDB[2] = Command->BlockNumber >> 24;
+  CommandMailbox->SCSI_10.SCSI_CDB[3] = Command->BlockNumber >> 16;
+  CommandMailbox->SCSI_10.SCSI_CDB[4] = Command->BlockNumber >> 8;
+  CommandMailbox->SCSI_10.SCSI_CDB[5] = Command->BlockNumber;
+  CommandMailbox->SCSI_10.SCSI_CDB[7] = Command->BlockCount >> 8;
+  CommandMailbox->SCSI_10.SCSI_CDB[8] = Command->BlockCount;
+
+  if (Command->SegmentCount == 1)
+    {
+      CommandMailbox->SCSI_10.DataTransferMemoryAddress
+			     .ScatterGatherSegments[0]
+			     .SegmentDataPointer =
+	(DAC960_BusAddress64_T)sg_dma_address(ScatterList);
+      CommandMailbox->SCSI_10.DataTransferMemoryAddress
+			     .ScatterGatherSegments[0]
+			     .SegmentByteCount =
+	CommandMailbox->SCSI_10.DataTransferSize;
+    }
+  else
+    {
+      DAC960_V2_ScatterGatherSegment_T *ScatterGatherList;
+      int i;
+
+      if (Command->SegmentCount > 2)
+	{
+	  ScatterGatherList = Command->V2.ScatterGatherList;
+	  CommandMailbox->SCSI_10.CommandControlBits
+			 .AdditionalScatterGatherListMemory = true;
+	  CommandMailbox->SCSI_10.DataTransferMemoryAddress
+		.ExtendedScatterGather.ScatterGatherList0Length = Command->SegmentCount;
+	  CommandMailbox->SCSI_10.DataTransferMemoryAddress
+		.ExtendedScatterGather.ScatterGatherList0Address =
+	    Command->V2.ScatterGatherListDMA;
+	}
+      else
+	ScatterGatherList = CommandMailbox->SCSI_10.DataTransferMemoryAddress
+				 .ScatterGatherSegments;
+
+      for (i = 0; i < Command->SegmentCount; i++, ScatterList++, ScatterGatherList++) {
+	ScatterGatherList->SegmentDataPointer =
+	  (DAC960_BusAddress64_T)sg_dma_address(ScatterList);
+	ScatterGatherList->SegmentByteCount =
+	  (DAC960_ByteCount64_T)sg_dma_len(ScatterList);
+      }
+    }
+  DAC960_QueueCommand(Command);
+}
+
+
+static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_queue *req_q)
+{
+	struct request *Request;
+	DAC960_Command_T *Command;
+
+   while(1) {
+	Request = elv_next_request(req_q);
+	if (!Request)
+		return 1;
+
+	Command = DAC960_AllocateCommand(Controller);
+	if (Command == NULL)
+		return 0;
+
+	if (rq_data_dir(Request) == READ) {
+		Command->DmaDirection = PCI_DMA_FROMDEVICE;
+		Command->CommandType = DAC960_ReadCommand;
+	} else {
+		Command->DmaDirection = PCI_DMA_TODEVICE;
+		Command->CommandType = DAC960_WriteCommand;
+	}
+	Command->Completion = Request->end_io_data;
+	Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
+	Command->BlockNumber = Request->sector;
+	Command->BlockCount = Request->nr_sectors;
+	Command->Request = Request;
+	blkdev_dequeue_request(Request);
+	Command->SegmentCount = blk_rq_map_sg(req_q,
+		  Command->Request, Command->cmd_sglist);
+	/* pci_map_sg MAY change the value of SegCount */
+	Command->SegmentCount = pci_map_sg(Controller->PCIDevice, Command->cmd_sglist,
+		 Command->SegmentCount, Command->DmaDirection);
+
+	DAC960_QueueReadWriteCommand(Command);
+  }
+}
+
+/*
+  DAC960_ProcessRequest queues I/O Requests from the per-drive Request
+  Queues to the Controller until the queues are empty or no free Command
+  is available.  The index of the queue that could not obtain a Command
+  is remembered in req_q_index so the scan resumes there on the next call.
+*/
+static void DAC960_ProcessRequest(DAC960_Controller_T *controller)
+{
+	int i;
+
+	if (!controller->ControllerInitialized)
+		return;
+
+	/* Do this better later!
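+	   For now the per-drive queues are scanned round-robin: first
+	   from req_q_index to the last drive, then from drive 0, so a
+	   queue that stalled for want of a free Command is serviced
+	   first on the next pass.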
+	 */
+	for (i = controller->req_q_index; i < DAC960_MaxLogicalDrives; i++) {
+		struct request_queue *req_q = controller->RequestQueue[i];
+
+		if (req_q == NULL)
+			continue;
+
+		if (!DAC960_process_queue(controller, req_q)) {
+			controller->req_q_index = i;
+			return;
+		}
+	}
+
+	if (controller->req_q_index == 0)
+		return;
+
+	for (i = 0; i < controller->req_q_index; i++) {
+		struct request_queue *req_q = controller->RequestQueue[i];
+
+		if (req_q == NULL)
+			continue;
+
+		if (!DAC960_process_queue(controller, req_q)) {
+			controller->req_q_index = i;
+			return;
+		}
+	}
+}
+
+
+/*
+  DAC960_queue_partial_rw extracts one bio from the request already
+  associated with the argument Command, constructs a new command block
+  to retry I/O on only that bio, and queues that command to the
+  controller.
+
+  This function re-uses a previously-allocated Command, so there is no
+  failure mode from trying to allocate a command.
+*/
+
+static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  struct request *Request = Command->Request;
+  struct request_queue *req_q = Controller->RequestQueue[Command->LogicalDriveNumber];
+
+  if (Command->DmaDirection == PCI_DMA_FROMDEVICE)
+    Command->CommandType = DAC960_ReadRetryCommand;
+  else
+    Command->CommandType = DAC960_WriteRetryCommand;
+
+  /*
+   * We could be more efficient with these mapping requests
+   * and map only the portions that we need.  But since this
+   * code should almost never be called, just go with a
+   * simple coding.
+   */
+  (void)blk_rq_map_sg(req_q, Command->Request, Command->cmd_sglist);
+
+  (void)pci_map_sg(Controller->PCIDevice, Command->cmd_sglist, 1, Command->DmaDirection);
+  /*
+   * Resubmitting the request sector at a time is really tedious.
+   * But, this should almost never happen.  So, we're willing to pay
+   * this price so that in the end, as much of the transfer is completed
+   * successfully as possible.
+   */
+  Command->SegmentCount = 1;
+  Command->BlockNumber = Request->sector;
+  Command->BlockCount = 1;
+  DAC960_QueueReadWriteCommand(Command);
+  return;
+}
+
+/*
+  DAC960_RequestFunction is the I/O Request Function for DAC960 Controllers.
+*/
+
+static void DAC960_RequestFunction(struct request_queue *RequestQueue)
+{
+	DAC960_ProcessRequest(RequestQueue->queuedata);
+}
+
+/*
+  DAC960_ProcessCompletedRequest performs completion processing for the
+  portion of Command's I/O Request covered by the transfer just finished.
+  It returns true if the entire request is now complete, and false if
+  part of the request remains outstanding.
+*/
+
+static inline bool DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
+						 bool SuccessfulIO)
+{
+	struct request *Request = Command->Request;
+	int Error = SuccessfulIO ? 0 : -EIO;
+
+	pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist,
+		Command->SegmentCount, Command->DmaDirection);
+
+	if (!__blk_end_request(Request, Error, Command->BlockCount << 9)) {
+		if (Command->Completion) {
+			complete(Command->Completion);
+			Command->Completion = NULL;
+		}
+		return true;
+	}
+	return false;
+}
+
+/*
+  DAC960_V1_ReadWriteError prints an appropriate error message for Command
+  when an error occurs on a Read or Write operation.
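+  The message identifies the logical drive by its /dev/rd/cXdX device
+  node and reports the absolute block range of the failed transfer.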
+*/ + +static void DAC960_V1_ReadWriteError(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + unsigned char *CommandName = "UNKNOWN"; + switch (Command->CommandType) + { + case DAC960_ReadCommand: + case DAC960_ReadRetryCommand: + CommandName = "READ"; + break; + case DAC960_WriteCommand: + case DAC960_WriteRetryCommand: + CommandName = "WRITE"; + break; + case DAC960_MonitoringCommand: + case DAC960_ImmediateCommand: + case DAC960_QueuedCommand: + break; + } + switch (Command->V1.CommandStatus) + { + case DAC960_V1_IrrecoverableDataError: + DAC960_Error("Irrecoverable Data Error on %s:\n", + Controller, CommandName); + break; + case DAC960_V1_LogicalDriveNonexistentOrOffline: + DAC960_Error("Logical Drive Nonexistent or Offline on %s:\n", + Controller, CommandName); + break; + case DAC960_V1_AccessBeyondEndOfLogicalDrive: + DAC960_Error("Attempt to Access Beyond End of Logical Drive " + "on %s:\n", Controller, CommandName); + break; + case DAC960_V1_BadDataEncountered: + DAC960_Error("Bad Data Encountered on %s:\n", Controller, CommandName); + break; + default: + DAC960_Error("Unexpected Error Status %04X on %s:\n", + Controller, Command->V1.CommandStatus, CommandName); + break; + } + DAC960_Error(" /dev/rd/c%dd%d: absolute blocks %u..%u\n", + Controller, Controller->ControllerNumber, + Command->LogicalDriveNumber, Command->BlockNumber, + Command->BlockNumber + Command->BlockCount - 1); +} + + +/* + DAC960_V1_ProcessCompletedCommand performs completion processing for Command + for DAC960 V1 Firmware Controllers. +*/ + +static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + DAC960_CommandType_T CommandType = Command->CommandType; + DAC960_V1_CommandOpcode_T CommandOpcode = + Command->V1.CommandMailbox.Common.CommandOpcode; + DAC960_V1_CommandStatus_T CommandStatus = Command->V1.CommandStatus; + + if (CommandType == DAC960_ReadCommand || + CommandType == DAC960_WriteCommand) + { + +#ifdef FORCE_RETRY_DEBUG + CommandStatus = DAC960_V1_IrrecoverableDataError; +#endif + + if (CommandStatus == DAC960_V1_NormalCompletion) { + + if (!DAC960_ProcessCompletedRequest(Command, true)) + BUG(); + + } else if (CommandStatus == DAC960_V1_IrrecoverableDataError || + CommandStatus == DAC960_V1_BadDataEncountered) + { + /* + * break the command down into pieces and resubmit each + * piece, hoping that some of them will succeed. + */ + DAC960_queue_partial_rw(Command); + return; + } + else + { + if (CommandStatus != DAC960_V1_LogicalDriveNonexistentOrOffline) + DAC960_V1_ReadWriteError(Command); + + if (!DAC960_ProcessCompletedRequest(Command, false)) + BUG(); + } + } + else if (CommandType == DAC960_ReadRetryCommand || + CommandType == DAC960_WriteRetryCommand) + { + bool normal_completion; +#ifdef FORCE_RETRY_FAILURE_DEBUG + static int retry_count = 1; +#endif + /* + Perform completion processing for the portion that was + retried, and submit the next portion, if any. 
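+        If part of the request remains once the retried single-block
+        piece completes, DAC960_queue_partial_rw re-queues this Command
+        for the next piece.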
+ */ + normal_completion = true; + if (CommandStatus != DAC960_V1_NormalCompletion) { + normal_completion = false; + if (CommandStatus != DAC960_V1_LogicalDriveNonexistentOrOffline) + DAC960_V1_ReadWriteError(Command); + } + +#ifdef FORCE_RETRY_FAILURE_DEBUG + if (!(++retry_count % 10000)) { + printk("V1 error retry failure test\n"); + normal_completion = false; + DAC960_V1_ReadWriteError(Command); + } +#endif + + if (!DAC960_ProcessCompletedRequest(Command, normal_completion)) { + DAC960_queue_partial_rw(Command); + return; + } + } + + else if (CommandType == DAC960_MonitoringCommand) + { + if (Controller->ShutdownMonitoringTimer) + return; + if (CommandOpcode == DAC960_V1_Enquiry) + { + DAC960_V1_Enquiry_T *OldEnquiry = &Controller->V1.Enquiry; + DAC960_V1_Enquiry_T *NewEnquiry = Controller->V1.NewEnquiry; + unsigned int OldCriticalLogicalDriveCount = + OldEnquiry->CriticalLogicalDriveCount; + unsigned int NewCriticalLogicalDriveCount = + NewEnquiry->CriticalLogicalDriveCount; + if (NewEnquiry->NumberOfLogicalDrives > Controller->LogicalDriveCount) + { + int LogicalDriveNumber = Controller->LogicalDriveCount - 1; + while (++LogicalDriveNumber < NewEnquiry->NumberOfLogicalDrives) + DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) " + "Now Exists\n", Controller, + LogicalDriveNumber, + Controller->ControllerNumber, + LogicalDriveNumber); + Controller->LogicalDriveCount = NewEnquiry->NumberOfLogicalDrives; + DAC960_ComputeGenericDiskInfo(Controller); + } + if (NewEnquiry->NumberOfLogicalDrives < Controller->LogicalDriveCount) + { + int LogicalDriveNumber = NewEnquiry->NumberOfLogicalDrives - 1; + while (++LogicalDriveNumber < Controller->LogicalDriveCount) + DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) " + "No Longer Exists\n", Controller, + LogicalDriveNumber, + Controller->ControllerNumber, + LogicalDriveNumber); + Controller->LogicalDriveCount = NewEnquiry->NumberOfLogicalDrives; + DAC960_ComputeGenericDiskInfo(Controller); + } + if (NewEnquiry->StatusFlags.DeferredWriteError != + OldEnquiry->StatusFlags.DeferredWriteError) + DAC960_Critical("Deferred Write Error Flag is now %s\n", Controller, + (NewEnquiry->StatusFlags.DeferredWriteError + ? 
"TRUE" : "FALSE")); + if ((NewCriticalLogicalDriveCount > 0 || + NewCriticalLogicalDriveCount != OldCriticalLogicalDriveCount) || + (NewEnquiry->OfflineLogicalDriveCount > 0 || + NewEnquiry->OfflineLogicalDriveCount != + OldEnquiry->OfflineLogicalDriveCount) || + (NewEnquiry->DeadDriveCount > 0 || + NewEnquiry->DeadDriveCount != + OldEnquiry->DeadDriveCount) || + (NewEnquiry->EventLogSequenceNumber != + OldEnquiry->EventLogSequenceNumber) || + Controller->MonitoringTimerCount == 0 || + time_after_eq(jiffies, Controller->SecondaryMonitoringTime + + DAC960_SecondaryMonitoringInterval)) + { + Controller->V1.NeedLogicalDriveInformation = true; + Controller->V1.NewEventLogSequenceNumber = + NewEnquiry->EventLogSequenceNumber; + Controller->V1.NeedErrorTableInformation = true; + Controller->V1.NeedDeviceStateInformation = true; + Controller->V1.StartDeviceStateScan = true; + Controller->V1.NeedBackgroundInitializationStatus = + Controller->V1.BackgroundInitializationStatusSupported; + Controller->SecondaryMonitoringTime = jiffies; + } + if (NewEnquiry->RebuildFlag == DAC960_V1_StandbyRebuildInProgress || + NewEnquiry->RebuildFlag + == DAC960_V1_BackgroundRebuildInProgress || + OldEnquiry->RebuildFlag == DAC960_V1_StandbyRebuildInProgress || + OldEnquiry->RebuildFlag == DAC960_V1_BackgroundRebuildInProgress) + { + Controller->V1.NeedRebuildProgress = true; + Controller->V1.RebuildProgressFirst = + (NewEnquiry->CriticalLogicalDriveCount < + OldEnquiry->CriticalLogicalDriveCount); + } + if (OldEnquiry->RebuildFlag == DAC960_V1_BackgroundCheckInProgress) + switch (NewEnquiry->RebuildFlag) + { + case DAC960_V1_NoStandbyRebuildOrCheckInProgress: + DAC960_Progress("Consistency Check Completed Successfully\n", + Controller); + break; + case DAC960_V1_StandbyRebuildInProgress: + case DAC960_V1_BackgroundRebuildInProgress: + break; + case DAC960_V1_BackgroundCheckInProgress: + Controller->V1.NeedConsistencyCheckProgress = true; + break; + case DAC960_V1_StandbyRebuildCompletedWithError: + DAC960_Progress("Consistency Check Completed with Error\n", + Controller); + break; + case DAC960_V1_BackgroundRebuildOrCheckFailed_DriveFailed: + DAC960_Progress("Consistency Check Failed - " + "Physical Device Failed\n", Controller); + break; + case DAC960_V1_BackgroundRebuildOrCheckFailed_LogicalDriveFailed: + DAC960_Progress("Consistency Check Failed - " + "Logical Drive Failed\n", Controller); + break; + case DAC960_V1_BackgroundRebuildOrCheckFailed_OtherCauses: + DAC960_Progress("Consistency Check Failed - Other Causes\n", + Controller); + break; + case DAC960_V1_BackgroundRebuildOrCheckSuccessfullyTerminated: + DAC960_Progress("Consistency Check Successfully Terminated\n", + Controller); + break; + } + else if (NewEnquiry->RebuildFlag + == DAC960_V1_BackgroundCheckInProgress) + Controller->V1.NeedConsistencyCheckProgress = true; + Controller->MonitoringAlertMode = + (NewEnquiry->CriticalLogicalDriveCount > 0 || + NewEnquiry->OfflineLogicalDriveCount > 0 || + NewEnquiry->DeadDriveCount > 0); + if (NewEnquiry->RebuildFlag > DAC960_V1_BackgroundCheckInProgress) + { + Controller->V1.PendingRebuildFlag = NewEnquiry->RebuildFlag; + Controller->V1.RebuildFlagPending = true; + } + memcpy(&Controller->V1.Enquiry, &Controller->V1.NewEnquiry, + sizeof(DAC960_V1_Enquiry_T)); + } + else if (CommandOpcode == DAC960_V1_PerformEventLogOperation) + { + static char + *DAC960_EventMessages[] = + { "killed because write recovery failed", + "killed because of SCSI bus reset failure", + "killed because of double check condition", + 
"killed because it was removed", + "killed because of gross error on SCSI chip", + "killed because of bad tag returned from drive", + "killed because of timeout on SCSI command", + "killed because of reset SCSI command issued from system", + "killed because busy or parity error count exceeded limit", + "killed because of 'kill drive' command from system", + "killed because of selection timeout", + "killed due to SCSI phase sequence error", + "killed due to unknown status" }; + DAC960_V1_EventLogEntry_T *EventLogEntry = + Controller->V1.EventLogEntry; + if (EventLogEntry->SequenceNumber == + Controller->V1.OldEventLogSequenceNumber) + { + unsigned char SenseKey = EventLogEntry->SenseKey; + unsigned char AdditionalSenseCode = + EventLogEntry->AdditionalSenseCode; + unsigned char AdditionalSenseCodeQualifier = + EventLogEntry->AdditionalSenseCodeQualifier; + if (SenseKey == DAC960_SenseKey_VendorSpecific && + AdditionalSenseCode == 0x80 && + AdditionalSenseCodeQualifier < + ARRAY_SIZE(DAC960_EventMessages)) + DAC960_Critical("Physical Device %d:%d %s\n", Controller, + EventLogEntry->Channel, + EventLogEntry->TargetID, + DAC960_EventMessages[ + AdditionalSenseCodeQualifier]); + else if (SenseKey == DAC960_SenseKey_UnitAttention && + AdditionalSenseCode == 0x29) + { + if (Controller->MonitoringTimerCount > 0) + Controller->V1.DeviceResetCount[EventLogEntry->Channel] + [EventLogEntry->TargetID]++; + } + else if (!(SenseKey == DAC960_SenseKey_NoSense || + (SenseKey == DAC960_SenseKey_NotReady && + AdditionalSenseCode == 0x04 && + (AdditionalSenseCodeQualifier == 0x01 || + AdditionalSenseCodeQualifier == 0x02)))) + { + DAC960_Critical("Physical Device %d:%d Error Log: " + "Sense Key = %X, ASC = %02X, ASCQ = %02X\n", + Controller, + EventLogEntry->Channel, + EventLogEntry->TargetID, + SenseKey, + AdditionalSenseCode, + AdditionalSenseCodeQualifier); + DAC960_Critical("Physical Device %d:%d Error Log: " + "Information = %02X%02X%02X%02X " + "%02X%02X%02X%02X\n", + Controller, + EventLogEntry->Channel, + EventLogEntry->TargetID, + EventLogEntry->Information[0], + EventLogEntry->Information[1], + EventLogEntry->Information[2], + EventLogEntry->Information[3], + EventLogEntry->CommandSpecificInformation[0], + EventLogEntry->CommandSpecificInformation[1], + EventLogEntry->CommandSpecificInformation[2], + EventLogEntry->CommandSpecificInformation[3]); + } + } + Controller->V1.OldEventLogSequenceNumber++; + } + else if (CommandOpcode == DAC960_V1_GetErrorTable) + { + DAC960_V1_ErrorTable_T *OldErrorTable = &Controller->V1.ErrorTable; + DAC960_V1_ErrorTable_T *NewErrorTable = Controller->V1.NewErrorTable; + int Channel, TargetID; + for (Channel = 0; Channel < Controller->Channels; Channel++) + for (TargetID = 0; TargetID < Controller->Targets; TargetID++) + { + DAC960_V1_ErrorTableEntry_T *NewErrorEntry = + &NewErrorTable->ErrorTableEntries[Channel][TargetID]; + DAC960_V1_ErrorTableEntry_T *OldErrorEntry = + &OldErrorTable->ErrorTableEntries[Channel][TargetID]; + if ((NewErrorEntry->ParityErrorCount != + OldErrorEntry->ParityErrorCount) || + (NewErrorEntry->SoftErrorCount != + OldErrorEntry->SoftErrorCount) || + (NewErrorEntry->HardErrorCount != + OldErrorEntry->HardErrorCount) || + (NewErrorEntry->MiscErrorCount != + OldErrorEntry->MiscErrorCount)) + DAC960_Critical("Physical Device %d:%d Errors: " + "Parity = %d, Soft = %d, " + "Hard = %d, Misc = %d\n", + Controller, Channel, TargetID, + NewErrorEntry->ParityErrorCount, + NewErrorEntry->SoftErrorCount, + NewErrorEntry->HardErrorCount, + 
NewErrorEntry->MiscErrorCount); + } + memcpy(&Controller->V1.ErrorTable, Controller->V1.NewErrorTable, + sizeof(DAC960_V1_ErrorTable_T)); + } + else if (CommandOpcode == DAC960_V1_GetDeviceState) + { + DAC960_V1_DeviceState_T *OldDeviceState = + &Controller->V1.DeviceState[Controller->V1.DeviceStateChannel] + [Controller->V1.DeviceStateTargetID]; + DAC960_V1_DeviceState_T *NewDeviceState = + Controller->V1.NewDeviceState; + if (NewDeviceState->DeviceState != OldDeviceState->DeviceState) + DAC960_Critical("Physical Device %d:%d is now %s\n", Controller, + Controller->V1.DeviceStateChannel, + Controller->V1.DeviceStateTargetID, + (NewDeviceState->DeviceState + == DAC960_V1_Device_Dead + ? "DEAD" + : NewDeviceState->DeviceState + == DAC960_V1_Device_WriteOnly + ? "WRITE-ONLY" + : NewDeviceState->DeviceState + == DAC960_V1_Device_Online + ? "ONLINE" : "STANDBY")); + if (OldDeviceState->DeviceState == DAC960_V1_Device_Dead && + NewDeviceState->DeviceState != DAC960_V1_Device_Dead) + { + Controller->V1.NeedDeviceInquiryInformation = true; + Controller->V1.NeedDeviceSerialNumberInformation = true; + Controller->V1.DeviceResetCount + [Controller->V1.DeviceStateChannel] + [Controller->V1.DeviceStateTargetID] = 0; + } + memcpy(OldDeviceState, NewDeviceState, + sizeof(DAC960_V1_DeviceState_T)); + } + else if (CommandOpcode == DAC960_V1_GetLogicalDriveInformation) + { + int LogicalDriveNumber; + for (LogicalDriveNumber = 0; + LogicalDriveNumber < Controller->LogicalDriveCount; + LogicalDriveNumber++) + { + DAC960_V1_LogicalDriveInformation_T *OldLogicalDriveInformation = + &Controller->V1.LogicalDriveInformation[LogicalDriveNumber]; + DAC960_V1_LogicalDriveInformation_T *NewLogicalDriveInformation = + &(*Controller->V1.NewLogicalDriveInformation)[LogicalDriveNumber]; + if (NewLogicalDriveInformation->LogicalDriveState != + OldLogicalDriveInformation->LogicalDriveState) + DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) " + "is now %s\n", Controller, + LogicalDriveNumber, + Controller->ControllerNumber, + LogicalDriveNumber, + (NewLogicalDriveInformation->LogicalDriveState + == DAC960_V1_LogicalDrive_Online + ? "ONLINE" + : NewLogicalDriveInformation->LogicalDriveState + == DAC960_V1_LogicalDrive_Critical + ? "CRITICAL" : "OFFLINE")); + if (NewLogicalDriveInformation->WriteBack != + OldLogicalDriveInformation->WriteBack) + DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) " + "is now %s\n", Controller, + LogicalDriveNumber, + Controller->ControllerNumber, + LogicalDriveNumber, + (NewLogicalDriveInformation->WriteBack + ? 
"WRITE BACK" : "WRITE THRU")); + } + memcpy(&Controller->V1.LogicalDriveInformation, + Controller->V1.NewLogicalDriveInformation, + sizeof(DAC960_V1_LogicalDriveInformationArray_T)); + } + else if (CommandOpcode == DAC960_V1_GetRebuildProgress) + { + unsigned int LogicalDriveNumber = + Controller->V1.RebuildProgress->LogicalDriveNumber; + unsigned int LogicalDriveSize = + Controller->V1.RebuildProgress->LogicalDriveSize; + unsigned int BlocksCompleted = + LogicalDriveSize - Controller->V1.RebuildProgress->RemainingBlocks; + if (CommandStatus == DAC960_V1_NoRebuildOrCheckInProgress && + Controller->V1.LastRebuildStatus == DAC960_V1_NormalCompletion) + CommandStatus = DAC960_V1_RebuildSuccessful; + switch (CommandStatus) + { + case DAC960_V1_NormalCompletion: + Controller->EphemeralProgressMessage = true; + DAC960_Progress("Rebuild in Progress: " + "Logical Drive %d (/dev/rd/c%dd%d) " + "%d%% completed\n", + Controller, LogicalDriveNumber, + Controller->ControllerNumber, + LogicalDriveNumber, + (100 * (BlocksCompleted >> 7)) + / (LogicalDriveSize >> 7)); + Controller->EphemeralProgressMessage = false; + break; + case DAC960_V1_RebuildFailed_LogicalDriveFailure: + DAC960_Progress("Rebuild Failed due to " + "Logical Drive Failure\n", Controller); + break; + case DAC960_V1_RebuildFailed_BadBlocksOnOther: + DAC960_Progress("Rebuild Failed due to " + "Bad Blocks on Other Drives\n", Controller); + break; + case DAC960_V1_RebuildFailed_NewDriveFailed: + DAC960_Progress("Rebuild Failed due to " + "Failure of Drive Being Rebuilt\n", Controller); + break; + case DAC960_V1_NoRebuildOrCheckInProgress: + break; + case DAC960_V1_RebuildSuccessful: + DAC960_Progress("Rebuild Completed Successfully\n", Controller); + break; + case DAC960_V1_RebuildSuccessfullyTerminated: + DAC960_Progress("Rebuild Successfully Terminated\n", Controller); + break; + } + Controller->V1.LastRebuildStatus = CommandStatus; + if (CommandType != DAC960_MonitoringCommand && + Controller->V1.RebuildStatusPending) + { + Command->V1.CommandStatus = Controller->V1.PendingRebuildStatus; + Controller->V1.RebuildStatusPending = false; + } + else if (CommandType == DAC960_MonitoringCommand && + CommandStatus != DAC960_V1_NormalCompletion && + CommandStatus != DAC960_V1_NoRebuildOrCheckInProgress) + { + Controller->V1.PendingRebuildStatus = CommandStatus; + Controller->V1.RebuildStatusPending = true; + } + } + else if (CommandOpcode == DAC960_V1_RebuildStat) + { + unsigned int LogicalDriveNumber = + Controller->V1.RebuildProgress->LogicalDriveNumber; + unsigned int LogicalDriveSize = + Controller->V1.RebuildProgress->LogicalDriveSize; + unsigned int BlocksCompleted = + LogicalDriveSize - Controller->V1.RebuildProgress->RemainingBlocks; + if (CommandStatus == DAC960_V1_NormalCompletion) + { + Controller->EphemeralProgressMessage = true; + DAC960_Progress("Consistency Check in Progress: " + "Logical Drive %d (/dev/rd/c%dd%d) " + "%d%% completed\n", + Controller, LogicalDriveNumber, + Controller->ControllerNumber, + LogicalDriveNumber, + (100 * (BlocksCompleted >> 7)) + / (LogicalDriveSize >> 7)); + Controller->EphemeralProgressMessage = false; + } + } + else if (CommandOpcode == DAC960_V1_BackgroundInitializationControl) + { + unsigned int LogicalDriveNumber = + Controller->V1.BackgroundInitializationStatus->LogicalDriveNumber; + unsigned int LogicalDriveSize = + Controller->V1.BackgroundInitializationStatus->LogicalDriveSize; + unsigned int BlocksCompleted = + Controller->V1.BackgroundInitializationStatus->BlocksCompleted; + switch 
(CommandStatus)
+	  {
+	  case DAC960_V1_NormalCompletion:
+	    switch (Controller->V1.BackgroundInitializationStatus->Status)
+	      {
+	      case DAC960_V1_BackgroundInitializationInvalid:
+		break;
+	      case DAC960_V1_BackgroundInitializationStarted:
+		DAC960_Progress("Background Initialization Started\n",
+				Controller);
+		break;
+	      case DAC960_V1_BackgroundInitializationInProgress:
+		if (BlocksCompleted ==
+		    Controller->V1.LastBackgroundInitializationStatus.
+				BlocksCompleted &&
+		    LogicalDriveNumber ==
+		    Controller->V1.LastBackgroundInitializationStatus.
+				LogicalDriveNumber)
+		  break;
+		Controller->EphemeralProgressMessage = true;
+		DAC960_Progress("Background Initialization in Progress: "
+				"Logical Drive %d (/dev/rd/c%dd%d) "
+				"%d%% completed\n",
+				Controller, LogicalDriveNumber,
+				Controller->ControllerNumber,
+				LogicalDriveNumber,
+				(100 * (BlocksCompleted >> 7))
+				/ (LogicalDriveSize >> 7));
+		Controller->EphemeralProgressMessage = false;
+		break;
+	      case DAC960_V1_BackgroundInitializationSuspended:
+		DAC960_Progress("Background Initialization Suspended\n",
+				Controller);
+		break;
+	      case DAC960_V1_BackgroundInitializationCancelled:
+		DAC960_Progress("Background Initialization Cancelled\n",
+				Controller);
+		break;
+	      }
+	    memcpy(&Controller->V1.LastBackgroundInitializationStatus,
+		   Controller->V1.BackgroundInitializationStatus,
+		   sizeof(DAC960_V1_BackgroundInitializationStatus_T));
+	    break;
+	  case DAC960_V1_BackgroundInitSuccessful:
+	    if (Controller->V1.BackgroundInitializationStatus->Status ==
+		DAC960_V1_BackgroundInitializationInProgress)
+	      DAC960_Progress("Background Initialization "
+			      "Completed Successfully\n", Controller);
+	    Controller->V1.BackgroundInitializationStatus->Status =
+	      DAC960_V1_BackgroundInitializationInvalid;
+	    break;
+	  case DAC960_V1_BackgroundInitAborted:
+	    if (Controller->V1.BackgroundInitializationStatus->Status ==
+		DAC960_V1_BackgroundInitializationInProgress)
+	      DAC960_Progress("Background Initialization Aborted\n",
+			      Controller);
+	    Controller->V1.BackgroundInitializationStatus->Status =
+	      DAC960_V1_BackgroundInitializationInvalid;
+	    break;
+	  case DAC960_V1_NoBackgroundInitInProgress:
+	    break;
+	  }
+      }
+      else if (CommandOpcode == DAC960_V1_DCDB)
+	{
+	   /*
+	     This is a bit ugly.
+
+	     The InquiryStandardData and the InquiryUnitSerialNumber
+	     information retrieval operations BOTH use DAC960_V1_DCDB
+	     commands, so the test above can't distinguish between
+	     these two cases.
+
+	     Instead, we rely on the order of code later in this
+	     function to ensure that DeviceInquiryInformation commands
+	     are submitted before DeviceSerialNumber commands.
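+
+	     Whichever of NeedDeviceInquiryInformation and
+	     NeedDeviceSerialNumberInformation is still set when a DCDB
+	     completes therefore identifies which inquiry the returned
+	     data belongs to.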
+ */ + if (Controller->V1.NeedDeviceInquiryInformation) + { + DAC960_SCSI_Inquiry_T *InquiryStandardData = + &Controller->V1.InquiryStandardData + [Controller->V1.DeviceStateChannel] + [Controller->V1.DeviceStateTargetID]; + if (CommandStatus != DAC960_V1_NormalCompletion) + { + memset(InquiryStandardData, 0, + sizeof(DAC960_SCSI_Inquiry_T)); + InquiryStandardData->PeripheralDeviceType = 0x1F; + } + else + memcpy(InquiryStandardData, + Controller->V1.NewInquiryStandardData, + sizeof(DAC960_SCSI_Inquiry_T)); + Controller->V1.NeedDeviceInquiryInformation = false; + } + else if (Controller->V1.NeedDeviceSerialNumberInformation) + { + DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber = + &Controller->V1.InquiryUnitSerialNumber + [Controller->V1.DeviceStateChannel] + [Controller->V1.DeviceStateTargetID]; + if (CommandStatus != DAC960_V1_NormalCompletion) + { + memset(InquiryUnitSerialNumber, 0, + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T)); + InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F; + } + else + memcpy(InquiryUnitSerialNumber, + Controller->V1.NewInquiryUnitSerialNumber, + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T)); + Controller->V1.NeedDeviceSerialNumberInformation = false; + } + } + /* + Begin submitting new monitoring commands. + */ + if (Controller->V1.NewEventLogSequenceNumber + - Controller->V1.OldEventLogSequenceNumber > 0) + { + Command->V1.CommandMailbox.Type3E.CommandOpcode = + DAC960_V1_PerformEventLogOperation; + Command->V1.CommandMailbox.Type3E.OperationType = + DAC960_V1_GetEventLogEntry; + Command->V1.CommandMailbox.Type3E.OperationQualifier = 1; + Command->V1.CommandMailbox.Type3E.SequenceNumber = + Controller->V1.OldEventLogSequenceNumber; + Command->V1.CommandMailbox.Type3E.BusAddress = + Controller->V1.EventLogEntryDMA; + DAC960_QueueCommand(Command); + return; + } + if (Controller->V1.NeedErrorTableInformation) + { + Controller->V1.NeedErrorTableInformation = false; + Command->V1.CommandMailbox.Type3.CommandOpcode = + DAC960_V1_GetErrorTable; + Command->V1.CommandMailbox.Type3.BusAddress = + Controller->V1.NewErrorTableDMA; + DAC960_QueueCommand(Command); + return; + } + if (Controller->V1.NeedRebuildProgress && + Controller->V1.RebuildProgressFirst) + { + Controller->V1.NeedRebuildProgress = false; + Command->V1.CommandMailbox.Type3.CommandOpcode = + DAC960_V1_GetRebuildProgress; + Command->V1.CommandMailbox.Type3.BusAddress = + Controller->V1.RebuildProgressDMA; + DAC960_QueueCommand(Command); + return; + } + if (Controller->V1.NeedDeviceStateInformation) + { + if (Controller->V1.NeedDeviceInquiryInformation) + { + DAC960_V1_DCDB_T *DCDB = Controller->V1.MonitoringDCDB; + dma_addr_t DCDB_DMA = Controller->V1.MonitoringDCDB_DMA; + + dma_addr_t NewInquiryStandardDataDMA = + Controller->V1.NewInquiryStandardDataDMA; + + Command->V1.CommandMailbox.Type3.CommandOpcode = DAC960_V1_DCDB; + Command->V1.CommandMailbox.Type3.BusAddress = DCDB_DMA; + DCDB->Channel = Controller->V1.DeviceStateChannel; + DCDB->TargetID = Controller->V1.DeviceStateTargetID; + DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem; + DCDB->EarlyStatus = false; + DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds; + DCDB->NoAutomaticRequestSense = false; + DCDB->DisconnectPermitted = true; + DCDB->TransferLength = sizeof(DAC960_SCSI_Inquiry_T); + DCDB->BusAddress = NewInquiryStandardDataDMA; + DCDB->CDBLength = 6; + DCDB->TransferLengthHigh4 = 0; + DCDB->SenseLength = sizeof(DCDB->SenseData); + DCDB->CDB[0] = 0x12; /* INQUIRY */ + DCDB->CDB[1] = 0; /* EVPD = 0 */ + 
DCDB->CDB[2] = 0; /* Page Code */ + DCDB->CDB[3] = 0; /* Reserved */ + DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_T); + DCDB->CDB[5] = 0; /* Control */ + DAC960_QueueCommand(Command); + return; + } + if (Controller->V1.NeedDeviceSerialNumberInformation) + { + DAC960_V1_DCDB_T *DCDB = Controller->V1.MonitoringDCDB; + dma_addr_t DCDB_DMA = Controller->V1.MonitoringDCDB_DMA; + dma_addr_t NewInquiryUnitSerialNumberDMA = + Controller->V1.NewInquiryUnitSerialNumberDMA; + + Command->V1.CommandMailbox.Type3.CommandOpcode = DAC960_V1_DCDB; + Command->V1.CommandMailbox.Type3.BusAddress = DCDB_DMA; + DCDB->Channel = Controller->V1.DeviceStateChannel; + DCDB->TargetID = Controller->V1.DeviceStateTargetID; + DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem; + DCDB->EarlyStatus = false; + DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds; + DCDB->NoAutomaticRequestSense = false; + DCDB->DisconnectPermitted = true; + DCDB->TransferLength = + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T); + DCDB->BusAddress = NewInquiryUnitSerialNumberDMA; + DCDB->CDBLength = 6; + DCDB->TransferLengthHigh4 = 0; + DCDB->SenseLength = sizeof(DCDB->SenseData); + DCDB->CDB[0] = 0x12; /* INQUIRY */ + DCDB->CDB[1] = 1; /* EVPD = 1 */ + DCDB->CDB[2] = 0x80; /* Page Code */ + DCDB->CDB[3] = 0; /* Reserved */ + DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T); + DCDB->CDB[5] = 0; /* Control */ + DAC960_QueueCommand(Command); + return; + } + if (Controller->V1.StartDeviceStateScan) + { + Controller->V1.DeviceStateChannel = 0; + Controller->V1.DeviceStateTargetID = 0; + Controller->V1.StartDeviceStateScan = false; + } + else if (++Controller->V1.DeviceStateTargetID == Controller->Targets) + { + Controller->V1.DeviceStateChannel++; + Controller->V1.DeviceStateTargetID = 0; + } + if (Controller->V1.DeviceStateChannel < Controller->Channels) + { + Controller->V1.NewDeviceState->DeviceState = + DAC960_V1_Device_Dead; + Command->V1.CommandMailbox.Type3D.CommandOpcode = + DAC960_V1_GetDeviceState; + Command->V1.CommandMailbox.Type3D.Channel = + Controller->V1.DeviceStateChannel; + Command->V1.CommandMailbox.Type3D.TargetID = + Controller->V1.DeviceStateTargetID; + Command->V1.CommandMailbox.Type3D.BusAddress = + Controller->V1.NewDeviceStateDMA; + DAC960_QueueCommand(Command); + return; + } + Controller->V1.NeedDeviceStateInformation = false; + } + if (Controller->V1.NeedLogicalDriveInformation) + { + Controller->V1.NeedLogicalDriveInformation = false; + Command->V1.CommandMailbox.Type3.CommandOpcode = + DAC960_V1_GetLogicalDriveInformation; + Command->V1.CommandMailbox.Type3.BusAddress = + Controller->V1.NewLogicalDriveInformationDMA; + DAC960_QueueCommand(Command); + return; + } + if (Controller->V1.NeedRebuildProgress) + { + Controller->V1.NeedRebuildProgress = false; + Command->V1.CommandMailbox.Type3.CommandOpcode = + DAC960_V1_GetRebuildProgress; + Command->V1.CommandMailbox.Type3.BusAddress = + Controller->V1.RebuildProgressDMA; + DAC960_QueueCommand(Command); + return; + } + if (Controller->V1.NeedConsistencyCheckProgress) + { + Controller->V1.NeedConsistencyCheckProgress = false; + Command->V1.CommandMailbox.Type3.CommandOpcode = + DAC960_V1_RebuildStat; + Command->V1.CommandMailbox.Type3.BusAddress = + Controller->V1.RebuildProgressDMA; + DAC960_QueueCommand(Command); + return; + } + if (Controller->V1.NeedBackgroundInitializationStatus) + { + Controller->V1.NeedBackgroundInitializationStatus = false; + Command->V1.CommandMailbox.Type3B.CommandOpcode = + DAC960_V1_BackgroundInitializationControl; + 
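+      /* Request the current background initialization status; the
+	 controller returns it in the status buffer addressed below. */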
Command->V1.CommandMailbox.Type3B.CommandOpcode2 = 0x20; + Command->V1.CommandMailbox.Type3B.BusAddress = + Controller->V1.BackgroundInitializationStatusDMA; + DAC960_QueueCommand(Command); + return; + } + Controller->MonitoringTimerCount++; + Controller->MonitoringTimer.expires = + jiffies + DAC960_MonitoringTimerInterval; + add_timer(&Controller->MonitoringTimer); + } + if (CommandType == DAC960_ImmediateCommand) + { + complete(Command->Completion); + Command->Completion = NULL; + return; + } + if (CommandType == DAC960_QueuedCommand) + { + DAC960_V1_KernelCommand_T *KernelCommand = Command->V1.KernelCommand; + KernelCommand->CommandStatus = Command->V1.CommandStatus; + Command->V1.KernelCommand = NULL; + if (CommandOpcode == DAC960_V1_DCDB) + Controller->V1.DirectCommandActive[KernelCommand->DCDB->Channel] + [KernelCommand->DCDB->TargetID] = + false; + DAC960_DeallocateCommand(Command); + KernelCommand->CompletionFunction(KernelCommand); + return; + } + /* + Queue a Status Monitoring Command to the Controller using the just + completed Command if one was deferred previously due to lack of a + free Command when the Monitoring Timer Function was called. + */ + if (Controller->MonitoringCommandDeferred) + { + Controller->MonitoringCommandDeferred = false; + DAC960_V1_QueueMonitoringCommand(Command); + return; + } + /* + Deallocate the Command. + */ + DAC960_DeallocateCommand(Command); + /* + Wake up any processes waiting on a free Command. + */ + wake_up(&Controller->CommandWaitQueue); +} + + +/* + DAC960_V2_ReadWriteError prints an appropriate error message for Command + when an error occurs on a Read or Write operation. +*/ + +static void DAC960_V2_ReadWriteError(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + unsigned char *SenseErrors[] = { "NO SENSE", "RECOVERED ERROR", + "NOT READY", "MEDIUM ERROR", + "HARDWARE ERROR", "ILLEGAL REQUEST", + "UNIT ATTENTION", "DATA PROTECT", + "BLANK CHECK", "VENDOR-SPECIFIC", + "COPY ABORTED", "ABORTED COMMAND", + "EQUAL", "VOLUME OVERFLOW", + "MISCOMPARE", "RESERVED" }; + unsigned char *CommandName = "UNKNOWN"; + switch (Command->CommandType) + { + case DAC960_ReadCommand: + case DAC960_ReadRetryCommand: + CommandName = "READ"; + break; + case DAC960_WriteCommand: + case DAC960_WriteRetryCommand: + CommandName = "WRITE"; + break; + case DAC960_MonitoringCommand: + case DAC960_ImmediateCommand: + case DAC960_QueuedCommand: + break; + } + DAC960_Error("Error Condition %s on %s:\n", Controller, + SenseErrors[Command->V2.RequestSense->SenseKey], CommandName); + DAC960_Error(" /dev/rd/c%dd%d: absolute blocks %u..%u\n", + Controller, Controller->ControllerNumber, + Command->LogicalDriveNumber, Command->BlockNumber, + Command->BlockNumber + Command->BlockCount - 1); +} + + +/* + DAC960_V2_ReportEvent prints an appropriate message when a Controller Event + occurs. 
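+
+  The leading character of each entry in the event table below selects
+  how the event is reported: 'P' physical device, 'S' physical device
+  with accompanying request sense data, 'L' logical drive, 'M' logical
+  drive progress message, 'E' enclosure (the unit number is formatted
+  into the message), and 'C' controller.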
+*/ + +static void DAC960_V2_ReportEvent(DAC960_Controller_T *Controller, + DAC960_V2_Event_T *Event) +{ + DAC960_SCSI_RequestSense_T *RequestSense = + (DAC960_SCSI_RequestSense_T *) &Event->RequestSenseData; + unsigned char MessageBuffer[DAC960_LineBufferSize]; + static struct { int EventCode; unsigned char *EventMessage; } EventList[] = + { /* Physical Device Events (0x0000 - 0x007F) */ + { 0x0001, "P Online" }, + { 0x0002, "P Standby" }, + { 0x0005, "P Automatic Rebuild Started" }, + { 0x0006, "P Manual Rebuild Started" }, + { 0x0007, "P Rebuild Completed" }, + { 0x0008, "P Rebuild Cancelled" }, + { 0x0009, "P Rebuild Failed for Unknown Reasons" }, + { 0x000A, "P Rebuild Failed due to New Physical Device" }, + { 0x000B, "P Rebuild Failed due to Logical Drive Failure" }, + { 0x000C, "S Offline" }, + { 0x000D, "P Found" }, + { 0x000E, "P Removed" }, + { 0x000F, "P Unconfigured" }, + { 0x0010, "P Expand Capacity Started" }, + { 0x0011, "P Expand Capacity Completed" }, + { 0x0012, "P Expand Capacity Failed" }, + { 0x0013, "P Command Timed Out" }, + { 0x0014, "P Command Aborted" }, + { 0x0015, "P Command Retried" }, + { 0x0016, "P Parity Error" }, + { 0x0017, "P Soft Error" }, + { 0x0018, "P Miscellaneous Error" }, + { 0x0019, "P Reset" }, + { 0x001A, "P Active Spare Found" }, + { 0x001B, "P Warm Spare Found" }, + { 0x001C, "S Sense Data Received" }, + { 0x001D, "P Initialization Started" }, + { 0x001E, "P Initialization Completed" }, + { 0x001F, "P Initialization Failed" }, + { 0x0020, "P Initialization Cancelled" }, + { 0x0021, "P Failed because Write Recovery Failed" }, + { 0x0022, "P Failed because SCSI Bus Reset Failed" }, + { 0x0023, "P Failed because of Double Check Condition" }, + { 0x0024, "P Failed because Device Cannot Be Accessed" }, + { 0x0025, "P Failed because of Gross Error on SCSI Processor" }, + { 0x0026, "P Failed because of Bad Tag from Device" }, + { 0x0027, "P Failed because of Command Timeout" }, + { 0x0028, "P Failed because of System Reset" }, + { 0x0029, "P Failed because of Busy Status or Parity Error" }, + { 0x002A, "P Failed because Host Set Device to Failed State" }, + { 0x002B, "P Failed because of Selection Timeout" }, + { 0x002C, "P Failed because of SCSI Bus Phase Error" }, + { 0x002D, "P Failed because Device Returned Unknown Status" }, + { 0x002E, "P Failed because Device Not Ready" }, + { 0x002F, "P Failed because Device Not Found at Startup" }, + { 0x0030, "P Failed because COD Write Operation Failed" }, + { 0x0031, "P Failed because BDT Write Operation Failed" }, + { 0x0039, "P Missing at Startup" }, + { 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" }, + { 0x003C, "P Temporarily Offline Device Automatically Made Online" }, + { 0x003D, "P Standby Rebuild Started" }, + /* Logical Device Events (0x0080 - 0x00FF) */ + { 0x0080, "M Consistency Check Started" }, + { 0x0081, "M Consistency Check Completed" }, + { 0x0082, "M Consistency Check Cancelled" }, + { 0x0083, "M Consistency Check Completed With Errors" }, + { 0x0084, "M Consistency Check Failed due to Logical Drive Failure" }, + { 0x0085, "M Consistency Check Failed due to Physical Device Failure" }, + { 0x0086, "L Offline" }, + { 0x0087, "L Critical" }, + { 0x0088, "L Online" }, + { 0x0089, "M Automatic Rebuild Started" }, + { 0x008A, "M Manual Rebuild Started" }, + { 0x008B, "M Rebuild Completed" }, + { 0x008C, "M Rebuild Cancelled" }, + { 0x008D, "M Rebuild Failed for Unknown Reasons" }, + { 0x008E, "M Rebuild Failed due to New Physical Device" }, + { 0x008F, "M Rebuild Failed due 
to Logical Drive Failure" }, + { 0x0090, "M Initialization Started" }, + { 0x0091, "M Initialization Completed" }, + { 0x0092, "M Initialization Cancelled" }, + { 0x0093, "M Initialization Failed" }, + { 0x0094, "L Found" }, + { 0x0095, "L Deleted" }, + { 0x0096, "M Expand Capacity Started" }, + { 0x0097, "M Expand Capacity Completed" }, + { 0x0098, "M Expand Capacity Failed" }, + { 0x0099, "L Bad Block Found" }, + { 0x009A, "L Size Changed" }, + { 0x009B, "L Type Changed" }, + { 0x009C, "L Bad Data Block Found" }, + { 0x009E, "L Read of Data Block in BDT" }, + { 0x009F, "L Write Back Data for Disk Block Lost" }, + { 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" }, + { 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" }, + { 0x00A2, "L Standby Rebuild Started" }, + /* Fault Management Events (0x0100 - 0x017F) */ + { 0x0140, "E Fan %d Failed" }, + { 0x0141, "E Fan %d OK" }, + { 0x0142, "E Fan %d Not Present" }, + { 0x0143, "E Power Supply %d Failed" }, + { 0x0144, "E Power Supply %d OK" }, + { 0x0145, "E Power Supply %d Not Present" }, + { 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" }, + { 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" }, + { 0x0148, "E Temperature Sensor %d Temperature Normal" }, + { 0x0149, "E Temperature Sensor %d Not Present" }, + { 0x014A, "E Enclosure Management Unit %d Access Critical" }, + { 0x014B, "E Enclosure Management Unit %d Access OK" }, + { 0x014C, "E Enclosure Management Unit %d Access Offline" }, + /* Controller Events (0x0180 - 0x01FF) */ + { 0x0181, "C Cache Write Back Error" }, + { 0x0188, "C Battery Backup Unit Found" }, + { 0x0189, "C Battery Backup Unit Charge Level Low" }, + { 0x018A, "C Battery Backup Unit Charge Level OK" }, + { 0x0193, "C Installation Aborted" }, + { 0x0195, "C Battery Backup Unit Physically Removed" }, + { 0x0196, "C Memory Error During Warm Boot" }, + { 0x019E, "C Memory Soft ECC Error Corrected" }, + { 0x019F, "C Memory Hard ECC Error Corrected" }, + { 0x01A2, "C Battery Backup Unit Failed" }, + { 0x01AB, "C Mirror Race Recovery Failed" }, + { 0x01AC, "C Mirror Race on Critical Drive" }, + /* Controller Internal Processor Events */ + { 0x0380, "C Internal Controller Hung" }, + { 0x0381, "C Internal Controller Firmware Breakpoint" }, + { 0x0390, "C Internal Controller i960 Processor Specific Error" }, + { 0x03A0, "C Internal Controller StrongARM Processor Specific Error" }, + { 0, "" } }; + int EventListIndex = 0, EventCode; + unsigned char EventType, *EventMessage; + if (Event->EventCode == 0x1C && + RequestSense->SenseKey == DAC960_SenseKey_VendorSpecific && + (RequestSense->AdditionalSenseCode == 0x80 || + RequestSense->AdditionalSenseCode == 0x81)) + Event->EventCode = ((RequestSense->AdditionalSenseCode - 0x80) << 8) | + RequestSense->AdditionalSenseCodeQualifier; + while (true) + { + EventCode = EventList[EventListIndex].EventCode; + if (EventCode == Event->EventCode || EventCode == 0) break; + EventListIndex++; + } + EventType = EventList[EventListIndex].EventMessage[0]; + EventMessage = &EventList[EventListIndex].EventMessage[2]; + if (EventCode == 0) + { + DAC960_Critical("Unknown Controller Event Code %04X\n", + Controller, Event->EventCode); + return; + } + switch (EventType) + { + case 'P': + DAC960_Critical("Physical Device %d:%d %s\n", Controller, + Event->Channel, Event->TargetID, EventMessage); + break; + case 'L': + DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) %s\n", Controller, + Event->LogicalUnit, Controller->ControllerNumber, + 
Event->LogicalUnit, EventMessage); + break; + case 'M': + DAC960_Progress("Logical Drive %d (/dev/rd/c%dd%d) %s\n", Controller, + Event->LogicalUnit, Controller->ControllerNumber, + Event->LogicalUnit, EventMessage); + break; + case 'S': + if (RequestSense->SenseKey == DAC960_SenseKey_NoSense || + (RequestSense->SenseKey == DAC960_SenseKey_NotReady && + RequestSense->AdditionalSenseCode == 0x04 && + (RequestSense->AdditionalSenseCodeQualifier == 0x01 || + RequestSense->AdditionalSenseCodeQualifier == 0x02))) + break; + DAC960_Critical("Physical Device %d:%d %s\n", Controller, + Event->Channel, Event->TargetID, EventMessage); + DAC960_Critical("Physical Device %d:%d Request Sense: " + "Sense Key = %X, ASC = %02X, ASCQ = %02X\n", + Controller, + Event->Channel, + Event->TargetID, + RequestSense->SenseKey, + RequestSense->AdditionalSenseCode, + RequestSense->AdditionalSenseCodeQualifier); + DAC960_Critical("Physical Device %d:%d Request Sense: " + "Information = %02X%02X%02X%02X " + "%02X%02X%02X%02X\n", + Controller, + Event->Channel, + Event->TargetID, + RequestSense->Information[0], + RequestSense->Information[1], + RequestSense->Information[2], + RequestSense->Information[3], + RequestSense->CommandSpecificInformation[0], + RequestSense->CommandSpecificInformation[1], + RequestSense->CommandSpecificInformation[2], + RequestSense->CommandSpecificInformation[3]); + break; + case 'E': + if (Controller->SuppressEnclosureMessages) break; + sprintf(MessageBuffer, EventMessage, Event->LogicalUnit); + DAC960_Critical("Enclosure %d %s\n", Controller, + Event->TargetID, MessageBuffer); + break; + case 'C': + DAC960_Critical("Controller %s\n", Controller, EventMessage); + break; + default: + DAC960_Critical("Unknown Controller Event Code %04X\n", + Controller, Event->EventCode); + break; + } +} + + +/* + DAC960_V2_ReportProgress prints an appropriate progress message for + Logical Device Long Operations. +*/ + +static void DAC960_V2_ReportProgress(DAC960_Controller_T *Controller, + unsigned char *MessageString, + unsigned int LogicalDeviceNumber, + unsigned long BlocksCompleted, + unsigned long LogicalDeviceSize) +{ + Controller->EphemeralProgressMessage = true; + DAC960_Progress("%s in Progress: Logical Drive %d (/dev/rd/c%dd%d) " + "%d%% completed\n", Controller, + MessageString, + LogicalDeviceNumber, + Controller->ControllerNumber, + LogicalDeviceNumber, + (100 * (BlocksCompleted >> 7)) / (LogicalDeviceSize >> 7)); + Controller->EphemeralProgressMessage = false; +} + + +/* + DAC960_V2_ProcessCompletedCommand performs completion processing for Command + for DAC960 V2 Firmware Controllers. 
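+
+  Read/Write completions are handled first, with medium errors sent down
+  the single-block retry path via DAC960_queue_partial_rw.  Monitoring
+  command completions then chain the next step of the monitoring cycle:
+  controller information, outstanding events, the physical device scan,
+  and the logical device scan.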
+*/
+
+static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  DAC960_CommandType_T CommandType = Command->CommandType;
+  DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
+  DAC960_V2_IOCTL_Opcode_T CommandOpcode = CommandMailbox->Common.IOCTL_Opcode;
+  DAC960_V2_CommandStatus_T CommandStatus = Command->V2.CommandStatus;
+
+  if (CommandType == DAC960_ReadCommand ||
+      CommandType == DAC960_WriteCommand)
+    {
+
+#ifdef FORCE_RETRY_DEBUG
+      CommandStatus = DAC960_V2_AbormalCompletion;
+      Command->V2.RequestSense->SenseKey = DAC960_SenseKey_MediumError;
+#endif
+
+      if (CommandStatus == DAC960_V2_NormalCompletion) {
+
+		if (!DAC960_ProcessCompletedRequest(Command, true))
+			BUG();
+
+      } else if (Command->V2.RequestSense->SenseKey == DAC960_SenseKey_MediumError)
+	{
+	  /*
+	   * break the command down into pieces and resubmit each
+	   * piece, hoping that some of them will succeed.
+	   */
+	   DAC960_queue_partial_rw(Command);
+	   return;
+	}
+      else
+	{
+	  if (Command->V2.RequestSense->SenseKey != DAC960_SenseKey_NotReady)
+	    DAC960_V2_ReadWriteError(Command);
+	  /*
+	    Perform completion processing for all buffers in this I/O Request.
+	  */
+	  (void)DAC960_ProcessCompletedRequest(Command, false);
+	}
+    }
+  else if (CommandType == DAC960_ReadRetryCommand ||
+	   CommandType == DAC960_WriteRetryCommand)
+    {
+      bool normal_completion;
+
+#ifdef FORCE_RETRY_FAILURE_DEBUG
+      static int retry_count = 1;
+#endif
+      /*
+        Perform completion processing for the portion that was
+        retried, and submit the next portion, if any.
+      */
+      normal_completion = true;
+      if (CommandStatus != DAC960_V2_NormalCompletion) {
+        normal_completion = false;
+        if (Command->V2.RequestSense->SenseKey != DAC960_SenseKey_NotReady)
+            DAC960_V2_ReadWriteError(Command);
+      }
+
+#ifdef FORCE_RETRY_FAILURE_DEBUG
+      if (!(++retry_count % 10000)) {
+	      printk("V2 error retry failure test\n");
+	      normal_completion = false;
+	      DAC960_V2_ReadWriteError(Command);
+      }
+#endif
+
+      if (!DAC960_ProcessCompletedRequest(Command, normal_completion)) {
+		DAC960_queue_partial_rw(Command);
+		return;
+      }
+    }
+  else if (CommandType == DAC960_MonitoringCommand)
+    {
+      if (Controller->ShutdownMonitoringTimer)
+	      return;
+      if (CommandOpcode == DAC960_V2_GetControllerInfo)
+	{
+	  DAC960_V2_ControllerInfo_T *NewControllerInfo =
+	    Controller->V2.NewControllerInformation;
+	  DAC960_V2_ControllerInfo_T *ControllerInfo =
+	    &Controller->V2.ControllerInformation;
+	  Controller->LogicalDriveCount =
+	    NewControllerInfo->LogicalDevicesPresent;
+	  Controller->V2.NeedLogicalDeviceInformation = true;
+	  Controller->V2.NeedPhysicalDeviceInformation = true;
+	  Controller->V2.StartLogicalDeviceInformationScan = true;
+	  Controller->V2.StartPhysicalDeviceInformationScan = true;
+	  Controller->MonitoringAlertMode =
+	    (NewControllerInfo->LogicalDevicesCritical > 0 ||
+	     NewControllerInfo->LogicalDevicesOffline > 0 ||
+	     NewControllerInfo->PhysicalDisksCritical > 0 ||
+	     NewControllerInfo->PhysicalDisksOffline > 0);
+	  memcpy(ControllerInfo, NewControllerInfo,
+		 sizeof(DAC960_V2_ControllerInfo_T));
+	}
+      else if (CommandOpcode == DAC960_V2_GetEvent)
+	{
+	  if (CommandStatus == DAC960_V2_NormalCompletion) {
+	    DAC960_V2_ReportEvent(Controller, Controller->V2.Event);
+	  }
+	  Controller->V2.NextEventSequenceNumber++;
+	}
+      else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid &&
+	       CommandStatus == DAC960_V2_NormalCompletion)
+	{
+	  DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo =
Controller->V2.NewPhysicalDeviceInformation; + unsigned int PhysicalDeviceIndex = Controller->V2.PhysicalDeviceIndex; + DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo = + Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex]; + DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber = + Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex]; + unsigned int DeviceIndex; + while (PhysicalDeviceInfo != NULL && + (NewPhysicalDeviceInfo->Channel > + PhysicalDeviceInfo->Channel || + (NewPhysicalDeviceInfo->Channel == + PhysicalDeviceInfo->Channel && + (NewPhysicalDeviceInfo->TargetID > + PhysicalDeviceInfo->TargetID || + (NewPhysicalDeviceInfo->TargetID == + PhysicalDeviceInfo->TargetID && + NewPhysicalDeviceInfo->LogicalUnit > + PhysicalDeviceInfo->LogicalUnit))))) + { + DAC960_Critical("Physical Device %d:%d No Longer Exists\n", + Controller, + PhysicalDeviceInfo->Channel, + PhysicalDeviceInfo->TargetID); + Controller->V2.PhysicalDeviceInformation + [PhysicalDeviceIndex] = NULL; + Controller->V2.InquiryUnitSerialNumber + [PhysicalDeviceIndex] = NULL; + kfree(PhysicalDeviceInfo); + kfree(InquiryUnitSerialNumber); + for (DeviceIndex = PhysicalDeviceIndex; + DeviceIndex < DAC960_V2_MaxPhysicalDevices - 1; + DeviceIndex++) + { + Controller->V2.PhysicalDeviceInformation[DeviceIndex] = + Controller->V2.PhysicalDeviceInformation[DeviceIndex+1]; + Controller->V2.InquiryUnitSerialNumber[DeviceIndex] = + Controller->V2.InquiryUnitSerialNumber[DeviceIndex+1]; + } + Controller->V2.PhysicalDeviceInformation + [DAC960_V2_MaxPhysicalDevices-1] = NULL; + Controller->V2.InquiryUnitSerialNumber + [DAC960_V2_MaxPhysicalDevices-1] = NULL; + PhysicalDeviceInfo = + Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex]; + InquiryUnitSerialNumber = + Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex]; + } + if (PhysicalDeviceInfo == NULL || + (NewPhysicalDeviceInfo->Channel != + PhysicalDeviceInfo->Channel) || + (NewPhysicalDeviceInfo->TargetID != + PhysicalDeviceInfo->TargetID) || + (NewPhysicalDeviceInfo->LogicalUnit != + PhysicalDeviceInfo->LogicalUnit)) + { + PhysicalDeviceInfo = + kmalloc(sizeof(DAC960_V2_PhysicalDeviceInfo_T), GFP_ATOMIC); + InquiryUnitSerialNumber = + kmalloc(sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T), + GFP_ATOMIC); + if (InquiryUnitSerialNumber == NULL || + PhysicalDeviceInfo == NULL) + { + kfree(InquiryUnitSerialNumber); + InquiryUnitSerialNumber = NULL; + kfree(PhysicalDeviceInfo); + PhysicalDeviceInfo = NULL; + } + DAC960_Critical("Physical Device %d:%d Now Exists%s\n", + Controller, + NewPhysicalDeviceInfo->Channel, + NewPhysicalDeviceInfo->TargetID, + (PhysicalDeviceInfo != NULL + ? 
"" : " - Allocation Failed")); + if (PhysicalDeviceInfo != NULL) + { + memset(PhysicalDeviceInfo, 0, + sizeof(DAC960_V2_PhysicalDeviceInfo_T)); + PhysicalDeviceInfo->PhysicalDeviceState = + DAC960_V2_Device_InvalidState; + memset(InquiryUnitSerialNumber, 0, + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T)); + InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F; + for (DeviceIndex = DAC960_V2_MaxPhysicalDevices - 1; + DeviceIndex > PhysicalDeviceIndex; + DeviceIndex--) + { + Controller->V2.PhysicalDeviceInformation[DeviceIndex] = + Controller->V2.PhysicalDeviceInformation[DeviceIndex-1]; + Controller->V2.InquiryUnitSerialNumber[DeviceIndex] = + Controller->V2.InquiryUnitSerialNumber[DeviceIndex-1]; + } + Controller->V2.PhysicalDeviceInformation + [PhysicalDeviceIndex] = + PhysicalDeviceInfo; + Controller->V2.InquiryUnitSerialNumber + [PhysicalDeviceIndex] = + InquiryUnitSerialNumber; + Controller->V2.NeedDeviceSerialNumberInformation = true; + } + } + if (PhysicalDeviceInfo != NULL) + { + if (NewPhysicalDeviceInfo->PhysicalDeviceState != + PhysicalDeviceInfo->PhysicalDeviceState) + DAC960_Critical( + "Physical Device %d:%d is now %s\n", Controller, + NewPhysicalDeviceInfo->Channel, + NewPhysicalDeviceInfo->TargetID, + (NewPhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_Online + ? "ONLINE" + : NewPhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_Rebuild + ? "REBUILD" + : NewPhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_Missing + ? "MISSING" + : NewPhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_Critical + ? "CRITICAL" + : NewPhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_Dead + ? "DEAD" + : NewPhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_SuspectedDead + ? "SUSPECTED-DEAD" + : NewPhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_CommandedOffline + ? "COMMANDED-OFFLINE" + : NewPhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_Standby + ? 
"STANDBY" : "UNKNOWN")); + if ((NewPhysicalDeviceInfo->ParityErrors != + PhysicalDeviceInfo->ParityErrors) || + (NewPhysicalDeviceInfo->SoftErrors != + PhysicalDeviceInfo->SoftErrors) || + (NewPhysicalDeviceInfo->HardErrors != + PhysicalDeviceInfo->HardErrors) || + (NewPhysicalDeviceInfo->MiscellaneousErrors != + PhysicalDeviceInfo->MiscellaneousErrors) || + (NewPhysicalDeviceInfo->CommandTimeouts != + PhysicalDeviceInfo->CommandTimeouts) || + (NewPhysicalDeviceInfo->Retries != + PhysicalDeviceInfo->Retries) || + (NewPhysicalDeviceInfo->Aborts != + PhysicalDeviceInfo->Aborts) || + (NewPhysicalDeviceInfo->PredictedFailuresDetected != + PhysicalDeviceInfo->PredictedFailuresDetected)) + { + DAC960_Critical("Physical Device %d:%d Errors: " + "Parity = %d, Soft = %d, " + "Hard = %d, Misc = %d\n", + Controller, + NewPhysicalDeviceInfo->Channel, + NewPhysicalDeviceInfo->TargetID, + NewPhysicalDeviceInfo->ParityErrors, + NewPhysicalDeviceInfo->SoftErrors, + NewPhysicalDeviceInfo->HardErrors, + NewPhysicalDeviceInfo->MiscellaneousErrors); + DAC960_Critical("Physical Device %d:%d Errors: " + "Timeouts = %d, Retries = %d, " + "Aborts = %d, Predicted = %d\n", + Controller, + NewPhysicalDeviceInfo->Channel, + NewPhysicalDeviceInfo->TargetID, + NewPhysicalDeviceInfo->CommandTimeouts, + NewPhysicalDeviceInfo->Retries, + NewPhysicalDeviceInfo->Aborts, + NewPhysicalDeviceInfo + ->PredictedFailuresDetected); + } + if ((PhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_Dead || + PhysicalDeviceInfo->PhysicalDeviceState + == DAC960_V2_Device_InvalidState) && + NewPhysicalDeviceInfo->PhysicalDeviceState + != DAC960_V2_Device_Dead) + Controller->V2.NeedDeviceSerialNumberInformation = true; + memcpy(PhysicalDeviceInfo, NewPhysicalDeviceInfo, + sizeof(DAC960_V2_PhysicalDeviceInfo_T)); + } + NewPhysicalDeviceInfo->LogicalUnit++; + Controller->V2.PhysicalDeviceIndex++; + } + else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid) + { + unsigned int DeviceIndex; + for (DeviceIndex = Controller->V2.PhysicalDeviceIndex; + DeviceIndex < DAC960_V2_MaxPhysicalDevices; + DeviceIndex++) + { + DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo = + Controller->V2.PhysicalDeviceInformation[DeviceIndex]; + DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber = + Controller->V2.InquiryUnitSerialNumber[DeviceIndex]; + if (PhysicalDeviceInfo == NULL) break; + DAC960_Critical("Physical Device %d:%d No Longer Exists\n", + Controller, + PhysicalDeviceInfo->Channel, + PhysicalDeviceInfo->TargetID); + Controller->V2.PhysicalDeviceInformation[DeviceIndex] = NULL; + Controller->V2.InquiryUnitSerialNumber[DeviceIndex] = NULL; + kfree(PhysicalDeviceInfo); + kfree(InquiryUnitSerialNumber); + } + Controller->V2.NeedPhysicalDeviceInformation = false; + } + else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid && + CommandStatus == DAC960_V2_NormalCompletion) + { + DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo = + Controller->V2.NewLogicalDeviceInformation; + unsigned short LogicalDeviceNumber = + NewLogicalDeviceInfo->LogicalDeviceNumber; + DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo = + Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber]; + if (LogicalDeviceInfo == NULL) + { + DAC960_V2_PhysicalDevice_T PhysicalDevice; + PhysicalDevice.Controller = 0; + PhysicalDevice.Channel = NewLogicalDeviceInfo->Channel; + PhysicalDevice.TargetID = NewLogicalDeviceInfo->TargetID; + PhysicalDevice.LogicalUnit = NewLogicalDeviceInfo->LogicalUnit; + 
Controller->V2.LogicalDriveToVirtualDevice[LogicalDeviceNumber] = + PhysicalDevice; + LogicalDeviceInfo = kmalloc(sizeof(DAC960_V2_LogicalDeviceInfo_T), + GFP_ATOMIC); + Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber] = + LogicalDeviceInfo; + DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) " + "Now Exists%s\n", Controller, + LogicalDeviceNumber, + Controller->ControllerNumber, + LogicalDeviceNumber, + (LogicalDeviceInfo != NULL + ? "" : " - Allocation Failed")); + if (LogicalDeviceInfo != NULL) + { + memset(LogicalDeviceInfo, 0, + sizeof(DAC960_V2_LogicalDeviceInfo_T)); + DAC960_ComputeGenericDiskInfo(Controller); + } + } + if (LogicalDeviceInfo != NULL) + { + unsigned long LogicalDeviceSize = + NewLogicalDeviceInfo->ConfigurableDeviceSize; + if (NewLogicalDeviceInfo->LogicalDeviceState != + LogicalDeviceInfo->LogicalDeviceState) + DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) " + "is now %s\n", Controller, + LogicalDeviceNumber, + Controller->ControllerNumber, + LogicalDeviceNumber, + (NewLogicalDeviceInfo->LogicalDeviceState + == DAC960_V2_LogicalDevice_Online + ? "ONLINE" + : NewLogicalDeviceInfo->LogicalDeviceState + == DAC960_V2_LogicalDevice_Critical + ? "CRITICAL" : "OFFLINE")); + if ((NewLogicalDeviceInfo->SoftErrors != + LogicalDeviceInfo->SoftErrors) || + (NewLogicalDeviceInfo->CommandsFailed != + LogicalDeviceInfo->CommandsFailed) || + (NewLogicalDeviceInfo->DeferredWriteErrors != + LogicalDeviceInfo->DeferredWriteErrors)) + DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) Errors: " + "Soft = %d, Failed = %d, Deferred Write = %d\n", + Controller, LogicalDeviceNumber, + Controller->ControllerNumber, + LogicalDeviceNumber, + NewLogicalDeviceInfo->SoftErrors, + NewLogicalDeviceInfo->CommandsFailed, + NewLogicalDeviceInfo->DeferredWriteErrors); + if (NewLogicalDeviceInfo->ConsistencyCheckInProgress) + DAC960_V2_ReportProgress(Controller, + "Consistency Check", + LogicalDeviceNumber, + NewLogicalDeviceInfo + ->ConsistencyCheckBlockNumber, + LogicalDeviceSize); + else if (NewLogicalDeviceInfo->RebuildInProgress) + DAC960_V2_ReportProgress(Controller, + "Rebuild", + LogicalDeviceNumber, + NewLogicalDeviceInfo + ->RebuildBlockNumber, + LogicalDeviceSize); + else if (NewLogicalDeviceInfo->BackgroundInitializationInProgress) + DAC960_V2_ReportProgress(Controller, + "Background Initialization", + LogicalDeviceNumber, + NewLogicalDeviceInfo + ->BackgroundInitializationBlockNumber, + LogicalDeviceSize); + else if (NewLogicalDeviceInfo->ForegroundInitializationInProgress) + DAC960_V2_ReportProgress(Controller, + "Foreground Initialization", + LogicalDeviceNumber, + NewLogicalDeviceInfo + ->ForegroundInitializationBlockNumber, + LogicalDeviceSize); + else if (NewLogicalDeviceInfo->DataMigrationInProgress) + DAC960_V2_ReportProgress(Controller, + "Data Migration", + LogicalDeviceNumber, + NewLogicalDeviceInfo + ->DataMigrationBlockNumber, + LogicalDeviceSize); + else if (NewLogicalDeviceInfo->PatrolOperationInProgress) + DAC960_V2_ReportProgress(Controller, + "Patrol Operation", + LogicalDeviceNumber, + NewLogicalDeviceInfo + ->PatrolOperationBlockNumber, + LogicalDeviceSize); + if (LogicalDeviceInfo->BackgroundInitializationInProgress && + !NewLogicalDeviceInfo->BackgroundInitializationInProgress) + DAC960_Progress("Logical Drive %d (/dev/rd/c%dd%d) " + "Background Initialization %s\n", + Controller, + LogicalDeviceNumber, + Controller->ControllerNumber, + LogicalDeviceNumber, + (NewLogicalDeviceInfo->LogicalDeviceControl + .LogicalDeviceInitialized + ? 
"Completed" : "Failed")); + memcpy(LogicalDeviceInfo, NewLogicalDeviceInfo, + sizeof(DAC960_V2_LogicalDeviceInfo_T)); + } + Controller->V2.LogicalDriveFoundDuringScan + [LogicalDeviceNumber] = true; + NewLogicalDeviceInfo->LogicalDeviceNumber++; + } + else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid) + { + int LogicalDriveNumber; + for (LogicalDriveNumber = 0; + LogicalDriveNumber < DAC960_MaxLogicalDrives; + LogicalDriveNumber++) + { + DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo = + Controller->V2.LogicalDeviceInformation[LogicalDriveNumber]; + if (LogicalDeviceInfo == NULL || + Controller->V2.LogicalDriveFoundDuringScan + [LogicalDriveNumber]) + continue; + DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) " + "No Longer Exists\n", Controller, + LogicalDriveNumber, + Controller->ControllerNumber, + LogicalDriveNumber); + Controller->V2.LogicalDeviceInformation + [LogicalDriveNumber] = NULL; + kfree(LogicalDeviceInfo); + Controller->LogicalDriveInitiallyAccessible + [LogicalDriveNumber] = false; + DAC960_ComputeGenericDiskInfo(Controller); + } + Controller->V2.NeedLogicalDeviceInformation = false; + } + else if (CommandOpcode == DAC960_V2_SCSI_10_Passthru) + { + DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber = + Controller->V2.InquiryUnitSerialNumber[Controller->V2.PhysicalDeviceIndex - 1]; + + if (CommandStatus != DAC960_V2_NormalCompletion) { + memset(InquiryUnitSerialNumber, + 0, sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T)); + InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F; + } else + memcpy(InquiryUnitSerialNumber, + Controller->V2.NewInquiryUnitSerialNumber, + sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T)); + + Controller->V2.NeedDeviceSerialNumberInformation = false; + } + + if (Controller->V2.HealthStatusBuffer->NextEventSequenceNumber + - Controller->V2.NextEventSequenceNumber > 0) + { + CommandMailbox->GetEvent.CommandOpcode = DAC960_V2_IOCTL; + CommandMailbox->GetEvent.DataTransferSize = sizeof(DAC960_V2_Event_T); + CommandMailbox->GetEvent.EventSequenceNumberHigh16 = + Controller->V2.NextEventSequenceNumber >> 16; + CommandMailbox->GetEvent.ControllerNumber = 0; + CommandMailbox->GetEvent.IOCTL_Opcode = + DAC960_V2_GetEvent; + CommandMailbox->GetEvent.EventSequenceNumberLow16 = + Controller->V2.NextEventSequenceNumber & 0xFFFF; + CommandMailbox->GetEvent.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentDataPointer = + Controller->V2.EventDMA; + CommandMailbox->GetEvent.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentByteCount = + CommandMailbox->GetEvent.DataTransferSize; + DAC960_QueueCommand(Command); + return; + } + if (Controller->V2.NeedPhysicalDeviceInformation) + { + if (Controller->V2.NeedDeviceSerialNumberInformation) + { + DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber = + Controller->V2.NewInquiryUnitSerialNumber; + InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F; + + DAC960_V2_ConstructNewUnitSerialNumber(Controller, CommandMailbox, + Controller->V2.NewPhysicalDeviceInformation->Channel, + Controller->V2.NewPhysicalDeviceInformation->TargetID, + Controller->V2.NewPhysicalDeviceInformation->LogicalUnit - 1); + + + DAC960_QueueCommand(Command); + return; + } + if (Controller->V2.StartPhysicalDeviceInformationScan) + { + Controller->V2.PhysicalDeviceIndex = 0; + Controller->V2.NewPhysicalDeviceInformation->Channel = 0; + Controller->V2.NewPhysicalDeviceInformation->TargetID = 0; + Controller->V2.NewPhysicalDeviceInformation->LogicalUnit = 0; + 
Controller->V2.StartPhysicalDeviceInformationScan = false; + } + CommandMailbox->PhysicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL; + CommandMailbox->PhysicalDeviceInfo.DataTransferSize = + sizeof(DAC960_V2_PhysicalDeviceInfo_T); + CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.LogicalUnit = + Controller->V2.NewPhysicalDeviceInformation->LogicalUnit; + CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.TargetID = + Controller->V2.NewPhysicalDeviceInformation->TargetID; + CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.Channel = + Controller->V2.NewPhysicalDeviceInformation->Channel; + CommandMailbox->PhysicalDeviceInfo.IOCTL_Opcode = + DAC960_V2_GetPhysicalDeviceInfoValid; + CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentDataPointer = + Controller->V2.NewPhysicalDeviceInformationDMA; + CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentByteCount = + CommandMailbox->PhysicalDeviceInfo.DataTransferSize; + DAC960_QueueCommand(Command); + return; + } + if (Controller->V2.NeedLogicalDeviceInformation) + { + if (Controller->V2.StartLogicalDeviceInformationScan) + { + int LogicalDriveNumber; + for (LogicalDriveNumber = 0; + LogicalDriveNumber < DAC960_MaxLogicalDrives; + LogicalDriveNumber++) + Controller->V2.LogicalDriveFoundDuringScan + [LogicalDriveNumber] = false; + Controller->V2.NewLogicalDeviceInformation->LogicalDeviceNumber = 0; + Controller->V2.StartLogicalDeviceInformationScan = false; + } + CommandMailbox->LogicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL; + CommandMailbox->LogicalDeviceInfo.DataTransferSize = + sizeof(DAC960_V2_LogicalDeviceInfo_T); + CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber = + Controller->V2.NewLogicalDeviceInformation->LogicalDeviceNumber; + CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode = + DAC960_V2_GetLogicalDeviceInfoValid; + CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentDataPointer = + Controller->V2.NewLogicalDeviceInformationDMA; + CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentByteCount = + CommandMailbox->LogicalDeviceInfo.DataTransferSize; + DAC960_QueueCommand(Command); + return; + } + Controller->MonitoringTimerCount++; + Controller->MonitoringTimer.expires = + jiffies + DAC960_HealthStatusMonitoringInterval; + add_timer(&Controller->MonitoringTimer); + } + if (CommandType == DAC960_ImmediateCommand) + { + complete(Command->Completion); + Command->Completion = NULL; + return; + } + if (CommandType == DAC960_QueuedCommand) + { + DAC960_V2_KernelCommand_T *KernelCommand = Command->V2.KernelCommand; + KernelCommand->CommandStatus = CommandStatus; + KernelCommand->RequestSenseLength = Command->V2.RequestSenseLength; + KernelCommand->DataTransferLength = Command->V2.DataTransferResidue; + Command->V2.KernelCommand = NULL; + DAC960_DeallocateCommand(Command); + KernelCommand->CompletionFunction(KernelCommand); + return; + } + /* + Queue a Status Monitoring Command to the Controller using the just + completed Command if one was deferred previously due to lack of a + free Command when the Monitoring Timer Function was called. + */ + if (Controller->MonitoringCommandDeferred) + { + Controller->MonitoringCommandDeferred = false; + DAC960_V2_QueueMonitoringCommand(Command); + return; + } + /* + Deallocate the Command. + */ + DAC960_DeallocateCommand(Command); + /* + Wake up any processes waiting on a free Command. 
+ */ + wake_up(&Controller->CommandWaitQueue); +} + +/* + DAC960_GEM_InterruptHandler handles hardware interrupts from DAC960 GEM Series + Controllers. +*/ + +static irqreturn_t DAC960_GEM_InterruptHandler(int IRQ_Channel, + void *DeviceIdentifier) +{ + DAC960_Controller_T *Controller = DeviceIdentifier; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + DAC960_V2_StatusMailbox_T *NextStatusMailbox; + unsigned long flags; + + spin_lock_irqsave(&Controller->queue_lock, flags); + DAC960_GEM_AcknowledgeInterrupt(ControllerBaseAddress); + NextStatusMailbox = Controller->V2.NextStatusMailbox; + while (NextStatusMailbox->Fields.CommandIdentifier > 0) + { + DAC960_V2_CommandIdentifier_T CommandIdentifier = + NextStatusMailbox->Fields.CommandIdentifier; + DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1]; + Command->V2.CommandStatus = NextStatusMailbox->Fields.CommandStatus; + Command->V2.RequestSenseLength = + NextStatusMailbox->Fields.RequestSenseLength; + Command->V2.DataTransferResidue = + NextStatusMailbox->Fields.DataTransferResidue; + NextStatusMailbox->Words[0] = 0; + if (++NextStatusMailbox > Controller->V2.LastStatusMailbox) + NextStatusMailbox = Controller->V2.FirstStatusMailbox; + DAC960_V2_ProcessCompletedCommand(Command); + } + Controller->V2.NextStatusMailbox = NextStatusMailbox; + /* + Attempt to remove additional I/O Requests from the Controller's + I/O Request Queue and queue them to the Controller. + */ + DAC960_ProcessRequest(Controller); + spin_unlock_irqrestore(&Controller->queue_lock, flags); + return IRQ_HANDLED; +} + +/* + DAC960_BA_InterruptHandler handles hardware interrupts from DAC960 BA Series + Controllers. +*/ + +static irqreturn_t DAC960_BA_InterruptHandler(int IRQ_Channel, + void *DeviceIdentifier) +{ + DAC960_Controller_T *Controller = DeviceIdentifier; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + DAC960_V2_StatusMailbox_T *NextStatusMailbox; + unsigned long flags; + + spin_lock_irqsave(&Controller->queue_lock, flags); + DAC960_BA_AcknowledgeInterrupt(ControllerBaseAddress); + NextStatusMailbox = Controller->V2.NextStatusMailbox; + while (NextStatusMailbox->Fields.CommandIdentifier > 0) + { + DAC960_V2_CommandIdentifier_T CommandIdentifier = + NextStatusMailbox->Fields.CommandIdentifier; + DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1]; + Command->V2.CommandStatus = NextStatusMailbox->Fields.CommandStatus; + Command->V2.RequestSenseLength = + NextStatusMailbox->Fields.RequestSenseLength; + Command->V2.DataTransferResidue = + NextStatusMailbox->Fields.DataTransferResidue; + NextStatusMailbox->Words[0] = 0; + if (++NextStatusMailbox > Controller->V2.LastStatusMailbox) + NextStatusMailbox = Controller->V2.FirstStatusMailbox; + DAC960_V2_ProcessCompletedCommand(Command); + } + Controller->V2.NextStatusMailbox = NextStatusMailbox; + /* + Attempt to remove additional I/O Requests from the Controller's + I/O Request Queue and queue them to the Controller. + */ + DAC960_ProcessRequest(Controller); + spin_unlock_irqrestore(&Controller->queue_lock, flags); + return IRQ_HANDLED; +} + + +/* + DAC960_LP_InterruptHandler handles hardware interrupts from DAC960 LP Series + Controllers. 
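+
+  As with the GEM and BA Series handlers above, completed commands are
+  consumed from the Status Mailbox ring: a mailbox whose Command Identifier
+  is nonzero holds a completion, and clearing Words[0] returns the slot to
+  the controller before the Next Status Mailbox pointer advances and wraps.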
+*/ + +static irqreturn_t DAC960_LP_InterruptHandler(int IRQ_Channel, + void *DeviceIdentifier) +{ + DAC960_Controller_T *Controller = DeviceIdentifier; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + DAC960_V2_StatusMailbox_T *NextStatusMailbox; + unsigned long flags; + + spin_lock_irqsave(&Controller->queue_lock, flags); + DAC960_LP_AcknowledgeInterrupt(ControllerBaseAddress); + NextStatusMailbox = Controller->V2.NextStatusMailbox; + while (NextStatusMailbox->Fields.CommandIdentifier > 0) + { + DAC960_V2_CommandIdentifier_T CommandIdentifier = + NextStatusMailbox->Fields.CommandIdentifier; + DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1]; + Command->V2.CommandStatus = NextStatusMailbox->Fields.CommandStatus; + Command->V2.RequestSenseLength = + NextStatusMailbox->Fields.RequestSenseLength; + Command->V2.DataTransferResidue = + NextStatusMailbox->Fields.DataTransferResidue; + NextStatusMailbox->Words[0] = 0; + if (++NextStatusMailbox > Controller->V2.LastStatusMailbox) + NextStatusMailbox = Controller->V2.FirstStatusMailbox; + DAC960_V2_ProcessCompletedCommand(Command); + } + Controller->V2.NextStatusMailbox = NextStatusMailbox; + /* + Attempt to remove additional I/O Requests from the Controller's + I/O Request Queue and queue them to the Controller. + */ + DAC960_ProcessRequest(Controller); + spin_unlock_irqrestore(&Controller->queue_lock, flags); + return IRQ_HANDLED; +} + + +/* + DAC960_LA_InterruptHandler handles hardware interrupts from DAC960 LA Series + Controllers. +*/ + +static irqreturn_t DAC960_LA_InterruptHandler(int IRQ_Channel, + void *DeviceIdentifier) +{ + DAC960_Controller_T *Controller = DeviceIdentifier; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + DAC960_V1_StatusMailbox_T *NextStatusMailbox; + unsigned long flags; + + spin_lock_irqsave(&Controller->queue_lock, flags); + DAC960_LA_AcknowledgeInterrupt(ControllerBaseAddress); + NextStatusMailbox = Controller->V1.NextStatusMailbox; + while (NextStatusMailbox->Fields.Valid) + { + DAC960_V1_CommandIdentifier_T CommandIdentifier = + NextStatusMailbox->Fields.CommandIdentifier; + DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1]; + Command->V1.CommandStatus = NextStatusMailbox->Fields.CommandStatus; + NextStatusMailbox->Word = 0; + if (++NextStatusMailbox > Controller->V1.LastStatusMailbox) + NextStatusMailbox = Controller->V1.FirstStatusMailbox; + DAC960_V1_ProcessCompletedCommand(Command); + } + Controller->V1.NextStatusMailbox = NextStatusMailbox; + /* + Attempt to remove additional I/O Requests from the Controller's + I/O Request Queue and queue them to the Controller. + */ + DAC960_ProcessRequest(Controller); + spin_unlock_irqrestore(&Controller->queue_lock, flags); + return IRQ_HANDLED; +} + + +/* + DAC960_PG_InterruptHandler handles hardware interrupts from DAC960 PG Series + Controllers. 
+*/ + +static irqreturn_t DAC960_PG_InterruptHandler(int IRQ_Channel, + void *DeviceIdentifier) +{ + DAC960_Controller_T *Controller = DeviceIdentifier; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + DAC960_V1_StatusMailbox_T *NextStatusMailbox; + unsigned long flags; + + spin_lock_irqsave(&Controller->queue_lock, flags); + DAC960_PG_AcknowledgeInterrupt(ControllerBaseAddress); + NextStatusMailbox = Controller->V1.NextStatusMailbox; + while (NextStatusMailbox->Fields.Valid) + { + DAC960_V1_CommandIdentifier_T CommandIdentifier = + NextStatusMailbox->Fields.CommandIdentifier; + DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1]; + Command->V1.CommandStatus = NextStatusMailbox->Fields.CommandStatus; + NextStatusMailbox->Word = 0; + if (++NextStatusMailbox > Controller->V1.LastStatusMailbox) + NextStatusMailbox = Controller->V1.FirstStatusMailbox; + DAC960_V1_ProcessCompletedCommand(Command); + } + Controller->V1.NextStatusMailbox = NextStatusMailbox; + /* + Attempt to remove additional I/O Requests from the Controller's + I/O Request Queue and queue them to the Controller. + */ + DAC960_ProcessRequest(Controller); + spin_unlock_irqrestore(&Controller->queue_lock, flags); + return IRQ_HANDLED; +} + + +/* + DAC960_PD_InterruptHandler handles hardware interrupts from DAC960 PD Series + Controllers. +*/ + +static irqreturn_t DAC960_PD_InterruptHandler(int IRQ_Channel, + void *DeviceIdentifier) +{ + DAC960_Controller_T *Controller = DeviceIdentifier; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + unsigned long flags; + + spin_lock_irqsave(&Controller->queue_lock, flags); + while (DAC960_PD_StatusAvailableP(ControllerBaseAddress)) + { + DAC960_V1_CommandIdentifier_T CommandIdentifier = + DAC960_PD_ReadStatusCommandIdentifier(ControllerBaseAddress); + DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1]; + Command->V1.CommandStatus = + DAC960_PD_ReadStatusRegister(ControllerBaseAddress); + DAC960_PD_AcknowledgeInterrupt(ControllerBaseAddress); + DAC960_PD_AcknowledgeStatus(ControllerBaseAddress); + DAC960_V1_ProcessCompletedCommand(Command); + } + /* + Attempt to remove additional I/O Requests from the Controller's + I/O Request Queue and queue them to the Controller. + */ + DAC960_ProcessRequest(Controller); + spin_unlock_irqrestore(&Controller->queue_lock, flags); + return IRQ_HANDLED; +} + + +/* + DAC960_P_InterruptHandler handles hardware interrupts from DAC960 P Series + Controllers. + + Translations of DAC960_V1_Enquiry and DAC960_V1_GetDeviceState rely + on the data having been placed into DAC960_Controller_T, rather than + an arbitrary buffer. 
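+
+  For example, a completed DAC960_V1_Enquiry_Old command has its opcode
+  rewritten to DAC960_V1_Enquiry, and the Enquiry data in
+  Controller->V1.NewEnquiry is translated in place by
+  DAC960_P_To_PD_TranslateEnquiry before DAC960_V1_ProcessCompletedCommand
+  is called.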
+*/ + +static irqreturn_t DAC960_P_InterruptHandler(int IRQ_Channel, + void *DeviceIdentifier) +{ + DAC960_Controller_T *Controller = DeviceIdentifier; + void __iomem *ControllerBaseAddress = Controller->BaseAddress; + unsigned long flags; + + spin_lock_irqsave(&Controller->queue_lock, flags); + while (DAC960_PD_StatusAvailableP(ControllerBaseAddress)) + { + DAC960_V1_CommandIdentifier_T CommandIdentifier = + DAC960_PD_ReadStatusCommandIdentifier(ControllerBaseAddress); + DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1]; + DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox; + DAC960_V1_CommandOpcode_T CommandOpcode = + CommandMailbox->Common.CommandOpcode; + Command->V1.CommandStatus = + DAC960_PD_ReadStatusRegister(ControllerBaseAddress); + DAC960_PD_AcknowledgeInterrupt(ControllerBaseAddress); + DAC960_PD_AcknowledgeStatus(ControllerBaseAddress); + switch (CommandOpcode) + { + case DAC960_V1_Enquiry_Old: + Command->V1.CommandMailbox.Common.CommandOpcode = DAC960_V1_Enquiry; + DAC960_P_To_PD_TranslateEnquiry(Controller->V1.NewEnquiry); + break; + case DAC960_V1_GetDeviceState_Old: + Command->V1.CommandMailbox.Common.CommandOpcode = + DAC960_V1_GetDeviceState; + DAC960_P_To_PD_TranslateDeviceState(Controller->V1.NewDeviceState); + break; + case DAC960_V1_Read_Old: + Command->V1.CommandMailbox.Common.CommandOpcode = DAC960_V1_Read; + DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox); + break; + case DAC960_V1_Write_Old: + Command->V1.CommandMailbox.Common.CommandOpcode = DAC960_V1_Write; + DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox); + break; + case DAC960_V1_ReadWithScatterGather_Old: + Command->V1.CommandMailbox.Common.CommandOpcode = + DAC960_V1_ReadWithScatterGather; + DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox); + break; + case DAC960_V1_WriteWithScatterGather_Old: + Command->V1.CommandMailbox.Common.CommandOpcode = + DAC960_V1_WriteWithScatterGather; + DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox); + break; + default: + break; + } + DAC960_V1_ProcessCompletedCommand(Command); + } + /* + Attempt to remove additional I/O Requests from the Controller's + I/O Request Queue and queue them to the Controller. + */ + DAC960_ProcessRequest(Controller); + spin_unlock_irqrestore(&Controller->queue_lock, flags); + return IRQ_HANDLED; +} + + +/* + DAC960_V1_QueueMonitoringCommand queues a Monitoring Command to DAC960 V1 + Firmware Controllers. +*/ + +static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox; + DAC960_V1_ClearCommand(Command); + Command->CommandType = DAC960_MonitoringCommand; + CommandMailbox->Type3.CommandOpcode = DAC960_V1_Enquiry; + CommandMailbox->Type3.BusAddress = Controller->V1.NewEnquiryDMA; + DAC960_QueueCommand(Command); +} + + +/* + DAC960_V2_QueueMonitoringCommand queues a Monitoring Command to DAC960 V2 + Firmware Controllers. 
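+
+  The Monitoring Command is a DAC960_V2_GetControllerInfo IOCTL whose
+  Controller Information is returned by DMA into the New Controller
+  Information buffer.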
+*/ + +static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *Command) +{ + DAC960_Controller_T *Controller = Command->Controller; + DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox; + DAC960_V2_ClearCommand(Command); + Command->CommandType = DAC960_MonitoringCommand; + CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL; + CommandMailbox->ControllerInfo.CommandControlBits + .DataTransferControllerToHost = true; + CommandMailbox->ControllerInfo.CommandControlBits + .NoAutoRequestSense = true; + CommandMailbox->ControllerInfo.DataTransferSize = + sizeof(DAC960_V2_ControllerInfo_T); + CommandMailbox->ControllerInfo.ControllerNumber = 0; + CommandMailbox->ControllerInfo.IOCTL_Opcode = DAC960_V2_GetControllerInfo; + CommandMailbox->ControllerInfo.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentDataPointer = + Controller->V2.NewControllerInformationDMA; + CommandMailbox->ControllerInfo.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentByteCount = + CommandMailbox->ControllerInfo.DataTransferSize; + DAC960_QueueCommand(Command); +} + + +/* + DAC960_MonitoringTimerFunction is the timer function for monitoring + the status of DAC960 Controllers. +*/ + +static void DAC960_MonitoringTimerFunction(unsigned long TimerData) +{ + DAC960_Controller_T *Controller = (DAC960_Controller_T *) TimerData; + DAC960_Command_T *Command; + unsigned long flags; + + if (Controller->FirmwareType == DAC960_V1_Controller) + { + spin_lock_irqsave(&Controller->queue_lock, flags); + /* + Queue a Status Monitoring Command to Controller. + */ + Command = DAC960_AllocateCommand(Controller); + if (Command != NULL) + DAC960_V1_QueueMonitoringCommand(Command); + else Controller->MonitoringCommandDeferred = true; + spin_unlock_irqrestore(&Controller->queue_lock, flags); + } + else + { + DAC960_V2_ControllerInfo_T *ControllerInfo = + &Controller->V2.ControllerInformation; + unsigned int StatusChangeCounter = + Controller->V2.HealthStatusBuffer->StatusChangeCounter; + bool ForceMonitoringCommand = false; + if (time_after(jiffies, Controller->SecondaryMonitoringTime + + DAC960_SecondaryMonitoringInterval)) + { + int LogicalDriveNumber; + for (LogicalDriveNumber = 0; + LogicalDriveNumber < DAC960_MaxLogicalDrives; + LogicalDriveNumber++) + { + DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo = + Controller->V2.LogicalDeviceInformation[LogicalDriveNumber]; + if (LogicalDeviceInfo == NULL) continue; + if (!LogicalDeviceInfo->LogicalDeviceControl + .LogicalDeviceInitialized) + { + ForceMonitoringCommand = true; + break; + } + } + Controller->SecondaryMonitoringTime = jiffies; + } + if (StatusChangeCounter == Controller->V2.StatusChangeCounter && + Controller->V2.HealthStatusBuffer->NextEventSequenceNumber + == Controller->V2.NextEventSequenceNumber && + (ControllerInfo->BackgroundInitializationsActive + + ControllerInfo->LogicalDeviceInitializationsActive + + ControllerInfo->PhysicalDeviceInitializationsActive + + ControllerInfo->ConsistencyChecksActive + + ControllerInfo->RebuildsActive + + ControllerInfo->OnlineExpansionsActive == 0 || + time_before(jiffies, Controller->PrimaryMonitoringTime + + DAC960_MonitoringTimerInterval)) && + !ForceMonitoringCommand) + { + Controller->MonitoringTimer.expires = + jiffies + DAC960_HealthStatusMonitoringInterval; + add_timer(&Controller->MonitoringTimer); + return; + } + Controller->V2.StatusChangeCounter = StatusChangeCounter; + Controller->PrimaryMonitoringTime = jiffies; + + spin_lock_irqsave(&Controller->queue_lock, 
flags); + /* + Queue a Status Monitoring Command to Controller. + */ + Command = DAC960_AllocateCommand(Controller); + if (Command != NULL) + DAC960_V2_QueueMonitoringCommand(Command); + else Controller->MonitoringCommandDeferred = true; + spin_unlock_irqrestore(&Controller->queue_lock, flags); + /* + Wake up any processes waiting on a Health Status Buffer change. + */ + wake_up(&Controller->HealthStatusWaitQueue); + } +} + +/* + DAC960_CheckStatusBuffer verifies that there is room to hold ByteCount + additional bytes in the Combined Status Buffer and grows the buffer if + necessary. It returns true if there is enough room and false otherwise. +*/ + +static bool DAC960_CheckStatusBuffer(DAC960_Controller_T *Controller, + unsigned int ByteCount) +{ + unsigned char *NewStatusBuffer; + if (Controller->InitialStatusLength + 1 + + Controller->CurrentStatusLength + ByteCount + 1 <= + Controller->CombinedStatusBufferLength) + return true; + if (Controller->CombinedStatusBufferLength == 0) + { + unsigned int NewStatusBufferLength = DAC960_InitialStatusBufferSize; + while (NewStatusBufferLength < ByteCount) + NewStatusBufferLength *= 2; + Controller->CombinedStatusBuffer = kmalloc(NewStatusBufferLength, + GFP_ATOMIC); + if (Controller->CombinedStatusBuffer == NULL) return false; + Controller->CombinedStatusBufferLength = NewStatusBufferLength; + return true; + } + NewStatusBuffer = kmalloc(2 * Controller->CombinedStatusBufferLength, + GFP_ATOMIC); + if (NewStatusBuffer == NULL) + { + DAC960_Warning("Unable to expand Combined Status Buffer - Truncating\n", + Controller); + return false; + } + memcpy(NewStatusBuffer, Controller->CombinedStatusBuffer, + Controller->CombinedStatusBufferLength); + kfree(Controller->CombinedStatusBuffer); + Controller->CombinedStatusBuffer = NewStatusBuffer; + Controller->CombinedStatusBufferLength *= 2; + Controller->CurrentStatusBuffer = + &NewStatusBuffer[Controller->InitialStatusLength + 1]; + return true; +} + + +/* + DAC960_Message prints Driver Messages. +*/ + +static void DAC960_Message(DAC960_MessageLevel_T MessageLevel, + unsigned char *Format, + DAC960_Controller_T *Controller, + ...) 
+{ + static unsigned char Buffer[DAC960_LineBufferSize]; + static bool BeginningOfLine = true; + va_list Arguments; + int Length = 0; + va_start(Arguments, Controller); + Length = vsprintf(Buffer, Format, Arguments); + va_end(Arguments); + if (Controller == NULL) + printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel], + DAC960_ControllerCount, Buffer); + else if (MessageLevel == DAC960_AnnounceLevel || + MessageLevel == DAC960_InfoLevel) + { + if (!Controller->ControllerInitialized) + { + if (DAC960_CheckStatusBuffer(Controller, Length)) + { + strcpy(&Controller->CombinedStatusBuffer + [Controller->InitialStatusLength], + Buffer); + Controller->InitialStatusLength += Length; + Controller->CurrentStatusBuffer = + &Controller->CombinedStatusBuffer + [Controller->InitialStatusLength + 1]; + } + if (MessageLevel == DAC960_AnnounceLevel) + { + static int AnnouncementLines = 0; + if (++AnnouncementLines <= 2) + printk("%sDAC960: %s", DAC960_MessageLevelMap[MessageLevel], + Buffer); + } + else + { + if (BeginningOfLine) + { + if (Buffer[0] != '\n' || Length > 1) + printk("%sDAC960#%d: %s", + DAC960_MessageLevelMap[MessageLevel], + Controller->ControllerNumber, Buffer); + } + else printk("%s", Buffer); + } + } + else if (DAC960_CheckStatusBuffer(Controller, Length)) + { + strcpy(&Controller->CurrentStatusBuffer[ + Controller->CurrentStatusLength], Buffer); + Controller->CurrentStatusLength += Length; + } + } + else if (MessageLevel == DAC960_ProgressLevel) + { + strcpy(Controller->ProgressBuffer, Buffer); + Controller->ProgressBufferLength = Length; + if (Controller->EphemeralProgressMessage) + { + if (time_after_eq(jiffies, Controller->LastProgressReportTime + + DAC960_ProgressReportingInterval)) + { + printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel], + Controller->ControllerNumber, Buffer); + Controller->LastProgressReportTime = jiffies; + } + } + else printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel], + Controller->ControllerNumber, Buffer); + } + else if (MessageLevel == DAC960_UserCriticalLevel) + { + strcpy(&Controller->UserStatusBuffer[Controller->UserStatusLength], + Buffer); + Controller->UserStatusLength += Length; + if (Buffer[0] != '\n' || Length > 1) + printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel], + Controller->ControllerNumber, Buffer); + } + else + { + if (BeginningOfLine) + printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel], + Controller->ControllerNumber, Buffer); + else printk("%s", Buffer); + } + BeginningOfLine = (Buffer[Length-1] == '\n'); +} + + +/* + DAC960_ParsePhysicalDevice parses spaces followed by a Physical Device + Channel:TargetID specification from a User Command string. It updates + Channel and TargetID and returns true on success and false on failure. 
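+
+  For example, given the string " 1:3", Channel is set to 1 and TargetID
+  to 3.  The parse fails if the separator is not ':', if anything follows
+  the TargetID, or if either value exceeds the Controller's Channel or
+  Target limits.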
+*/ + +static bool DAC960_ParsePhysicalDevice(DAC960_Controller_T *Controller, + char *UserCommandString, + unsigned char *Channel, + unsigned char *TargetID) +{ + char *NewUserCommandString = UserCommandString; + unsigned long XChannel, XTargetID; + while (*UserCommandString == ' ') UserCommandString++; + if (UserCommandString == NewUserCommandString) + return false; + XChannel = simple_strtoul(UserCommandString, &NewUserCommandString, 10); + if (NewUserCommandString == UserCommandString || + *NewUserCommandString != ':' || + XChannel >= Controller->Channels) + return false; + UserCommandString = ++NewUserCommandString; + XTargetID = simple_strtoul(UserCommandString, &NewUserCommandString, 10); + if (NewUserCommandString == UserCommandString || + *NewUserCommandString != '\0' || + XTargetID >= Controller->Targets) + return false; + *Channel = XChannel; + *TargetID = XTargetID; + return true; +} + + +/* + DAC960_ParseLogicalDrive parses spaces followed by a Logical Drive Number + specification from a User Command string. It updates LogicalDriveNumber and + returns true on success and false on failure. +*/ + +static bool DAC960_ParseLogicalDrive(DAC960_Controller_T *Controller, + char *UserCommandString, + unsigned char *LogicalDriveNumber) +{ + char *NewUserCommandString = UserCommandString; + unsigned long XLogicalDriveNumber; + while (*UserCommandString == ' ') UserCommandString++; + if (UserCommandString == NewUserCommandString) + return false; + XLogicalDriveNumber = + simple_strtoul(UserCommandString, &NewUserCommandString, 10); + if (NewUserCommandString == UserCommandString || + *NewUserCommandString != '\0' || + XLogicalDriveNumber > DAC960_MaxLogicalDrives - 1) + return false; + *LogicalDriveNumber = XLogicalDriveNumber; + return true; +} + + +/* + DAC960_V1_SetDeviceState sets the Device State for a Physical Device for + DAC960 V1 Firmware Controllers. 
+*/ + +static void DAC960_V1_SetDeviceState(DAC960_Controller_T *Controller, + DAC960_Command_T *Command, + unsigned char Channel, + unsigned char TargetID, + DAC960_V1_PhysicalDeviceState_T + DeviceState, + const unsigned char *DeviceStateString) +{ + DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox; + CommandMailbox->Type3D.CommandOpcode = DAC960_V1_StartDevice; + CommandMailbox->Type3D.Channel = Channel; + CommandMailbox->Type3D.TargetID = TargetID; + CommandMailbox->Type3D.DeviceState = DeviceState; + CommandMailbox->Type3D.Modifier = 0; + DAC960_ExecuteCommand(Command); + switch (Command->V1.CommandStatus) + { + case DAC960_V1_NormalCompletion: + DAC960_UserCritical("%s of Physical Device %d:%d Succeeded\n", Controller, + DeviceStateString, Channel, TargetID); + break; + case DAC960_V1_UnableToStartDevice: + DAC960_UserCritical("%s of Physical Device %d:%d Failed - " + "Unable to Start Device\n", Controller, + DeviceStateString, Channel, TargetID); + break; + case DAC960_V1_NoDeviceAtAddress: + DAC960_UserCritical("%s of Physical Device %d:%d Failed - " + "No Device at Address\n", Controller, + DeviceStateString, Channel, TargetID); + break; + case DAC960_V1_InvalidChannelOrTargetOrModifier: + DAC960_UserCritical("%s of Physical Device %d:%d Failed - " + "Invalid Channel or Target or Modifier\n", + Controller, DeviceStateString, Channel, TargetID); + break; + case DAC960_V1_ChannelBusy: + DAC960_UserCritical("%s of Physical Device %d:%d Failed - " + "Channel Busy\n", Controller, + DeviceStateString, Channel, TargetID); + break; + default: + DAC960_UserCritical("%s of Physical Device %d:%d Failed - " + "Unexpected Status %04X\n", Controller, + DeviceStateString, Channel, TargetID, + Command->V1.CommandStatus); + break; + } +} + + +/* + DAC960_V1_ExecuteUserCommand executes a User Command for DAC960 V1 Firmware + Controllers. 
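+
+  The commands accepted here are "flush-cache", "kill", "make-online",
+  "make-standby", and "rebuild" (the latter four taking a Channel:TargetID
+  specification), "check-consistency" (taking a Logical Drive Number),
+  "cancel-rebuild", and "cancel-consistency-check"; they are normally
+  issued by writing the command string to /proc/rd/cN/user_command.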
+*/ + +static bool DAC960_V1_ExecuteUserCommand(DAC960_Controller_T *Controller, + unsigned char *UserCommand) +{ + DAC960_Command_T *Command; + DAC960_V1_CommandMailbox_T *CommandMailbox; + unsigned long flags; + unsigned char Channel, TargetID, LogicalDriveNumber; + + spin_lock_irqsave(&Controller->queue_lock, flags); + while ((Command = DAC960_AllocateCommand(Controller)) == NULL) + DAC960_WaitForCommand(Controller); + spin_unlock_irqrestore(&Controller->queue_lock, flags); + Controller->UserStatusLength = 0; + DAC960_V1_ClearCommand(Command); + Command->CommandType = DAC960_ImmediateCommand; + CommandMailbox = &Command->V1.CommandMailbox; + if (strcmp(UserCommand, "flush-cache") == 0) + { + CommandMailbox->Type3.CommandOpcode = DAC960_V1_Flush; + DAC960_ExecuteCommand(Command); + DAC960_UserCritical("Cache Flush Completed\n", Controller); + } + else if (strncmp(UserCommand, "kill", 4) == 0 && + DAC960_ParsePhysicalDevice(Controller, &UserCommand[4], + &Channel, &TargetID)) + { + DAC960_V1_DeviceState_T *DeviceState = + &Controller->V1.DeviceState[Channel][TargetID]; + if (DeviceState->Present && + DeviceState->DeviceType == DAC960_V1_DiskType && + DeviceState->DeviceState != DAC960_V1_Device_Dead) + DAC960_V1_SetDeviceState(Controller, Command, Channel, TargetID, + DAC960_V1_Device_Dead, "Kill"); + else DAC960_UserCritical("Kill of Physical Device %d:%d Illegal\n", + Controller, Channel, TargetID); + } + else if (strncmp(UserCommand, "make-online", 11) == 0 && + DAC960_ParsePhysicalDevice(Controller, &UserCommand[11], + &Channel, &TargetID)) + { + DAC960_V1_DeviceState_T *DeviceState = + &Controller->V1.DeviceState[Channel][TargetID]; + if (DeviceState->Present && + DeviceState->DeviceType == DAC960_V1_DiskType && + DeviceState->DeviceState == DAC960_V1_Device_Dead) + DAC960_V1_SetDeviceState(Controller, Command, Channel, TargetID, + DAC960_V1_Device_Online, "Make Online"); + else DAC960_UserCritical("Make Online of Physical Device %d:%d Illegal\n", + Controller, Channel, TargetID); + + } + else if (strncmp(UserCommand, "make-standby", 12) == 0 && + DAC960_ParsePhysicalDevice(Controller, &UserCommand[12], + &Channel, &TargetID)) + { + DAC960_V1_DeviceState_T *DeviceState = + &Controller->V1.DeviceState[Channel][TargetID]; + if (DeviceState->Present && + DeviceState->DeviceType == DAC960_V1_DiskType && + DeviceState->DeviceState == DAC960_V1_Device_Dead) + DAC960_V1_SetDeviceState(Controller, Command, Channel, TargetID, + DAC960_V1_Device_Standby, "Make Standby"); + else DAC960_UserCritical("Make Standby of Physical " + "Device %d:%d Illegal\n", + Controller, Channel, TargetID); + } + else if (strncmp(UserCommand, "rebuild", 7) == 0 && + DAC960_ParsePhysicalDevice(Controller, &UserCommand[7], + &Channel, &TargetID)) + { + CommandMailbox->Type3D.CommandOpcode = DAC960_V1_RebuildAsync; + CommandMailbox->Type3D.Channel = Channel; + CommandMailbox->Type3D.TargetID = TargetID; + DAC960_ExecuteCommand(Command); + switch (Command->V1.CommandStatus) + { + case DAC960_V1_NormalCompletion: + DAC960_UserCritical("Rebuild of Physical Device %d:%d Initiated\n", + Controller, Channel, TargetID); + break; + case DAC960_V1_AttemptToRebuildOnlineDrive: + DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - " + "Attempt to Rebuild Online or " + "Unresponsive Drive\n", + Controller, Channel, TargetID); + break; + case DAC960_V1_NewDiskFailedDuringRebuild: + DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - " + "New Disk Failed During Rebuild\n", + Controller, Channel, TargetID); + 
break; + case DAC960_V1_InvalidDeviceAddress: + DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - " + "Invalid Device Address\n", + Controller, Channel, TargetID); + break; + case DAC960_V1_RebuildOrCheckAlreadyInProgress: + DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - " + "Rebuild or Consistency Check Already " + "in Progress\n", Controller, Channel, TargetID); + break; + default: + DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - " + "Unexpected Status %04X\n", Controller, + Channel, TargetID, Command->V1.CommandStatus); + break; + } + } + else if (strncmp(UserCommand, "check-consistency", 17) == 0 && + DAC960_ParseLogicalDrive(Controller, &UserCommand[17], + &LogicalDriveNumber)) + { + CommandMailbox->Type3C.CommandOpcode = DAC960_V1_CheckConsistencyAsync; + CommandMailbox->Type3C.LogicalDriveNumber = LogicalDriveNumber; + CommandMailbox->Type3C.AutoRestore = true; + DAC960_ExecuteCommand(Command); + switch (Command->V1.CommandStatus) + { + case DAC960_V1_NormalCompletion: + DAC960_UserCritical("Consistency Check of Logical Drive %d " + "(/dev/rd/c%dd%d) Initiated\n", + Controller, LogicalDriveNumber, + Controller->ControllerNumber, + LogicalDriveNumber); + break; + case DAC960_V1_DependentDiskIsDead: + DAC960_UserCritical("Consistency Check of Logical Drive %d " + "(/dev/rd/c%dd%d) Failed - " + "Dependent Physical Device is DEAD\n", + Controller, LogicalDriveNumber, + Controller->ControllerNumber, + LogicalDriveNumber); + break; + case DAC960_V1_InvalidOrNonredundantLogicalDrive: + DAC960_UserCritical("Consistency Check of Logical Drive %d " + "(/dev/rd/c%dd%d) Failed - " + "Invalid or Nonredundant Logical Drive\n", + Controller, LogicalDriveNumber, + Controller->ControllerNumber, + LogicalDriveNumber); + break; + case DAC960_V1_RebuildOrCheckAlreadyInProgress: + DAC960_UserCritical("Consistency Check of Logical Drive %d " + "(/dev/rd/c%dd%d) Failed - Rebuild or " + "Consistency Check Already in Progress\n", + Controller, LogicalDriveNumber, + Controller->ControllerNumber, + LogicalDriveNumber); + break; + default: + DAC960_UserCritical("Consistency Check of Logical Drive %d " + "(/dev/rd/c%dd%d) Failed - " + "Unexpected Status %04X\n", + Controller, LogicalDriveNumber, + Controller->ControllerNumber, + LogicalDriveNumber, Command->V1.CommandStatus); + break; + } + } + else if (strcmp(UserCommand, "cancel-rebuild") == 0 || + strcmp(UserCommand, "cancel-consistency-check") == 0) + { + /* + the OldRebuildRateConstant is never actually used + once its value is retrieved from the controller. 
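+
+  A one byte DMA buffer is nevertheless required, since the Rebuild
+  Control Command Mailbox carries a Bus Address at which the controller
+  stores the previous Rebuild Rate Constant.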
+	   */
+	  unsigned char *OldRebuildRateConstant;
+	  dma_addr_t OldRebuildRateConstantDMA;
+
+	  OldRebuildRateConstant = pci_alloc_consistent(Controller->PCIDevice,
+		sizeof(char), &OldRebuildRateConstantDMA);
+	  if (OldRebuildRateConstant == NULL) {
+	     DAC960_UserCritical("Cancellation of Rebuild or "
+				 "Consistency Check Failed - "
+				 "Out of Memory\n",
+				 Controller);
+	     goto failure;
+	  }
+	  CommandMailbox->Type3R.CommandOpcode = DAC960_V1_RebuildControl;
+	  CommandMailbox->Type3R.RebuildRateConstant = 0xFF;
+	  CommandMailbox->Type3R.BusAddress = OldRebuildRateConstantDMA;
+	  DAC960_ExecuteCommand(Command);
+	  switch (Command->V1.CommandStatus)
+	    {
+	    case DAC960_V1_NormalCompletion:
+	      DAC960_UserCritical("Rebuild or Consistency Check Cancelled\n",
+				  Controller);
+	      break;
+	    default:
+	      DAC960_UserCritical("Cancellation of Rebuild or "
+				  "Consistency Check Failed - "
+				  "Unexpected Status %04X\n",
+				  Controller, Command->V1.CommandStatus);
+	      break;
+	    }
+failure:
+	  if (OldRebuildRateConstant != NULL)
+	    pci_free_consistent(Controller->PCIDevice, sizeof(char),
+		OldRebuildRateConstant, OldRebuildRateConstantDMA);
+    }
+  else DAC960_UserCritical("Illegal User Command: '%s'\n",
+			   Controller, UserCommand);
+
+  spin_lock_irqsave(&Controller->queue_lock, flags);
+  DAC960_DeallocateCommand(Command);
+  spin_unlock_irqrestore(&Controller->queue_lock, flags);
+  return true;
+}
+
+
+/*
+  DAC960_V2_TranslatePhysicalDevice translates a Physical Device Channel and
+  TargetID into a Logical Device.  It returns true on success and false
+  on failure.
+*/
+
+static bool DAC960_V2_TranslatePhysicalDevice(DAC960_Command_T *Command,
+					      unsigned char Channel,
+					      unsigned char TargetID,
+					      unsigned short
+						*LogicalDeviceNumber)
+{
+  DAC960_V2_CommandMailbox_T SavedCommandMailbox, *CommandMailbox;
+  DAC960_Controller_T *Controller = Command->Controller;
+
+  CommandMailbox = &Command->V2.CommandMailbox;
+  memcpy(&SavedCommandMailbox, CommandMailbox,
+	 sizeof(DAC960_V2_CommandMailbox_T));
+
+  CommandMailbox->PhysicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
+  CommandMailbox->PhysicalDeviceInfo.CommandControlBits
+				    .DataTransferControllerToHost = true;
+  CommandMailbox->PhysicalDeviceInfo.CommandControlBits
+				    .NoAutoRequestSense = true;
+  CommandMailbox->PhysicalDeviceInfo.DataTransferSize =
+    sizeof(DAC960_V2_PhysicalToLogicalDevice_T);
+  CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.TargetID = TargetID;
+  CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.Channel = Channel;
+  CommandMailbox->PhysicalDeviceInfo.IOCTL_Opcode =
+    DAC960_V2_TranslatePhysicalToLogicalDevice;
+  CommandMailbox->Common.DataTransferMemoryAddress
+			.ScatterGatherSegments[0]
+			.SegmentDataPointer =
+    Controller->V2.PhysicalToLogicalDeviceDMA;
+  CommandMailbox->Common.DataTransferMemoryAddress
+			.ScatterGatherSegments[0]
+			.SegmentByteCount =
+    CommandMailbox->Common.DataTransferSize;
+
+  DAC960_ExecuteCommand(Command);
+  *LogicalDeviceNumber = Controller->V2.PhysicalToLogicalDevice->LogicalDeviceNumber;
+
+  memcpy(CommandMailbox, &SavedCommandMailbox,
+	 sizeof(DAC960_V2_CommandMailbox_T));
+  return (Command->V2.CommandStatus == DAC960_V2_NormalCompletion);
+}
+
+
+/*
+  DAC960_V2_ExecuteUserCommand executes a User Command for DAC960 V2 Firmware
+  Controllers.
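+
+  The accepted commands parallel the V1 set: "kill", "make-online",
+  "make-standby", "rebuild", and "cancel-rebuild" take a Channel:TargetID
+  specification, which is first translated to a Logical Device Number by
+  DAC960_V2_TranslatePhysicalDevice; "check-consistency" and
+  "cancel-consistency-check" take a Logical Drive Number; "flush-cache",
+  "perform-discovery", and "suppress-enclosure-messages" take no argument.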
+*/ + +static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller, + unsigned char *UserCommand) +{ + DAC960_Command_T *Command; + DAC960_V2_CommandMailbox_T *CommandMailbox; + unsigned long flags; + unsigned char Channel, TargetID, LogicalDriveNumber; + unsigned short LogicalDeviceNumber; + + spin_lock_irqsave(&Controller->queue_lock, flags); + while ((Command = DAC960_AllocateCommand(Controller)) == NULL) + DAC960_WaitForCommand(Controller); + spin_unlock_irqrestore(&Controller->queue_lock, flags); + Controller->UserStatusLength = 0; + DAC960_V2_ClearCommand(Command); + Command->CommandType = DAC960_ImmediateCommand; + CommandMailbox = &Command->V2.CommandMailbox; + CommandMailbox->Common.CommandOpcode = DAC960_V2_IOCTL; + CommandMailbox->Common.CommandControlBits.DataTransferControllerToHost = true; + CommandMailbox->Common.CommandControlBits.NoAutoRequestSense = true; + if (strcmp(UserCommand, "flush-cache") == 0) + { + CommandMailbox->DeviceOperation.IOCTL_Opcode = DAC960_V2_PauseDevice; + CommandMailbox->DeviceOperation.OperationDevice = + DAC960_V2_RAID_Controller; + DAC960_ExecuteCommand(Command); + DAC960_UserCritical("Cache Flush Completed\n", Controller); + } + else if (strncmp(UserCommand, "kill", 4) == 0 && + DAC960_ParsePhysicalDevice(Controller, &UserCommand[4], + &Channel, &TargetID) && + DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID, + &LogicalDeviceNumber)) + { + CommandMailbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber = + LogicalDeviceNumber; + CommandMailbox->SetDeviceState.IOCTL_Opcode = + DAC960_V2_SetDeviceState; + CommandMailbox->SetDeviceState.DeviceState.PhysicalDeviceState = + DAC960_V2_Device_Dead; + DAC960_ExecuteCommand(Command); + DAC960_UserCritical("Kill of Physical Device %d:%d %s\n", + Controller, Channel, TargetID, + (Command->V2.CommandStatus + == DAC960_V2_NormalCompletion + ? "Succeeded" : "Failed")); + } + else if (strncmp(UserCommand, "make-online", 11) == 0 && + DAC960_ParsePhysicalDevice(Controller, &UserCommand[11], + &Channel, &TargetID) && + DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID, + &LogicalDeviceNumber)) + { + CommandMailbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber = + LogicalDeviceNumber; + CommandMailbox->SetDeviceState.IOCTL_Opcode = + DAC960_V2_SetDeviceState; + CommandMailbox->SetDeviceState.DeviceState.PhysicalDeviceState = + DAC960_V2_Device_Online; + DAC960_ExecuteCommand(Command); + DAC960_UserCritical("Make Online of Physical Device %d:%d %s\n", + Controller, Channel, TargetID, + (Command->V2.CommandStatus + == DAC960_V2_NormalCompletion + ? "Succeeded" : "Failed")); + } + else if (strncmp(UserCommand, "make-standby", 12) == 0 && + DAC960_ParsePhysicalDevice(Controller, &UserCommand[12], + &Channel, &TargetID) && + DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID, + &LogicalDeviceNumber)) + { + CommandMailbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber = + LogicalDeviceNumber; + CommandMailbox->SetDeviceState.IOCTL_Opcode = + DAC960_V2_SetDeviceState; + CommandMailbox->SetDeviceState.DeviceState.PhysicalDeviceState = + DAC960_V2_Device_Standby; + DAC960_ExecuteCommand(Command); + DAC960_UserCritical("Make Standby of Physical Device %d:%d %s\n", + Controller, Channel, TargetID, + (Command->V2.CommandStatus + == DAC960_V2_NormalCompletion + ? 
"Succeeded" : "Failed")); + } + else if (strncmp(UserCommand, "rebuild", 7) == 0 && + DAC960_ParsePhysicalDevice(Controller, &UserCommand[7], + &Channel, &TargetID) && + DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID, + &LogicalDeviceNumber)) + { + CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber = + LogicalDeviceNumber; + CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode = + DAC960_V2_RebuildDeviceStart; + DAC960_ExecuteCommand(Command); + DAC960_UserCritical("Rebuild of Physical Device %d:%d %s\n", + Controller, Channel, TargetID, + (Command->V2.CommandStatus + == DAC960_V2_NormalCompletion + ? "Initiated" : "Not Initiated")); + } + else if (strncmp(UserCommand, "cancel-rebuild", 14) == 0 && + DAC960_ParsePhysicalDevice(Controller, &UserCommand[14], + &Channel, &TargetID) && + DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID, + &LogicalDeviceNumber)) + { + CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber = + LogicalDeviceNumber; + CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode = + DAC960_V2_RebuildDeviceStop; + DAC960_ExecuteCommand(Command); + DAC960_UserCritical("Rebuild of Physical Device %d:%d %s\n", + Controller, Channel, TargetID, + (Command->V2.CommandStatus + == DAC960_V2_NormalCompletion + ? "Cancelled" : "Not Cancelled")); + } + else if (strncmp(UserCommand, "check-consistency", 17) == 0 && + DAC960_ParseLogicalDrive(Controller, &UserCommand[17], + &LogicalDriveNumber)) + { + CommandMailbox->ConsistencyCheck.LogicalDevice.LogicalDeviceNumber = + LogicalDriveNumber; + CommandMailbox->ConsistencyCheck.IOCTL_Opcode = + DAC960_V2_ConsistencyCheckStart; + CommandMailbox->ConsistencyCheck.RestoreConsistency = true; + CommandMailbox->ConsistencyCheck.InitializedAreaOnly = false; + DAC960_ExecuteCommand(Command); + DAC960_UserCritical("Consistency Check of Logical Drive %d " + "(/dev/rd/c%dd%d) %s\n", + Controller, LogicalDriveNumber, + Controller->ControllerNumber, + LogicalDriveNumber, + (Command->V2.CommandStatus + == DAC960_V2_NormalCompletion + ? "Initiated" : "Not Initiated")); + } + else if (strncmp(UserCommand, "cancel-consistency-check", 24) == 0 && + DAC960_ParseLogicalDrive(Controller, &UserCommand[24], + &LogicalDriveNumber)) + { + CommandMailbox->ConsistencyCheck.LogicalDevice.LogicalDeviceNumber = + LogicalDriveNumber; + CommandMailbox->ConsistencyCheck.IOCTL_Opcode = + DAC960_V2_ConsistencyCheckStop; + DAC960_ExecuteCommand(Command); + DAC960_UserCritical("Consistency Check of Logical Drive %d " + "(/dev/rd/c%dd%d) %s\n", + Controller, LogicalDriveNumber, + Controller->ControllerNumber, + LogicalDriveNumber, + (Command->V2.CommandStatus + == DAC960_V2_NormalCompletion + ? "Cancelled" : "Not Cancelled")); + } + else if (strcmp(UserCommand, "perform-discovery") == 0) + { + CommandMailbox->Common.IOCTL_Opcode = DAC960_V2_StartDiscovery; + DAC960_ExecuteCommand(Command); + DAC960_UserCritical("Discovery %s\n", Controller, + (Command->V2.CommandStatus + == DAC960_V2_NormalCompletion + ? 
"Initiated" : "Not Initiated")); + if (Command->V2.CommandStatus == DAC960_V2_NormalCompletion) + { + CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL; + CommandMailbox->ControllerInfo.CommandControlBits + .DataTransferControllerToHost = true; + CommandMailbox->ControllerInfo.CommandControlBits + .NoAutoRequestSense = true; + CommandMailbox->ControllerInfo.DataTransferSize = + sizeof(DAC960_V2_ControllerInfo_T); + CommandMailbox->ControllerInfo.ControllerNumber = 0; + CommandMailbox->ControllerInfo.IOCTL_Opcode = + DAC960_V2_GetControllerInfo; + /* + * How does this NOT race with the queued Monitoring + * usage of this structure? + */ + CommandMailbox->ControllerInfo.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentDataPointer = + Controller->V2.NewControllerInformationDMA; + CommandMailbox->ControllerInfo.DataTransferMemoryAddress + .ScatterGatherSegments[0] + .SegmentByteCount = + CommandMailbox->ControllerInfo.DataTransferSize; + DAC960_ExecuteCommand(Command); + while (Controller->V2.NewControllerInformation->PhysicalScanActive) + { + DAC960_ExecuteCommand(Command); + sleep_on_timeout(&Controller->CommandWaitQueue, HZ); + } + DAC960_UserCritical("Discovery Completed\n", Controller); + } + } + else if (strcmp(UserCommand, "suppress-enclosure-messages") == 0) + Controller->SuppressEnclosureMessages = true; + else DAC960_UserCritical("Illegal User Command: '%s'\n", + Controller, UserCommand); + + spin_lock_irqsave(&Controller->queue_lock, flags); + DAC960_DeallocateCommand(Command); + spin_unlock_irqrestore(&Controller->queue_lock, flags); + return true; +} + + +/* + DAC960_ProcReadStatus implements reading /proc/rd/status. +*/ + +static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset, + int Count, int *EOF, void *Data) +{ + unsigned char *StatusMessage = "OK\n"; + int ControllerNumber, BytesAvailable; + for (ControllerNumber = 0; + ControllerNumber < DAC960_ControllerCount; + ControllerNumber++) + { + DAC960_Controller_T *Controller = DAC960_Controllers[ControllerNumber]; + if (Controller == NULL) continue; + if (Controller->MonitoringAlertMode) + { + StatusMessage = "ALERT\n"; + break; + } + } + BytesAvailable = strlen(StatusMessage) - Offset; + if (Count >= BytesAvailable) + { + Count = BytesAvailable; + *EOF = true; + } + if (Count <= 0) return 0; + *Start = Page; + memcpy(Page, &StatusMessage[Offset], Count); + return Count; +} + + +/* + DAC960_ProcReadInitialStatus implements reading /proc/rd/cN/initial_status. +*/ + +static int DAC960_ProcReadInitialStatus(char *Page, char **Start, off_t Offset, + int Count, int *EOF, void *Data) +{ + DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; + int BytesAvailable = Controller->InitialStatusLength - Offset; + if (Count >= BytesAvailable) + { + Count = BytesAvailable; + *EOF = true; + } + if (Count <= 0) return 0; + *Start = Page; + memcpy(Page, &Controller->CombinedStatusBuffer[Offset], Count); + return Count; +} + + +/* + DAC960_ProcReadCurrentStatus implements reading /proc/rd/cN/current_status. 
+*/
+
+static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset,
+					int Count, int *EOF, void *Data)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
+  unsigned char *StatusMessage =
+    "No Rebuild or Consistency Check in Progress\n";
+  int ProgressMessageLength = strlen(StatusMessage);
+  int BytesAvailable;
+  if (jiffies != Controller->LastCurrentStatusTime)
+    {
+      Controller->CurrentStatusLength = 0;
+      DAC960_AnnounceDriver(Controller);
+      DAC960_ReportControllerConfiguration(Controller);
+      DAC960_ReportDeviceConfiguration(Controller);
+      if (Controller->ProgressBufferLength > 0)
+	ProgressMessageLength = Controller->ProgressBufferLength;
+      if (DAC960_CheckStatusBuffer(Controller, 2 + ProgressMessageLength))
+	{
+	  unsigned char *CurrentStatusBuffer = Controller->CurrentStatusBuffer;
+	  CurrentStatusBuffer[Controller->CurrentStatusLength++] = ' ';
+	  CurrentStatusBuffer[Controller->CurrentStatusLength++] = ' ';
+	  if (Controller->ProgressBufferLength > 0)
+	    strcpy(&CurrentStatusBuffer[Controller->CurrentStatusLength],
+		   Controller->ProgressBuffer);
+	  else
+	    strcpy(&CurrentStatusBuffer[Controller->CurrentStatusLength],
+		   StatusMessage);
+	  Controller->CurrentStatusLength += ProgressMessageLength;
+	}
+      Controller->LastCurrentStatusTime = jiffies;
+    }
+  BytesAvailable = Controller->CurrentStatusLength - Offset;
+  if (Count >= BytesAvailable)
+    {
+      Count = BytesAvailable;
+      *EOF = true;
+    }
+  if (Count <= 0) return 0;
+  *Start = Page;
+  memcpy(Page, &Controller->CurrentStatusBuffer[Offset], Count);
+  return Count;
+}
+
+
+/*
+  DAC960_ProcReadUserCommand implements reading /proc/rd/cN/user_command.
+*/
+
+static int DAC960_ProcReadUserCommand(char *Page, char **Start, off_t Offset,
+				      int Count, int *EOF, void *Data)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
+  int BytesAvailable = Controller->UserStatusLength - Offset;
+  if (Count >= BytesAvailable)
+    {
+      Count = BytesAvailable;
+      *EOF = true;
+    }
+  if (Count <= 0) return 0;
+  *Start = Page;
+  memcpy(Page, &Controller->UserStatusBuffer[Offset], Count);
+  return Count;
+}
+
+
+/*
+  DAC960_ProcWriteUserCommand implements writing /proc/rd/cN/user_command.
+*/
+
+static int DAC960_ProcWriteUserCommand(struct file *file,
+				       const char __user *Buffer,
+				       unsigned long Count, void *Data)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
+  unsigned char CommandBuffer[80];
+  int Length;
+  if (Count > sizeof(CommandBuffer)-1) return -EINVAL;
+  if (copy_from_user(CommandBuffer, Buffer, Count)) return -EFAULT;
+  CommandBuffer[Count] = '\0';
+  Length = strlen(CommandBuffer);
+  if (Length > 0 && CommandBuffer[Length-1] == '\n')
+    CommandBuffer[--Length] = '\0';
+  if (Controller->FirmwareType == DAC960_V1_Controller)
+    return (DAC960_V1_ExecuteUserCommand(Controller, CommandBuffer)
+	    ? Count : -EBUSY);
+  else
+    return (DAC960_V2_ExecuteUserCommand(Controller, CommandBuffer)
+	    ? Count : -EBUSY);
+}
+
+
+/*
+  DAC960_CreateProcEntries creates the /proc/rd/... entries for the
+  DAC960 Driver.
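+
+  The resulting tree is:
+
+    /proc/rd/status
+    /proc/rd/cN/initial_status
+    /proc/rd/cN/current_status
+    /proc/rd/cN/user_command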
+*/
+
+static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
+{
+	struct proc_dir_entry *StatusProcEntry;
+	struct proc_dir_entry *ControllerProcEntry;
+	struct proc_dir_entry *UserCommandProcEntry;
+
+	if (DAC960_ProcDirectoryEntry == NULL) {
+		DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
+		StatusProcEntry = create_proc_read_entry("status", 0,
+					DAC960_ProcDirectoryEntry,
+					DAC960_ProcReadStatus, NULL);
+	}
+
+      sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
+      ControllerProcEntry = proc_mkdir(Controller->ControllerName,
+				       DAC960_ProcDirectoryEntry);
+      create_proc_read_entry("initial_status", 0, ControllerProcEntry,
+			     DAC960_ProcReadInitialStatus, Controller);
+      create_proc_read_entry("current_status", 0, ControllerProcEntry,
+			     DAC960_ProcReadCurrentStatus, Controller);
+      UserCommandProcEntry =
+	create_proc_read_entry("user_command", S_IWUSR | S_IRUSR,
+			       ControllerProcEntry, DAC960_ProcReadUserCommand,
+			       Controller);
+      UserCommandProcEntry->write_proc = DAC960_ProcWriteUserCommand;
+      Controller->ControllerProcEntry = ControllerProcEntry;
+}
+
+
+/*
+  DAC960_DestroyProcEntries destroys the /proc/rd/... entries for the
+  DAC960 Driver.
+*/
+
+static void DAC960_DestroyProcEntries(DAC960_Controller_T *Controller)
+{
+  if (Controller->ControllerProcEntry == NULL)
+	return;
+  remove_proc_entry("initial_status", Controller->ControllerProcEntry);
+  remove_proc_entry("current_status", Controller->ControllerProcEntry);
+  remove_proc_entry("user_command", Controller->ControllerProcEntry);
+  remove_proc_entry(Controller->ControllerName, DAC960_ProcDirectoryEntry);
+  Controller->ControllerProcEntry = NULL;
+}
+
+#ifdef DAC960_GAM_MINOR
+
+/*
+  DAC960_gam_ioctl is the ioctl function for performing RAID operations.
+*/
+
+static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
+			     unsigned long Argument)
+{
+  long ErrorCode = 0;
+  if (!capable(CAP_SYS_ADMIN)) return -EACCES;
+
+  lock_kernel();
+  switch (Request)
+    {
+    case DAC960_IOCTL_GET_CONTROLLER_COUNT:
+      ErrorCode = DAC960_ControllerCount;
+      break;
+    case DAC960_IOCTL_GET_CONTROLLER_INFO:
+      {
+	DAC960_ControllerInfo_T __user *UserSpaceControllerInfo =
+	  (DAC960_ControllerInfo_T __user *) Argument;
+	DAC960_ControllerInfo_T ControllerInfo;
+	DAC960_Controller_T *Controller;
+	int ControllerNumber;
+	if (UserSpaceControllerInfo == NULL)
+	  ErrorCode = -EINVAL;
+	else ErrorCode = get_user(ControllerNumber,
+			     &UserSpaceControllerInfo->ControllerNumber);
+	if (ErrorCode != 0)
+	  break;
+	ErrorCode = -ENXIO;
+	if (ControllerNumber < 0 ||
+	    ControllerNumber > DAC960_ControllerCount - 1) {
+	  break;
+	}
+	Controller = DAC960_Controllers[ControllerNumber];
+	if (Controller == NULL)
+	  break;
+	memset(&ControllerInfo, 0, sizeof(DAC960_ControllerInfo_T));
+	ControllerInfo.ControllerNumber = ControllerNumber;
+	ControllerInfo.FirmwareType = Controller->FirmwareType;
+	ControllerInfo.Channels = Controller->Channels;
+	ControllerInfo.Targets = Controller->Targets;
+	ControllerInfo.PCI_Bus = Controller->Bus;
+	ControllerInfo.PCI_Device = Controller->Device;
+	ControllerInfo.PCI_Function = Controller->Function;
+	ControllerInfo.IRQ_Channel = Controller->IRQ_Channel;
+	ControllerInfo.PCI_Address = Controller->PCI_Address;
+	strcpy(ControllerInfo.ModelName, Controller->ModelName);
+	strcpy(ControllerInfo.FirmwareVersion, Controller->FirmwareVersion);
+	ErrorCode = (copy_to_user(UserSpaceControllerInfo, &ControllerInfo,
+				  sizeof(DAC960_ControllerInfo_T)) ?
+	break;
+      }
+    case DAC960_IOCTL_V1_EXECUTE_COMMAND:
+      {
+	DAC960_V1_UserCommand_T __user *UserSpaceUserCommand =
+	  (DAC960_V1_UserCommand_T __user *) Argument;
+	DAC960_V1_UserCommand_T UserCommand;
+	DAC960_Controller_T *Controller;
+	DAC960_Command_T *Command = NULL;
+	DAC960_V1_CommandOpcode_T CommandOpcode;
+	DAC960_V1_CommandStatus_T CommandStatus;
+	DAC960_V1_DCDB_T DCDB;
+	DAC960_V1_DCDB_T *DCDB_IOBUF = NULL;
+	dma_addr_t DCDB_IOBUFDMA;
+	unsigned long flags;
+	int ControllerNumber, DataTransferLength;
+	unsigned char *DataTransferBuffer = NULL;
+	dma_addr_t DataTransferBufferDMA;
+	if (UserSpaceUserCommand == NULL) {
+		ErrorCode = -EINVAL;
+		break;
+	}
+	if (copy_from_user(&UserCommand, UserSpaceUserCommand,
+			   sizeof(DAC960_V1_UserCommand_T))) {
+		ErrorCode = -EFAULT;
+		break;
+	}
+	ControllerNumber = UserCommand.ControllerNumber;
+	ErrorCode = -ENXIO;
+	if (ControllerNumber < 0 ||
+	    ControllerNumber > DAC960_ControllerCount - 1)
+		break;
+	Controller = DAC960_Controllers[ControllerNumber];
+	if (Controller == NULL)
+		break;
+	ErrorCode = -EINVAL;
+	if (Controller->FirmwareType != DAC960_V1_Controller)
+		break;
+	CommandOpcode = UserCommand.CommandMailbox.Common.CommandOpcode;
+	DataTransferLength = UserCommand.DataTransferLength;
+	if (CommandOpcode & 0x80)
+		break;
+	if (CommandOpcode == DAC960_V1_DCDB)
+	  {
+	    if (copy_from_user(&DCDB, UserCommand.DCDB,
+			       sizeof(DAC960_V1_DCDB_T))) {
+		ErrorCode = -EFAULT;
+		break;
+	    }
+	    if (DCDB.Channel >= DAC960_V1_MaxChannels)
+		break;
+	    if (!((DataTransferLength == 0 &&
+		   DCDB.Direction
+		   == DAC960_V1_DCDB_NoDataTransfer) ||
+		  (DataTransferLength > 0 &&
+		   DCDB.Direction
+		   == DAC960_V1_DCDB_DataTransferDeviceToSystem) ||
+		  (DataTransferLength < 0 &&
+		   DCDB.Direction
+		   == DAC960_V1_DCDB_DataTransferSystemToDevice)))
+		break;
+	    if (((DCDB.TransferLengthHigh4 << 16) | DCDB.TransferLength)
+		!= abs(DataTransferLength))
+		break;
+	    DCDB_IOBUF = pci_alloc_consistent(Controller->PCIDevice,
+			sizeof(DAC960_V1_DCDB_T), &DCDB_IOBUFDMA);
+	    if (DCDB_IOBUF == NULL) {
+		ErrorCode = -ENOMEM;
+		break;
+	    }
+	  }
+	ErrorCode = -ENOMEM;
+	if (DataTransferLength > 0)
+	  {
+	    DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
+				DataTransferLength, &DataTransferBufferDMA);
+	    if (DataTransferBuffer == NULL)
+		goto Failure1;
+	    memset(DataTransferBuffer, 0, DataTransferLength);
+	  }
+	else if (DataTransferLength < 0)
+	  {
+	    DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
+				-DataTransferLength, &DataTransferBufferDMA);
+	    if (DataTransferBuffer == NULL)
+		goto Failure1;
+	    if (copy_from_user(DataTransferBuffer,
+			       UserCommand.DataTransferBuffer,
+			       -DataTransferLength)) {
+		ErrorCode = -EFAULT;
+		goto Failure1;
+	    }
+	  }
+	if (CommandOpcode == DAC960_V1_DCDB)
+	  {
+	    spin_lock_irqsave(&Controller->queue_lock, flags);
+	    while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
+	      DAC960_WaitForCommand(Controller);
+	    while (Controller->V1.DirectCommandActive[DCDB.Channel]
+						     [DCDB.TargetID])
+	      {
+		spin_unlock_irqrestore(&Controller->queue_lock, flags);
+		__wait_event(Controller->CommandWaitQueue,
+			     !Controller->V1.DirectCommandActive
+			      [DCDB.Channel][DCDB.TargetID]);
+		spin_lock_irqsave(&Controller->queue_lock, flags);
+	      }
+	    Controller->V1.DirectCommandActive[DCDB.Channel]
+					      [DCDB.TargetID] = true;
+	    spin_unlock_irqrestore(&Controller->queue_lock, flags);
+	    DAC960_V1_ClearCommand(Command);
+	    Command->CommandType = DAC960_ImmediateCommand;
+	    memcpy(&Command->V1.CommandMailbox, &UserCommand.CommandMailbox,
+		   sizeof(DAC960_V1_CommandMailbox_T));
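+	    /*
+	      Point the mailbox at the DMA-visible DCDB, and the DCDB at
+	      the DMA data buffer, before copying the DCDB into its I/O
+	      buffer; the controller never sees the user space addresses.
+	    */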
+	    Command->V1.CommandMailbox.Type3.BusAddress = DCDB_IOBUFDMA;
+	    DCDB.BusAddress = DataTransferBufferDMA;
+	    memcpy(DCDB_IOBUF, &DCDB, sizeof(DAC960_V1_DCDB_T));
+	  }
+	else
+	  {
+	    spin_lock_irqsave(&Controller->queue_lock, flags);
+	    while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
+	      DAC960_WaitForCommand(Controller);
+	    spin_unlock_irqrestore(&Controller->queue_lock, flags);
+	    DAC960_V1_ClearCommand(Command);
+	    Command->CommandType = DAC960_ImmediateCommand;
+	    memcpy(&Command->V1.CommandMailbox, &UserCommand.CommandMailbox,
+		   sizeof(DAC960_V1_CommandMailbox_T));
+	    if (DataTransferBuffer != NULL)
+	      Command->V1.CommandMailbox.Type3.BusAddress =
+		DataTransferBufferDMA;
+	  }
+	DAC960_ExecuteCommand(Command);
+	CommandStatus = Command->V1.CommandStatus;
+	spin_lock_irqsave(&Controller->queue_lock, flags);
+	DAC960_DeallocateCommand(Command);
+	spin_unlock_irqrestore(&Controller->queue_lock, flags);
+	if (CommandOpcode == DAC960_V1_DCDB)
+	  /*
+	    Clear the direct-command-active flag before the copy_to_user
+	    calls below, so that an -EFAULT exit cannot leave this
+	    Channel/TargetID pair marked busy forever.
+	  */
+	  Controller->V1.DirectCommandActive[DCDB.Channel]
+					    [DCDB.TargetID] = false;
+	if (DataTransferLength > 0)
+	  {
+	    if (copy_to_user(UserCommand.DataTransferBuffer,
+			     DataTransferBuffer, DataTransferLength)) {
+		ErrorCode = -EFAULT;
+		goto Failure1;
+	    }
+	  }
+	if (CommandOpcode == DAC960_V1_DCDB)
+	  {
+	    /*
+	      I don't believe Target or Channel in the DCDB_IOBUF
+	      should be any different from the contents of DCDB.
+	    */
+	    if (copy_to_user(UserCommand.DCDB, DCDB_IOBUF,
+			     sizeof(DAC960_V1_DCDB_T))) {
+		ErrorCode = -EFAULT;
+		goto Failure1;
+	    }
+	  }
+	ErrorCode = CommandStatus;
+      Failure1:
+	if (DataTransferBuffer != NULL)
+	  pci_free_consistent(Controller->PCIDevice, abs(DataTransferLength),
+			DataTransferBuffer, DataTransferBufferDMA);
+	if (DCDB_IOBUF != NULL)
+	  pci_free_consistent(Controller->PCIDevice, sizeof(DAC960_V1_DCDB_T),
+			DCDB_IOBUF, DCDB_IOBUFDMA);
+	break;
+      }
+    case DAC960_IOCTL_V2_EXECUTE_COMMAND:
+      {
+	DAC960_V2_UserCommand_T __user *UserSpaceUserCommand =
+	  (DAC960_V2_UserCommand_T __user *) Argument;
+	DAC960_V2_UserCommand_T UserCommand;
+	DAC960_Controller_T *Controller;
+	DAC960_Command_T *Command = NULL;
+	DAC960_V2_CommandMailbox_T *CommandMailbox;
+	DAC960_V2_CommandStatus_T CommandStatus;
+	unsigned long flags;
+	int ControllerNumber, DataTransferLength;
+	int DataTransferResidue, RequestSenseLength;
+	unsigned char *DataTransferBuffer = NULL;
+	dma_addr_t DataTransferBufferDMA;
+	unsigned char *RequestSenseBuffer = NULL;
+	dma_addr_t RequestSenseBufferDMA;
+
+	ErrorCode = -EINVAL;
+	if (UserSpaceUserCommand == NULL)
+		break;
+	if (copy_from_user(&UserCommand, UserSpaceUserCommand,
+			   sizeof(DAC960_V2_UserCommand_T))) {
+		ErrorCode = -EFAULT;
+		break;
+	}
+	ErrorCode = -ENXIO;
+	ControllerNumber = UserCommand.ControllerNumber;
+	if (ControllerNumber < 0 ||
+	    ControllerNumber > DAC960_ControllerCount - 1)
+		break;
+	Controller = DAC960_Controllers[ControllerNumber];
+	if (Controller == NULL)
+		break;
+	if (Controller->FirmwareType != DAC960_V2_Controller) {
+		ErrorCode = -EINVAL;
+		break;
+	}
+	DataTransferLength = UserCommand.DataTransferLength;
+	ErrorCode = -ENOMEM;
+	if (DataTransferLength > 0)
+	  {
+	    DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
+				DataTransferLength, &DataTransferBufferDMA);
+	    if (DataTransferBuffer == NULL)
+		break;
+	    memset(DataTransferBuffer, 0, DataTransferLength);
+	  }
+	else if (DataTransferLength < 0)
+	  {
+	    DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
+				-DataTransferLength, &DataTransferBufferDMA);
+	    if (DataTransferBuffer == NULL)
+		break;
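+	    /* A negative DataTransferLength denotes a system-to-device
+	       transfer, so prefill the DMA buffer from user space. */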
+	    if (copy_from_user(DataTransferBuffer,
+			       UserCommand.DataTransferBuffer,
+			       -DataTransferLength)) {
+		ErrorCode = -EFAULT;
+		goto Failure2;
+	    }
+	  }
+	RequestSenseLength = UserCommand.RequestSenseLength;
+	if (RequestSenseLength > 0)
+	  {
+	    RequestSenseBuffer = pci_alloc_consistent(Controller->PCIDevice,
+			RequestSenseLength, &RequestSenseBufferDMA);
+	    if (RequestSenseBuffer == NULL)
+	      {
+		ErrorCode = -ENOMEM;
+		goto Failure2;
+	      }
+	    memset(RequestSenseBuffer, 0, RequestSenseLength);
+	  }
+	spin_lock_irqsave(&Controller->queue_lock, flags);
+	while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
+	  DAC960_WaitForCommand(Controller);
+	spin_unlock_irqrestore(&Controller->queue_lock, flags);
+	DAC960_V2_ClearCommand(Command);
+	Command->CommandType = DAC960_ImmediateCommand;
+	CommandMailbox = &Command->V2.CommandMailbox;
+	memcpy(CommandMailbox, &UserCommand.CommandMailbox,
+	       sizeof(DAC960_V2_CommandMailbox_T));
+	CommandMailbox->Common.CommandControlBits
+			     .AdditionalScatterGatherListMemory = false;
+	CommandMailbox->Common.CommandControlBits
+			     .NoAutoRequestSense = true;
+	CommandMailbox->Common.DataTransferSize = 0;
+	CommandMailbox->Common.DataTransferPageNumber = 0;
+	memset(&CommandMailbox->Common.DataTransferMemoryAddress, 0,
+	       sizeof(DAC960_V2_DataTransferMemoryAddress_T));
+	if (DataTransferLength != 0)
+	  {
+	    if (DataTransferLength > 0)
+	      {
+		CommandMailbox->Common.CommandControlBits
+				     .DataTransferControllerToHost = true;
+		CommandMailbox->Common.DataTransferSize = DataTransferLength;
+	      }
+	    else
+	      {
+		CommandMailbox->Common.CommandControlBits
+				     .DataTransferControllerToHost = false;
+		CommandMailbox->Common.DataTransferSize = -DataTransferLength;
+	      }
+	    CommandMailbox->Common.DataTransferMemoryAddress
+				 .ScatterGatherSegments[0]
+				 .SegmentDataPointer = DataTransferBufferDMA;
+	    CommandMailbox->Common.DataTransferMemoryAddress
+				 .ScatterGatherSegments[0]
+				 .SegmentByteCount =
+	      CommandMailbox->Common.DataTransferSize;
+	  }
+	if (RequestSenseLength > 0)
+	  {
+	    CommandMailbox->Common.CommandControlBits
+				 .NoAutoRequestSense = false;
+	    CommandMailbox->Common.RequestSenseSize = RequestSenseLength;
+	    CommandMailbox->Common.RequestSenseBusAddress =
+	      RequestSenseBufferDMA;
+	  }
+	DAC960_ExecuteCommand(Command);
+	CommandStatus = Command->V2.CommandStatus;
+	RequestSenseLength = Command->V2.RequestSenseLength;
+	DataTransferResidue = Command->V2.DataTransferResidue;
+	spin_lock_irqsave(&Controller->queue_lock, flags);
+	DAC960_DeallocateCommand(Command);
+	spin_unlock_irqrestore(&Controller->queue_lock, flags);
+	if (RequestSenseLength > UserCommand.RequestSenseLength)
+	  RequestSenseLength = UserCommand.RequestSenseLength;
+	if (copy_to_user(&UserSpaceUserCommand->DataTransferLength,
+			 &DataTransferResidue,
+			 sizeof(DataTransferResidue))) {
+		ErrorCode = -EFAULT;
+		goto Failure2;
+	}
+	if (copy_to_user(&UserSpaceUserCommand->RequestSenseLength,
+			 &RequestSenseLength, sizeof(RequestSenseLength))) {
+		ErrorCode = -EFAULT;
+		goto Failure2;
+	}
+	if (DataTransferLength > 0)
+	  {
+	    if (copy_to_user(UserCommand.DataTransferBuffer,
+			     DataTransferBuffer, DataTransferLength)) {
+		ErrorCode = -EFAULT;
+		goto Failure2;
+	    }
+	  }
+	if (RequestSenseLength > 0)
+	  {
+	    if (copy_to_user(UserCommand.RequestSenseBuffer,
+			     RequestSenseBuffer, RequestSenseLength)) {
+		ErrorCode = -EFAULT;
+		goto Failure2;
+	    }
+	  }
+	ErrorCode = CommandStatus;
+      Failure2:
+	if (DataTransferBuffer != NULL)
+	  pci_free_consistent(Controller->PCIDevice, abs(DataTransferLength),
+			DataTransferBuffer, DataTransferBufferDMA);
+	if (RequestSenseBuffer != NULL)
+	  /*
+	    Free with the originally allocated length; the local
+	    RequestSenseLength may have been rewritten by the command.
+	  */
+	  pci_free_consistent(Controller->PCIDevice,
+			UserCommand.RequestSenseLength,
+			RequestSenseBuffer, RequestSenseBufferDMA);
+	break;
+      }
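+    /*
+      DAC960_IOCTL_V2_GET_HEALTH_STATUS blocks until the controller's
+      Health Status Buffer differs from the snapshot the caller passes
+      in, then copies the updated buffer back.  A minimal user space
+      poll loop (a sketch; the device node name and the omission of
+      error handling are assumptions) might look like:
+
+	int fd = open("/dev/dac960_gam", O_RDONLY);
+	DAC960_V2_GetHealthStatus_T GetHealthStatus;
+	DAC960_V2_HealthStatusBuffer_T HealthStatusBuffer;
+	memset(&HealthStatusBuffer, 0, sizeof(HealthStatusBuffer));
+	GetHealthStatus.ControllerNumber = 0;
+	GetHealthStatus.HealthStatusBuffer = &HealthStatusBuffer;
+	while (ioctl(fd, DAC960_IOCTL_V2_GET_HEALTH_STATUS,
+		     &GetHealthStatus) == 0)
+	  ... react to HealthStatusBuffer.NextEventSequenceNumber ...
+    */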
+    case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
+      {
+	DAC960_V2_GetHealthStatus_T __user *UserSpaceGetHealthStatus =
+	  (DAC960_V2_GetHealthStatus_T __user *) Argument;
+	DAC960_V2_GetHealthStatus_T GetHealthStatus;
+	DAC960_V2_HealthStatusBuffer_T HealthStatusBuffer;
+	DAC960_Controller_T *Controller;
+	int ControllerNumber;
+	if (UserSpaceGetHealthStatus == NULL) {
+		ErrorCode = -EINVAL;
+		break;
+	}
+	if (copy_from_user(&GetHealthStatus, UserSpaceGetHealthStatus,
+			   sizeof(DAC960_V2_GetHealthStatus_T))) {
+		ErrorCode = -EFAULT;
+		break;
+	}
+	ErrorCode = -ENXIO;
+	ControllerNumber = GetHealthStatus.ControllerNumber;
+	if (ControllerNumber < 0 ||
+	    ControllerNumber > DAC960_ControllerCount - 1)
+		break;
+	Controller = DAC960_Controllers[ControllerNumber];
+	if (Controller == NULL)
+		break;
+	if (Controller->FirmwareType != DAC960_V2_Controller) {
+		ErrorCode = -EINVAL;
+		break;
+	}
+	if (copy_from_user(&HealthStatusBuffer,
+			   GetHealthStatus.HealthStatusBuffer,
+			   sizeof(DAC960_V2_HealthStatusBuffer_T))) {
+		ErrorCode = -EFAULT;
+		break;
+	}
+	while (Controller->V2.HealthStatusBuffer->StatusChangeCounter
+	       == HealthStatusBuffer.StatusChangeCounter &&
+	       Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
+	       == HealthStatusBuffer.NextEventSequenceNumber)
+	  {
+	    interruptible_sleep_on_timeout(&Controller->HealthStatusWaitQueue,
+					   DAC960_MonitoringTimerInterval);
+	    if (signal_pending(current)) {
+		ErrorCode = -EINTR;
+		break;
+	    }
+	  }
+	/* Do not overwrite an -EINTR result with the copy-out status. */
+	if (ErrorCode == -EINTR)
+		break;
+	if (copy_to_user(GetHealthStatus.HealthStatusBuffer,
+			 Controller->V2.HealthStatusBuffer,
+			 sizeof(DAC960_V2_HealthStatusBuffer_T)))
+		ErrorCode = -EFAULT;
+	else
+		ErrorCode = 0;
+	break;
+      }
+    default:
+	ErrorCode = -ENOTTY;
+    }
+  unlock_kernel();
+  return ErrorCode;
+}
+
+static const struct file_operations DAC960_gam_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl	= DAC960_gam_ioctl
+};
+
+static struct miscdevice DAC960_gam_dev = {
+	DAC960_GAM_MINOR,
+	"dac960_gam",
+	&DAC960_gam_fops
+};
+
+static int DAC960_gam_init(void)
+{
+	int ret;
+
+	ret = misc_register(&DAC960_gam_dev);
+	if (ret)
+		printk(KERN_ERR "DAC960_gam: can't misc_register on minor %d\n", DAC960_GAM_MINOR);
+	return ret;
+}
+
+static void DAC960_gam_cleanup(void)
+{
+	misc_deregister(&DAC960_gam_dev);
+}
+
+#endif /* DAC960_GAM_MINOR */
+
+static struct DAC960_privdata DAC960_GEM_privdata = {
+	.HardwareType =		DAC960_GEM_Controller,
+	.FirmwareType =		DAC960_V2_Controller,
+	.InterruptHandler =	DAC960_GEM_InterruptHandler,
+	.MemoryWindowSize =	DAC960_GEM_RegisterWindowSize,
+};
+
+
+static struct DAC960_privdata DAC960_BA_privdata = {
+	.HardwareType =		DAC960_BA_Controller,
+	.FirmwareType =		DAC960_V2_Controller,
+	.InterruptHandler =	DAC960_BA_InterruptHandler,
+	.MemoryWindowSize =	DAC960_BA_RegisterWindowSize,
+};
+
+static struct DAC960_privdata DAC960_LP_privdata = {
+	.HardwareType =		DAC960_LP_Controller,
+	.FirmwareType =		DAC960_V2_Controller,
+	.InterruptHandler =	DAC960_LP_InterruptHandler,
+	.MemoryWindowSize =	DAC960_LP_RegisterWindowSize,
+};
+
+static struct DAC960_privdata DAC960_LA_privdata = {
+	.HardwareType =		DAC960_LA_Controller,
+	.FirmwareType =		DAC960_V1_Controller,
+	.InterruptHandler =	DAC960_LA_InterruptHandler,
+	.MemoryWindowSize =	DAC960_LA_RegisterWindowSize,
+};
+
+static struct DAC960_privdata DAC960_PG_privdata = {
+	.HardwareType =		DAC960_PG_Controller,
+	.FirmwareType =		DAC960_V1_Controller,
+	.InterruptHandler =	DAC960_PG_InterruptHandler,
+	.MemoryWindowSize =	DAC960_PG_RegisterWindowSize,
+};
+
+static struct DAC960_privdata 
DAC960_PD_privdata = { + .HardwareType = DAC960_PD_Controller, + .FirmwareType = DAC960_V1_Controller, + .InterruptHandler = DAC960_PD_InterruptHandler, + .MemoryWindowSize = DAC960_PD_RegisterWindowSize, +}; + +static struct DAC960_privdata DAC960_P_privdata = { + .HardwareType = DAC960_P_Controller, + .FirmwareType = DAC960_V1_Controller, + .InterruptHandler = DAC960_P_InterruptHandler, + .MemoryWindowSize = DAC960_PD_RegisterWindowSize, +}; + +static struct pci_device_id DAC960_id_table[] = { + { + .vendor = PCI_VENDOR_ID_MYLEX, + .device = PCI_DEVICE_ID_MYLEX_DAC960_GEM, + .subvendor = PCI_VENDOR_ID_MYLEX, + .subdevice = PCI_ANY_ID, + .driver_data = (unsigned long) &DAC960_GEM_privdata, + }, + { + .vendor = PCI_VENDOR_ID_MYLEX, + .device = PCI_DEVICE_ID_MYLEX_DAC960_BA, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (unsigned long) &DAC960_BA_privdata, + }, + { + .vendor = PCI_VENDOR_ID_MYLEX, + .device = PCI_DEVICE_ID_MYLEX_DAC960_LP, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (unsigned long) &DAC960_LP_privdata, + }, + { + .vendor = PCI_VENDOR_ID_DEC, + .device = PCI_DEVICE_ID_DEC_21285, + .subvendor = PCI_VENDOR_ID_MYLEX, + .subdevice = PCI_DEVICE_ID_MYLEX_DAC960_LA, + .driver_data = (unsigned long) &DAC960_LA_privdata, + }, + { + .vendor = PCI_VENDOR_ID_MYLEX, + .device = PCI_DEVICE_ID_MYLEX_DAC960_PG, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (unsigned long) &DAC960_PG_privdata, + }, + { + .vendor = PCI_VENDOR_ID_MYLEX, + .device = PCI_DEVICE_ID_MYLEX_DAC960_PD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (unsigned long) &DAC960_PD_privdata, + }, + { + .vendor = PCI_VENDOR_ID_MYLEX, + .device = PCI_DEVICE_ID_MYLEX_DAC960_P, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (unsigned long) &DAC960_P_privdata, + }, + {0, }, +}; + +MODULE_DEVICE_TABLE(pci, DAC960_id_table); + +static struct pci_driver DAC960_pci_driver = { + .name = "DAC960", + .id_table = DAC960_id_table, + .probe = DAC960_Probe, + .remove = DAC960_Remove, +}; + +static int DAC960_init_module(void) +{ + int ret; + + ret = pci_register_driver(&DAC960_pci_driver); +#ifdef DAC960_GAM_MINOR + if (!ret) + DAC960_gam_init(); +#endif + return ret; +} + +static void DAC960_cleanup_module(void) +{ + int i; + +#ifdef DAC960_GAM_MINOR + DAC960_gam_cleanup(); +#endif + + for (i = 0; i < DAC960_ControllerCount; i++) { + DAC960_Controller_T *Controller = DAC960_Controllers[i]; + if (Controller == NULL) + continue; + DAC960_FinalizeController(Controller); + } + if (DAC960_ProcDirectoryEntry != NULL) { + remove_proc_entry("rd/status", NULL); + remove_proc_entry("rd", NULL); + } + DAC960_ControllerCount = 0; + pci_unregister_driver(&DAC960_pci_driver); +} + +module_init(DAC960_init_module); +module_exit(DAC960_cleanup_module); + +MODULE_LICENSE("GPL"); diff --git a/drivers/block/DAC960.h b/drivers/block/DAC960.h new file mode 100644 index 0000000..85fa9bb --- /dev/null +++ b/drivers/block/DAC960.h @@ -0,0 +1,4415 @@ +/* + + Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers + + Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com> + + This program is free software; you may redistribute and/or modify it under + the terms of the GNU General Public License Version 2 as published by the + Free Software Foundation. 
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY + or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for complete details. + + The author respectfully requests that any modifications to this software be + sent directly to him for evaluation and testing. + +*/ + + +/* + Define the maximum number of DAC960 Controllers supported by this driver. +*/ + +#define DAC960_MaxControllers 8 + + +/* + Define the maximum number of Controller Channels supported by DAC960 + V1 and V2 Firmware Controllers. +*/ + +#define DAC960_V1_MaxChannels 3 +#define DAC960_V2_MaxChannels 4 + + +/* + Define the maximum number of Targets per Channel supported by DAC960 + V1 and V2 Firmware Controllers. +*/ + +#define DAC960_V1_MaxTargets 16 +#define DAC960_V2_MaxTargets 128 + + +/* + Define the maximum number of Logical Drives supported by DAC960 + V1 and V2 Firmware Controllers. +*/ + +#define DAC960_MaxLogicalDrives 32 + + +/* + Define the maximum number of Physical Devices supported by DAC960 + V1 and V2 Firmware Controllers. +*/ + +#define DAC960_V1_MaxPhysicalDevices 45 +#define DAC960_V2_MaxPhysicalDevices 272 + +/* + Define a 32/64 bit I/O Address data type. +*/ + +typedef unsigned long DAC960_IO_Address_T; + + +/* + Define a 32/64 bit PCI Bus Address data type. +*/ + +typedef unsigned long DAC960_PCI_Address_T; + + +/* + Define a 32 bit Bus Address data type. +*/ + +typedef unsigned int DAC960_BusAddress32_T; + + +/* + Define a 64 bit Bus Address data type. +*/ + +typedef unsigned long long DAC960_BusAddress64_T; + + +/* + Define a 32 bit Byte Count data type. +*/ + +typedef unsigned int DAC960_ByteCount32_T; + + +/* + Define a 64 bit Byte Count data type. +*/ + +typedef unsigned long long DAC960_ByteCount64_T; + + +/* + dma_loaf is used by helper routines to divide a region of + dma mapped memory into smaller pieces, where those pieces + are not of uniform size. + */ + +struct dma_loaf { + void *cpu_base; + dma_addr_t dma_base; + size_t length; + void *cpu_free; + dma_addr_t dma_free; +}; + +/* + Define the SCSI INQUIRY Standard Data structure. +*/ + +typedef struct DAC960_SCSI_Inquiry +{ + unsigned char PeripheralDeviceType:5; /* Byte 0 Bits 0-4 */ + unsigned char PeripheralQualifier:3; /* Byte 0 Bits 5-7 */ + unsigned char DeviceTypeModifier:7; /* Byte 1 Bits 0-6 */ + bool RMB:1; /* Byte 1 Bit 7 */ + unsigned char ANSI_ApprovedVersion:3; /* Byte 2 Bits 0-2 */ + unsigned char ECMA_Version:3; /* Byte 2 Bits 3-5 */ + unsigned char ISO_Version:2; /* Byte 2 Bits 6-7 */ + unsigned char ResponseDataFormat:4; /* Byte 3 Bits 0-3 */ + unsigned char :2; /* Byte 3 Bits 4-5 */ + bool TrmIOP:1; /* Byte 3 Bit 6 */ + bool AENC:1; /* Byte 3 Bit 7 */ + unsigned char AdditionalLength; /* Byte 4 */ + unsigned char :8; /* Byte 5 */ + unsigned char :8; /* Byte 6 */ + bool SftRe:1; /* Byte 7 Bit 0 */ + bool CmdQue:1; /* Byte 7 Bit 1 */ + bool :1; /* Byte 7 Bit 2 */ + bool Linked:1; /* Byte 7 Bit 3 */ + bool Sync:1; /* Byte 7 Bit 4 */ + bool WBus16:1; /* Byte 7 Bit 5 */ + bool WBus32:1; /* Byte 7 Bit 6 */ + bool RelAdr:1; /* Byte 7 Bit 7 */ + unsigned char VendorIdentification[8]; /* Bytes 8-15 */ + unsigned char ProductIdentification[16]; /* Bytes 16-31 */ + unsigned char ProductRevisionLevel[4]; /* Bytes 32-35 */ +} +DAC960_SCSI_Inquiry_T; + + +/* + Define the SCSI INQUIRY Unit Serial Number structure. 
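+
+  This is the response to an INQUIRY with EVPD set and Page Code 0x80
+  (Unit Serial Number); PageCode echoes the 0x80 and PageLength gives
+  the length of the ProductSerialNumber field that follows.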
+*/ + +typedef struct DAC960_SCSI_Inquiry_UnitSerialNumber +{ + unsigned char PeripheralDeviceType:5; /* Byte 0 Bits 0-4 */ + unsigned char PeripheralQualifier:3; /* Byte 0 Bits 5-7 */ + unsigned char PageCode; /* Byte 1 */ + unsigned char :8; /* Byte 2 */ + unsigned char PageLength; /* Byte 3 */ + unsigned char ProductSerialNumber[28]; /* Bytes 4-31 */ +} +DAC960_SCSI_Inquiry_UnitSerialNumber_T; + + +/* + Define the SCSI REQUEST SENSE Sense Key type. +*/ + +typedef enum +{ + DAC960_SenseKey_NoSense = 0x0, + DAC960_SenseKey_RecoveredError = 0x1, + DAC960_SenseKey_NotReady = 0x2, + DAC960_SenseKey_MediumError = 0x3, + DAC960_SenseKey_HardwareError = 0x4, + DAC960_SenseKey_IllegalRequest = 0x5, + DAC960_SenseKey_UnitAttention = 0x6, + DAC960_SenseKey_DataProtect = 0x7, + DAC960_SenseKey_BlankCheck = 0x8, + DAC960_SenseKey_VendorSpecific = 0x9, + DAC960_SenseKey_CopyAborted = 0xA, + DAC960_SenseKey_AbortedCommand = 0xB, + DAC960_SenseKey_Equal = 0xC, + DAC960_SenseKey_VolumeOverflow = 0xD, + DAC960_SenseKey_Miscompare = 0xE, + DAC960_SenseKey_Reserved = 0xF +} +__attribute__ ((packed)) +DAC960_SCSI_RequestSenseKey_T; + + +/* + Define the SCSI REQUEST SENSE structure. +*/ + +typedef struct DAC960_SCSI_RequestSense +{ + unsigned char ErrorCode:7; /* Byte 0 Bits 0-6 */ + bool Valid:1; /* Byte 0 Bit 7 */ + unsigned char SegmentNumber; /* Byte 1 */ + DAC960_SCSI_RequestSenseKey_T SenseKey:4; /* Byte 2 Bits 0-3 */ + unsigned char :1; /* Byte 2 Bit 4 */ + bool ILI:1; /* Byte 2 Bit 5 */ + bool EOM:1; /* Byte 2 Bit 6 */ + bool Filemark:1; /* Byte 2 Bit 7 */ + unsigned char Information[4]; /* Bytes 3-6 */ + unsigned char AdditionalSenseLength; /* Byte 7 */ + unsigned char CommandSpecificInformation[4]; /* Bytes 8-11 */ + unsigned char AdditionalSenseCode; /* Byte 12 */ + unsigned char AdditionalSenseCodeQualifier; /* Byte 13 */ +} +DAC960_SCSI_RequestSense_T; + + +/* + Define the DAC960 V1 Firmware Command Opcodes. 
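+
+  Note that each scatter/gather variant is the corresponding base opcode
+  with bit 7 set (for example, 0x33 becomes 0xB3); this is why the GAM
+  ioctl interface rejects any user-supplied opcode with bit 7 set, as
+  user commands may not carry scatter/gather lists.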
+*/ + +typedef enum +{ + /* I/O Commands */ + DAC960_V1_ReadExtended = 0x33, + DAC960_V1_WriteExtended = 0x34, + DAC960_V1_ReadAheadExtended = 0x35, + DAC960_V1_ReadExtendedWithScatterGather = 0xB3, + DAC960_V1_WriteExtendedWithScatterGather = 0xB4, + DAC960_V1_Read = 0x36, + DAC960_V1_ReadWithScatterGather = 0xB6, + DAC960_V1_Write = 0x37, + DAC960_V1_WriteWithScatterGather = 0xB7, + DAC960_V1_DCDB = 0x04, + DAC960_V1_DCDBWithScatterGather = 0x84, + DAC960_V1_Flush = 0x0A, + /* Controller Status Related Commands */ + DAC960_V1_Enquiry = 0x53, + DAC960_V1_Enquiry2 = 0x1C, + DAC960_V1_GetLogicalDriveElement = 0x55, + DAC960_V1_GetLogicalDriveInformation = 0x19, + DAC960_V1_IOPortRead = 0x39, + DAC960_V1_IOPortWrite = 0x3A, + DAC960_V1_GetSDStats = 0x3E, + DAC960_V1_GetPDStats = 0x3F, + DAC960_V1_PerformEventLogOperation = 0x72, + /* Device Related Commands */ + DAC960_V1_StartDevice = 0x10, + DAC960_V1_GetDeviceState = 0x50, + DAC960_V1_StopChannel = 0x13, + DAC960_V1_StartChannel = 0x12, + DAC960_V1_ResetChannel = 0x1A, + /* Commands Associated with Data Consistency and Errors */ + DAC960_V1_Rebuild = 0x09, + DAC960_V1_RebuildAsync = 0x16, + DAC960_V1_CheckConsistency = 0x0F, + DAC960_V1_CheckConsistencyAsync = 0x1E, + DAC960_V1_RebuildStat = 0x0C, + DAC960_V1_GetRebuildProgress = 0x27, + DAC960_V1_RebuildControl = 0x1F, + DAC960_V1_ReadBadBlockTable = 0x0B, + DAC960_V1_ReadBadDataTable = 0x25, + DAC960_V1_ClearBadDataTable = 0x26, + DAC960_V1_GetErrorTable = 0x17, + DAC960_V1_AddCapacityAsync = 0x2A, + DAC960_V1_BackgroundInitializationControl = 0x2B, + /* Configuration Related Commands */ + DAC960_V1_ReadConfig2 = 0x3D, + DAC960_V1_WriteConfig2 = 0x3C, + DAC960_V1_ReadConfigurationOnDisk = 0x4A, + DAC960_V1_WriteConfigurationOnDisk = 0x4B, + DAC960_V1_ReadConfiguration = 0x4E, + DAC960_V1_ReadBackupConfiguration = 0x4D, + DAC960_V1_WriteConfiguration = 0x4F, + DAC960_V1_AddConfiguration = 0x4C, + DAC960_V1_ReadConfigurationLabel = 0x48, + DAC960_V1_WriteConfigurationLabel = 0x49, + /* Firmware Upgrade Related Commands */ + DAC960_V1_LoadImage = 0x20, + DAC960_V1_StoreImage = 0x21, + DAC960_V1_ProgramImage = 0x22, + /* Diagnostic Commands */ + DAC960_V1_SetDiagnosticMode = 0x31, + DAC960_V1_RunDiagnostic = 0x32, + /* Subsystem Service Commands */ + DAC960_V1_GetSubsystemData = 0x70, + DAC960_V1_SetSubsystemParameters = 0x71, + /* Version 2.xx Firmware Commands */ + DAC960_V1_Enquiry_Old = 0x05, + DAC960_V1_GetDeviceState_Old = 0x14, + DAC960_V1_Read_Old = 0x02, + DAC960_V1_Write_Old = 0x03, + DAC960_V1_ReadWithScatterGather_Old = 0x82, + DAC960_V1_WriteWithScatterGather_Old = 0x83 +} +__attribute__ ((packed)) +DAC960_V1_CommandOpcode_T; + + +/* + Define the DAC960 V1 Firmware Command Identifier type. +*/ + +typedef unsigned char DAC960_V1_CommandIdentifier_T; + + +/* + Define the DAC960 V1 Firmware Command Status Codes. 
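+
+  Note that the same numeric value can mean different things in
+  different command classes (0x0105, for example, is Invalid Parameter
+  for common commands but Access Beyond End of Logical Drive for I/O
+  commands), so the class annotations below are part of the definition.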
+*/ + +#define DAC960_V1_NormalCompletion 0x0000 /* Common */ +#define DAC960_V1_CheckConditionReceived 0x0002 /* Common */ +#define DAC960_V1_NoDeviceAtAddress 0x0102 /* Common */ +#define DAC960_V1_InvalidDeviceAddress 0x0105 /* Common */ +#define DAC960_V1_InvalidParameter 0x0105 /* Common */ +#define DAC960_V1_IrrecoverableDataError 0x0001 /* I/O */ +#define DAC960_V1_LogicalDriveNonexistentOrOffline 0x0002 /* I/O */ +#define DAC960_V1_AccessBeyondEndOfLogicalDrive 0x0105 /* I/O */ +#define DAC960_V1_BadDataEncountered 0x010C /* I/O */ +#define DAC960_V1_DeviceBusy 0x0008 /* DCDB */ +#define DAC960_V1_DeviceNonresponsive 0x000E /* DCDB */ +#define DAC960_V1_CommandTerminatedAbnormally 0x000F /* DCDB */ +#define DAC960_V1_UnableToStartDevice 0x0002 /* Device */ +#define DAC960_V1_InvalidChannelOrTargetOrModifier 0x0105 /* Device */ +#define DAC960_V1_ChannelBusy 0x0106 /* Device */ +#define DAC960_V1_ChannelNotStopped 0x0002 /* Device */ +#define DAC960_V1_AttemptToRebuildOnlineDrive 0x0002 /* Consistency */ +#define DAC960_V1_RebuildBadBlocksEncountered 0x0003 /* Consistency */ +#define DAC960_V1_NewDiskFailedDuringRebuild 0x0004 /* Consistency */ +#define DAC960_V1_RebuildOrCheckAlreadyInProgress 0x0106 /* Consistency */ +#define DAC960_V1_DependentDiskIsDead 0x0002 /* Consistency */ +#define DAC960_V1_InconsistentBlocksFound 0x0003 /* Consistency */ +#define DAC960_V1_InvalidOrNonredundantLogicalDrive 0x0105 /* Consistency */ +#define DAC960_V1_NoRebuildOrCheckInProgress 0x0105 /* Consistency */ +#define DAC960_V1_RebuildInProgress_DataValid 0x0000 /* Consistency */ +#define DAC960_V1_RebuildFailed_LogicalDriveFailure 0x0002 /* Consistency */ +#define DAC960_V1_RebuildFailed_BadBlocksOnOther 0x0003 /* Consistency */ +#define DAC960_V1_RebuildFailed_NewDriveFailed 0x0004 /* Consistency */ +#define DAC960_V1_RebuildSuccessful 0x0100 /* Consistency */ +#define DAC960_V1_RebuildSuccessfullyTerminated 0x0107 /* Consistency */ +#define DAC960_V1_BackgroundInitSuccessful 0x0100 /* Consistency */ +#define DAC960_V1_BackgroundInitAborted 0x0005 /* Consistency */ +#define DAC960_V1_NoBackgroundInitInProgress 0x0105 /* Consistency */ +#define DAC960_V1_AddCapacityInProgress 0x0004 /* Consistency */ +#define DAC960_V1_AddCapacityFailedOrSuspended 0x00F4 /* Consistency */ +#define DAC960_V1_Config2ChecksumError 0x0002 /* Configuration */ +#define DAC960_V1_ConfigurationSuspended 0x0106 /* Configuration */ +#define DAC960_V1_FailedToConfigureNVRAM 0x0105 /* Configuration */ +#define DAC960_V1_ConfigurationNotSavedStateChange 0x0106 /* Configuration */ +#define DAC960_V1_SubsystemNotInstalled 0x0001 /* Subsystem */ +#define DAC960_V1_SubsystemFailed 0x0002 /* Subsystem */ +#define DAC960_V1_SubsystemBusy 0x0106 /* Subsystem */ + +typedef unsigned short DAC960_V1_CommandStatus_T; + + +/* + Define the DAC960 V1 Firmware Enquiry Command reply structure. 
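+
+  The reply is 256 bytes long.  LogicalDriveSizes[] is indexed by
+  logical drive number and is expressed, like the capacities the driver
+  reports to the block layer, in 512-byte sectors.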
+*/ + +typedef struct DAC960_V1_Enquiry +{ + unsigned char NumberOfLogicalDrives; /* Byte 0 */ + unsigned int :24; /* Bytes 1-3 */ + unsigned int LogicalDriveSizes[32]; /* Bytes 4-131 */ + unsigned short FlashAge; /* Bytes 132-133 */ + struct { + bool DeferredWriteError:1; /* Byte 134 Bit 0 */ + bool BatteryLow:1; /* Byte 134 Bit 1 */ + unsigned char :6; /* Byte 134 Bits 2-7 */ + } StatusFlags; + unsigned char :8; /* Byte 135 */ + unsigned char MinorFirmwareVersion; /* Byte 136 */ + unsigned char MajorFirmwareVersion; /* Byte 137 */ + enum { + DAC960_V1_NoStandbyRebuildOrCheckInProgress = 0x00, + DAC960_V1_StandbyRebuildInProgress = 0x01, + DAC960_V1_BackgroundRebuildInProgress = 0x02, + DAC960_V1_BackgroundCheckInProgress = 0x03, + DAC960_V1_StandbyRebuildCompletedWithError = 0xFF, + DAC960_V1_BackgroundRebuildOrCheckFailed_DriveFailed = 0xF0, + DAC960_V1_BackgroundRebuildOrCheckFailed_LogicalDriveFailed = 0xF1, + DAC960_V1_BackgroundRebuildOrCheckFailed_OtherCauses = 0xF2, + DAC960_V1_BackgroundRebuildOrCheckSuccessfullyTerminated = 0xF3 + } __attribute__ ((packed)) RebuildFlag; /* Byte 138 */ + unsigned char MaxCommands; /* Byte 139 */ + unsigned char OfflineLogicalDriveCount; /* Byte 140 */ + unsigned char :8; /* Byte 141 */ + unsigned short EventLogSequenceNumber; /* Bytes 142-143 */ + unsigned char CriticalLogicalDriveCount; /* Byte 144 */ + unsigned int :24; /* Bytes 145-147 */ + unsigned char DeadDriveCount; /* Byte 148 */ + unsigned char :8; /* Byte 149 */ + unsigned char RebuildCount; /* Byte 150 */ + struct { + unsigned char :3; /* Byte 151 Bits 0-2 */ + bool BatteryBackupUnitPresent:1; /* Byte 151 Bit 3 */ + unsigned char :3; /* Byte 151 Bits 4-6 */ + unsigned char :1; /* Byte 151 Bit 7 */ + } MiscFlags; + struct { + unsigned char TargetID; + unsigned char Channel; + } DeadDrives[21]; /* Bytes 152-194 */ + unsigned char Reserved[62]; /* Bytes 195-255 */ +} +__attribute__ ((packed)) +DAC960_V1_Enquiry_T; + + +/* + Define the DAC960 V1 Firmware Enquiry2 Command reply structure. 
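+
+  The reply is 128 bytes long; the FirmwareID fields combine into the
+  conventional MajorVersion.MinorVersion-FirmwareType-TurnID version
+  string noted below.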
+*/
+
+typedef struct DAC960_V1_Enquiry2
+{
+  struct {
+    enum {
+      DAC960_V1_P_PD_PU =			0x01,
+      DAC960_V1_PL =				0x02,
+      DAC960_V1_PG =				0x10,
+      DAC960_V1_PJ =				0x11,
+      DAC960_V1_PR =				0x12,
+      DAC960_V1_PT =				0x13,
+      DAC960_V1_PTL0 =				0x14,
+      DAC960_V1_PRL =				0x15,
+      DAC960_V1_PTL1 =				0x16,
+      DAC960_V1_1164P =				0x20
+    } __attribute__ ((packed)) SubModel;		/* Byte 0 */
+    unsigned char ActualChannels;			/* Byte 1 */
+    enum {
+      DAC960_V1_FiveChannelBoard =		0x01,
+      DAC960_V1_ThreeChannelBoard =		0x02,
+      DAC960_V1_TwoChannelBoard =		0x03,
+      DAC960_V1_ThreeChannelASIC_DAC =		0x04
+    } __attribute__ ((packed)) Model;			/* Byte 2 */
+    enum {
+      DAC960_V1_EISA_Controller =		0x01,
+      DAC960_V1_MicroChannel_Controller =	0x02,
+      DAC960_V1_PCI_Controller =		0x03,
+      DAC960_V1_SCSItoSCSI_Controller =		0x08
+    } __attribute__ ((packed)) ProductFamily;		/* Byte 3 */
+  } HardwareID;						/* Bytes 0-3 */
+  /* MajorVersion.MinorVersion-FirmwareType-TurnID */
+  struct {
+    unsigned char MajorVersion;			/* Byte 4 */
+    unsigned char MinorVersion;			/* Byte 5 */
+    unsigned char TurnID;				/* Byte 6 */
+    char FirmwareType;					/* Byte 7 */
+  } FirmwareID;						/* Bytes 4-7 */
+  unsigned char :8;					/* Byte 8 */
+  unsigned int :24;					/* Bytes 9-11 */
+  unsigned char ConfiguredChannels;			/* Byte 12 */
+  unsigned char ActualChannels;				/* Byte 13 */
+  unsigned char MaxTargets;				/* Byte 14 */
+  unsigned char MaxTags;				/* Byte 15 */
+  unsigned char MaxLogicalDrives;			/* Byte 16 */
+  unsigned char MaxArms;				/* Byte 17 */
+  unsigned char MaxSpans;				/* Byte 18 */
+  unsigned char :8;					/* Byte 19 */
+  unsigned int :32;					/* Bytes 20-23 */
+  unsigned int MemorySize;				/* Bytes 24-27 */
+  unsigned int CacheSize;				/* Bytes 28-31 */
+  unsigned int FlashMemorySize;				/* Bytes 32-35 */
+  unsigned int NonVolatileMemorySize;			/* Bytes 36-39 */
+  struct {
+    enum {
+      DAC960_V1_RamType_DRAM =			0x0,
+      DAC960_V1_RamType_EDO =			0x1,
+      DAC960_V1_RamType_SDRAM =			0x2,
+      DAC960_V1_RamType_Last =			0x7
+    } __attribute__ ((packed)) RamType:3;		/* Byte 40 Bits 0-2 */
+    enum {
+      DAC960_V1_ErrorCorrection_None =		0x0,
+      DAC960_V1_ErrorCorrection_Parity =	0x1,
+      DAC960_V1_ErrorCorrection_ECC =		0x2,
+      DAC960_V1_ErrorCorrection_Last =		0x7
+    } __attribute__ ((packed)) ErrorCorrection:3;	/* Byte 40 Bits 3-5 */
+    bool FastPageMode:1;				/* Byte 40 Bit 6 */
+    bool LowPowerMemory:1;				/* Byte 40 Bit 7 */
+    unsigned char :8;					/* Byte 41 */
+  } MemoryType;
+  unsigned short ClockSpeed;				/* Bytes 42-43 */
+  unsigned short MemorySpeed;				/* Bytes 44-45 */
+  unsigned short HardwareSpeed;				/* Bytes 46-47 */
+  unsigned int :32;					/* Bytes 48-51 */
+  unsigned int :32;					/* Bytes 52-55 */
+  unsigned char :8;					/* Byte 56 */
+  unsigned char :8;					/* Byte 57 */
+  unsigned short :16;					/* Bytes 58-59 */
+  unsigned short MaxCommands;				/* Bytes 60-61 */
+  unsigned short MaxScatterGatherEntries;		/* Bytes 62-63 */
+  unsigned short MaxDriveCommands;			/* Bytes 64-65 */
+  unsigned short MaxIODescriptors;			/* Bytes 66-67 */
+  unsigned short MaxCombinedSectors;			/* Bytes 68-69 */
+  unsigned char Latency;				/* Byte 70 */
+  unsigned char :8;					/* Byte 71 */
+  unsigned char SCSITimeout;				/* Byte 72 */
+  unsigned char :8;					/* Byte 73 */
+  unsigned short MinFreeLines;				/* Bytes 74-75 */
+  unsigned int :32;					/* Bytes 76-79 */
+  unsigned int :32;					/* Bytes 80-83 */
+  unsigned char RebuildRateConstant;			/* Byte 84 */
+  unsigned char :8;					/* Byte 85 */
+  unsigned char :8;					/* Byte 86 */
+  unsigned char :8;					/* Byte 87 */
+  unsigned int :32;					/* Bytes 88-91 */
+  unsigned int :32;					/* Bytes 92-95 */
+  unsigned short PhysicalDriveBlockSize;		/* Bytes 96-97 */
+  unsigned short LogicalDriveBlockSize;			/* 
Bytes 98-99 */ + unsigned short MaxBlocksPerCommand; /* Bytes 100-101 */ + unsigned short BlockFactor; /* Bytes 102-103 */ + unsigned short CacheLineSize; /* Bytes 104-105 */ + struct { + enum { + DAC960_V1_Narrow_8bit = 0x0, + DAC960_V1_Wide_16bit = 0x1, + DAC960_V1_Wide_32bit = 0x2 + } __attribute__ ((packed)) BusWidth:2; /* Byte 106 Bits 0-1 */ + enum { + DAC960_V1_Fast = 0x0, + DAC960_V1_Ultra = 0x1, + DAC960_V1_Ultra2 = 0x2 + } __attribute__ ((packed)) BusSpeed:2; /* Byte 106 Bits 2-3 */ + bool Differential:1; /* Byte 106 Bit 4 */ + unsigned char :3; /* Byte 106 Bits 5-7 */ + } SCSICapability; + unsigned char :8; /* Byte 107 */ + unsigned int :32; /* Bytes 108-111 */ + unsigned short FirmwareBuildNumber; /* Bytes 112-113 */ + enum { + DAC960_V1_AEMI = 0x01, + DAC960_V1_OEM1 = 0x02, + DAC960_V1_OEM2 = 0x04, + DAC960_V1_OEM3 = 0x08, + DAC960_V1_Conner = 0x10, + DAC960_V1_SAFTE = 0x20 + } __attribute__ ((packed)) FaultManagementType; /* Byte 114 */ + unsigned char :8; /* Byte 115 */ + struct { + bool Clustering:1; /* Byte 116 Bit 0 */ + bool MylexOnlineRAIDExpansion:1; /* Byte 116 Bit 1 */ + bool ReadAhead:1; /* Byte 116 Bit 2 */ + bool BackgroundInitialization:1; /* Byte 116 Bit 3 */ + unsigned int :28; /* Bytes 116-119 */ + } FirmwareFeatures; + unsigned int :32; /* Bytes 120-123 */ + unsigned int :32; /* Bytes 124-127 */ +} +DAC960_V1_Enquiry2_T; + + +/* + Define the DAC960 V1 Firmware Logical Drive State type. +*/ + +typedef enum +{ + DAC960_V1_LogicalDrive_Online = 0x03, + DAC960_V1_LogicalDrive_Critical = 0x04, + DAC960_V1_LogicalDrive_Offline = 0xFF +} +__attribute__ ((packed)) +DAC960_V1_LogicalDriveState_T; + + +/* + Define the DAC960 V1 Firmware Logical Drive Information structure. +*/ + +typedef struct DAC960_V1_LogicalDriveInformation +{ + unsigned int LogicalDriveSize; /* Bytes 0-3 */ + DAC960_V1_LogicalDriveState_T LogicalDriveState; /* Byte 4 */ + unsigned char RAIDLevel:7; /* Byte 5 Bits 0-6 */ + bool WriteBack:1; /* Byte 5 Bit 7 */ + unsigned short :16; /* Bytes 6-7 */ +} +DAC960_V1_LogicalDriveInformation_T; + + +/* + Define the DAC960 V1 Firmware Get Logical Drive Information Command + reply structure. +*/ + +typedef DAC960_V1_LogicalDriveInformation_T + DAC960_V1_LogicalDriveInformationArray_T[DAC960_MaxLogicalDrives]; + + +/* + Define the DAC960 V1 Firmware Perform Event Log Operation Types. +*/ + +typedef enum +{ + DAC960_V1_GetEventLogEntry = 0x00 +} +__attribute__ ((packed)) +DAC960_V1_PerformEventLogOpType_T; + + +/* + Define the DAC960 V1 Firmware Get Event Log Entry Command reply structure. 
+*/ + +typedef struct DAC960_V1_EventLogEntry +{ + unsigned char MessageType; /* Byte 0 */ + unsigned char MessageLength; /* Byte 1 */ + unsigned char TargetID:5; /* Byte 2 Bits 0-4 */ + unsigned char Channel:3; /* Byte 2 Bits 5-7 */ + unsigned char LogicalUnit:6; /* Byte 3 Bits 0-5 */ + unsigned char :2; /* Byte 3 Bits 6-7 */ + unsigned short SequenceNumber; /* Bytes 4-5 */ + unsigned char ErrorCode:7; /* Byte 6 Bits 0-6 */ + bool Valid:1; /* Byte 6 Bit 7 */ + unsigned char SegmentNumber; /* Byte 7 */ + DAC960_SCSI_RequestSenseKey_T SenseKey:4; /* Byte 8 Bits 0-3 */ + unsigned char :1; /* Byte 8 Bit 4 */ + bool ILI:1; /* Byte 8 Bit 5 */ + bool EOM:1; /* Byte 8 Bit 6 */ + bool Filemark:1; /* Byte 8 Bit 7 */ + unsigned char Information[4]; /* Bytes 9-12 */ + unsigned char AdditionalSenseLength; /* Byte 13 */ + unsigned char CommandSpecificInformation[4]; /* Bytes 14-17 */ + unsigned char AdditionalSenseCode; /* Byte 18 */ + unsigned char AdditionalSenseCodeQualifier; /* Byte 19 */ + unsigned char Dummy[12]; /* Bytes 20-31 */ +} +DAC960_V1_EventLogEntry_T; + + +/* + Define the DAC960 V1 Firmware Physical Device State type. +*/ + +typedef enum +{ + DAC960_V1_Device_Dead = 0x00, + DAC960_V1_Device_WriteOnly = 0x02, + DAC960_V1_Device_Online = 0x03, + DAC960_V1_Device_Standby = 0x10 +} +__attribute__ ((packed)) +DAC960_V1_PhysicalDeviceState_T; + + +/* + Define the DAC960 V1 Firmware Get Device State Command reply structure. + The structure is padded by 2 bytes for compatibility with Version 2.xx + Firmware. +*/ + +typedef struct DAC960_V1_DeviceState +{ + bool Present:1; /* Byte 0 Bit 0 */ + unsigned char :7; /* Byte 0 Bits 1-7 */ + enum { + DAC960_V1_OtherType = 0x0, + DAC960_V1_DiskType = 0x1, + DAC960_V1_SequentialType = 0x2, + DAC960_V1_CDROM_or_WORM_Type = 0x3 + } __attribute__ ((packed)) DeviceType:2; /* Byte 1 Bits 0-1 */ + bool :1; /* Byte 1 Bit 2 */ + bool Fast20:1; /* Byte 1 Bit 3 */ + bool Sync:1; /* Byte 1 Bit 4 */ + bool Fast:1; /* Byte 1 Bit 5 */ + bool Wide:1; /* Byte 1 Bit 6 */ + bool TaggedQueuingSupported:1; /* Byte 1 Bit 7 */ + DAC960_V1_PhysicalDeviceState_T DeviceState; /* Byte 2 */ + unsigned char :8; /* Byte 3 */ + unsigned char SynchronousMultiplier; /* Byte 4 */ + unsigned char SynchronousOffset:5; /* Byte 5 Bits 0-4 */ + unsigned char :3; /* Byte 5 Bits 5-7 */ + unsigned int DiskSize __attribute__ ((packed)); /* Bytes 6-9 */ + unsigned short :16; /* Bytes 10-11 */ +} +DAC960_V1_DeviceState_T; + + +/* + Define the DAC960 V1 Firmware Get Rebuild Progress Command reply structure. +*/ + +typedef struct DAC960_V1_RebuildProgress +{ + unsigned int LogicalDriveNumber; /* Bytes 0-3 */ + unsigned int LogicalDriveSize; /* Bytes 4-7 */ + unsigned int RemainingBlocks; /* Bytes 8-11 */ +} +DAC960_V1_RebuildProgress_T; + + +/* + Define the DAC960 V1 Firmware Background Initialization Status Command + reply structure. 
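+
+  Progress can be derived from BlocksCompleted relative to
+  LogicalDriveSize for the logical drive identified by
+  LogicalDriveNumber.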
+*/ + +typedef struct DAC960_V1_BackgroundInitializationStatus +{ + unsigned int LogicalDriveSize; /* Bytes 0-3 */ + unsigned int BlocksCompleted; /* Bytes 4-7 */ + unsigned char Reserved1[12]; /* Bytes 8-19 */ + unsigned int LogicalDriveNumber; /* Bytes 20-23 */ + unsigned char RAIDLevel; /* Byte 24 */ + enum { + DAC960_V1_BackgroundInitializationInvalid = 0x00, + DAC960_V1_BackgroundInitializationStarted = 0x02, + DAC960_V1_BackgroundInitializationInProgress = 0x04, + DAC960_V1_BackgroundInitializationSuspended = 0x05, + DAC960_V1_BackgroundInitializationCancelled = 0x06 + } __attribute__ ((packed)) Status; /* Byte 25 */ + unsigned char Reserved2[6]; /* Bytes 26-31 */ +} +DAC960_V1_BackgroundInitializationStatus_T; + + +/* + Define the DAC960 V1 Firmware Error Table Entry structure. +*/ + +typedef struct DAC960_V1_ErrorTableEntry +{ + unsigned char ParityErrorCount; /* Byte 0 */ + unsigned char SoftErrorCount; /* Byte 1 */ + unsigned char HardErrorCount; /* Byte 2 */ + unsigned char MiscErrorCount; /* Byte 3 */ +} +DAC960_V1_ErrorTableEntry_T; + + +/* + Define the DAC960 V1 Firmware Get Error Table Command reply structure. +*/ + +typedef struct DAC960_V1_ErrorTable +{ + DAC960_V1_ErrorTableEntry_T + ErrorTableEntries[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets]; +} +DAC960_V1_ErrorTable_T; + + +/* + Define the DAC960 V1 Firmware Read Config2 Command reply structure. +*/ + +typedef struct DAC960_V1_Config2 +{ + unsigned char :1; /* Byte 0 Bit 0 */ + bool ActiveNegationEnabled:1; /* Byte 0 Bit 1 */ + unsigned char :5; /* Byte 0 Bits 2-6 */ + bool NoRescanIfResetReceivedDuringScan:1; /* Byte 0 Bit 7 */ + bool StorageWorksSupportEnabled:1; /* Byte 1 Bit 0 */ + bool HewlettPackardSupportEnabled:1; /* Byte 1 Bit 1 */ + bool NoDisconnectOnFirstCommand:1; /* Byte 1 Bit 2 */ + unsigned char :2; /* Byte 1 Bits 3-4 */ + bool AEMI_ARM:1; /* Byte 1 Bit 5 */ + bool AEMI_OFM:1; /* Byte 1 Bit 6 */ + unsigned char :1; /* Byte 1 Bit 7 */ + enum { + DAC960_V1_OEMID_Mylex = 0x00, + DAC960_V1_OEMID_IBM = 0x08, + DAC960_V1_OEMID_HP = 0x0A, + DAC960_V1_OEMID_DEC = 0x0C, + DAC960_V1_OEMID_Siemens = 0x10, + DAC960_V1_OEMID_Intel = 0x12 + } __attribute__ ((packed)) OEMID; /* Byte 2 */ + unsigned char OEMModelNumber; /* Byte 3 */ + unsigned char PhysicalSector; /* Byte 4 */ + unsigned char LogicalSector; /* Byte 5 */ + unsigned char BlockFactor; /* Byte 6 */ + bool ReadAheadEnabled:1; /* Byte 7 Bit 0 */ + bool LowBIOSDelay:1; /* Byte 7 Bit 1 */ + unsigned char :2; /* Byte 7 Bits 2-3 */ + bool ReassignRestrictedToOneSector:1; /* Byte 7 Bit 4 */ + unsigned char :1; /* Byte 7 Bit 5 */ + bool ForceUnitAccessDuringWriteRecovery:1; /* Byte 7 Bit 6 */ + bool EnableLeftSymmetricRAID5Algorithm:1; /* Byte 7 Bit 7 */ + unsigned char DefaultRebuildRate; /* Byte 8 */ + unsigned char :8; /* Byte 9 */ + unsigned char BlocksPerCacheLine; /* Byte 10 */ + unsigned char BlocksPerStripe; /* Byte 11 */ + struct { + enum { + DAC960_V1_Async = 0x0, + DAC960_V1_Sync_8MHz = 0x1, + DAC960_V1_Sync_5MHz = 0x2, + DAC960_V1_Sync_10or20MHz = 0x3 /* Byte 11 Bits 0-1 */ + } __attribute__ ((packed)) Speed:2; + bool Force8Bit:1; /* Byte 11 Bit 2 */ + bool DisableFast20:1; /* Byte 11 Bit 3 */ + unsigned char :3; /* Byte 11 Bits 4-6 */ + bool EnableTaggedQueuing:1; /* Byte 11 Bit 7 */ + } __attribute__ ((packed)) ChannelParameters[6]; /* Bytes 12-17 */ + unsigned char SCSIInitiatorID; /* Byte 18 */ + unsigned char :8; /* Byte 19 */ + enum { + DAC960_V1_StartupMode_ControllerSpinUp = 0x00, + DAC960_V1_StartupMode_PowerOnSpinUp = 0x01 + } __attribute__ 
((packed)) StartupMode; /* Byte 20 */ + unsigned char SimultaneousDeviceSpinUpCount; /* Byte 21 */ + unsigned char SecondsDelayBetweenSpinUps; /* Byte 22 */ + unsigned char Reserved1[29]; /* Bytes 23-51 */ + bool BIOSDisabled:1; /* Byte 52 Bit 0 */ + bool CDROMBootEnabled:1; /* Byte 52 Bit 1 */ + unsigned char :3; /* Byte 52 Bits 2-4 */ + enum { + DAC960_V1_Geometry_128_32 = 0x0, + DAC960_V1_Geometry_255_63 = 0x1, + DAC960_V1_Geometry_Reserved1 = 0x2, + DAC960_V1_Geometry_Reserved2 = 0x3 + } __attribute__ ((packed)) DriveGeometry:2; /* Byte 52 Bits 5-6 */ + unsigned char :1; /* Byte 52 Bit 7 */ + unsigned char Reserved2[9]; /* Bytes 53-61 */ + unsigned short Checksum; /* Bytes 62-63 */ +} +DAC960_V1_Config2_T; + + +/* + Define the DAC960 V1 Firmware DCDB request structure. +*/ + +typedef struct DAC960_V1_DCDB +{ + unsigned char TargetID:4; /* Byte 0 Bits 0-3 */ + unsigned char Channel:4; /* Byte 0 Bits 4-7 */ + enum { + DAC960_V1_DCDB_NoDataTransfer = 0, + DAC960_V1_DCDB_DataTransferDeviceToSystem = 1, + DAC960_V1_DCDB_DataTransferSystemToDevice = 2, + DAC960_V1_DCDB_IllegalDataTransfer = 3 + } __attribute__ ((packed)) Direction:2; /* Byte 1 Bits 0-1 */ + bool EarlyStatus:1; /* Byte 1 Bit 2 */ + unsigned char :1; /* Byte 1 Bit 3 */ + enum { + DAC960_V1_DCDB_Timeout_24_hours = 0, + DAC960_V1_DCDB_Timeout_10_seconds = 1, + DAC960_V1_DCDB_Timeout_60_seconds = 2, + DAC960_V1_DCDB_Timeout_10_minutes = 3 + } __attribute__ ((packed)) Timeout:2; /* Byte 1 Bits 4-5 */ + bool NoAutomaticRequestSense:1; /* Byte 1 Bit 6 */ + bool DisconnectPermitted:1; /* Byte 1 Bit 7 */ + unsigned short TransferLength; /* Bytes 2-3 */ + DAC960_BusAddress32_T BusAddress; /* Bytes 4-7 */ + unsigned char CDBLength:4; /* Byte 8 Bits 0-3 */ + unsigned char TransferLengthHigh4:4; /* Byte 8 Bits 4-7 */ + unsigned char SenseLength; /* Byte 9 */ + unsigned char CDB[12]; /* Bytes 10-21 */ + unsigned char SenseData[64]; /* Bytes 22-85 */ + unsigned char Status; /* Byte 86 */ + unsigned char :8; /* Byte 87 */ +} +DAC960_V1_DCDB_T; + + +/* + Define the DAC960 V1 Firmware Scatter/Gather List Type 1 32 Bit Address + 32 Bit Byte Count structure. +*/ + +typedef struct DAC960_V1_ScatterGatherSegment +{ + DAC960_BusAddress32_T SegmentDataPointer; /* Bytes 0-3 */ + DAC960_ByteCount32_T SegmentByteCount; /* Bytes 4-7 */ +} +DAC960_V1_ScatterGatherSegment_T; + + +/* + Define the 13 Byte DAC960 V1 Firmware Command Mailbox structure. Bytes 13-15 + are not used. The Command Mailbox structure is padded to 16 bytes for + efficient access. 
+*/ + +typedef union DAC960_V1_CommandMailbox +{ + unsigned int Words[4]; /* Words 0-3 */ + unsigned char Bytes[16]; /* Bytes 0-15 */ + struct { + DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */ + DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */ + unsigned char Dummy[14]; /* Bytes 2-15 */ + } __attribute__ ((packed)) Common; + struct { + DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */ + DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */ + unsigned char Dummy1[6]; /* Bytes 2-7 */ + DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */ + unsigned char Dummy2[4]; /* Bytes 12-15 */ + } __attribute__ ((packed)) Type3; + struct { + DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */ + DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */ + unsigned char CommandOpcode2; /* Byte 2 */ + unsigned char Dummy1[5]; /* Bytes 3-7 */ + DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */ + unsigned char Dummy2[4]; /* Bytes 12-15 */ + } __attribute__ ((packed)) Type3B; + struct { + DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */ + DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */ + unsigned char Dummy1[5]; /* Bytes 2-6 */ + unsigned char LogicalDriveNumber:6; /* Byte 7 Bits 0-6 */ + bool AutoRestore:1; /* Byte 7 Bit 7 */ + unsigned char Dummy2[8]; /* Bytes 8-15 */ + } __attribute__ ((packed)) Type3C; + struct { + DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */ + DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */ + unsigned char Channel; /* Byte 2 */ + unsigned char TargetID; /* Byte 3 */ + DAC960_V1_PhysicalDeviceState_T DeviceState:5; /* Byte 4 Bits 0-4 */ + unsigned char Modifier:3; /* Byte 4 Bits 5-7 */ + unsigned char Dummy1[3]; /* Bytes 5-7 */ + DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */ + unsigned char Dummy2[4]; /* Bytes 12-15 */ + } __attribute__ ((packed)) Type3D; + struct { + DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */ + DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */ + DAC960_V1_PerformEventLogOpType_T OperationType; /* Byte 2 */ + unsigned char OperationQualifier; /* Byte 3 */ + unsigned short SequenceNumber; /* Bytes 4-5 */ + unsigned char Dummy1[2]; /* Bytes 6-7 */ + DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */ + unsigned char Dummy2[4]; /* Bytes 12-15 */ + } __attribute__ ((packed)) Type3E; + struct { + DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */ + DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */ + unsigned char Dummy1[2]; /* Bytes 2-3 */ + unsigned char RebuildRateConstant; /* Byte 4 */ + unsigned char Dummy2[3]; /* Bytes 5-7 */ + DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */ + unsigned char Dummy3[4]; /* Bytes 12-15 */ + } __attribute__ ((packed)) Type3R; + struct { + DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */ + DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */ + unsigned short TransferLength; /* Bytes 2-3 */ + unsigned int LogicalBlockAddress; /* Bytes 4-7 */ + DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */ + unsigned char LogicalDriveNumber; /* Byte 12 */ + unsigned char Dummy[3]; /* Bytes 13-15 */ + } __attribute__ ((packed)) Type4; + struct { + DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */ + DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */ + struct { + unsigned short TransferLength:11; /* Bytes 2-3 */ + unsigned char LogicalDriveNumber:5; /* Byte 3 Bits 3-7 */ + } __attribute__ ((packed)) LD; + unsigned int LogicalBlockAddress; /* Bytes 4-7 */ + DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */ + 
unsigned char ScatterGatherCount:6; /* Byte 12 Bits 0-5 */ + enum { + DAC960_V1_ScatterGather_32BitAddress_32BitByteCount = 0x0, + DAC960_V1_ScatterGather_32BitAddress_16BitByteCount = 0x1, + DAC960_V1_ScatterGather_32BitByteCount_32BitAddress = 0x2, + DAC960_V1_ScatterGather_16BitByteCount_32BitAddress = 0x3 + } __attribute__ ((packed)) ScatterGatherType:2; /* Byte 12 Bits 6-7 */ + unsigned char Dummy[3]; /* Bytes 13-15 */ + } __attribute__ ((packed)) Type5; + struct { + DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */ + DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */ + unsigned char CommandOpcode2; /* Byte 2 */ + unsigned char :8; /* Byte 3 */ + DAC960_BusAddress32_T CommandMailboxesBusAddress; /* Bytes 4-7 */ + DAC960_BusAddress32_T StatusMailboxesBusAddress; /* Bytes 8-11 */ + unsigned char Dummy[4]; /* Bytes 12-15 */ + } __attribute__ ((packed)) TypeX; +} +DAC960_V1_CommandMailbox_T; + + +/* + Define the DAC960 V2 Firmware Command Opcodes. +*/ + +typedef enum +{ + DAC960_V2_MemCopy = 0x01, + DAC960_V2_SCSI_10_Passthru = 0x02, + DAC960_V2_SCSI_255_Passthru = 0x03, + DAC960_V2_SCSI_10 = 0x04, + DAC960_V2_SCSI_256 = 0x05, + DAC960_V2_IOCTL = 0x20 +} +__attribute__ ((packed)) +DAC960_V2_CommandOpcode_T; + + +/* + Define the DAC960 V2 Firmware IOCTL Opcodes. +*/ + +typedef enum +{ + DAC960_V2_GetControllerInfo = 0x01, + DAC960_V2_GetLogicalDeviceInfoValid = 0x03, + DAC960_V2_GetPhysicalDeviceInfoValid = 0x05, + DAC960_V2_GetHealthStatus = 0x11, + DAC960_V2_GetEvent = 0x15, + DAC960_V2_StartDiscovery = 0x81, + DAC960_V2_SetDeviceState = 0x82, + DAC960_V2_RebuildDeviceStart = 0x88, + DAC960_V2_RebuildDeviceStop = 0x89, + DAC960_V2_ConsistencyCheckStart = 0x8C, + DAC960_V2_ConsistencyCheckStop = 0x8D, + DAC960_V2_SetMemoryMailbox = 0x8E, + DAC960_V2_PauseDevice = 0x92, + DAC960_V2_TranslatePhysicalToLogicalDevice = 0xC5 +} +__attribute__ ((packed)) +DAC960_V2_IOCTL_Opcode_T; + + +/* + Define the DAC960 V2 Firmware Command Identifier type. +*/ + +typedef unsigned short DAC960_V2_CommandIdentifier_T; + + +/* + Define the DAC960 V2 Firmware Command Status Codes. +*/ + +#define DAC960_V2_NormalCompletion 0x00 +#define DAC960_V2_AbormalCompletion 0x02 +#define DAC960_V2_DeviceBusy 0x08 +#define DAC960_V2_DeviceNonresponsive 0x0E +#define DAC960_V2_DeviceNonresponsive2 0x0F +#define DAC960_V2_DeviceRevervationConflict 0x18 + +typedef unsigned char DAC960_V2_CommandStatus_T; + + +/* + Define the DAC960 V2 Firmware Memory Type structure. +*/ + +typedef struct DAC960_V2_MemoryType +{ + enum { + DAC960_V2_MemoryType_Reserved = 0x00, + DAC960_V2_MemoryType_DRAM = 0x01, + DAC960_V2_MemoryType_EDRAM = 0x02, + DAC960_V2_MemoryType_EDO = 0x03, + DAC960_V2_MemoryType_SDRAM = 0x04, + DAC960_V2_MemoryType_Last = 0x1F + } __attribute__ ((packed)) MemoryType:5; /* Byte 0 Bits 0-4 */ + bool :1; /* Byte 0 Bit 5 */ + bool MemoryParity:1; /* Byte 0 Bit 6 */ + bool MemoryECC:1; /* Byte 0 Bit 7 */ +} +DAC960_V2_MemoryType_T; + + +/* + Define the DAC960 V2 Firmware Processor Type structure. +*/ + +typedef enum +{ + DAC960_V2_ProcessorType_i960CA = 0x01, + DAC960_V2_ProcessorType_i960RD = 0x02, + DAC960_V2_ProcessorType_i960RN = 0x03, + DAC960_V2_ProcessorType_i960RP = 0x04, + DAC960_V2_ProcessorType_NorthBay = 0x05, + DAC960_V2_ProcessorType_StrongArm = 0x06, + DAC960_V2_ProcessorType_i960RM = 0x07 +} +__attribute__ ((packed)) +DAC960_V2_ProcessorType_T; + + +/* + Define the DAC960 V2 Firmware Get Controller Info reply structure. 
+*/ + +typedef struct DAC960_V2_ControllerInfo +{ + unsigned char :8; /* Byte 0 */ + enum { + DAC960_V2_SCSI_Bus = 0x00, + DAC960_V2_Fibre_Bus = 0x01, + DAC960_V2_PCI_Bus = 0x03 + } __attribute__ ((packed)) BusInterfaceType; /* Byte 1 */ + enum { + DAC960_V2_DAC960E = 0x01, + DAC960_V2_DAC960M = 0x08, + DAC960_V2_DAC960PD = 0x10, + DAC960_V2_DAC960PL = 0x11, + DAC960_V2_DAC960PU = 0x12, + DAC960_V2_DAC960PE = 0x13, + DAC960_V2_DAC960PG = 0x14, + DAC960_V2_DAC960PJ = 0x15, + DAC960_V2_DAC960PTL0 = 0x16, + DAC960_V2_DAC960PR = 0x17, + DAC960_V2_DAC960PRL = 0x18, + DAC960_V2_DAC960PT = 0x19, + DAC960_V2_DAC1164P = 0x1A, + DAC960_V2_DAC960PTL1 = 0x1B, + DAC960_V2_EXR2000P = 0x1C, + DAC960_V2_EXR3000P = 0x1D, + DAC960_V2_AcceleRAID352 = 0x1E, + DAC960_V2_AcceleRAID170 = 0x1F, + DAC960_V2_AcceleRAID160 = 0x20, + DAC960_V2_DAC960S = 0x60, + DAC960_V2_DAC960SU = 0x61, + DAC960_V2_DAC960SX = 0x62, + DAC960_V2_DAC960SF = 0x63, + DAC960_V2_DAC960SS = 0x64, + DAC960_V2_DAC960FL = 0x65, + DAC960_V2_DAC960LL = 0x66, + DAC960_V2_DAC960FF = 0x67, + DAC960_V2_DAC960HP = 0x68, + DAC960_V2_RAIDBRICK = 0x69, + DAC960_V2_METEOR_FL = 0x6A, + DAC960_V2_METEOR_FF = 0x6B + } __attribute__ ((packed)) ControllerType; /* Byte 2 */ + unsigned char :8; /* Byte 3 */ + unsigned short BusInterfaceSpeedMHz; /* Bytes 4-5 */ + unsigned char BusWidthBits; /* Byte 6 */ + unsigned char FlashCodeTypeOrProductID; /* Byte 7 */ + unsigned char NumberOfHostPortsPresent; /* Byte 8 */ + unsigned char Reserved1[7]; /* Bytes 9-15 */ + unsigned char BusInterfaceName[16]; /* Bytes 16-31 */ + unsigned char ControllerName[16]; /* Bytes 32-47 */ + unsigned char Reserved2[16]; /* Bytes 48-63 */ + /* Firmware Release Information */ + unsigned char FirmwareMajorVersion; /* Byte 64 */ + unsigned char FirmwareMinorVersion; /* Byte 65 */ + unsigned char FirmwareTurnNumber; /* Byte 66 */ + unsigned char FirmwareBuildNumber; /* Byte 67 */ + unsigned char FirmwareReleaseDay; /* Byte 68 */ + unsigned char FirmwareReleaseMonth; /* Byte 69 */ + unsigned char FirmwareReleaseYearHigh2Digits; /* Byte 70 */ + unsigned char FirmwareReleaseYearLow2Digits; /* Byte 71 */ + /* Hardware Release Information */ + unsigned char HardwareRevision; /* Byte 72 */ + unsigned int :24; /* Bytes 73-75 */ + unsigned char HardwareReleaseDay; /* Byte 76 */ + unsigned char HardwareReleaseMonth; /* Byte 77 */ + unsigned char HardwareReleaseYearHigh2Digits; /* Byte 78 */ + unsigned char HardwareReleaseYearLow2Digits; /* Byte 79 */ + /* Hardware Manufacturing Information */ + unsigned char ManufacturingBatchNumber; /* Byte 80 */ + unsigned char :8; /* Byte 81 */ + unsigned char ManufacturingPlantNumber; /* Byte 82 */ + unsigned char :8; /* Byte 83 */ + unsigned char HardwareManufacturingDay; /* Byte 84 */ + unsigned char HardwareManufacturingMonth; /* Byte 85 */ + unsigned char HardwareManufacturingYearHigh2Digits; /* Byte 86 */ + unsigned char HardwareManufacturingYearLow2Digits; /* Byte 87 */ + unsigned char MaximumNumberOfPDDperXLD; /* Byte 88 */ + unsigned char MaximumNumberOfILDperXLD; /* Byte 89 */ + unsigned short NonvolatileMemorySizeKB; /* Bytes 90-91 */ + unsigned char MaximumNumberOfXLD; /* Byte 92 */ + unsigned int :24; /* Bytes 93-95 */ + /* Unique Information per Controller */ + unsigned char ControllerSerialNumber[16]; /* Bytes 96-111 */ + unsigned char Reserved3[16]; /* Bytes 112-127 */ + /* Vendor Information */ + unsigned int :24; /* Bytes 128-130 */ + unsigned char OEM_Code; /* Byte 131 */ + unsigned char VendorName[16]; /* Bytes 132-147 */ + /* Other 
Physical/Controller/Operation Information */ + bool BBU_Present:1; /* Byte 148 Bit 0 */ + bool ActiveActiveClusteringMode:1; /* Byte 148 Bit 1 */ + unsigned char :6; /* Byte 148 Bits 2-7 */ + unsigned char :8; /* Byte 149 */ + unsigned short :16; /* Bytes 150-151 */ + /* Physical Device Scan Information */ + bool PhysicalScanActive:1; /* Byte 152 Bit 0 */ + unsigned char :7; /* Byte 152 Bits 1-7 */ + unsigned char PhysicalDeviceChannelNumber; /* Byte 153 */ + unsigned char PhysicalDeviceTargetID; /* Byte 154 */ + unsigned char PhysicalDeviceLogicalUnit; /* Byte 155 */ + /* Maximum Command Data Transfer Sizes */ + unsigned short MaximumDataTransferSizeInBlocks; /* Bytes 156-157 */ + unsigned short MaximumScatterGatherEntries; /* Bytes 158-159 */ + /* Logical/Physical Device Counts */ + unsigned short LogicalDevicesPresent; /* Bytes 160-161 */ + unsigned short LogicalDevicesCritical; /* Bytes 162-163 */ + unsigned short LogicalDevicesOffline; /* Bytes 164-165 */ + unsigned short PhysicalDevicesPresent; /* Bytes 166-167 */ + unsigned short PhysicalDisksPresent; /* Bytes 168-169 */ + unsigned short PhysicalDisksCritical; /* Bytes 170-171 */ + unsigned short PhysicalDisksOffline; /* Bytes 172-173 */ + unsigned short MaximumParallelCommands; /* Bytes 174-175 */ + /* Channel and Target ID Information */ + unsigned char NumberOfPhysicalChannelsPresent; /* Byte 176 */ + unsigned char NumberOfVirtualChannelsPresent; /* Byte 177 */ + unsigned char NumberOfPhysicalChannelsPossible; /* Byte 178 */ + unsigned char NumberOfVirtualChannelsPossible; /* Byte 179 */ + unsigned char MaximumTargetsPerChannel[16]; /* Bytes 180-195 */ + unsigned char Reserved4[12]; /* Bytes 196-207 */ + /* Memory/Cache Information */ + unsigned short MemorySizeMB; /* Bytes 208-209 */ + unsigned short CacheSizeMB; /* Bytes 210-211 */ + unsigned int ValidCacheSizeInBytes; /* Bytes 212-215 */ + unsigned int DirtyCacheSizeInBytes; /* Bytes 216-219 */ + unsigned short MemorySpeedMHz; /* Bytes 220-221 */ + unsigned char MemoryDataWidthBits; /* Byte 222 */ + DAC960_V2_MemoryType_T MemoryType; /* Byte 223 */ + unsigned char CacheMemoryTypeName[16]; /* Bytes 224-239 */ + /* Execution Memory Information */ + unsigned short ExecutionMemorySizeMB; /* Bytes 240-241 */ + unsigned short ExecutionL2CacheSizeMB; /* Bytes 242-243 */ + unsigned char Reserved5[8]; /* Bytes 244-251 */ + unsigned short ExecutionMemorySpeedMHz; /* Bytes 252-253 */ + unsigned char ExecutionMemoryDataWidthBits; /* Byte 254 */ + DAC960_V2_MemoryType_T ExecutionMemoryType; /* Byte 255 */ + unsigned char ExecutionMemoryTypeName[16]; /* Bytes 256-271 */ + /* First CPU Type Information */ + unsigned short FirstProcessorSpeedMHz; /* Bytes 272-273 */ + DAC960_V2_ProcessorType_T FirstProcessorType; /* Byte 274 */ + unsigned char FirstProcessorCount; /* Byte 275 */ + unsigned char Reserved6[12]; /* Bytes 276-287 */ + unsigned char FirstProcessorName[16]; /* Bytes 288-303 */ + /* Second CPU Type Information */ + unsigned short SecondProcessorSpeedMHz; /* Bytes 304-305 */ + DAC960_V2_ProcessorType_T SecondProcessorType; /* Byte 306 */ + unsigned char SecondProcessorCount; /* Byte 307 */ + unsigned char Reserved7[12]; /* Bytes 308-319 */ + unsigned char SecondProcessorName[16]; /* Bytes 320-335 */ + /* Debugging/Profiling/Command Time Tracing Information */ + unsigned short CurrentProfilingDataPageNumber; /* Bytes 336-337 */ + unsigned short ProgramsAwaitingProfilingData; /* Bytes 338-339 */ + unsigned short CurrentCommandTimeTraceDataPageNumber; /* Bytes 340-341 */ + unsigned 
short ProgramsAwaitingCommandTimeTraceData; /* Bytes 342-343 */ + unsigned char Reserved8[8]; /* Bytes 344-351 */ + /* Error Counters on Physical Devices */ + unsigned short PhysicalDeviceBusResets; /* Bytes 352-353 */ + unsigned short PhysicalDeviceParityErrors; /* Bytes 354-355 */ + unsigned short PhysicalDeviceSoftErrors; /* Bytes 356-357 */ + unsigned short PhysicalDeviceCommandsFailed; /* Bytes 358-359 */ + unsigned short PhysicalDeviceMiscellaneousErrors; /* Bytes 360-361 */ + unsigned short PhysicalDeviceCommandTimeouts; /* Bytes 362-363 */ + unsigned short PhysicalDeviceSelectionTimeouts; /* Bytes 364-365 */ + unsigned short PhysicalDeviceRetriesDone; /* Bytes 366-367 */ + unsigned short PhysicalDeviceAbortsDone; /* Bytes 368-369 */ + unsigned short PhysicalDeviceHostCommandAbortsDone; /* Bytes 370-371 */ + unsigned short PhysicalDevicePredictedFailuresDetected; /* Bytes 372-373 */ + unsigned short PhysicalDeviceHostCommandsFailed; /* Bytes 374-375 */ + unsigned short PhysicalDeviceHardErrors; /* Bytes 376-377 */ + unsigned char Reserved9[6]; /* Bytes 378-383 */ + /* Error Counters on Logical Devices */ + unsigned short LogicalDeviceSoftErrors; /* Bytes 384-385 */ + unsigned short LogicalDeviceCommandsFailed; /* Bytes 386-387 */ + unsigned short LogicalDeviceHostCommandAbortsDone; /* Bytes 388-389 */ + unsigned short :16; /* Bytes 390-391 */ + /* Error Counters on Controller */ + unsigned short ControllerMemoryErrors; /* Bytes 392-393 */ + unsigned short ControllerHostCommandAbortsDone; /* Bytes 394-395 */ + unsigned int :32; /* Bytes 396-399 */ + /* Long Duration Activity Information */ + unsigned short BackgroundInitializationsActive; /* Bytes 400-401 */ + unsigned short LogicalDeviceInitializationsActive; /* Bytes 402-403 */ + unsigned short PhysicalDeviceInitializationsActive; /* Bytes 404-405 */ + unsigned short ConsistencyChecksActive; /* Bytes 406-407 */ + unsigned short RebuildsActive; /* Bytes 408-409 */ + unsigned short OnlineExpansionsActive; /* Bytes 410-411 */ + unsigned short PatrolActivitiesActive; /* Bytes 412-413 */ + unsigned short :16; /* Bytes 414-415 */ + /* Flash ROM Information */ + unsigned char FlashType; /* Byte 416 */ + unsigned char :8; /* Byte 417 */ + unsigned short FlashSizeMB; /* Bytes 418-419 */ + unsigned int FlashLimit; /* Bytes 420-423 */ + unsigned int FlashCount; /* Bytes 424-427 */ + unsigned int :32; /* Bytes 428-431 */ + unsigned char FlashTypeName[16]; /* Bytes 432-447 */ + /* Firmware Run Time Information */ + unsigned char RebuildRate; /* Byte 448 */ + unsigned char BackgroundInitializationRate; /* Byte 449 */ + unsigned char ForegroundInitializationRate; /* Byte 450 */ + unsigned char ConsistencyCheckRate; /* Byte 451 */ + unsigned int :32; /* Bytes 452-455 */ + unsigned int MaximumDP; /* Bytes 456-459 */ + unsigned int FreeDP; /* Bytes 460-463 */ + unsigned int MaximumIOP; /* Bytes 464-467 */ + unsigned int FreeIOP; /* Bytes 468-471 */ + unsigned short MaximumCombLengthInBlocks; /* Bytes 472-473 */ + unsigned short NumberOfConfigurationGroups; /* Bytes 474-475 */ + bool InstallationAbortStatus:1; /* Byte 476 Bit 0 */ + bool MaintenanceModeStatus:1; /* Byte 476 Bit 1 */ + unsigned int :24; /* Bytes 476-479 */ + unsigned char Reserved10[32]; /* Bytes 480-511 */ + unsigned char Reserved11[512]; /* Bytes 512-1023 */ +} +DAC960_V2_ControllerInfo_T; + + +/* + Define the DAC960 V2 Firmware Logical Device State type.
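+
+  A drive in any state other than Online is either running degraded or is
+  inaccessible.  A sketch of a check a consumer of this state byte might
+  perform (illustrative only; "Info" is a filled-in Logical Device Info
+  reply as defined further below):
+
+      if (Info->LogicalDeviceState != DAC960_V2_LogicalDevice_Online)
+              return -ENXIO;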
+*/ + +typedef enum +{ + DAC960_V2_LogicalDevice_Online = 0x01, + DAC960_V2_LogicalDevice_Offline = 0x08, + DAC960_V2_LogicalDevice_Critical = 0x09 +} +__attribute__ ((packed)) +DAC960_V2_LogicalDeviceState_T; + + +/* + Define the DAC960 V2 Firmware Get Logical Device Info reply structure. +*/ + +typedef struct DAC960_V2_LogicalDeviceInfo +{ + unsigned char :8; /* Byte 0 */ + unsigned char Channel; /* Byte 1 */ + unsigned char TargetID; /* Byte 2 */ + unsigned char LogicalUnit; /* Byte 3 */ + DAC960_V2_LogicalDeviceState_T LogicalDeviceState; /* Byte 4 */ + unsigned char RAIDLevel; /* Byte 5 */ + unsigned char StripeSize; /* Byte 6 */ + unsigned char CacheLineSize; /* Byte 7 */ + struct { + enum { + DAC960_V2_ReadCacheDisabled = 0x0, + DAC960_V2_ReadCacheEnabled = 0x1, + DAC960_V2_ReadAheadEnabled = 0x2, + DAC960_V2_IntelligentReadAheadEnabled = 0x3, + DAC960_V2_ReadCache_Last = 0x7 + } __attribute__ ((packed)) ReadCache:3; /* Byte 8 Bits 0-2 */ + enum { + DAC960_V2_WriteCacheDisabled = 0x0, + DAC960_V2_LogicalDeviceReadOnly = 0x1, + DAC960_V2_WriteCacheEnabled = 0x2, + DAC960_V2_IntelligentWriteCacheEnabled = 0x3, + DAC960_V2_WriteCache_Last = 0x7 + } __attribute__ ((packed)) WriteCache:3; /* Byte 8 Bits 3-5 */ + bool :1; /* Byte 8 Bit 6 */ + bool LogicalDeviceInitialized:1; /* Byte 8 Bit 7 */ + } LogicalDeviceControl; /* Byte 8 */ + /* Logical Device Operations Status */ + bool ConsistencyCheckInProgress:1; /* Byte 9 Bit 0 */ + bool RebuildInProgress:1; /* Byte 9 Bit 1 */ + bool BackgroundInitializationInProgress:1; /* Byte 9 Bit 2 */ + bool ForegroundInitializationInProgress:1; /* Byte 9 Bit 3 */ + bool DataMigrationInProgress:1; /* Byte 9 Bit 4 */ + bool PatrolOperationInProgress:1; /* Byte 9 Bit 5 */ + unsigned char :2; /* Byte 9 Bits 6-7 */ + unsigned char RAID5WriteUpdate; /* Byte 10 */ + unsigned char RAID5Algorithm; /* Byte 11 */ + unsigned short LogicalDeviceNumber; /* Bytes 12-13 */ + /* BIOS Info */ + bool BIOSDisabled:1; /* Byte 14 Bit 0 */ + bool CDROMBootEnabled:1; /* Byte 14 Bit 1 */ + bool DriveCoercionEnabled:1; /* Byte 14 Bit 2 */ + bool WriteSameDisabled:1; /* Byte 14 Bit 3 */ + bool HBA_ModeEnabled:1; /* Byte 14 Bit 4 */ + enum { + DAC960_V2_Geometry_128_32 = 0x0, + DAC960_V2_Geometry_255_63 = 0x1, + DAC960_V2_Geometry_Reserved1 = 0x2, + DAC960_V2_Geometry_Reserved2 = 0x3 + } __attribute__ ((packed)) DriveGeometry:2; /* Byte 14 Bits 5-6 */ + bool SuperReadAheadEnabled:1; /* Byte 14 Bit 7 */ + unsigned char :8; /* Byte 15 */ + /* Error Counters */ + unsigned short SoftErrors; /* Bytes 16-17 */ + unsigned short CommandsFailed; /* Bytes 18-19 */ + unsigned short HostCommandAbortsDone; /* Bytes 20-21 */ + unsigned short DeferredWriteErrors; /* Bytes 22-23 */ + unsigned int :32; /* Bytes 24-27 */ + unsigned int :32; /* Bytes 28-31 */ + /* Device Size Information */ + unsigned short :16; /* Bytes 32-33 */ + unsigned short DeviceBlockSizeInBytes; /* Bytes 34-35 */ + unsigned int OriginalDeviceSize; /* Bytes 36-39 */ + unsigned int ConfigurableDeviceSize; /* Bytes 40-43 */ + unsigned int :32; /* Bytes 44-47 */ + unsigned char LogicalDeviceName[32]; /* Bytes 48-79 */ + unsigned char SCSI_InquiryData[36]; /* Bytes 80-115 */ + unsigned char Reserved1[12]; /* Bytes 116-127 */ + DAC960_ByteCount64_T LastReadBlockNumber; /* Bytes 128-135 */ + DAC960_ByteCount64_T LastWrittenBlockNumber; /* Bytes 136-143 */ + DAC960_ByteCount64_T ConsistencyCheckBlockNumber; /* Bytes 144-151 */ + DAC960_ByteCount64_T RebuildBlockNumber; /* Bytes 152-159 */ + DAC960_ByteCount64_T 
BackgroundInitializationBlockNumber; /* Bytes 160-167 */ + DAC960_ByteCount64_T ForegroundInitializationBlockNumber; /* Bytes 168-175 */ + DAC960_ByteCount64_T DataMigrationBlockNumber; /* Bytes 176-183 */ + DAC960_ByteCount64_T PatrolOperationBlockNumber; /* Bytes 184-191 */ + unsigned char Reserved2[64]; /* Bytes 192-255 */ +} +DAC960_V2_LogicalDeviceInfo_T; + + +/* + Define the DAC960 V2 Firmware Physical Device State type. +*/ + +typedef enum +{ + DAC960_V2_Device_Unconfigured = 0x00, + DAC960_V2_Device_Online = 0x01, + DAC960_V2_Device_Rebuild = 0x03, + DAC960_V2_Device_Missing = 0x04, + DAC960_V2_Device_Critical = 0x05, + DAC960_V2_Device_Dead = 0x08, + DAC960_V2_Device_SuspectedDead = 0x0C, + DAC960_V2_Device_CommandedOffline = 0x10, + DAC960_V2_Device_Standby = 0x21, + DAC960_V2_Device_InvalidState = 0xFF +} +__attribute__ ((packed)) +DAC960_V2_PhysicalDeviceState_T; + + +/* + Define the DAC960 V2 Firmware Get Physical Device Info reply structure. +*/ + +typedef struct DAC960_V2_PhysicalDeviceInfo +{ + unsigned char :8; /* Byte 0 */ + unsigned char Channel; /* Byte 1 */ + unsigned char TargetID; /* Byte 2 */ + unsigned char LogicalUnit; /* Byte 3 */ + /* Configuration Status Bits */ + bool PhysicalDeviceFaultTolerant:1; /* Byte 4 Bit 0 */ + bool PhysicalDeviceConnected:1; /* Byte 4 Bit 1 */ + bool PhysicalDeviceLocalToController:1; /* Byte 4 Bit 2 */ + unsigned char :5; /* Byte 4 Bits 3-7 */ + /* Multiple Host/Controller Status Bits */ + bool RemoteHostSystemDead:1; /* Byte 5 Bit 0 */ + bool RemoteControllerDead:1; /* Byte 5 Bit 1 */ + unsigned char :6; /* Byte 5 Bits 2-7 */ + DAC960_V2_PhysicalDeviceState_T PhysicalDeviceState; /* Byte 6 */ + unsigned char NegotiatedDataWidthBits; /* Byte 7 */ + unsigned short NegotiatedSynchronousMegaTransfers; /* Bytes 8-9 */ + /* Multiported Physical Device Information */ + unsigned char NumberOfPortConnections; /* Byte 10 */ + unsigned char DriveAccessibilityBitmap; /* Byte 11 */ + unsigned int :32; /* Bytes 12-15 */ + unsigned char NetworkAddress[16]; /* Bytes 16-31 */ + unsigned short MaximumTags; /* Bytes 32-33 */ + /* Physical Device Operations Status */ + bool ConsistencyCheckInProgress:1; /* Byte 34 Bit 0 */ + bool RebuildInProgress:1; /* Byte 34 Bit 1 */ + bool MakingDataConsistentInProgress:1; /* Byte 34 Bit 2 */ + bool PhysicalDeviceInitializationInProgress:1; /* Byte 34 Bit 3 */ + bool DataMigrationInProgress:1; /* Byte 34 Bit 4 */ + bool PatrolOperationInProgress:1; /* Byte 34 Bit 5 */ + unsigned char :2; /* Byte 34 Bits 6-7 */ + unsigned char LongOperationStatus; /* Byte 35 */ + unsigned char ParityErrors; /* Byte 36 */ + unsigned char SoftErrors; /* Byte 37 */ + unsigned char HardErrors; /* Byte 38 */ + unsigned char MiscellaneousErrors; /* Byte 39 */ + unsigned char CommandTimeouts; /* Byte 40 */ + unsigned char Retries; /* Byte 41 */ + unsigned char Aborts; /* Byte 42 */ + unsigned char PredictedFailuresDetected; /* Byte 43 */ + unsigned int :32; /* Bytes 44-47 */ + unsigned short :16; /* Bytes 48-49 */ + unsigned short DeviceBlockSizeInBytes; /* Bytes 50-51 */ + unsigned int OriginalDeviceSize; /* Bytes 52-55 */ + unsigned int ConfigurableDeviceSize; /* Bytes 56-59 */ + unsigned int :32; /* Bytes 60-63 */ + unsigned char PhysicalDeviceName[16]; /* Bytes 64-79 */ + unsigned char Reserved1[16]; /* Bytes 80-95 */ + unsigned char Reserved2[32]; /* Bytes 96-127 */ + unsigned char SCSI_InquiryData[36]; /* Bytes 128-163 */ + unsigned char Reserved3[20]; /* Bytes 164-183 */ + unsigned char Reserved4[8]; /* Bytes 184-191 */ + 
DAC960_ByteCount64_T LastReadBlockNumber; /* Bytes 192-199 */ + DAC960_ByteCount64_T LastWrittenBlockNumber; /* Bytes 200-207 */ + DAC960_ByteCount64_T ConsistencyCheckBlockNumber; /* Bytes 208-215 */ + DAC960_ByteCount64_T RebuildBlockNumber; /* Bytes 216-223 */ + DAC960_ByteCount64_T MakingDataConsistentBlockNumber; /* Bytes 224-231 */ + DAC960_ByteCount64_T DeviceInitializationBlockNumber; /* Bytes 232-239 */ + DAC960_ByteCount64_T DataMigrationBlockNumber; /* Bytes 240-247 */ + DAC960_ByteCount64_T PatrolOperationBlockNumber; /* Bytes 248-255 */ + unsigned char Reserved5[256]; /* Bytes 256-511 */ +} +DAC960_V2_PhysicalDeviceInfo_T; + + +/* + Define the DAC960 V2 Firmware Health Status Buffer structure. +*/ + +typedef struct DAC960_V2_HealthStatusBuffer +{ + unsigned int MicrosecondsFromControllerStartTime; /* Bytes 0-3 */ + unsigned int MillisecondsFromControllerStartTime; /* Bytes 4-7 */ + unsigned int SecondsFrom1January1970; /* Bytes 8-11 */ + unsigned int :32; /* Bytes 12-15 */ + unsigned int StatusChangeCounter; /* Bytes 16-19 */ + unsigned int :32; /* Bytes 20-23 */ + unsigned int DebugOutputMessageBufferIndex; /* Bytes 24-27 */ + unsigned int CodedMessageBufferIndex; /* Bytes 28-31 */ + unsigned int CurrentTimeTracePageNumber; /* Bytes 32-35 */ + unsigned int CurrentProfilerPageNumber; /* Bytes 36-39 */ + unsigned int NextEventSequenceNumber; /* Bytes 40-43 */ + unsigned int :32; /* Bytes 44-47 */ + unsigned char Reserved1[16]; /* Bytes 48-63 */ + unsigned char Reserved2[64]; /* Bytes 64-127 */ +} +DAC960_V2_HealthStatusBuffer_T; + + +/* + Define the DAC960 V2 Firmware Get Event reply structure. +*/ + +typedef struct DAC960_V2_Event +{ + unsigned int EventSequenceNumber; /* Bytes 0-3 */ + unsigned int EventTime; /* Bytes 4-7 */ + unsigned int EventCode; /* Bytes 8-11 */ + unsigned char :8; /* Byte 12 */ + unsigned char Channel; /* Byte 13 */ + unsigned char TargetID; /* Byte 14 */ + unsigned char LogicalUnit; /* Byte 15 */ + unsigned int :32; /* Bytes 16-19 */ + unsigned int EventSpecificParameter; /* Bytes 20-23 */ + unsigned char RequestSenseData[40]; /* Bytes 24-63 */ +} +DAC960_V2_Event_T; + + +/* + Define the DAC960 V2 Firmware Command Control Bits structure. +*/ + +typedef struct DAC960_V2_CommandControlBits +{ + bool ForceUnitAccess:1; /* Byte 0 Bit 0 */ + bool DisablePageOut:1; /* Byte 0 Bit 1 */ + bool :1; /* Byte 0 Bit 2 */ + bool AdditionalScatterGatherListMemory:1; /* Byte 0 Bit 3 */ + bool DataTransferControllerToHost:1; /* Byte 0 Bit 4 */ + bool :1; /* Byte 0 Bit 5 */ + bool NoAutoRequestSense:1; /* Byte 0 Bit 6 */ + bool DisconnectProhibited:1; /* Byte 0 Bit 7 */ +} +DAC960_V2_CommandControlBits_T; + + +/* + Define the DAC960 V2 Firmware Command Timeout structure. +*/ + +typedef struct DAC960_V2_CommandTimeout +{ + unsigned char TimeoutValue:6; /* Byte 0 Bits 0-5 */ + enum { + DAC960_V2_TimeoutScale_Seconds = 0, + DAC960_V2_TimeoutScale_Minutes = 1, + DAC960_V2_TimeoutScale_Hours = 2, + DAC960_V2_TimeoutScale_Reserved = 3 + } __attribute__ ((packed)) TimeoutScale:2; /* Byte 0 Bits 6-7 */ +} +DAC960_V2_CommandTimeout_T; + + +/* + Define the DAC960 V2 Firmware Physical Device structure. +*/ + +typedef struct DAC960_V2_PhysicalDevice +{ + unsigned char LogicalUnit; /* Byte 0 */ + unsigned char TargetID; /* Byte 1 */ + unsigned char Channel:3; /* Byte 2 Bits 0-2 */ + unsigned char Controller:5; /* Byte 2 Bits 3-7 */ +} +__attribute__ ((packed)) +DAC960_V2_PhysicalDevice_T; + + +/* + Define the DAC960 V2 Firmware Logical Device structure. 
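+
+  This packed three-byte form addresses a logical device within a command
+  mailbox.  A sketch of filling one in (illustrative only; a single
+  controller configuration would use controller number 0):
+
+      DAC960_V2_LogicalDevice_T LogicalDevice;
+      LogicalDevice.LogicalDeviceNumber = LogicalDeviceNumber;
+      LogicalDevice.Controller = 0;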
+*/ + +typedef struct DAC960_V2_LogicalDevice +{ + unsigned short LogicalDeviceNumber; /* Bytes 0-1 */ + unsigned char :3; /* Byte 2 Bits 0-2 */ + unsigned char Controller:5; /* Byte 2 Bits 3-7 */ +} +__attribute__ ((packed)) +DAC960_V2_LogicalDevice_T; + + +/* + Define the DAC960 V2 Firmware Operation Device type. +*/ + +typedef enum +{ + DAC960_V2_Physical_Device = 0x00, + DAC960_V2_RAID_Device = 0x01, + DAC960_V2_Physical_Channel = 0x02, + DAC960_V2_RAID_Channel = 0x03, + DAC960_V2_Physical_Controller = 0x04, + DAC960_V2_RAID_Controller = 0x05, + DAC960_V2_Configuration_Group = 0x10, + DAC960_V2_Enclosure = 0x11 +} +__attribute__ ((packed)) +DAC960_V2_OperationDevice_T; + + +/* + Define the DAC960 V2 Firmware Translate Physical To Logical Device structure. +*/ + +typedef struct DAC960_V2_PhysicalToLogicalDevice +{ + unsigned short LogicalDeviceNumber; /* Bytes 0-1 */ + unsigned short :16; /* Bytes 2-3 */ + unsigned char PreviousBootController; /* Byte 4 */ + unsigned char PreviousBootChannel; /* Byte 5 */ + unsigned char PreviousBootTargetID; /* Byte 6 */ + unsigned char PreviousBootLogicalUnit; /* Byte 7 */ +} +DAC960_V2_PhysicalToLogicalDevice_T; + + + +/* + Define the DAC960 V2 Firmware Scatter/Gather List Entry structure. +*/ + +typedef struct DAC960_V2_ScatterGatherSegment +{ + DAC960_BusAddress64_T SegmentDataPointer; /* Bytes 0-7 */ + DAC960_ByteCount64_T SegmentByteCount; /* Bytes 8-15 */ +} +DAC960_V2_ScatterGatherSegment_T; + + +/* + Define the DAC960 V2 Firmware Data Transfer Memory Address structure. +*/ + +typedef union DAC960_V2_DataTransferMemoryAddress +{ + DAC960_V2_ScatterGatherSegment_T ScatterGatherSegments[2]; /* Bytes 0-31 */ + struct { + unsigned short ScatterGatherList0Length; /* Bytes 0-1 */ + unsigned short ScatterGatherList1Length; /* Bytes 2-3 */ + unsigned short ScatterGatherList2Length; /* Bytes 4-5 */ + unsigned short :16; /* Bytes 6-7 */ + DAC960_BusAddress64_T ScatterGatherList0Address; /* Bytes 8-15 */ + DAC960_BusAddress64_T ScatterGatherList1Address; /* Bytes 16-23 */ + DAC960_BusAddress64_T ScatterGatherList2Address; /* Bytes 24-31 */ + } ExtendedScatterGather; +} +DAC960_V2_DataTransferMemoryAddress_T; + + +/* + Define the 64 Byte DAC960 V2 Firmware Command Mailbox structure. 
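+
+  All views of this union share Bytes 0-3 (Command Identifier, Command
+  Opcode, and Command Control Bits), which together occupy Words[0]; the
+  mailbox write helpers later in this file copy Words[1] onward first and
+  store Words[0] last, so the controller never observes a partially
+  written command.  A sketch of preparing a Logical Device Info query
+  (illustrative values only; the opcode names are defined elsewhere in
+  this file):
+
+      DAC960_V2_CommandMailbox_T CommandMailbox;
+      memset(&CommandMailbox, 0, sizeof(CommandMailbox));
+      CommandMailbox.LogicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
+      CommandMailbox.LogicalDeviceInfo.IOCTL_Opcode =
+        DAC960_V2_GetLogicalDeviceInfoValid;
+      CommandMailbox.LogicalDeviceInfo.DataTransferSize =
+        sizeof(DAC960_V2_LogicalDeviceInfo_T);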
+*/ + +typedef union DAC960_V2_CommandMailbox +{ + unsigned int Words[16]; /* Words 0-15 */ + struct { + DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */ + DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */ + DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */ + DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */ + unsigned char DataTransferPageNumber; /* Byte 7 */ + DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */ + unsigned int :24; /* Bytes 16-18 */ + DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */ + unsigned char RequestSenseSize; /* Byte 20 */ + unsigned char IOCTL_Opcode; /* Byte 21 */ + unsigned char Reserved[10]; /* Bytes 22-31 */ + DAC960_V2_DataTransferMemoryAddress_T + DataTransferMemoryAddress; /* Bytes 32-63 */ + } Common; + struct { + DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */ + DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */ + DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */ + DAC960_ByteCount32_T DataTransferSize; /* Bytes 4-7 */ + DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */ + DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */ + DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */ + unsigned char RequestSenseSize; /* Byte 20 */ + unsigned char CDBLength; /* Byte 21 */ + unsigned char SCSI_CDB[10]; /* Bytes 22-31 */ + DAC960_V2_DataTransferMemoryAddress_T + DataTransferMemoryAddress; /* Bytes 32-63 */ + } SCSI_10; + struct { + DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */ + DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */ + DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */ + DAC960_ByteCount32_T DataTransferSize; /* Bytes 4-7 */ + DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */ + DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */ + DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */ + unsigned char RequestSenseSize; /* Byte 20 */ + unsigned char CDBLength; /* Byte 21 */ + unsigned short :16; /* Bytes 22-23 */ + DAC960_BusAddress64_T SCSI_CDB_BusAddress; /* Bytes 24-31 */ + DAC960_V2_DataTransferMemoryAddress_T + DataTransferMemoryAddress; /* Bytes 32-63 */ + } SCSI_255; + struct { + DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */ + DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */ + DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */ + DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */ + unsigned char DataTransferPageNumber; /* Byte 7 */ + DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */ + unsigned short :16; /* Bytes 16-17 */ + unsigned char ControllerNumber; /* Byte 18 */ + DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */ + unsigned char RequestSenseSize; /* Byte 20 */ + unsigned char IOCTL_Opcode; /* Byte 21 */ + unsigned char Reserved[10]; /* Bytes 22-31 */ + DAC960_V2_DataTransferMemoryAddress_T + DataTransferMemoryAddress; /* Bytes 32-63 */ + } ControllerInfo; + struct { + DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */ + DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */ + DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */ + DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */ + unsigned char DataTransferPageNumber; /* Byte 7 */ + DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */ + DAC960_V2_LogicalDevice_T LogicalDevice; /* Bytes 16-18 */ + DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */ + unsigned char RequestSenseSize; /* Byte 20 */ + unsigned 
char IOCTL_Opcode; /* Byte 21 */ + unsigned char Reserved[10]; /* Bytes 22-31 */ + DAC960_V2_DataTransferMemoryAddress_T + DataTransferMemoryAddress; /* Bytes 32-63 */ + } LogicalDeviceInfo; + struct { + DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */ + DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */ + DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */ + DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */ + unsigned char DataTransferPageNumber; /* Byte 7 */ + DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */ + DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */ + DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */ + unsigned char RequestSenseSize; /* Byte 20 */ + unsigned char IOCTL_Opcode; /* Byte 21 */ + unsigned char Reserved[10]; /* Bytes 22-31 */ + DAC960_V2_DataTransferMemoryAddress_T + DataTransferMemoryAddress; /* Bytes 32-63 */ + } PhysicalDeviceInfo; + struct { + DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */ + DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */ + DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */ + DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */ + unsigned char DataTransferPageNumber; /* Byte 7 */ + DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */ + unsigned short EventSequenceNumberHigh16; /* Bytes 16-17 */ + unsigned char ControllerNumber; /* Byte 18 */ + DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */ + unsigned char RequestSenseSize; /* Byte 20 */ + unsigned char IOCTL_Opcode; /* Byte 21 */ + unsigned short EventSequenceNumberLow16; /* Bytes 22-23 */ + unsigned char Reserved[8]; /* Bytes 24-31 */ + DAC960_V2_DataTransferMemoryAddress_T + DataTransferMemoryAddress; /* Bytes 32-63 */ + } GetEvent; + struct { + DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */ + DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */ + DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */ + DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */ + unsigned char DataTransferPageNumber; /* Byte 7 */ + DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */ + DAC960_V2_LogicalDevice_T LogicalDevice; /* Bytes 16-18 */ + DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */ + unsigned char RequestSenseSize; /* Byte 20 */ + unsigned char IOCTL_Opcode; /* Byte 21 */ + union { + DAC960_V2_LogicalDeviceState_T LogicalDeviceState; + DAC960_V2_PhysicalDeviceState_T PhysicalDeviceState; + } DeviceState; /* Byte 22 */ + unsigned char Reserved[9]; /* Bytes 23-31 */ + DAC960_V2_DataTransferMemoryAddress_T + DataTransferMemoryAddress; /* Bytes 32-63 */ + } SetDeviceState; + struct { + DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */ + DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */ + DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */ + DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */ + unsigned char DataTransferPageNumber; /* Byte 7 */ + DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */ + DAC960_V2_LogicalDevice_T LogicalDevice; /* Bytes 16-18 */ + DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */ + unsigned char RequestSenseSize; /* Byte 20 */ + unsigned char IOCTL_Opcode; /* Byte 21 */ + bool RestoreConsistency:1; /* Byte 22 Bit 0 */ + bool InitializedAreaOnly:1; /* Byte 22 Bit 1 */ + unsigned char :6; /* Byte 22 Bits 2-7 */ + unsigned char Reserved[9]; /* Bytes 23-31 */ + DAC960_V2_DataTransferMemoryAddress_T + DataTransferMemoryAddress; /* Bytes 32-63 */ + } 
ConsistencyCheck; + struct { + DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */ + DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */ + DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */ + unsigned char FirstCommandMailboxSizeKB; /* Byte 4 */ + unsigned char FirstStatusMailboxSizeKB; /* Byte 5 */ + unsigned char SecondCommandMailboxSizeKB; /* Byte 6 */ + unsigned char SecondStatusMailboxSizeKB; /* Byte 7 */ + DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */ + unsigned int :24; /* Bytes 16-18 */ + DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */ + unsigned char RequestSenseSize; /* Byte 20 */ + unsigned char IOCTL_Opcode; /* Byte 21 */ + unsigned char HealthStatusBufferSizeKB; /* Byte 22 */ + unsigned char :8; /* Byte 23 */ + DAC960_BusAddress64_T HealthStatusBufferBusAddress; /* Bytes 24-31 */ + DAC960_BusAddress64_T FirstCommandMailboxBusAddress; /* Bytes 32-39 */ + DAC960_BusAddress64_T FirstStatusMailboxBusAddress; /* Bytes 40-47 */ + DAC960_BusAddress64_T SecondCommandMailboxBusAddress; /* Bytes 48-55 */ + DAC960_BusAddress64_T SecondStatusMailboxBusAddress; /* Bytes 56-63 */ + } SetMemoryMailbox; + struct { + DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */ + DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */ + DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */ + DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */ + unsigned char DataTransferPageNumber; /* Byte 7 */ + DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */ + DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */ + DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */ + unsigned char RequestSenseSize; /* Byte 20 */ + unsigned char IOCTL_Opcode; /* Byte 21 */ + DAC960_V2_OperationDevice_T OperationDevice; /* Byte 22 */ + unsigned char Reserved[9]; /* Bytes 23-31 */ + DAC960_V2_DataTransferMemoryAddress_T + DataTransferMemoryAddress; /* Bytes 32-63 */ + } DeviceOperation; +} +DAC960_V2_CommandMailbox_T; + + +/* + Define the DAC960 Driver IOCTL requests. +*/ + +#define DAC960_IOCTL_GET_CONTROLLER_COUNT 0xDAC001 +#define DAC960_IOCTL_GET_CONTROLLER_INFO 0xDAC002 +#define DAC960_IOCTL_V1_EXECUTE_COMMAND 0xDAC003 +#define DAC960_IOCTL_V2_EXECUTE_COMMAND 0xDAC004 +#define DAC960_IOCTL_V2_GET_HEALTH_STATUS 0xDAC005 + + +/* + Define the DAC960_IOCTL_GET_CONTROLLER_INFO reply structure. +*/ + +typedef struct DAC960_ControllerInfo +{ + unsigned char ControllerNumber; + unsigned char FirmwareType; + unsigned char Channels; + unsigned char Targets; + unsigned char PCI_Bus; + unsigned char PCI_Device; + unsigned char PCI_Function; + unsigned char IRQ_Channel; + DAC960_PCI_Address_T PCI_Address; + unsigned char ModelName[20]; + unsigned char FirmwareVersion[12]; +} +DAC960_ControllerInfo_T; + + +/* + Define the User Mode DAC960_IOCTL_V1_EXECUTE_COMMAND request structure. +*/ + +typedef struct DAC960_V1_UserCommand +{ + unsigned char ControllerNumber; + DAC960_V1_CommandMailbox_T CommandMailbox; + int DataTransferLength; + void __user *DataTransferBuffer; + DAC960_V1_DCDB_T __user *DCDB; +} +DAC960_V1_UserCommand_T; + + +/* + Define the Kernel Mode DAC960_IOCTL_V1_EXECUTE_COMMAND request structure. 
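+
+  The kernel mode form below mirrors the user mode request but adds the
+  returned CommandStatus and an asynchronous completion callback.  A
+  sketch of a completion function an in-kernel caller might supply
+  (illustrative only):
+
+      static void MyCompletionFunction(DAC960_V1_KernelCommand_T *KernelCommand)
+      {
+        complete((struct completion *) KernelCommand->CompletionData);
+      }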
+*/ + +typedef struct DAC960_V1_KernelCommand +{ + unsigned char ControllerNumber; + DAC960_V1_CommandMailbox_T CommandMailbox; + int DataTransferLength; + void *DataTransferBuffer; + DAC960_V1_DCDB_T *DCDB; + DAC960_V1_CommandStatus_T CommandStatus; + void (*CompletionFunction)(struct DAC960_V1_KernelCommand *); + void *CompletionData; +} +DAC960_V1_KernelCommand_T; + + +/* + Define the User Mode DAC960_IOCTL_V2_EXECUTE_COMMAND request structure. +*/ + +typedef struct DAC960_V2_UserCommand +{ + unsigned char ControllerNumber; + DAC960_V2_CommandMailbox_T CommandMailbox; + int DataTransferLength; + int RequestSenseLength; + void __user *DataTransferBuffer; + void __user *RequestSenseBuffer; +} +DAC960_V2_UserCommand_T; + + +/* + Define the Kernel Mode DAC960_IOCTL_V2_EXECUTE_COMMAND request structure. +*/ + +typedef struct DAC960_V2_KernelCommand +{ + unsigned char ControllerNumber; + DAC960_V2_CommandMailbox_T CommandMailbox; + int DataTransferLength; + int RequestSenseLength; + void *DataTransferBuffer; + void *RequestSenseBuffer; + DAC960_V2_CommandStatus_T CommandStatus; + void (*CompletionFunction)(struct DAC960_V2_KernelCommand *); + void *CompletionData; +} +DAC960_V2_KernelCommand_T; + + +/* + Define the User Mode DAC960_IOCTL_V2_GET_HEALTH_STATUS request structure. +*/ + +typedef struct DAC960_V2_GetHealthStatus +{ + unsigned char ControllerNumber; + DAC960_V2_HealthStatusBuffer_T __user *HealthStatusBuffer; +} +DAC960_V2_GetHealthStatus_T; + + +/* + Import the Kernel Mode IOCTL interface. +*/ + +extern int DAC960_KernelIOCTL(unsigned int Request, void *Argument); + + +/* + DAC960_DriverVersion protects the private portion of this file. +*/ + +#ifdef DAC960_DriverVersion + + +/* + Define the maximum Driver Queue Depth and Controller Queue Depth supported + by DAC960 V1 and V2 Firmware Controllers. +*/ + +#define DAC960_MaxDriverQueueDepth 511 +#define DAC960_MaxControllerQueueDepth 512 + + +/* + Define the maximum number of Scatter/Gather Segments supported for any + DAC960 V1 and V2 Firmware controller. +*/ + +#define DAC960_V1_ScatterGatherLimit 33 +#define DAC960_V2_ScatterGatherLimit 128 + + +/* + Define the number of Command Mailboxes and Status Mailboxes used by the + DAC960 V1 and V2 Firmware Memory Mailbox Interface. +*/ + +#define DAC960_V1_CommandMailboxCount 256 +#define DAC960_V1_StatusMailboxCount 1024 +#define DAC960_V2_CommandMailboxCount 512 +#define DAC960_V2_StatusMailboxCount 512 + + +/* + Define the DAC960 Controller Monitoring Timer Interval. +*/ + +#define DAC960_MonitoringTimerInterval (10 * HZ) + + +/* + Define the DAC960 Controller Secondary Monitoring Interval. +*/ + +#define DAC960_SecondaryMonitoringInterval (60 * HZ) + + +/* + Define the DAC960 Controller Health Status Monitoring Interval. +*/ + +#define DAC960_HealthStatusMonitoringInterval (1 * HZ) + + +/* + Define the DAC960 Controller Progress Reporting Interval. +*/ + +#define DAC960_ProgressReportingInterval (60 * HZ) + + +/* + Define the maximum number of Partitions allowed for each Logical Drive. +*/ + +#define DAC960_MaxPartitions 8 +#define DAC960_MaxPartitionsBits 3 + +/* + Define the DAC960 Controller fixed Block Size and Block Size Bits. +*/ + +#define DAC960_BlockSize 512 +#define DAC960_BlockSizeBits 9 + + +/* + Define the number of Command structures that should be allocated as a + group to optimize kernel memory allocation. 
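+
+  For example, with the V1 group size of 11, command structures are carved
+  out of one allocation eleven at a time rather than allocated
+  individually.  A sizing sketch (illustrative; the EndMarker member of
+  the Command structure below marks the end of the firmware-specific
+  portion being sized):
+
+      CommandAllocationLength = offsetof(DAC960_Command_T, V1.EndMarker);
+      CommandGroupByteCount =
+        DAC960_V1_CommandAllocationGroupSize * CommandAllocationLength;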
+*/ + +#define DAC960_V1_CommandAllocationGroupSize 11 +#define DAC960_V2_CommandAllocationGroupSize 29 + + +/* + Define the Controller Line Buffer, Progress Buffer, User Message, and + Initial Status Buffer sizes. +*/ + +#define DAC960_LineBufferSize 100 +#define DAC960_ProgressBufferSize 200 +#define DAC960_UserMessageSize 200 +#define DAC960_InitialStatusBufferSize (8192-32) + + +/* + Define the DAC960 Controller Firmware Types. +*/ + +typedef enum +{ + DAC960_V1_Controller = 1, + DAC960_V2_Controller = 2 +} +DAC960_FirmwareType_T; + + +/* + Define the DAC960 Controller Hardware Types. +*/ + +typedef enum +{ + DAC960_BA_Controller = 1, /* eXtremeRAID 2000 */ + DAC960_LP_Controller = 2, /* AcceleRAID 352 */ + DAC960_LA_Controller = 3, /* DAC1164P */ + DAC960_PG_Controller = 4, /* DAC960PTL/PJ/PG */ + DAC960_PD_Controller = 5, /* DAC960PU/PD/PL/P */ + DAC960_P_Controller = 6, /* DAC960PU/PD/PL/P */ + DAC960_GEM_Controller = 7, /* AcceleRAID 4/5/600 */ +} +DAC960_HardwareType_T; + + +/* + Define the Driver Message Levels. +*/ + +typedef enum DAC960_MessageLevel +{ + DAC960_AnnounceLevel = 0, + DAC960_InfoLevel = 1, + DAC960_NoticeLevel = 2, + DAC960_WarningLevel = 3, + DAC960_ErrorLevel = 4, + DAC960_ProgressLevel = 5, + DAC960_CriticalLevel = 6, + DAC960_UserCriticalLevel = 7 +} +DAC960_MessageLevel_T; + +static char + *DAC960_MessageLevelMap[] = + { KERN_NOTICE, KERN_NOTICE, KERN_NOTICE, KERN_WARNING, + KERN_ERR, KERN_CRIT, KERN_CRIT, KERN_CRIT }; + + +/* + Define Driver Message macros. +*/ + +#define DAC960_Announce(Format, Arguments...) \ + DAC960_Message(DAC960_AnnounceLevel, Format, ##Arguments) + +#define DAC960_Info(Format, Arguments...) \ + DAC960_Message(DAC960_InfoLevel, Format, ##Arguments) + +#define DAC960_Notice(Format, Arguments...) \ + DAC960_Message(DAC960_NoticeLevel, Format, ##Arguments) + +#define DAC960_Warning(Format, Arguments...) \ + DAC960_Message(DAC960_WarningLevel, Format, ##Arguments) + +#define DAC960_Error(Format, Arguments...) \ + DAC960_Message(DAC960_ErrorLevel, Format, ##Arguments) + +#define DAC960_Progress(Format, Arguments...) \ + DAC960_Message(DAC960_ProgressLevel, Format, ##Arguments) + +#define DAC960_Critical(Format, Arguments...) \ + DAC960_Message(DAC960_CriticalLevel, Format, ##Arguments) + +#define DAC960_UserCritical(Format, Arguments...) \ + DAC960_Message(DAC960_UserCriticalLevel, Format, ##Arguments) + + +struct DAC960_privdata { + DAC960_HardwareType_T HardwareType; + DAC960_FirmwareType_T FirmwareType; + irq_handler_t InterruptHandler; + unsigned int MemoryWindowSize; +}; + + +/* + Define the DAC960 V1 Firmware Controller Status Mailbox structure. +*/ + +typedef union DAC960_V1_StatusMailbox +{ + unsigned int Word; /* Word 0 */ + struct { + DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 0 */ + unsigned char :7; /* Byte 1 Bits 0-6 */ + bool Valid:1; /* Byte 1 Bit 7 */ + DAC960_V1_CommandStatus_T CommandStatus; /* Bytes 2-3 */ + } Fields; +} +DAC960_V1_StatusMailbox_T; + + +/* + Define the DAC960 V2 Firmware Controller Status Mailbox structure. +*/ + +typedef union DAC960_V2_StatusMailbox +{ + unsigned int Words[2]; /* Words 0-1 */ + struct { + DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */ + DAC960_V2_CommandStatus_T CommandStatus; /* Byte 2 */ + unsigned char RequestSenseLength; /* Byte 3 */ + int DataTransferResidue; /* Bytes 4-7 */ + } Fields; +} +DAC960_V2_StatusMailbox_T; + + +/* + Define the DAC960 Driver Command Types. 
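+
+  The Retry variants let the completion path distinguish a retried
+  transfer from a first attempt, and Monitoring commands are the periodic
+  status queries driven by the Monitoring Timer Interval defined above.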
+*/ + +typedef enum +{ + DAC960_ReadCommand = 1, + DAC960_WriteCommand = 2, + DAC960_ReadRetryCommand = 3, + DAC960_WriteRetryCommand = 4, + DAC960_MonitoringCommand = 5, + DAC960_ImmediateCommand = 6, + DAC960_QueuedCommand = 7 +} +DAC960_CommandType_T; + + +/* + Define the DAC960 Driver Command structure. +*/ + +typedef struct DAC960_Command +{ + int CommandIdentifier; + DAC960_CommandType_T CommandType; + struct DAC960_Controller *Controller; + struct DAC960_Command *Next; + struct completion *Completion; + unsigned int LogicalDriveNumber; + unsigned int BlockNumber; + unsigned int BlockCount; + unsigned int SegmentCount; + int DmaDirection; + struct scatterlist *cmd_sglist; + struct request *Request; + union { + struct { + DAC960_V1_CommandMailbox_T CommandMailbox; + DAC960_V1_KernelCommand_T *KernelCommand; + DAC960_V1_CommandStatus_T CommandStatus; + DAC960_V1_ScatterGatherSegment_T *ScatterGatherList; + dma_addr_t ScatterGatherListDMA; + struct scatterlist ScatterList[DAC960_V1_ScatterGatherLimit]; + unsigned int EndMarker[0]; + } V1; + struct { + DAC960_V2_CommandMailbox_T CommandMailbox; + DAC960_V2_KernelCommand_T *KernelCommand; + DAC960_V2_CommandStatus_T CommandStatus; + unsigned char RequestSenseLength; + int DataTransferResidue; + DAC960_V2_ScatterGatherSegment_T *ScatterGatherList; + dma_addr_t ScatterGatherListDMA; + DAC960_SCSI_RequestSense_T *RequestSense; + dma_addr_t RequestSenseDMA; + struct scatterlist ScatterList[DAC960_V2_ScatterGatherLimit]; + unsigned int EndMarker[0]; + } V2; + } FW; +} +DAC960_Command_T; + + +/* + Define the DAC960 Driver Controller structure. +*/ + +typedef struct DAC960_Controller +{ + void __iomem *BaseAddress; + void __iomem *MemoryMappedAddress; + DAC960_FirmwareType_T FirmwareType; + DAC960_HardwareType_T HardwareType; + DAC960_IO_Address_T IO_Address; + DAC960_PCI_Address_T PCI_Address; + struct pci_dev *PCIDevice; + unsigned char ControllerNumber; + unsigned char ControllerName[4]; + unsigned char ModelName[20]; + unsigned char FullModelName[28]; + unsigned char FirmwareVersion[12]; + unsigned char Bus; + unsigned char Device; + unsigned char Function; + unsigned char IRQ_Channel; + unsigned char Channels; + unsigned char Targets; + unsigned char MemorySize; + unsigned char LogicalDriveCount; + unsigned short CommandAllocationGroupSize; + unsigned short ControllerQueueDepth; + unsigned short DriverQueueDepth; + unsigned short MaxBlocksPerCommand; + unsigned short ControllerScatterGatherLimit; + unsigned short DriverScatterGatherLimit; + u64 BounceBufferLimit; + unsigned int CombinedStatusBufferLength; + unsigned int InitialStatusLength; + unsigned int CurrentStatusLength; + unsigned int ProgressBufferLength; + unsigned int UserStatusLength; + struct dma_loaf DmaPages; + unsigned long MonitoringTimerCount; + unsigned long PrimaryMonitoringTime; + unsigned long SecondaryMonitoringTime; + unsigned long ShutdownMonitoringTimer; + unsigned long LastProgressReportTime; + unsigned long LastCurrentStatusTime; + bool ControllerInitialized; + bool MonitoringCommandDeferred; + bool EphemeralProgressMessage; + bool DriveSpinUpMessageDisplayed; + bool MonitoringAlertMode; + bool SuppressEnclosureMessages; + struct timer_list MonitoringTimer; + struct gendisk *disks[DAC960_MaxLogicalDrives]; + struct pci_pool *ScatterGatherPool; + DAC960_Command_T *FreeCommands; + unsigned char *CombinedStatusBuffer; + unsigned char *CurrentStatusBuffer; + struct request_queue *RequestQueue[DAC960_MaxLogicalDrives]; + int req_q_index; + spinlock_t queue_lock; + 
wait_queue_head_t CommandWaitQueue; + wait_queue_head_t HealthStatusWaitQueue; + DAC960_Command_T InitialCommand; + DAC960_Command_T *Commands[DAC960_MaxDriverQueueDepth]; + struct proc_dir_entry *ControllerProcEntry; + bool LogicalDriveInitiallyAccessible[DAC960_MaxLogicalDrives]; + void (*QueueCommand)(DAC960_Command_T *Command); + bool (*ReadControllerConfiguration)(struct DAC960_Controller *); + bool (*ReadDeviceConfiguration)(struct DAC960_Controller *); + bool (*ReportDeviceConfiguration)(struct DAC960_Controller *); + void (*QueueReadWriteCommand)(DAC960_Command_T *Command); + union { + struct { + unsigned char GeometryTranslationHeads; + unsigned char GeometryTranslationSectors; + unsigned char PendingRebuildFlag; + unsigned short StripeSize; + unsigned short SegmentSize; + unsigned short NewEventLogSequenceNumber; + unsigned short OldEventLogSequenceNumber; + unsigned short DeviceStateChannel; + unsigned short DeviceStateTargetID; + bool DualModeMemoryMailboxInterface; + bool BackgroundInitializationStatusSupported; + bool SAFTE_EnclosureManagementEnabled; + bool NeedLogicalDriveInformation; + bool NeedErrorTableInformation; + bool NeedDeviceStateInformation; + bool NeedDeviceInquiryInformation; + bool NeedDeviceSerialNumberInformation; + bool NeedRebuildProgress; + bool NeedConsistencyCheckProgress; + bool NeedBackgroundInitializationStatus; + bool StartDeviceStateScan; + bool RebuildProgressFirst; + bool RebuildFlagPending; + bool RebuildStatusPending; + + dma_addr_t FirstCommandMailboxDMA; + DAC960_V1_CommandMailbox_T *FirstCommandMailbox; + DAC960_V1_CommandMailbox_T *LastCommandMailbox; + DAC960_V1_CommandMailbox_T *NextCommandMailbox; + DAC960_V1_CommandMailbox_T *PreviousCommandMailbox1; + DAC960_V1_CommandMailbox_T *PreviousCommandMailbox2; + + dma_addr_t FirstStatusMailboxDMA; + DAC960_V1_StatusMailbox_T *FirstStatusMailbox; + DAC960_V1_StatusMailbox_T *LastStatusMailbox; + DAC960_V1_StatusMailbox_T *NextStatusMailbox; + + DAC960_V1_DCDB_T *MonitoringDCDB; + dma_addr_t MonitoringDCDB_DMA; + + DAC960_V1_Enquiry_T Enquiry; + DAC960_V1_Enquiry_T *NewEnquiry; + dma_addr_t NewEnquiryDMA; + + DAC960_V1_ErrorTable_T ErrorTable; + DAC960_V1_ErrorTable_T *NewErrorTable; + dma_addr_t NewErrorTableDMA; + + DAC960_V1_EventLogEntry_T *EventLogEntry; + dma_addr_t EventLogEntryDMA; + + DAC960_V1_RebuildProgress_T *RebuildProgress; + dma_addr_t RebuildProgressDMA; + DAC960_V1_CommandStatus_T LastRebuildStatus; + DAC960_V1_CommandStatus_T PendingRebuildStatus; + + DAC960_V1_LogicalDriveInformationArray_T LogicalDriveInformation; + DAC960_V1_LogicalDriveInformationArray_T *NewLogicalDriveInformation; + dma_addr_t NewLogicalDriveInformationDMA; + + DAC960_V1_BackgroundInitializationStatus_T + *BackgroundInitializationStatus; + dma_addr_t BackgroundInitializationStatusDMA; + DAC960_V1_BackgroundInitializationStatus_T + LastBackgroundInitializationStatus; + + DAC960_V1_DeviceState_T + DeviceState[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets]; + DAC960_V1_DeviceState_T *NewDeviceState; + dma_addr_t NewDeviceStateDMA; + + DAC960_SCSI_Inquiry_T + InquiryStandardData[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets]; + DAC960_SCSI_Inquiry_T *NewInquiryStandardData; + dma_addr_t NewInquiryStandardDataDMA; + + DAC960_SCSI_Inquiry_UnitSerialNumber_T + InquiryUnitSerialNumber[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets]; + DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber; + dma_addr_t NewInquiryUnitSerialNumberDMA; + + int DeviceResetCount[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets]; + 
bool DirectCommandActive[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets]; + } V1; + struct { + unsigned int StatusChangeCounter; + unsigned int NextEventSequenceNumber; + unsigned int PhysicalDeviceIndex; + bool NeedLogicalDeviceInformation; + bool NeedPhysicalDeviceInformation; + bool NeedDeviceSerialNumberInformation; + bool StartLogicalDeviceInformationScan; + bool StartPhysicalDeviceInformationScan; + struct pci_pool *RequestSensePool; + + dma_addr_t FirstCommandMailboxDMA; + DAC960_V2_CommandMailbox_T *FirstCommandMailbox; + DAC960_V2_CommandMailbox_T *LastCommandMailbox; + DAC960_V2_CommandMailbox_T *NextCommandMailbox; + DAC960_V2_CommandMailbox_T *PreviousCommandMailbox1; + DAC960_V2_CommandMailbox_T *PreviousCommandMailbox2; + + dma_addr_t FirstStatusMailboxDMA; + DAC960_V2_StatusMailbox_T *FirstStatusMailbox; + DAC960_V2_StatusMailbox_T *LastStatusMailbox; + DAC960_V2_StatusMailbox_T *NextStatusMailbox; + + dma_addr_t HealthStatusBufferDMA; + DAC960_V2_HealthStatusBuffer_T *HealthStatusBuffer; + + DAC960_V2_ControllerInfo_T ControllerInformation; + DAC960_V2_ControllerInfo_T *NewControllerInformation; + dma_addr_t NewControllerInformationDMA; + + DAC960_V2_LogicalDeviceInfo_T + *LogicalDeviceInformation[DAC960_MaxLogicalDrives]; + DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInformation; + dma_addr_t NewLogicalDeviceInformationDMA; + + DAC960_V2_PhysicalDeviceInfo_T + *PhysicalDeviceInformation[DAC960_V2_MaxPhysicalDevices]; + DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInformation; + dma_addr_t NewPhysicalDeviceInformationDMA; + + DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber; + dma_addr_t NewInquiryUnitSerialNumberDMA; + DAC960_SCSI_Inquiry_UnitSerialNumber_T + *InquiryUnitSerialNumber[DAC960_V2_MaxPhysicalDevices]; + + DAC960_V2_Event_T *Event; + dma_addr_t EventDMA; + + DAC960_V2_PhysicalToLogicalDevice_T *PhysicalToLogicalDevice; + dma_addr_t PhysicalToLogicalDeviceDMA; + + DAC960_V2_PhysicalDevice_T + LogicalDriveToVirtualDevice[DAC960_MaxLogicalDrives]; + bool LogicalDriveFoundDuringScan[DAC960_MaxLogicalDrives]; + } V2; + } FW; + unsigned char ProgressBuffer[DAC960_ProgressBufferSize]; + unsigned char UserStatusBuffer[DAC960_UserMessageSize]; +} +DAC960_Controller_T; + + +/* + Simplify access to Firmware Version Dependent Data Structure Components + and Functions. +*/ + +#define V1 FW.V1 +#define V2 FW.V2 +#define DAC960_QueueCommand(Command) \ + (Controller->QueueCommand)(Command) +#define DAC960_ReadControllerConfiguration(Controller) \ + (Controller->ReadControllerConfiguration)(Controller) +#define DAC960_ReadDeviceConfiguration(Controller) \ + (Controller->ReadDeviceConfiguration)(Controller) +#define DAC960_ReportDeviceConfiguration(Controller) \ + (Controller->ReportDeviceConfiguration)(Controller) +#define DAC960_QueueReadWriteCommand(Command) \ + (Controller->QueueReadWriteCommand)(Command) + +/* + * dma_addr_writeql is provided to write dma_addr_t types + * to a 64-bit PCI address space register. The controller + * will accept having the register written as two 32-bit + * values. + * + * In HIGHMEM kernels, dma_addr_t is a 64-bit value. + * Without HIGHMEM, dma_addr_t is a 32-bit value. + * + * The compiler should always fix up the assignment + * to u.wq appropriately, depending upon the size of + * dma_addr_t.
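+ *
+ * Note that the wl[0]/wl[1] split follows host byte order: on a
+ * little-endian host u.wl[0] receives the low-order 32 bits, so the
+ * low word is written at the lower register offset.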
+ */ +static inline +void dma_addr_writeql(dma_addr_t addr, void __iomem *write_address) +{ + union { + u64 wq; + uint wl[2]; + } u; + + u.wq = addr; + + writel(u.wl[0], write_address); + writel(u.wl[1], write_address + 4); +} + +/* + Define the DAC960 GEM Series Controller Interface Register Offsets. + */ + +#define DAC960_GEM_RegisterWindowSize 0x600 + +typedef enum +{ + DAC960_GEM_InboundDoorBellRegisterReadSetOffset = 0x214, + DAC960_GEM_InboundDoorBellRegisterClearOffset = 0x218, + DAC960_GEM_OutboundDoorBellRegisterReadSetOffset = 0x224, + DAC960_GEM_OutboundDoorBellRegisterClearOffset = 0x228, + DAC960_GEM_InterruptStatusRegisterOffset = 0x208, + DAC960_GEM_InterruptMaskRegisterReadSetOffset = 0x22C, + DAC960_GEM_InterruptMaskRegisterClearOffset = 0x230, + DAC960_GEM_CommandMailboxBusAddressOffset = 0x510, + DAC960_GEM_CommandStatusOffset = 0x518, + DAC960_GEM_ErrorStatusRegisterReadSetOffset = 0x224, + DAC960_GEM_ErrorStatusRegisterClearOffset = 0x228, +} +DAC960_GEM_RegisterOffsets_T; + +/* + Define the structure of the DAC960 GEM Series Inbound Door Bell + */ + +typedef union DAC960_GEM_InboundDoorBellRegister +{ + unsigned int All; + struct { + unsigned int :24; + bool HardwareMailboxNewCommand:1; + bool AcknowledgeHardwareMailboxStatus:1; + bool GenerateInterrupt:1; + bool ControllerReset:1; + bool MemoryMailboxNewCommand:1; + unsigned int :3; + } Write; + struct { + unsigned int :24; + bool HardwareMailboxFull:1; + bool InitializationInProgress:1; + unsigned int :6; + } Read; +} +DAC960_GEM_InboundDoorBellRegister_T; + +/* + Define the structure of the DAC960 GEM Series Outbound Door Bell Register. + */ +typedef union DAC960_GEM_OutboundDoorBellRegister +{ + unsigned int All; + struct { + unsigned int :24; + bool AcknowledgeHardwareMailboxInterrupt:1; + bool AcknowledgeMemoryMailboxInterrupt:1; + unsigned int :6; + } Write; + struct { + unsigned int :24; + bool HardwareMailboxStatusAvailable:1; + bool MemoryMailboxStatusAvailable:1; + unsigned int :6; + } Read; +} +DAC960_GEM_OutboundDoorBellRegister_T; + +/* + Define the structure of the DAC960 GEM Series Interrupt Mask Register. + */ +typedef union DAC960_GEM_InterruptMaskRegister +{ + unsigned int All; + struct { + unsigned int :16; + unsigned int :8; + unsigned int HardwareMailboxInterrupt:1; + unsigned int MemoryMailboxInterrupt:1; + unsigned int :6; + } Bits; +} +DAC960_GEM_InterruptMaskRegister_T; + +/* + Define the structure of the DAC960 GEM Series Error Status Register. + */ + +typedef union DAC960_GEM_ErrorStatusRegister +{ + unsigned int All; + struct { + unsigned int :24; + unsigned int :5; + bool ErrorStatusPending:1; + unsigned int :2; + } Bits; +} +DAC960_GEM_ErrorStatusRegister_T; + +/* + Define inline functions to provide an abstraction for reading and writing the + DAC960 GEM Series Controller Interface Registers. 
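+
+  A sketch of the hardware mailbox handshake these helpers support
+  (illustrative only; "base" is the memory-mapped controller base address
+  and "mbox_dma" the bus address of a command mailbox):
+
+      while (DAC960_GEM_HardwareMailboxFullP(base))
+        udelay(1);
+      DAC960_GEM_WriteHardwareMailbox(base, mbox_dma);
+      DAC960_GEM_HardwareMailboxNewCommand(base);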
+*/ + +static inline +void DAC960_GEM_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress) +{ + DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true; + writel(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_GEM_InboundDoorBellRegisterReadSetOffset); +} + +static inline +void DAC960_GEM_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress) +{ + DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true; + writel(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_GEM_InboundDoorBellRegisterClearOffset); +} + +static inline +void DAC960_GEM_GenerateInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.GenerateInterrupt = true; + writel(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_GEM_InboundDoorBellRegisterReadSetOffset); +} + +static inline +void DAC960_GEM_ControllerReset(void __iomem *ControllerBaseAddress) +{ + DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.ControllerReset = true; + writel(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_GEM_InboundDoorBellRegisterReadSetOffset); +} + +static inline +void DAC960_GEM_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress) +{ + DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true; + writel(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_GEM_InboundDoorBellRegisterReadSetOffset); +} + +static inline +bool DAC960_GEM_HardwareMailboxFullP(void __iomem *ControllerBaseAddress) +{ + DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = + readl(ControllerBaseAddress + + DAC960_GEM_InboundDoorBellRegisterReadSetOffset); + return InboundDoorBellRegister.Read.HardwareMailboxFull; +} + +static inline +bool DAC960_GEM_InitializationInProgressP(void __iomem *ControllerBaseAddress) +{ + DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = + readl(ControllerBaseAddress + + DAC960_GEM_InboundDoorBellRegisterReadSetOffset); + return InboundDoorBellRegister.Read.InitializationInProgress; +} + +static inline +void DAC960_GEM_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = 0; + OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true; + writel(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_GEM_OutboundDoorBellRegisterClearOffset); +} + +static inline +void DAC960_GEM_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = 0; + OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true; + writel(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_GEM_OutboundDoorBellRegisterClearOffset); +} + +static inline +void DAC960_GEM_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister; + 
OutboundDoorBellRegister.All = 0; + OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true; + OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true; + writel(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_GEM_OutboundDoorBellRegisterClearOffset); +} + +static inline +bool DAC960_GEM_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress) +{ + DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = + readl(ControllerBaseAddress + + DAC960_GEM_OutboundDoorBellRegisterReadSetOffset); + return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable; +} + +static inline +bool DAC960_GEM_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress) +{ + DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = + readl(ControllerBaseAddress + + DAC960_GEM_OutboundDoorBellRegisterReadSetOffset); + return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable; +} + +static inline +void DAC960_GEM_EnableInterrupts(void __iomem *ControllerBaseAddress) +{ + DAC960_GEM_InterruptMaskRegister_T InterruptMaskRegister; + InterruptMaskRegister.All = 0; + InterruptMaskRegister.Bits.HardwareMailboxInterrupt = true; + InterruptMaskRegister.Bits.MemoryMailboxInterrupt = true; + writel(InterruptMaskRegister.All, + ControllerBaseAddress + DAC960_GEM_InterruptMaskRegisterClearOffset); +} + +static inline +void DAC960_GEM_DisableInterrupts(void __iomem *ControllerBaseAddress) +{ + DAC960_GEM_InterruptMaskRegister_T InterruptMaskRegister; + InterruptMaskRegister.All = 0; + InterruptMaskRegister.Bits.HardwareMailboxInterrupt = true; + InterruptMaskRegister.Bits.MemoryMailboxInterrupt = true; + writel(InterruptMaskRegister.All, + ControllerBaseAddress + DAC960_GEM_InterruptMaskRegisterReadSetOffset); +} + +static inline +bool DAC960_GEM_InterruptsEnabledP(void __iomem *ControllerBaseAddress) +{ + DAC960_GEM_InterruptMaskRegister_T InterruptMaskRegister; + InterruptMaskRegister.All = + readl(ControllerBaseAddress + + DAC960_GEM_InterruptMaskRegisterReadSetOffset); + return !(InterruptMaskRegister.Bits.HardwareMailboxInterrupt || + InterruptMaskRegister.Bits.MemoryMailboxInterrupt); +} + +static inline +void DAC960_GEM_WriteCommandMailbox(DAC960_V2_CommandMailbox_T + *MemoryCommandMailbox, + DAC960_V2_CommandMailbox_T + *CommandMailbox) +{ + memcpy(&MemoryCommandMailbox->Words[1], &CommandMailbox->Words[1], + sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int)); + wmb(); + MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0]; + mb(); +} + +static inline +void DAC960_GEM_WriteHardwareMailbox(void __iomem *ControllerBaseAddress, + dma_addr_t CommandMailboxDMA) +{ + dma_addr_writeql(CommandMailboxDMA, + ControllerBaseAddress + + DAC960_GEM_CommandMailboxBusAddressOffset); +} + +static inline DAC960_V2_CommandIdentifier_T +DAC960_GEM_ReadCommandIdentifier(void __iomem *ControllerBaseAddress) +{ + return readw(ControllerBaseAddress + DAC960_GEM_CommandStatusOffset); +} + +static inline DAC960_V2_CommandStatus_T +DAC960_GEM_ReadCommandStatus(void __iomem *ControllerBaseAddress) +{ + return readw(ControllerBaseAddress + DAC960_GEM_CommandStatusOffset + 2); +} + +static inline bool +DAC960_GEM_ReadErrorStatus(void __iomem *ControllerBaseAddress, + unsigned char *ErrorStatus, + unsigned char *Parameter0, + unsigned char *Parameter1) +{ + DAC960_GEM_ErrorStatusRegister_T ErrorStatusRegister; + ErrorStatusRegister.All = + readl(ControllerBaseAddress + 
DAC960_GEM_ErrorStatusRegisterReadSetOffset); + if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false; + ErrorStatusRegister.Bits.ErrorStatusPending = false; + *ErrorStatus = ErrorStatusRegister.All; + *Parameter0 = + readb(ControllerBaseAddress + DAC960_GEM_CommandMailboxBusAddressOffset + 0); + *Parameter1 = + readb(ControllerBaseAddress + DAC960_GEM_CommandMailboxBusAddressOffset + 1); + writel(0x03000000, ControllerBaseAddress + + DAC960_GEM_ErrorStatusRegisterClearOffset); + return true; +} + +/* + Define the DAC960 BA Series Controller Interface Register Offsets. +*/ + +#define DAC960_BA_RegisterWindowSize 0x80 + +typedef enum +{ + DAC960_BA_InboundDoorBellRegisterOffset = 0x60, + DAC960_BA_OutboundDoorBellRegisterOffset = 0x61, + DAC960_BA_InterruptStatusRegisterOffset = 0x30, + DAC960_BA_InterruptMaskRegisterOffset = 0x34, + DAC960_BA_CommandMailboxBusAddressOffset = 0x50, + DAC960_BA_CommandStatusOffset = 0x58, + DAC960_BA_ErrorStatusRegisterOffset = 0x63 +} +DAC960_BA_RegisterOffsets_T; + + +/* + Define the structure of the DAC960 BA Series Inbound Door Bell Register. +*/ + +typedef union DAC960_BA_InboundDoorBellRegister +{ + unsigned char All; + struct { + bool HardwareMailboxNewCommand:1; /* Bit 0 */ + bool AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */ + bool GenerateInterrupt:1; /* Bit 2 */ + bool ControllerReset:1; /* Bit 3 */ + bool MemoryMailboxNewCommand:1; /* Bit 4 */ + unsigned char :3; /* Bits 5-7 */ + } Write; + struct { + bool HardwareMailboxEmpty:1; /* Bit 0 */ + bool InitializationNotInProgress:1; /* Bit 1 */ + unsigned char :6; /* Bits 2-7 */ + } Read; +} +DAC960_BA_InboundDoorBellRegister_T; + + +/* + Define the structure of the DAC960 BA Series Outbound Door Bell Register. +*/ + +typedef union DAC960_BA_OutboundDoorBellRegister +{ + unsigned char All; + struct { + bool AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */ + bool AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */ + unsigned char :6; /* Bits 2-7 */ + } Write; + struct { + bool HardwareMailboxStatusAvailable:1; /* Bit 0 */ + bool MemoryMailboxStatusAvailable:1; /* Bit 1 */ + unsigned char :6; /* Bits 2-7 */ + } Read; +} +DAC960_BA_OutboundDoorBellRegister_T; + + +/* + Define the structure of the DAC960 BA Series Interrupt Mask Register. +*/ + +typedef union DAC960_BA_InterruptMaskRegister +{ + unsigned char All; + struct { + unsigned int :2; /* Bits 0-1 */ + bool DisableInterrupts:1; /* Bit 2 */ + bool DisableInterruptsI2O:1; /* Bit 3 */ + unsigned int :4; /* Bits 4-7 */ + } Bits; +} +DAC960_BA_InterruptMaskRegister_T; + + +/* + Define the structure of the DAC960 BA Series Error Status Register. +*/ + +typedef union DAC960_BA_ErrorStatusRegister +{ + unsigned char All; + struct { + unsigned int :2; /* Bits 0-1 */ + bool ErrorStatusPending:1; /* Bit 2 */ + unsigned int :5; /* Bits 3-7 */ + } Bits; +} +DAC960_BA_ErrorStatusRegister_T; + + +/* + Define inline functions to provide an abstraction for reading and writing the + DAC960 BA Series Controller Interface Registers. 
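+
+  These mirror the GEM helpers but operate on byte-wide registers.  Note
+  that the BA inbound door bell reads back with inverted sense
+  (HardwareMailboxEmpty, InitializationNotInProgress), so the predicates
+  below negate the register bits before returning.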
+*/ + +static inline +void DAC960_BA_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress) +{ + DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_BA_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress) +{ + DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_BA_GenerateInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.GenerateInterrupt = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_BA_ControllerReset(void __iomem *ControllerBaseAddress) +{ + DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.ControllerReset = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_BA_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress) +{ + DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset); +} + +static inline +bool DAC960_BA_HardwareMailboxFullP(void __iomem *ControllerBaseAddress) +{ + DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = + readb(ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset); + return !InboundDoorBellRegister.Read.HardwareMailboxEmpty; +} + +static inline +bool DAC960_BA_InitializationInProgressP(void __iomem *ControllerBaseAddress) +{ + DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = + readb(ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset); + return !InboundDoorBellRegister.Read.InitializationNotInProgress; +} + +static inline +void DAC960_BA_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = 0; + OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true; + writeb(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset); +} + +static inline +void DAC960_BA_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = 0; + OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true; + writeb(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset); +} + +static inline +void DAC960_BA_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = 0; + 
OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true; + OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true; + writeb(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset); +} + +static inline +bool DAC960_BA_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress) +{ + DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = + readb(ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset); + return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable; +} + +static inline +bool DAC960_BA_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress) +{ + DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = + readb(ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset); + return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable; +} + +static inline +void DAC960_BA_EnableInterrupts(void __iomem *ControllerBaseAddress) +{ + DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister; + InterruptMaskRegister.All = 0xFF; + InterruptMaskRegister.Bits.DisableInterrupts = false; + InterruptMaskRegister.Bits.DisableInterruptsI2O = true; + writeb(InterruptMaskRegister.All, + ControllerBaseAddress + DAC960_BA_InterruptMaskRegisterOffset); +} + +static inline +void DAC960_BA_DisableInterrupts(void __iomem *ControllerBaseAddress) +{ + DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister; + InterruptMaskRegister.All = 0xFF; + InterruptMaskRegister.Bits.DisableInterrupts = true; + InterruptMaskRegister.Bits.DisableInterruptsI2O = true; + writeb(InterruptMaskRegister.All, + ControllerBaseAddress + DAC960_BA_InterruptMaskRegisterOffset); +} + +static inline +bool DAC960_BA_InterruptsEnabledP(void __iomem *ControllerBaseAddress) +{ + DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister; + InterruptMaskRegister.All = + readb(ControllerBaseAddress + DAC960_BA_InterruptMaskRegisterOffset); + return !InterruptMaskRegister.Bits.DisableInterrupts; +} + +static inline +void DAC960_BA_WriteCommandMailbox(DAC960_V2_CommandMailbox_T + *MemoryCommandMailbox, + DAC960_V2_CommandMailbox_T + *CommandMailbox) +{ + memcpy(&MemoryCommandMailbox->Words[1], &CommandMailbox->Words[1], + sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int)); + wmb(); + MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0]; + mb(); +} + + +static inline +void DAC960_BA_WriteHardwareMailbox(void __iomem *ControllerBaseAddress, + dma_addr_t CommandMailboxDMA) +{ + dma_addr_writeql(CommandMailboxDMA, + ControllerBaseAddress + + DAC960_BA_CommandMailboxBusAddressOffset); +} + +static inline DAC960_V2_CommandIdentifier_T +DAC960_BA_ReadCommandIdentifier(void __iomem *ControllerBaseAddress) +{ + return readw(ControllerBaseAddress + DAC960_BA_CommandStatusOffset); +} + +static inline DAC960_V2_CommandStatus_T +DAC960_BA_ReadCommandStatus(void __iomem *ControllerBaseAddress) +{ + return readw(ControllerBaseAddress + DAC960_BA_CommandStatusOffset + 2); +} + +static inline bool +DAC960_BA_ReadErrorStatus(void __iomem *ControllerBaseAddress, + unsigned char *ErrorStatus, + unsigned char *Parameter0, + unsigned char *Parameter1) +{ + DAC960_BA_ErrorStatusRegister_T ErrorStatusRegister; + ErrorStatusRegister.All = + readb(ControllerBaseAddress + DAC960_BA_ErrorStatusRegisterOffset); + if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false; + ErrorStatusRegister.Bits.ErrorStatusPending = false; + *ErrorStatus = 
ErrorStatusRegister.All; + *Parameter0 = + readb(ControllerBaseAddress + DAC960_BA_CommandMailboxBusAddressOffset + 0); + *Parameter1 = + readb(ControllerBaseAddress + DAC960_BA_CommandMailboxBusAddressOffset + 1); + writeb(0xFF, ControllerBaseAddress + DAC960_BA_ErrorStatusRegisterOffset); + return true; +} + + +/* + Define the DAC960 LP Series Controller Interface Register Offsets. +*/ + +#define DAC960_LP_RegisterWindowSize 0x80 + +typedef enum +{ + DAC960_LP_InboundDoorBellRegisterOffset = 0x20, + DAC960_LP_OutboundDoorBellRegisterOffset = 0x2C, + DAC960_LP_InterruptStatusRegisterOffset = 0x30, + DAC960_LP_InterruptMaskRegisterOffset = 0x34, + DAC960_LP_CommandMailboxBusAddressOffset = 0x10, + DAC960_LP_CommandStatusOffset = 0x18, + DAC960_LP_ErrorStatusRegisterOffset = 0x2E +} +DAC960_LP_RegisterOffsets_T; + + +/* + Define the structure of the DAC960 LP Series Inbound Door Bell Register. +*/ + +typedef union DAC960_LP_InboundDoorBellRegister +{ + unsigned char All; + struct { + bool HardwareMailboxNewCommand:1; /* Bit 0 */ + bool AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */ + bool GenerateInterrupt:1; /* Bit 2 */ + bool ControllerReset:1; /* Bit 3 */ + bool MemoryMailboxNewCommand:1; /* Bit 4 */ + unsigned char :3; /* Bits 5-7 */ + } Write; + struct { + bool HardwareMailboxFull:1; /* Bit 0 */ + bool InitializationInProgress:1; /* Bit 1 */ + unsigned char :6; /* Bits 2-7 */ + } Read; +} +DAC960_LP_InboundDoorBellRegister_T; + + +/* + Define the structure of the DAC960 LP Series Outbound Door Bell Register. +*/ + +typedef union DAC960_LP_OutboundDoorBellRegister +{ + unsigned char All; + struct { + bool AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */ + bool AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */ + unsigned char :6; /* Bits 2-7 */ + } Write; + struct { + bool HardwareMailboxStatusAvailable:1; /* Bit 0 */ + bool MemoryMailboxStatusAvailable:1; /* Bit 1 */ + unsigned char :6; /* Bits 2-7 */ + } Read; +} +DAC960_LP_OutboundDoorBellRegister_T; + + +/* + Define the structure of the DAC960 LP Series Interrupt Mask Register. +*/ + +typedef union DAC960_LP_InterruptMaskRegister +{ + unsigned char All; + struct { + unsigned int :2; /* Bits 0-1 */ + bool DisableInterrupts:1; /* Bit 2 */ + unsigned int :5; /* Bits 3-7 */ + } Bits; +} +DAC960_LP_InterruptMaskRegister_T; + + +/* + Define the structure of the DAC960 LP Series Error Status Register. +*/ + +typedef union DAC960_LP_ErrorStatusRegister +{ + unsigned char All; + struct { + unsigned int :2; /* Bits 0-1 */ + bool ErrorStatusPending:1; /* Bit 2 */ + unsigned int :5; /* Bits 3-7 */ + } Bits; +} +DAC960_LP_ErrorStatusRegister_T; + + +/* + Define inline functions to provide an abstraction for reading and writing the + DAC960 LP Series Controller Interface Registers. 
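+
+  Note that the LP read-side doorbell bits have positive sense
+  (HardwareMailboxFull, InitializationInProgress), so the predicates below
+  return them directly, whereas the BA series exposes the inverted
+  HardwareMailboxEmpty and InitializationNotInProgress bits above.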
+*/ + +static inline +void DAC960_LP_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress) +{ + DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_LP_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress) +{ + DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_LP_GenerateInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.GenerateInterrupt = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_LP_ControllerReset(void __iomem *ControllerBaseAddress) +{ + DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.ControllerReset = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_LP_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress) +{ + DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset); +} + +static inline +bool DAC960_LP_HardwareMailboxFullP(void __iomem *ControllerBaseAddress) +{ + DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = + readb(ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset); + return InboundDoorBellRegister.Read.HardwareMailboxFull; +} + +static inline +bool DAC960_LP_InitializationInProgressP(void __iomem *ControllerBaseAddress) +{ + DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = + readb(ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset); + return InboundDoorBellRegister.Read.InitializationInProgress; +} + +static inline +void DAC960_LP_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = 0; + OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true; + writeb(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset); +} + +static inline +void DAC960_LP_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = 0; + OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true; + writeb(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset); +} + +static inline +void DAC960_LP_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = 0; + OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt 
= true; + OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true; + writeb(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset); +} + +static inline +bool DAC960_LP_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress) +{ + DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = + readb(ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset); + return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable; +} + +static inline +bool DAC960_LP_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress) +{ + DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = + readb(ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset); + return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable; +} + +static inline +void DAC960_LP_EnableInterrupts(void __iomem *ControllerBaseAddress) +{ + DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister; + InterruptMaskRegister.All = 0xFF; + InterruptMaskRegister.Bits.DisableInterrupts = false; + writeb(InterruptMaskRegister.All, + ControllerBaseAddress + DAC960_LP_InterruptMaskRegisterOffset); +} + +static inline +void DAC960_LP_DisableInterrupts(void __iomem *ControllerBaseAddress) +{ + DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister; + InterruptMaskRegister.All = 0xFF; + InterruptMaskRegister.Bits.DisableInterrupts = true; + writeb(InterruptMaskRegister.All, + ControllerBaseAddress + DAC960_LP_InterruptMaskRegisterOffset); +} + +static inline +bool DAC960_LP_InterruptsEnabledP(void __iomem *ControllerBaseAddress) +{ + DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister; + InterruptMaskRegister.All = + readb(ControllerBaseAddress + DAC960_LP_InterruptMaskRegisterOffset); + return !InterruptMaskRegister.Bits.DisableInterrupts; +} + +static inline +void DAC960_LP_WriteCommandMailbox(DAC960_V2_CommandMailbox_T + *MemoryCommandMailbox, + DAC960_V2_CommandMailbox_T + *CommandMailbox) +{ + memcpy(&MemoryCommandMailbox->Words[1], &CommandMailbox->Words[1], + sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int)); + wmb(); + MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0]; + mb(); +} + +static inline +void DAC960_LP_WriteHardwareMailbox(void __iomem *ControllerBaseAddress, + dma_addr_t CommandMailboxDMA) +{ + dma_addr_writeql(CommandMailboxDMA, + ControllerBaseAddress + + DAC960_LP_CommandMailboxBusAddressOffset); +} + +static inline DAC960_V2_CommandIdentifier_T +DAC960_LP_ReadCommandIdentifier(void __iomem *ControllerBaseAddress) +{ + return readw(ControllerBaseAddress + DAC960_LP_CommandStatusOffset); +} + +static inline DAC960_V2_CommandStatus_T +DAC960_LP_ReadCommandStatus(void __iomem *ControllerBaseAddress) +{ + return readw(ControllerBaseAddress + DAC960_LP_CommandStatusOffset + 2); +} + +static inline bool +DAC960_LP_ReadErrorStatus(void __iomem *ControllerBaseAddress, + unsigned char *ErrorStatus, + unsigned char *Parameter0, + unsigned char *Parameter1) +{ + DAC960_LP_ErrorStatusRegister_T ErrorStatusRegister; + ErrorStatusRegister.All = + readb(ControllerBaseAddress + DAC960_LP_ErrorStatusRegisterOffset); + if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false; + ErrorStatusRegister.Bits.ErrorStatusPending = false; + *ErrorStatus = ErrorStatusRegister.All; + *Parameter0 = + readb(ControllerBaseAddress + DAC960_LP_CommandMailboxBusAddressOffset + 0); + *Parameter1 = + readb(ControllerBaseAddress + 
DAC960_LP_CommandMailboxBusAddressOffset + 1); + writeb(0xFF, ControllerBaseAddress + DAC960_LP_ErrorStatusRegisterOffset); + return true; +} + + +/* + Define the DAC960 LA Series Controller Interface Register Offsets. +*/ + +#define DAC960_LA_RegisterWindowSize 0x80 + +typedef enum +{ + DAC960_LA_InboundDoorBellRegisterOffset = 0x60, + DAC960_LA_OutboundDoorBellRegisterOffset = 0x61, + DAC960_LA_InterruptMaskRegisterOffset = 0x34, + DAC960_LA_CommandOpcodeRegisterOffset = 0x50, + DAC960_LA_CommandIdentifierRegisterOffset = 0x51, + DAC960_LA_MailboxRegister2Offset = 0x52, + DAC960_LA_MailboxRegister3Offset = 0x53, + DAC960_LA_MailboxRegister4Offset = 0x54, + DAC960_LA_MailboxRegister5Offset = 0x55, + DAC960_LA_MailboxRegister6Offset = 0x56, + DAC960_LA_MailboxRegister7Offset = 0x57, + DAC960_LA_MailboxRegister8Offset = 0x58, + DAC960_LA_MailboxRegister9Offset = 0x59, + DAC960_LA_MailboxRegister10Offset = 0x5A, + DAC960_LA_MailboxRegister11Offset = 0x5B, + DAC960_LA_MailboxRegister12Offset = 0x5C, + DAC960_LA_StatusCommandIdentifierRegOffset = 0x5D, + DAC960_LA_StatusRegisterOffset = 0x5E, + DAC960_LA_ErrorStatusRegisterOffset = 0x63 +} +DAC960_LA_RegisterOffsets_T; + + +/* + Define the structure of the DAC960 LA Series Inbound Door Bell Register. +*/ + +typedef union DAC960_LA_InboundDoorBellRegister +{ + unsigned char All; + struct { + bool HardwareMailboxNewCommand:1; /* Bit 0 */ + bool AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */ + bool GenerateInterrupt:1; /* Bit 2 */ + bool ControllerReset:1; /* Bit 3 */ + bool MemoryMailboxNewCommand:1; /* Bit 4 */ + unsigned char :3; /* Bits 5-7 */ + } Write; + struct { + bool HardwareMailboxEmpty:1; /* Bit 0 */ + bool InitializationNotInProgress:1; /* Bit 1 */ + unsigned char :6; /* Bits 2-7 */ + } Read; +} +DAC960_LA_InboundDoorBellRegister_T; + + +/* + Define the structure of the DAC960 LA Series Outbound Door Bell Register. +*/ + +typedef union DAC960_LA_OutboundDoorBellRegister +{ + unsigned char All; + struct { + bool AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */ + bool AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */ + unsigned char :6; /* Bits 2-7 */ + } Write; + struct { + bool HardwareMailboxStatusAvailable:1; /* Bit 0 */ + bool MemoryMailboxStatusAvailable:1; /* Bit 1 */ + unsigned char :6; /* Bits 2-7 */ + } Read; +} +DAC960_LA_OutboundDoorBellRegister_T; + + +/* + Define the structure of the DAC960 LA Series Interrupt Mask Register. +*/ + +typedef union DAC960_LA_InterruptMaskRegister +{ + unsigned char All; + struct { + unsigned char :2; /* Bits 0-1 */ + bool DisableInterrupts:1; /* Bit 2 */ + unsigned char :5; /* Bits 3-7 */ + } Bits; +} +DAC960_LA_InterruptMaskRegister_T; + + +/* + Define the structure of the DAC960 LA Series Error Status Register. +*/ + +typedef union DAC960_LA_ErrorStatusRegister +{ + unsigned char All; + struct { + unsigned int :2; /* Bits 0-1 */ + bool ErrorStatusPending:1; /* Bit 2 */ + unsigned int :5; /* Bits 3-7 */ + } Bits; +} +DAC960_LA_ErrorStatusRegister_T; + + +/* + Define inline functions to provide an abstraction for reading and writing the + DAC960 LA Series Controller Interface Registers. 
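+
+  The memory mailbox write below stores Words[1] through Words[3] first,
+  issues a write barrier, and only then stores Words[0], which carries the
+  command opcode and identifier, so the controller polling the mailbox can
+  never observe a partially written command.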
+*/ + +static inline +void DAC960_LA_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress) +{ + DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_LA_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress) +{ + DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_LA_GenerateInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.GenerateInterrupt = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_LA_ControllerReset(void __iomem *ControllerBaseAddress) +{ + DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.ControllerReset = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_LA_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress) +{ + DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset); +} + +static inline +bool DAC960_LA_HardwareMailboxFullP(void __iomem *ControllerBaseAddress) +{ + DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = + readb(ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset); + return !InboundDoorBellRegister.Read.HardwareMailboxEmpty; +} + +static inline +bool DAC960_LA_InitializationInProgressP(void __iomem *ControllerBaseAddress) +{ + DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = + readb(ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset); + return !InboundDoorBellRegister.Read.InitializationNotInProgress; +} + +static inline +void DAC960_LA_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = 0; + OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true; + writeb(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset); +} + +static inline +void DAC960_LA_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = 0; + OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true; + writeb(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset); +} + +static inline +void DAC960_LA_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = 0; + 
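+  /* As in the BA series helper above, both acknowledge bits are set so a
+     single doorbell write clears both interrupt sources. */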
OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true; + OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true; + writeb(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset); +} + +static inline +bool DAC960_LA_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress) +{ + DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = + readb(ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset); + return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable; +} + +static inline +bool DAC960_LA_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress) +{ + DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = + readb(ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset); + return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable; +} + +static inline +void DAC960_LA_EnableInterrupts(void __iomem *ControllerBaseAddress) +{ + DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister; + InterruptMaskRegister.All = 0xFF; + InterruptMaskRegister.Bits.DisableInterrupts = false; + writeb(InterruptMaskRegister.All, + ControllerBaseAddress + DAC960_LA_InterruptMaskRegisterOffset); +} + +static inline +void DAC960_LA_DisableInterrupts(void __iomem *ControllerBaseAddress) +{ + DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister; + InterruptMaskRegister.All = 0xFF; + InterruptMaskRegister.Bits.DisableInterrupts = true; + writeb(InterruptMaskRegister.All, + ControllerBaseAddress + DAC960_LA_InterruptMaskRegisterOffset); +} + +static inline +bool DAC960_LA_InterruptsEnabledP(void __iomem *ControllerBaseAddress) +{ + DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister; + InterruptMaskRegister.All = + readb(ControllerBaseAddress + DAC960_LA_InterruptMaskRegisterOffset); + return !InterruptMaskRegister.Bits.DisableInterrupts; +} + +static inline +void DAC960_LA_WriteCommandMailbox(DAC960_V1_CommandMailbox_T + *MemoryCommandMailbox, + DAC960_V1_CommandMailbox_T + *CommandMailbox) +{ + MemoryCommandMailbox->Words[1] = CommandMailbox->Words[1]; + MemoryCommandMailbox->Words[2] = CommandMailbox->Words[2]; + MemoryCommandMailbox->Words[3] = CommandMailbox->Words[3]; + wmb(); + MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0]; + mb(); +} + +static inline +void DAC960_LA_WriteHardwareMailbox(void __iomem *ControllerBaseAddress, + DAC960_V1_CommandMailbox_T *CommandMailbox) +{ + writel(CommandMailbox->Words[0], + ControllerBaseAddress + DAC960_LA_CommandOpcodeRegisterOffset); + writel(CommandMailbox->Words[1], + ControllerBaseAddress + DAC960_LA_MailboxRegister4Offset); + writel(CommandMailbox->Words[2], + ControllerBaseAddress + DAC960_LA_MailboxRegister8Offset); + writeb(CommandMailbox->Bytes[12], + ControllerBaseAddress + DAC960_LA_MailboxRegister12Offset); +} + +static inline DAC960_V1_CommandIdentifier_T +DAC960_LA_ReadStatusCommandIdentifier(void __iomem *ControllerBaseAddress) +{ + return readb(ControllerBaseAddress + + DAC960_LA_StatusCommandIdentifierRegOffset); +} + +static inline DAC960_V1_CommandStatus_T +DAC960_LA_ReadStatusRegister(void __iomem *ControllerBaseAddress) +{ + return readw(ControllerBaseAddress + DAC960_LA_StatusRegisterOffset); +} + +static inline bool +DAC960_LA_ReadErrorStatus(void __iomem *ControllerBaseAddress, + unsigned char *ErrorStatus, + unsigned char *Parameter0, + unsigned char *Parameter1) +{ + DAC960_LA_ErrorStatusRegister_T ErrorStatusRegister; 
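+  /* Error status handshake: if the pending bit is clear there is nothing
+     to report; otherwise latch the status byte, fetch the two parameter
+     bytes from the first two mailbox registers, and write 0xFF back to
+     clear the register. */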
+ ErrorStatusRegister.All = + readb(ControllerBaseAddress + DAC960_LA_ErrorStatusRegisterOffset); + if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false; + ErrorStatusRegister.Bits.ErrorStatusPending = false; + *ErrorStatus = ErrorStatusRegister.All; + *Parameter0 = + readb(ControllerBaseAddress + DAC960_LA_CommandOpcodeRegisterOffset); + *Parameter1 = + readb(ControllerBaseAddress + DAC960_LA_CommandIdentifierRegisterOffset); + writeb(0xFF, ControllerBaseAddress + DAC960_LA_ErrorStatusRegisterOffset); + return true; +} + +/* + Define the DAC960 PG Series Controller Interface Register Offsets. +*/ + +#define DAC960_PG_RegisterWindowSize 0x2000 + +typedef enum +{ + DAC960_PG_InboundDoorBellRegisterOffset = 0x0020, + DAC960_PG_OutboundDoorBellRegisterOffset = 0x002C, + DAC960_PG_InterruptMaskRegisterOffset = 0x0034, + DAC960_PG_CommandOpcodeRegisterOffset = 0x1000, + DAC960_PG_CommandIdentifierRegisterOffset = 0x1001, + DAC960_PG_MailboxRegister2Offset = 0x1002, + DAC960_PG_MailboxRegister3Offset = 0x1003, + DAC960_PG_MailboxRegister4Offset = 0x1004, + DAC960_PG_MailboxRegister5Offset = 0x1005, + DAC960_PG_MailboxRegister6Offset = 0x1006, + DAC960_PG_MailboxRegister7Offset = 0x1007, + DAC960_PG_MailboxRegister8Offset = 0x1008, + DAC960_PG_MailboxRegister9Offset = 0x1009, + DAC960_PG_MailboxRegister10Offset = 0x100A, + DAC960_PG_MailboxRegister11Offset = 0x100B, + DAC960_PG_MailboxRegister12Offset = 0x100C, + DAC960_PG_StatusCommandIdentifierRegOffset = 0x1018, + DAC960_PG_StatusRegisterOffset = 0x101A, + DAC960_PG_ErrorStatusRegisterOffset = 0x103F +} +DAC960_PG_RegisterOffsets_T; + + +/* + Define the structure of the DAC960 PG Series Inbound Door Bell Register. +*/ + +typedef union DAC960_PG_InboundDoorBellRegister +{ + unsigned int All; + struct { + bool HardwareMailboxNewCommand:1; /* Bit 0 */ + bool AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */ + bool GenerateInterrupt:1; /* Bit 2 */ + bool ControllerReset:1; /* Bit 3 */ + bool MemoryMailboxNewCommand:1; /* Bit 4 */ + unsigned int :27; /* Bits 5-31 */ + } Write; + struct { + bool HardwareMailboxFull:1; /* Bit 0 */ + bool InitializationInProgress:1; /* Bit 1 */ + unsigned int :30; /* Bits 2-31 */ + } Read; +} +DAC960_PG_InboundDoorBellRegister_T; + + +/* + Define the structure of the DAC960 PG Series Outbound Door Bell Register. +*/ + +typedef union DAC960_PG_OutboundDoorBellRegister +{ + unsigned int All; + struct { + bool AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */ + bool AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */ + unsigned int :30; /* Bits 2-31 */ + } Write; + struct { + bool HardwareMailboxStatusAvailable:1; /* Bit 0 */ + bool MemoryMailboxStatusAvailable:1; /* Bit 1 */ + unsigned int :30; /* Bits 2-31 */ + } Read; +} +DAC960_PG_OutboundDoorBellRegister_T; + + +/* + Define the structure of the DAC960 PG Series Interrupt Mask Register. +*/ + +typedef union DAC960_PG_InterruptMaskRegister +{ + unsigned int All; + struct { + unsigned int MessageUnitInterruptMask1:2; /* Bits 0-1 */ + bool DisableInterrupts:1; /* Bit 2 */ + unsigned int MessageUnitInterruptMask2:5; /* Bits 3-7 */ + unsigned int Reserved0:24; /* Bits 8-31 */ + } Bits; +} +DAC960_PG_InterruptMaskRegister_T; + + +/* + Define the structure of the DAC960 PG Series Error Status Register. 
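+
+  Unlike the PG doorbell and interrupt mask registers, which are 32 bits
+  wide and accessed with readl/writel, the error status register is a
+  single byte at offset 0x103F accessed with readb/writeb.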
+*/ + +typedef union DAC960_PG_ErrorStatusRegister +{ + unsigned char All; + struct { + unsigned int :2; /* Bits 0-1 */ + bool ErrorStatusPending:1; /* Bit 2 */ + unsigned int :5; /* Bits 3-7 */ + } Bits; +} +DAC960_PG_ErrorStatusRegister_T; + + +/* + Define inline functions to provide an abstraction for reading and writing the + DAC960 PG Series Controller Interface Registers. +*/ + +static inline +void DAC960_PG_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress) +{ + DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true; + writel(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_PG_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress) +{ + DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true; + writel(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_PG_GenerateInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.GenerateInterrupt = true; + writel(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_PG_ControllerReset(void __iomem *ControllerBaseAddress) +{ + DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.ControllerReset = true; + writel(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_PG_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress) +{ + DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true; + writel(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset); +} + +static inline +bool DAC960_PG_HardwareMailboxFullP(void __iomem *ControllerBaseAddress) +{ + DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = + readl(ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset); + return InboundDoorBellRegister.Read.HardwareMailboxFull; +} + +static inline +bool DAC960_PG_InitializationInProgressP(void __iomem *ControllerBaseAddress) +{ + DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = + readl(ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset); + return InboundDoorBellRegister.Read.InitializationInProgress; +} + +static inline +void DAC960_PG_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = 0; + OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true; + writel(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset); +} + +static inline +void DAC960_PG_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = 0; + OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = 
true; + writel(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset); +} + +static inline +void DAC960_PG_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = 0; + OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true; + OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true; + writel(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset); +} + +static inline +bool DAC960_PG_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress) +{ + DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = + readl(ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset); + return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable; +} + +static inline +bool DAC960_PG_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress) +{ + DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = + readl(ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset); + return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable; +} + +static inline +void DAC960_PG_EnableInterrupts(void __iomem *ControllerBaseAddress) +{ + DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister; + InterruptMaskRegister.All = 0; + InterruptMaskRegister.Bits.MessageUnitInterruptMask1 = 0x3; + InterruptMaskRegister.Bits.DisableInterrupts = false; + InterruptMaskRegister.Bits.MessageUnitInterruptMask2 = 0x1F; + writel(InterruptMaskRegister.All, + ControllerBaseAddress + DAC960_PG_InterruptMaskRegisterOffset); +} + +static inline +void DAC960_PG_DisableInterrupts(void __iomem *ControllerBaseAddress) +{ + DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister; + InterruptMaskRegister.All = 0; + InterruptMaskRegister.Bits.MessageUnitInterruptMask1 = 0x3; + InterruptMaskRegister.Bits.DisableInterrupts = true; + InterruptMaskRegister.Bits.MessageUnitInterruptMask2 = 0x1F; + writel(InterruptMaskRegister.All, + ControllerBaseAddress + DAC960_PG_InterruptMaskRegisterOffset); +} + +static inline +bool DAC960_PG_InterruptsEnabledP(void __iomem *ControllerBaseAddress) +{ + DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister; + InterruptMaskRegister.All = + readl(ControllerBaseAddress + DAC960_PG_InterruptMaskRegisterOffset); + return !InterruptMaskRegister.Bits.DisableInterrupts; +} + +static inline +void DAC960_PG_WriteCommandMailbox(DAC960_V1_CommandMailbox_T + *MemoryCommandMailbox, + DAC960_V1_CommandMailbox_T + *CommandMailbox) +{ + MemoryCommandMailbox->Words[1] = CommandMailbox->Words[1]; + MemoryCommandMailbox->Words[2] = CommandMailbox->Words[2]; + MemoryCommandMailbox->Words[3] = CommandMailbox->Words[3]; + wmb(); + MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0]; + mb(); +} + +static inline +void DAC960_PG_WriteHardwareMailbox(void __iomem *ControllerBaseAddress, + DAC960_V1_CommandMailbox_T *CommandMailbox) +{ + writel(CommandMailbox->Words[0], + ControllerBaseAddress + DAC960_PG_CommandOpcodeRegisterOffset); + writel(CommandMailbox->Words[1], + ControllerBaseAddress + DAC960_PG_MailboxRegister4Offset); + writel(CommandMailbox->Words[2], + ControllerBaseAddress + DAC960_PG_MailboxRegister8Offset); + writeb(CommandMailbox->Bytes[12], + ControllerBaseAddress + DAC960_PG_MailboxRegister12Offset); +} + +static inline DAC960_V1_CommandIdentifier_T 
+DAC960_PG_ReadStatusCommandIdentifier(void __iomem *ControllerBaseAddress) +{ + return readb(ControllerBaseAddress + + DAC960_PG_StatusCommandIdentifierRegOffset); +} + +static inline DAC960_V1_CommandStatus_T +DAC960_PG_ReadStatusRegister(void __iomem *ControllerBaseAddress) +{ + return readw(ControllerBaseAddress + DAC960_PG_StatusRegisterOffset); +} + +static inline bool +DAC960_PG_ReadErrorStatus(void __iomem *ControllerBaseAddress, + unsigned char *ErrorStatus, + unsigned char *Parameter0, + unsigned char *Parameter1) +{ + DAC960_PG_ErrorStatusRegister_T ErrorStatusRegister; + ErrorStatusRegister.All = + readb(ControllerBaseAddress + DAC960_PG_ErrorStatusRegisterOffset); + if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false; + ErrorStatusRegister.Bits.ErrorStatusPending = false; + *ErrorStatus = ErrorStatusRegister.All; + *Parameter0 = + readb(ControllerBaseAddress + DAC960_PG_CommandOpcodeRegisterOffset); + *Parameter1 = + readb(ControllerBaseAddress + DAC960_PG_CommandIdentifierRegisterOffset); + writeb(0, ControllerBaseAddress + DAC960_PG_ErrorStatusRegisterOffset); + return true; +} + +/* + Define the DAC960 PD Series Controller Interface Register Offsets. +*/ + +#define DAC960_PD_RegisterWindowSize 0x80 + +typedef enum +{ + DAC960_PD_CommandOpcodeRegisterOffset = 0x00, + DAC960_PD_CommandIdentifierRegisterOffset = 0x01, + DAC960_PD_MailboxRegister2Offset = 0x02, + DAC960_PD_MailboxRegister3Offset = 0x03, + DAC960_PD_MailboxRegister4Offset = 0x04, + DAC960_PD_MailboxRegister5Offset = 0x05, + DAC960_PD_MailboxRegister6Offset = 0x06, + DAC960_PD_MailboxRegister7Offset = 0x07, + DAC960_PD_MailboxRegister8Offset = 0x08, + DAC960_PD_MailboxRegister9Offset = 0x09, + DAC960_PD_MailboxRegister10Offset = 0x0A, + DAC960_PD_MailboxRegister11Offset = 0x0B, + DAC960_PD_MailboxRegister12Offset = 0x0C, + DAC960_PD_StatusCommandIdentifierRegOffset = 0x0D, + DAC960_PD_StatusRegisterOffset = 0x0E, + DAC960_PD_ErrorStatusRegisterOffset = 0x3F, + DAC960_PD_InboundDoorBellRegisterOffset = 0x40, + DAC960_PD_OutboundDoorBellRegisterOffset = 0x41, + DAC960_PD_InterruptEnableRegisterOffset = 0x43 +} +DAC960_PD_RegisterOffsets_T; + + +/* + Define the structure of the DAC960 PD Series Inbound Door Bell Register. +*/ + +typedef union DAC960_PD_InboundDoorBellRegister +{ + unsigned char All; + struct { + bool NewCommand:1; /* Bit 0 */ + bool AcknowledgeStatus:1; /* Bit 1 */ + bool GenerateInterrupt:1; /* Bit 2 */ + bool ControllerReset:1; /* Bit 3 */ + unsigned char :4; /* Bits 4-7 */ + } Write; + struct { + bool MailboxFull:1; /* Bit 0 */ + bool InitializationInProgress:1; /* Bit 1 */ + unsigned char :6; /* Bits 2-7 */ + } Read; +} +DAC960_PD_InboundDoorBellRegister_T; + + +/* + Define the structure of the DAC960 PD Series Outbound Door Bell Register. +*/ + +typedef union DAC960_PD_OutboundDoorBellRegister +{ + unsigned char All; + struct { + bool AcknowledgeInterrupt:1; /* Bit 0 */ + unsigned char :7; /* Bits 1-7 */ + } Write; + struct { + bool StatusAvailable:1; /* Bit 0 */ + unsigned char :7; /* Bits 1-7 */ + } Read; +} +DAC960_PD_OutboundDoorBellRegister_T; + + +/* + Define the structure of the DAC960 PD Series Interrupt Enable Register. +*/ + +typedef union DAC960_PD_InterruptEnableRegister +{ + unsigned char All; + struct { + bool EnableInterrupts:1; /* Bit 0 */ + unsigned char :7; /* Bits 1-7 */ + } Bits; +} +DAC960_PD_InterruptEnableRegister_T; + + +/* + Define the structure of the DAC960 PD Series Error Status Register. 
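+
+  The layout matches the other controller families, with the pending flag
+  in bit 2; note that the PD and PG interfaces clear this register by
+  writing 0, whereas the BA, LP and LA interfaces write 0xFF.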
+*/ + +typedef union DAC960_PD_ErrorStatusRegister +{ + unsigned char All; + struct { + unsigned int :2; /* Bits 0-1 */ + bool ErrorStatusPending:1; /* Bit 2 */ + unsigned int :5; /* Bits 3-7 */ + } Bits; +} +DAC960_PD_ErrorStatusRegister_T; + + +/* + Define inline functions to provide an abstraction for reading and writing the + DAC960 PD Series Controller Interface Registers. +*/ + +static inline +void DAC960_PD_NewCommand(void __iomem *ControllerBaseAddress) +{ + DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.NewCommand = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_PD_AcknowledgeStatus(void __iomem *ControllerBaseAddress) +{ + DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.AcknowledgeStatus = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_PD_GenerateInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.GenerateInterrupt = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset); +} + +static inline +void DAC960_PD_ControllerReset(void __iomem *ControllerBaseAddress) +{ + DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = 0; + InboundDoorBellRegister.Write.ControllerReset = true; + writeb(InboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset); +} + +static inline +bool DAC960_PD_MailboxFullP(void __iomem *ControllerBaseAddress) +{ + DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = + readb(ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset); + return InboundDoorBellRegister.Read.MailboxFull; +} + +static inline +bool DAC960_PD_InitializationInProgressP(void __iomem *ControllerBaseAddress) +{ + DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister; + InboundDoorBellRegister.All = + readb(ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset); + return InboundDoorBellRegister.Read.InitializationInProgress; +} + +static inline +void DAC960_PD_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress) +{ + DAC960_PD_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = 0; + OutboundDoorBellRegister.Write.AcknowledgeInterrupt = true; + writeb(OutboundDoorBellRegister.All, + ControllerBaseAddress + DAC960_PD_OutboundDoorBellRegisterOffset); +} + +static inline +bool DAC960_PD_StatusAvailableP(void __iomem *ControllerBaseAddress) +{ + DAC960_PD_OutboundDoorBellRegister_T OutboundDoorBellRegister; + OutboundDoorBellRegister.All = + readb(ControllerBaseAddress + DAC960_PD_OutboundDoorBellRegisterOffset); + return OutboundDoorBellRegister.Read.StatusAvailable; +} + +static inline +void DAC960_PD_EnableInterrupts(void __iomem *ControllerBaseAddress) +{ + DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister; + InterruptEnableRegister.All = 0; + InterruptEnableRegister.Bits.EnableInterrupts = true; + writeb(InterruptEnableRegister.All, + ControllerBaseAddress + DAC960_PD_InterruptEnableRegisterOffset); +} + +static inline +void DAC960_PD_DisableInterrupts(void __iomem 
*ControllerBaseAddress) +{ + DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister; + InterruptEnableRegister.All = 0; + InterruptEnableRegister.Bits.EnableInterrupts = false; + writeb(InterruptEnableRegister.All, + ControllerBaseAddress + DAC960_PD_InterruptEnableRegisterOffset); +} + +static inline +bool DAC960_PD_InterruptsEnabledP(void __iomem *ControllerBaseAddress) +{ + DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister; + InterruptEnableRegister.All = + readb(ControllerBaseAddress + DAC960_PD_InterruptEnableRegisterOffset); + return InterruptEnableRegister.Bits.EnableInterrupts; +} + +static inline +void DAC960_PD_WriteCommandMailbox(void __iomem *ControllerBaseAddress, + DAC960_V1_CommandMailbox_T *CommandMailbox) +{ + writel(CommandMailbox->Words[0], + ControllerBaseAddress + DAC960_PD_CommandOpcodeRegisterOffset); + writel(CommandMailbox->Words[1], + ControllerBaseAddress + DAC960_PD_MailboxRegister4Offset); + writel(CommandMailbox->Words[2], + ControllerBaseAddress + DAC960_PD_MailboxRegister8Offset); + writeb(CommandMailbox->Bytes[12], + ControllerBaseAddress + DAC960_PD_MailboxRegister12Offset); +} + +static inline DAC960_V1_CommandIdentifier_T +DAC960_PD_ReadStatusCommandIdentifier(void __iomem *ControllerBaseAddress) +{ + return readb(ControllerBaseAddress + + DAC960_PD_StatusCommandIdentifierRegOffset); +} + +static inline DAC960_V1_CommandStatus_T +DAC960_PD_ReadStatusRegister(void __iomem *ControllerBaseAddress) +{ + return readw(ControllerBaseAddress + DAC960_PD_StatusRegisterOffset); +} + +static inline bool +DAC960_PD_ReadErrorStatus(void __iomem *ControllerBaseAddress, + unsigned char *ErrorStatus, + unsigned char *Parameter0, + unsigned char *Parameter1) +{ + DAC960_PD_ErrorStatusRegister_T ErrorStatusRegister; + ErrorStatusRegister.All = + readb(ControllerBaseAddress + DAC960_PD_ErrorStatusRegisterOffset); + if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false; + ErrorStatusRegister.Bits.ErrorStatusPending = false; + *ErrorStatus = ErrorStatusRegister.All; + *Parameter0 = + readb(ControllerBaseAddress + DAC960_PD_CommandOpcodeRegisterOffset); + *Parameter1 = + readb(ControllerBaseAddress + DAC960_PD_CommandIdentifierRegisterOffset); + writeb(0, ControllerBaseAddress + DAC960_PD_ErrorStatusRegisterOffset); + return true; +} + +static inline void DAC960_P_To_PD_TranslateEnquiry(void *Enquiry) +{ + memcpy(Enquiry + 132, Enquiry + 36, 64); + memset(Enquiry + 36, 0, 96); +} + +static inline void DAC960_P_To_PD_TranslateDeviceState(void *DeviceState) +{ + memcpy(DeviceState + 2, DeviceState + 3, 1); + memmove(DeviceState + 4, DeviceState + 5, 2); + memmove(DeviceState + 6, DeviceState + 8, 4); +} + +static inline +void DAC960_PD_To_P_TranslateReadWriteCommand(DAC960_V1_CommandMailbox_T + *CommandMailbox) +{ + int LogicalDriveNumber = CommandMailbox->Type5.LD.LogicalDriveNumber; + CommandMailbox->Bytes[3] &= 0x7; + CommandMailbox->Bytes[3] |= CommandMailbox->Bytes[7] << 6; + CommandMailbox->Bytes[7] = LogicalDriveNumber; +} + +static inline +void DAC960_P_To_PD_TranslateReadWriteCommand(DAC960_V1_CommandMailbox_T + *CommandMailbox) +{ + int LogicalDriveNumber = CommandMailbox->Bytes[7]; + CommandMailbox->Bytes[7] = CommandMailbox->Bytes[3] >> 6; + CommandMailbox->Bytes[3] &= 0x7; + CommandMailbox->Bytes[3] |= LogicalDriveNumber << 3; +} + + +/* + Define prototypes for the forward referenced DAC960 Driver Internal Functions. 
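+
+  These functions are defined later in DAC960.c; the prototypes are needed
+  because the controller initialization, request, and interrupt paths
+  reference them before their definitions appear.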
+*/ + +static void DAC960_FinalizeController(DAC960_Controller_T *); +static void DAC960_V1_QueueReadWriteCommand(DAC960_Command_T *); +static void DAC960_V2_QueueReadWriteCommand(DAC960_Command_T *); +static void DAC960_RequestFunction(struct request_queue *); +static irqreturn_t DAC960_BA_InterruptHandler(int, void *); +static irqreturn_t DAC960_LP_InterruptHandler(int, void *); +static irqreturn_t DAC960_LA_InterruptHandler(int, void *); +static irqreturn_t DAC960_PG_InterruptHandler(int, void *); +static irqreturn_t DAC960_PD_InterruptHandler(int, void *); +static irqreturn_t DAC960_P_InterruptHandler(int, void *); +static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *); +static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *); +static void DAC960_MonitoringTimerFunction(unsigned long); +static void DAC960_Message(DAC960_MessageLevel_T, unsigned char *, + DAC960_Controller_T *, ...); +static void DAC960_CreateProcEntries(DAC960_Controller_T *); +static void DAC960_DestroyProcEntries(DAC960_Controller_T *); + +#endif /* DAC960_DriverVersion */ diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig new file mode 100644 index 0000000..7955944 --- /dev/null +++ b/drivers/block/Kconfig @@ -0,0 +1,474 @@ +# +# Block device driver configuration +# + +menuconfig BLK_DEV + bool "Block devices" + depends on BLOCK + default y + ---help--- + Say Y here to get to see options for various different block device + drivers. This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and disabled; + only do this if you know what you are doing. + +if BLK_DEV + +config BLK_DEV_FD + tristate "Normal floppy disk support" + depends on ARCH_MAY_HAVE_PC_FDC + ---help--- + If you want to use the floppy disk drive(s) of your PC under Linux, + say Y. Information about this driver, especially important for IBM + Thinkpad users, is contained in + <file:Documentation/blockdev/floppy.txt>. + That file also contains the location of the Floppy driver FAQ as + well as location of the fdutils package used to configure additional + parameters of the driver at run time. + + To compile this driver as a module, choose M here: the + module will be called floppy. + +config AMIGA_FLOPPY + tristate "Amiga floppy support" + depends on AMIGA + +config ATARI_FLOPPY + tristate "Atari floppy support" + depends on ATARI + +config MAC_FLOPPY + tristate "Support for PowerMac floppy" + depends on PPC_PMAC && !PPC_PMAC64 + help + If you have a SWIM-3 (Super Woz Integrated Machine 3; from Apple) + floppy controller, say Y here. Most commonly found in PowerMacs. + +config AMIGA_Z2RAM + tristate "Amiga Zorro II ramdisk support" + depends on ZORRO + help + This enables support for using Chip RAM and Zorro II RAM as a + ramdisk or as a swap partition. Say Y if you want to include this + driver in the kernel. + + To compile this driver as a module, choose M here: the + module will be called z2ram. + +config BLK_DEV_XD + tristate "XT hard disk support" + depends on ISA && ISA_DMA_API + select CHECK_SIGNATURE + help + Very old 8 bit hard disk controllers used in the IBM XT computer + will be supported if you say Y here. + + To compile this driver as a module, choose M here: the + module will be called xd. + + It's pretty unlikely that you have one of these: say N. + +config PARIDE + tristate "Parallel port IDE device support" + depends on PARPORT_PC + ---help--- + There are many external CD-ROM and disk devices that connect through + your computer's parallel port. 
Most of them are actually IDE devices + using a parallel port IDE adapter. This option enables the PARIDE + subsystem which contains drivers for many of these external drives. + Read <file:Documentation/blockdev/paride.txt> for more information. + + If you have said Y to the "Parallel-port support" configuration + option, you may share a single port between your printer and other + parallel port devices. Answer Y to build PARIDE support into your + kernel, or M if you would like to build it as a loadable module. If + your parallel port support is in a loadable module, you must build + PARIDE as a module. If you built PARIDE support into your kernel, + you may still build the individual protocol modules and high-level + drivers as loadable modules. If you build this support as a module, + it will be called paride. + + To use the PARIDE support, you must say Y or M here and also to at + least one high-level driver (e.g. "Parallel port IDE disks", + "Parallel port ATAPI CD-ROMs", "Parallel port ATAPI disks" etc.) and + to at least one protocol driver (e.g. "ATEN EH-100 protocol", + "MicroSolutions backpack protocol", "DataStor Commuter protocol" + etc.). + +config GDROM + tristate "SEGA Dreamcast GD-ROM drive" + depends on SH_DREAMCAST + help + A standard SEGA Dreamcast comes with a modified CD ROM drive called a + "GD-ROM" by SEGA to signify it is capable of reading special disks + with up to 1 GB of data. This drive will also read standard CD ROM + disks. Select this option to access any disks in your GD ROM drive. + Most users will want to say "Y" here. + You can also build this as a module which will be called gdrom.ko + +source "drivers/block/paride/Kconfig" + +config BLK_CPQ_DA + tristate "Compaq SMART2 support" + depends on PCI && VIRT_TO_BUS + help + This is the driver for Compaq Smart Array controllers. Everyone + using these boards should say Y here. See the file + <file:Documentation/blockdev/cpqarray.txt> for the current list of + boards supported by this driver, and for further information on the + use of this driver. + +config BLK_CPQ_CISS_DA + tristate "Compaq Smart Array 5xxx support" + depends on PCI + help + This is the driver for Compaq Smart Array 5xxx controllers. + Everyone using these boards should say Y here. + See <file:Documentation/blockdev/cciss.txt> for the current list of + boards supported by this driver, and for further information + on the use of this driver. + +config CISS_SCSI_TAPE + bool "SCSI tape drive support for Smart Array 5xxx" + depends on BLK_CPQ_CISS_DA && PROC_FS + depends on SCSI=y || SCSI=BLK_CPQ_CISS_DA + help + When enabled (Y), this option allows SCSI tape drives and SCSI medium + changers (tape robots) to be accessed via a Compaq 5xxx array + controller. (See <file:Documentation/blockdev/cciss.txt> for more details.) + + "SCSI support" and "SCSI tape support" must also be enabled for this + option to work. + + When this option is disabled (N), the SCSI portion of the driver + is not compiled. + +config BLK_DEV_DAC960 + tristate "Mylex DAC960/DAC1100 PCI RAID Controller support" + depends on PCI + help + This driver adds support for the Mylex DAC960, AcceleRAID, and + eXtremeRAID PCI RAID controllers. See the file + <file:Documentation/blockdev/README.DAC960> for further information + about this driver. + + To compile this driver as a module, choose M here: the + module will be called DAC960. 
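+
+# For example, a modular build sets CONFIG_BLK_DEV_DAC960=m in .config;
+# kbuild then expands the "obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o" rule
+# in the drivers/block Makefile to an obj-m entry and produces DAC960.ko.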
+ +config BLK_DEV_UMEM + tristate "Micro Memory MM5415 Battery Backed RAM support (EXPERIMENTAL)" + depends on PCI && EXPERIMENTAL + ---help--- + Saying Y here will include support for the MM5415 family of + battery backed (Non-volatile) RAM cards. + <http://www.umem.com/> + + The cards appear as block devices that can be partitioned into + as many as 15 partitions. + + To compile this driver as a module, choose M here: the + module will be called umem. + + The umem driver has not yet been allocated a MAJOR number, so + one is chosen dynamically. + +config BLK_DEV_UBD + bool "Virtual block device" + depends on UML + ---help--- + The User-Mode Linux port includes a driver called UBD which will let + you access arbitrary files on the host computer as block devices. + Unless you know that you do not need such virtual block devices say + Y here. + +config BLK_DEV_UBD_SYNC + bool "Always do synchronous disk IO for UBD" + depends on BLK_DEV_UBD + ---help--- + Writes to the virtual block device are not immediately written to the + host's disk; this may cause problems if, for example, the User-Mode + Linux 'Virtual Machine' uses a journalling filesystem and the host + computer crashes. + + Synchronous operation (i.e. always writing data to the host's disk + immediately) is configurable on a per-UBD basis by using a special + kernel command line option. Alternatively, you can say Y here to + turn on synchronous operation by default for all block devices. + + If you're running a journalling file system (like reiserfs, for + example) in your virtual machine, you will want to say Y here. If + you care for the safety of the data in your virtual machine, Y is a + wise choice too. In all other cases (for example, if you're just + playing around with User-Mode Linux) you can choose N. + +config BLK_DEV_COW_COMMON + bool + default BLK_DEV_UBD + +config BLK_DEV_LOOP + tristate "Loopback device support" + ---help--- + Saying Y here will allow you to use a regular file as a block + device; you can then create a file system on that block device and + mount it just as you would mount other block devices such as hard + drive partitions, CD-ROM drives or floppy drives. The loop devices + are block special device files with major number 7 and typically + called /dev/loop0, /dev/loop1 etc. + + This is useful if you want to check an ISO 9660 file system before + burning the CD, or if you want to use floppy images without first + writing them to floppy. Furthermore, some Linux distributions avoid + the need for a dedicated Linux partition by keeping their complete + root file system inside a DOS FAT file using this loop device + driver. + + To use the loop device, you need the losetup utility, found in the + util-linux package, see + <ftp://ftp.kernel.org/pub/linux/utils/util-linux/>. + + The loop device driver can also be used to "hide" a file system in + a disk partition, floppy, or regular file, either using encryption + (scrambling the data) or steganography (hiding the data in the low + bits of, say, a sound file). This is also safe if the file resides + on a remote file server. + + There are several ways of encrypting disks. Some of these require + kernel patches. The vanilla kernel offers the cryptoloop option + and a Device Mapper target (which is superior, as it supports all + file systems). If you want to use the cryptoloop, say Y to both + LOOP and CRYPTOLOOP, and make sure you have a recent (version 2.12 + or later) version of util-linux. 
Additionally, be aware that + the cryptoloop is not safe for storing journaled filesystems. + + Note that this loop device has nothing to do with the loopback + device used for network connections from the machine to itself. + + To compile this driver as a module, choose M here: the + module will be called loop. + + Most users will answer N here. + +config BLK_DEV_CRYPTOLOOP + tristate "Cryptoloop Support" + select CRYPTO + select CRYPTO_CBC + depends on BLK_DEV_LOOP + ---help--- + Say Y here if you want to be able to use the ciphers that are + provided by the CryptoAPI as loop transformation. This might be + used as hard disk encryption. + + WARNING: This device is not safe for journaled file systems like + ext3 or Reiserfs. Please use the Device Mapper crypto module + instead, which can be configured to be on-disk compatible with the + cryptoloop device. + +config BLK_DEV_NBD + tristate "Network block device support" + depends on NET + ---help--- + Saying Y here will allow your computer to be a client for network + block devices, i.e. it will be able to use block devices exported by + servers (mount file systems on them etc.). Communication between + client and server works over TCP/IP networking, but to the client + program this is hidden: it looks like a regular local file access to + a block device special file such as /dev/nd0. + + Network block devices also allows you to run a block-device in + userland (making server and client physically the same computer, + communicating using the loopback network device). + + Read <file:Documentation/blockdev/nbd.txt> for more information, + especially about where to find the server code, which runs in user + space and does not need special kernel support. + + Note that this has nothing to do with the network file systems NFS + or Coda; you can say N here even if you intend to use NFS or Coda. + + To compile this driver as a module, choose M here: the + module will be called nbd. + + If unsure, say N. + +config BLK_DEV_SX8 + tristate "Promise SATA SX8 support" + depends on PCI + ---help--- + Saying Y or M here will enable support for the + Promise SATA SX8 controllers. + + Use devices /dev/sx8/$N and /dev/sx8/$Np$M. + +config BLK_DEV_UB + tristate "Low Performance USB Block driver" + depends on USB + help + This driver supports certain USB attached storage devices + such as flash keys. + + If you enable this driver, it is recommended to avoid conflicts + with usb-storage by enabling USB_LIBUSUAL. + + If unsure, say N. + +config BLK_DEV_RAM + tristate "RAM block device support" + ---help--- + Saying Y here will allow you to use a portion of your RAM memory as + a block device, so that you can make file systems on it, read and + write to it and do all the other things that you can do with normal + block devices (such as hard drives). It is usually used to load and + store a copy of a minimal root file system off of a floppy into RAM + during the initial install of Linux. + + Note that the kernel command line option "ramdisk=XX" is now obsolete. + For details, read <file:Documentation/blockdev/ramdisk.txt>. + + To compile this driver as a module, choose M here: the + module will be called rd. + + Most normal users won't need the RAM disk functionality, and can + thus say N here. + +config BLK_DEV_RAM_COUNT + int "Default number of RAM disks" + default "16" + depends on BLK_DEV_RAM + help + The default value is 16 RAM disks. Change this if you know what you + are doing. 
If you boot from a filesystem that needs to be extracted
+	  in memory, you will need at least one RAM disk (e.g. root on cramfs).
+
+config BLK_DEV_RAM_SIZE
+	int "Default RAM disk size (kbytes)"
+	depends on BLK_DEV_RAM
+	default "4096"
+	help
+	  The default value is 4096 kilobytes. Only change this if you know
+	  what you are doing.
+
+config BLK_DEV_XIP
+	bool "Support XIP filesystems on RAM block device"
+	depends on BLK_DEV_RAM
+	default n
+	help
+	  Support XIP filesystems (such as ext2 with XIP support enabled) on
+	  top of the RAM block device. This will slightly enlarge the kernel,
+	  and will prevent RAM block device backing store memory from being
+	  allocated from highmem (only a problem for highmem systems).
+
+config RD_BZIP2
+	bool "Initial ramdisk compressed using bzip2"
+	default n
+	depends on BLK_DEV_INITRD=y
+	help
+	  Support loading of a bzip2-encoded initial ramdisk or cpio buffer.
+	  If unsure, say N.
+
+config RD_LZMA
+	bool "Initial ramdisk compressed using lzma"
+	default n
+	depends on BLK_DEV_INITRD=y
+	help
+	  Support loading of an lzma-encoded initial ramdisk or cpio buffer.
+	  If unsure, say N.
+
+config RD_GZIP
+	bool "Initial ramdisk compressed using gzip"
+	default y
+	depends on BLK_DEV_INITRD=y
+	select ZLIB_INFLATE
+	help
+	  Support loading of a gzip-encoded initial ramdisk or cpio buffer.
+	  If unsure, say Y.
+
+config CDROM_PKTCDVD
+	tristate "Packet writing on CD/DVD media"
+	depends on !UML
+	help
+	  If you have a CDROM/DVD drive that supports packet writing, say
+	  Y to include support. It should work with any MMC/Mt Fuji
+	  compliant ATAPI or SCSI drive, which is just about any newer
+	  DVD/CD writer.
+
+	  Currently only writing to CD-RW, DVD-RW, DVD+RW and DVD-RAM discs
+	  is possible.
+	  DVD-RW discs must be in restricted overwrite mode.
+
+	  See the file <file:Documentation/cdrom/packet-writing.txt>
+	  for further information on the use of this driver.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called pktcdvd.
+
+config CDROM_PKTCDVD_BUFFERS
+	int "Free buffers for data gathering"
+	depends on CDROM_PKTCDVD
+	default "8"
+	help
+	  This controls the maximum number of active concurrent packets. More
+	  concurrent packets can increase write performance, but also require
+	  more memory. Each concurrent packet will require approximately 64 KB
+	  of non-swappable kernel memory, memory which will be allocated when
+	  a disc is opened for writing.
+
+config CDROM_PKTCDVD_WCACHE
+	bool "Enable write caching (EXPERIMENTAL)"
+	depends on CDROM_PKTCDVD && EXPERIMENTAL
+	help
+	  If enabled, write caching will be set for the CD-R/W device. For now
+	  this option is dangerous unless the CD-RW media is known good, as we
+	  don't do deferred write error handling yet.
+
+config ATA_OVER_ETH
+	tristate "ATA over Ethernet support"
+	depends on NET
+	help
+	  This driver provides support for ATA over Ethernet block
+	  devices like the Coraid EtherDrive (R) Storage Blade.
+
+config SUNVDC
+	tristate "Sun Virtual Disk Client support"
+	depends on SUN_LDOMS
+	help
+	  Support for virtual disk devices as a client under Sun
+	  Logical Domains.
+
+source "drivers/s390/block/Kconfig"
+
+config XILINX_SYSACE
+	tristate "Xilinx SystemACE support"
+	depends on 4xx
+	help
+	  Include support for the Xilinx SystemACE CompactFlash interface.
+
+config XEN_BLKDEV_FRONTEND
+	tristate "Xen virtual block device support"
+	depends on XEN
+	default y
+	help
+	  This driver implements the front-end of the Xen virtual
+	  block device driver.
It communicates with a back-end driver
+	  in another domain which drives the actual block device.
+
+config VIRTIO_BLK
+	tristate "Virtio block driver (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && VIRTIO
+	---help---
+	  This is the virtual block driver for virtio. It can be used with
+	  lguest or QEMU-based VMMs (like KVM or Xen). Say Y or M.
+
+config BLK_DEV_HD
+	bool "Very old hard disk (MFM/RLL/IDE) driver"
+	depends on HAVE_IDE
+	depends on !ARM || ARCH_RPC || ARCH_SHARK || BROKEN
+	help
+	  This is a very old hard disk driver that lacks the enhanced
+	  functionality of the newer ones.
+
+	  It is required for systems with ancient MFM/RLL/ESDI drives.
+
+	  If unsure, say N.
+
+endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
new file mode 100644
index 0000000..204332b
--- /dev/null
+++ b/drivers/block/Makefile
@@ -0,0 +1,34 @@
+#
+# Makefile for the kernel block device drivers.
+#
+# 12 June 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+obj-$(CONFIG_MAC_FLOPPY)	+= swim3.o
+obj-$(CONFIG_BLK_DEV_FD)	+= floppy.o
+obj-$(CONFIG_AMIGA_FLOPPY)	+= amiflop.o
+obj-$(CONFIG_PS3_DISK)		+= ps3disk.o
+obj-$(CONFIG_ATARI_FLOPPY)	+= ataflop.o
+obj-$(CONFIG_AMIGA_Z2RAM)	+= z2ram.o
+obj-$(CONFIG_BLK_DEV_RAM)	+= brd.o
+obj-$(CONFIG_BLK_DEV_LOOP)	+= loop.o
+obj-$(CONFIG_BLK_DEV_XD)	+= xd.o
+obj-$(CONFIG_BLK_CPQ_DA)	+= cpqarray.o
+obj-$(CONFIG_BLK_CPQ_CISS_DA)	+= cciss.o
+obj-$(CONFIG_BLK_DEV_DAC960)	+= DAC960.o
+obj-$(CONFIG_XILINX_SYSACE)	+= xsysace.o
+obj-$(CONFIG_CDROM_PKTCDVD)	+= pktcdvd.o
+obj-$(CONFIG_SUNVDC)		+= sunvdc.o
+
+obj-$(CONFIG_BLK_DEV_UMEM)	+= umem.o
+obj-$(CONFIG_BLK_DEV_NBD)	+= nbd.o
+obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o
+obj-$(CONFIG_VIRTIO_BLK)	+= virtio_blk.o
+
+obj-$(CONFIG_VIODASD)		+= viodasd.o
+obj-$(CONFIG_BLK_DEV_SX8)	+= sx8.o
+obj-$(CONFIG_BLK_DEV_UB)	+= ub.o
+obj-$(CONFIG_BLK_DEV_HD)	+= hd.o
+
+obj-$(CONFIG_XEN_BLKDEV_FRONTEND)	+= xen-blkfront.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
new file mode 100644
index 0000000..4b1d4ac
--- /dev/null
+++ b/drivers/block/amiflop.c
@@ -0,0 +1,1857 @@
+/*
+ *  linux/amiga/amiflop.c
+ *
+ *  Copyright (C) 1993  Greg Harp
+ *  Portions of this driver are based on code contributed by Brad Pepers
+ *
+ *  revised 28.5.95 by Joerg Dorchain
+ *  - now no bugs(?) any more for both HD & DD
+ *  - added support for 40 Track 5.25" drives, 80-track hopefully behaves
+ *    like 3.5" dd (no way to test - are there any 5.25" drives out there
+ *    that work on an A4000?)
+ *  - wrote formatting routine (maybe dirty, but works)
+ *
+ *  june/july 1995 added ms-dos support by Joerg Dorchain
+ *  (portions based on messydos.device and various contributors)
+ *  - currently only 9 and 18 sector disks
+ *
+ *  - fixed a bug with the internal trackbuffer when using multiple
+ *    disks at the same time
+ *  - made formatting a bit safer
+ *  - added command line and machine based default for "silent" df0
+ *
+ *  december 1995 adapted for 1.2.13pl4 by Joerg Dorchain
+ *  - works but I think it's inefficient. (look in redo_fd_request)
+ *    But the changes were very efficient. (only three and a half lines)
+ *
+ *  january 1996 added special ioctl for tracking down read/write problems
+ *  - usage ioctl(d, RAW_TRACK, ptr); the raw track buffer (MFM-encoded
+ *    data) is copied to the area. (area should be large enough since no
+ *    checking is done - 30K is currently sufficient).
return the actual size of the + * trackbuffer + * - replaced udelays() by a timer (CIAA timer B) for the waits + * needed for the disk mechanic. + * + * february 1996 fixed error recovery and multiple disk access + * - both got broken the first time I tampered with the driver :-( + * - still not safe, but better than before + * + * revised Marts 3rd, 1996 by Jes Sorensen for use in the 1.3.28 kernel. + * - Minor changes to accept the kdev_t. + * - Replaced some more udelays with ms_delays. Udelay is just a loop, + * and so the delay will be different depending on the given + * processor :-( + * - The driver could use a major cleanup because of the new + * major/minor handling that came with kdev_t. It seems to work for + * the time being, but I can't guarantee that it will stay like + * that when we start using 16 (24?) bit minors. + * + * restructured jan 1997 by Joerg Dorchain + * - Fixed Bug accessing multiple disks + * - some code cleanup + * - added trackbuffer for each drive to speed things up + * - fixed some race conditions (who finds the next may send it to me ;-) + */ + +#include <linux/module.h> + +#include <linux/fd.h> +#include <linux/hdreg.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/amifdreg.h> +#include <linux/amifd.h> +#include <linux/buffer_head.h> +#include <linux/blkdev.h> +#include <linux/elevator.h> +#include <linux/interrupt.h> + +#include <asm/setup.h> +#include <asm/uaccess.h> +#include <asm/amigahw.h> +#include <asm/amigaints.h> +#include <asm/irq.h> + +#undef DEBUG /* print _LOTS_ of infos */ + +#define RAW_IOCTL +#ifdef RAW_IOCTL +#define IOCTL_RAW_TRACK 0x5254524B /* 'RTRK' */ +#endif + +/* + * Defines + */ + +/* + * Error codes + */ +#define FD_OK 0 /* operation succeeded */ +#define FD_ERROR -1 /* general error (seek, read, write, etc) */ +#define FD_NOUNIT 1 /* unit does not exist */ +#define FD_UNITBUSY 2 /* unit already active */ +#define FD_NOTACTIVE 3 /* unit is not active */ +#define FD_NOTREADY 4 /* unit is not ready (motor not on/no disk) */ + +#define MFM_NOSYNC 1 +#define MFM_HEADER 2 +#define MFM_DATA 3 +#define MFM_TRACK 4 + +/* + * Floppy ID values + */ +#define FD_NODRIVE 0x00000000 /* response when no unit is present */ +#define FD_DD_3 0xffffffff /* double-density 3.5" (880K) drive */ +#define FD_HD_3 0x55555555 /* high-density 3.5" (1760K) drive */ +#define FD_DD_5 0xaaaaaaaa /* double-density 5.25" (440K) drive */ + +static unsigned long int fd_def_df0 = FD_DD_3; /* default for df0 if it doesn't identify */ + +module_param(fd_def_df0, ulong, 0); +MODULE_LICENSE("GPL"); + +static struct request_queue *floppy_queue; +#define QUEUE (floppy_queue) +#define CURRENT elv_next_request(floppy_queue) + +/* + * Macros + */ +#define MOTOR_ON (ciab.prb &= ~DSKMOTOR) +#define MOTOR_OFF (ciab.prb |= DSKMOTOR) +#define SELECT(mask) (ciab.prb &= ~mask) +#define DESELECT(mask) (ciab.prb |= mask) +#define SELMASK(drive) (1 << (3 + (drive & 3))) + +static struct fd_drive_type drive_types[] = { +/* code name tr he rdsz wrsz sm pc1 pc2 sd st st*/ +/* warning: times are now in milliseconds (ms) */ +{ FD_DD_3, "DD 3.5", 80, 2, 14716, 13630, 1, 80,161, 3, 18, 1}, +{ FD_HD_3, "HD 3.5", 80, 2, 28344, 27258, 2, 80,161, 3, 18, 1}, +{ FD_DD_5, "DD 5.25", 40, 2, 14716, 13630, 1, 40, 81, 6, 30, 2}, +{ FD_NODRIVE, "No Drive", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} +}; +static int num_dr_types = ARRAY_SIZE(drive_types); + +static int amiga_read(int), dos_read(int); +static void amiga_write(int), dos_write(int); +static struct fd_data_type data_types[] = { + 
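+/*	  name		sectors/track	read function	write function */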
{ "Amiga", 11 , amiga_read, amiga_write}, + { "MS-Dos", 9, dos_read, dos_write} +}; + +/* current info on each unit */ +static struct amiga_floppy_struct unit[FD_MAX_UNITS]; + +static struct timer_list flush_track_timer[FD_MAX_UNITS]; +static struct timer_list post_write_timer; +static struct timer_list motor_on_timer; +static struct timer_list motor_off_timer[FD_MAX_UNITS]; +static int on_attempts; + +/* Synchronization of FDC access */ +/* request loop (trackbuffer) */ +static volatile int fdc_busy = -1; +static volatile int fdc_nested; +static DECLARE_WAIT_QUEUE_HEAD(fdc_wait); + +static DECLARE_WAIT_QUEUE_HEAD(motor_wait); + +static volatile int selected = -1; /* currently selected drive */ + +static int writepending; +static int writefromint; +static char *raw_buf; + +static DEFINE_SPINLOCK(amiflop_lock); + +#define RAW_BUF_SIZE 30000 /* size of raw disk data */ + +/* + * These are global variables, as that's the easiest way to give + * information to interrupts. They are the data used for the current + * request. + */ +static volatile char block_flag; +static DECLARE_WAIT_QUEUE_HEAD(wait_fd_block); + +/* MS-Dos MFM Coding tables (should go quick and easy) */ +static unsigned char mfmencode[16]={ + 0x2a, 0x29, 0x24, 0x25, 0x12, 0x11, 0x14, 0x15, + 0x4a, 0x49, 0x44, 0x45, 0x52, 0x51, 0x54, 0x55 +}; +static unsigned char mfmdecode[128]; + +/* floppy internal millisecond timer stuff */ +static volatile int ms_busy = -1; +static DECLARE_WAIT_QUEUE_HEAD(ms_wait); +#define MS_TICKS ((amiga_eclock+50)/1000) + +/* + * Note that MAX_ERRORS=X doesn't imply that we retry every bad read + * max X times - some types of errors increase the errorcount by 2 or + * even 3, so we might actually retry only X/2 times before giving up. + */ +#define MAX_ERRORS 12 + +#define custom amiga_custom + +/* Prevent "aliased" accesses. */ +static int fd_ref[4] = { 0,0,0,0 }; +static int fd_device[4] = { 0, 0, 0, 0 }; + +/* + * Here come the actual hardware access and helper functions. + * They are not reentrant and single threaded because all drives + * share the same hardware and the same trackbuffer. 
+ */ + +/* Milliseconds timer */ + +static irqreturn_t ms_isr(int irq, void *dummy) +{ + ms_busy = -1; + wake_up(&ms_wait); + return IRQ_HANDLED; +} + +/* all waits are queued up + A more generic routine would do a schedule a la timer.device */ +static void ms_delay(int ms) +{ + unsigned long flags; + int ticks; + if (ms > 0) { + local_irq_save(flags); + while (ms_busy == 0) + sleep_on(&ms_wait); + ms_busy = 0; + local_irq_restore(flags); + ticks = MS_TICKS*ms-1; + ciaa.tblo=ticks%256; + ciaa.tbhi=ticks/256; + ciaa.crb=0x19; /*count eclock, force load, one-shoot, start */ + sleep_on(&ms_wait); + } +} + +/* Hardware semaphore */ + +/* returns true when we would get the semaphore */ +static inline int try_fdc(int drive) +{ + drive &= 3; + return ((fdc_busy < 0) || (fdc_busy == drive)); +} + +static void get_fdc(int drive) +{ + unsigned long flags; + + drive &= 3; +#ifdef DEBUG + printk("get_fdc: drive %d fdc_busy %d fdc_nested %d\n",drive,fdc_busy,fdc_nested); +#endif + local_irq_save(flags); + while (!try_fdc(drive)) + sleep_on(&fdc_wait); + fdc_busy = drive; + fdc_nested++; + local_irq_restore(flags); +} + +static inline void rel_fdc(void) +{ +#ifdef DEBUG + if (fdc_nested == 0) + printk("fd: unmatched rel_fdc\n"); + printk("rel_fdc: fdc_busy %d fdc_nested %d\n",fdc_busy,fdc_nested); +#endif + fdc_nested--; + if (fdc_nested == 0) { + fdc_busy = -1; + wake_up(&fdc_wait); + } +} + +static void fd_select (int drive) +{ + unsigned char prb = ~0; + + drive&=3; +#ifdef DEBUG + printk("selecting %d\n",drive); +#endif + if (drive == selected) + return; + get_fdc(drive); + selected = drive; + + if (unit[drive].track % 2 != 0) + prb &= ~DSKSIDE; + if (unit[drive].motor == 1) + prb &= ~DSKMOTOR; + ciab.prb |= (SELMASK(0)|SELMASK(1)|SELMASK(2)|SELMASK(3)); + ciab.prb = prb; + prb &= ~SELMASK(drive); + ciab.prb = prb; + rel_fdc(); +} + +static void fd_deselect (int drive) +{ + unsigned char prb; + unsigned long flags; + + drive&=3; +#ifdef DEBUG + printk("deselecting %d\n",drive); +#endif + if (drive != selected) { + printk(KERN_WARNING "Deselecting drive %d while %d was selected!\n",drive,selected); + return; + } + + get_fdc(drive); + local_irq_save(flags); + + selected = -1; + + prb = ciab.prb; + prb |= (SELMASK(0)|SELMASK(1)|SELMASK(2)|SELMASK(3)); + ciab.prb = prb; + + local_irq_restore (flags); + rel_fdc(); + +} + +static void motor_on_callback(unsigned long nr) +{ + if (!(ciaa.pra & DSKRDY) || --on_attempts == 0) { + wake_up (&motor_wait); + } else { + motor_on_timer.expires = jiffies + HZ/10; + add_timer(&motor_on_timer); + } +} + +static int fd_motor_on(int nr) +{ + nr &= 3; + + del_timer(motor_off_timer + nr); + + if (!unit[nr].motor) { + unit[nr].motor = 1; + fd_select(nr); + + motor_on_timer.data = nr; + mod_timer(&motor_on_timer, jiffies + HZ/2); + + on_attempts = 10; + sleep_on (&motor_wait); + fd_deselect(nr); + } + + if (on_attempts == 0) { + on_attempts = -1; +#if 0 + printk (KERN_ERR "motor_on failed, turning motor off\n"); + fd_motor_off (nr); + return 0; +#else + printk (KERN_WARNING "DSKRDY not set after 1.5 seconds - assuming drive is spinning notwithstanding\n"); +#endif + } + + return 1; +} + +static void fd_motor_off(unsigned long drive) +{ + long calledfromint; +#ifdef MODULE + long decusecount; + + decusecount = drive & 0x40000000; +#endif + calledfromint = drive & 0x80000000; + drive&=3; + if (calledfromint && !try_fdc(drive)) { + /* We would be blocked in an interrupt, so try again later */ + motor_off_timer[drive].expires = jiffies + 1; + add_timer(motor_off_timer + drive); 
+ return; + } + unit[drive].motor = 0; + fd_select(drive); + udelay (1); + fd_deselect(drive); +} + +static void floppy_off (unsigned int nr) +{ + int drive; + + drive = nr & 3; + /* called this way it is always from interrupt */ + motor_off_timer[drive].data = nr | 0x80000000; + mod_timer(motor_off_timer + drive, jiffies + 3*HZ); +} + +static int fd_calibrate(int drive) +{ + unsigned char prb; + int n; + + drive &= 3; + get_fdc(drive); + if (!fd_motor_on (drive)) + return 0; + fd_select (drive); + prb = ciab.prb; + prb |= DSKSIDE; + prb &= ~DSKDIREC; + ciab.prb = prb; + for (n = unit[drive].type->tracks/2; n != 0; --n) { + if (ciaa.pra & DSKTRACK0) + break; + prb &= ~DSKSTEP; + ciab.prb = prb; + prb |= DSKSTEP; + udelay (2); + ciab.prb = prb; + ms_delay(unit[drive].type->step_delay); + } + ms_delay (unit[drive].type->settle_time); + prb |= DSKDIREC; + n = unit[drive].type->tracks + 20; + for (;;) { + prb &= ~DSKSTEP; + ciab.prb = prb; + prb |= DSKSTEP; + udelay (2); + ciab.prb = prb; + ms_delay(unit[drive].type->step_delay + 1); + if ((ciaa.pra & DSKTRACK0) == 0) + break; + if (--n == 0) { + printk (KERN_ERR "fd%d: calibrate failed, turning motor off\n", drive); + fd_motor_off (drive); + unit[drive].track = -1; + rel_fdc(); + return 0; + } + } + unit[drive].track = 0; + ms_delay(unit[drive].type->settle_time); + + rel_fdc(); + fd_deselect(drive); + return 1; +} + +static int fd_seek(int drive, int track) +{ + unsigned char prb; + int cnt; + +#ifdef DEBUG + printk("seeking drive %d to track %d\n",drive,track); +#endif + drive &= 3; + get_fdc(drive); + if (unit[drive].track == track) { + rel_fdc(); + return 1; + } + if (!fd_motor_on(drive)) { + rel_fdc(); + return 0; + } + if (unit[drive].track < 0 && !fd_calibrate(drive)) { + rel_fdc(); + return 0; + } + + fd_select (drive); + cnt = unit[drive].track/2 - track/2; + prb = ciab.prb; + prb |= DSKSIDE | DSKDIREC; + if (track % 2 != 0) + prb &= ~DSKSIDE; + if (cnt < 0) { + cnt = - cnt; + prb &= ~DSKDIREC; + } + ciab.prb = prb; + if (track % 2 != unit[drive].track % 2) + ms_delay (unit[drive].type->side_time); + unit[drive].track = track; + if (cnt == 0) { + rel_fdc(); + fd_deselect(drive); + return 1; + } + do { + prb &= ~DSKSTEP; + ciab.prb = prb; + prb |= DSKSTEP; + udelay (1); + ciab.prb = prb; + ms_delay (unit[drive].type->step_delay); + } while (--cnt != 0); + ms_delay (unit[drive].type->settle_time); + + rel_fdc(); + fd_deselect(drive); + return 1; +} + +static unsigned long fd_get_drive_id(int drive) +{ + int i; + ulong id = 0; + + drive&=3; + get_fdc(drive); + /* set up for ID */ + MOTOR_ON; + udelay(2); + SELECT(SELMASK(drive)); + udelay(2); + DESELECT(SELMASK(drive)); + udelay(2); + MOTOR_OFF; + udelay(2); + SELECT(SELMASK(drive)); + udelay(2); + DESELECT(SELMASK(drive)); + udelay(2); + + /* loop and read disk ID */ + for (i=0; i<32; i++) { + SELECT(SELMASK(drive)); + udelay(2); + + /* read and store value of DSKRDY */ + id <<= 1; + id |= (ciaa.pra & DSKRDY) ? 0 : 1; /* cia regs are low-active! */ + + DESELECT(SELMASK(drive)); + } + + rel_fdc(); + + /* + * RB: At least A500/A2000's df0: don't identify themselves. + * As every (real) Amiga has at least a 3.5" DD drive as df0: + * we default to that if df0: doesn't identify as a certain + * type. 
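+	 * The compiled-in default can be overridden with the fd_def_df0
+	 * module parameter or, when the driver is built in, with the
+	 * "floppy=" kernel command line option (see amiga_floppy_setup()
+	 * at the end of this file).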
+ */ + if(drive == 0 && id == FD_NODRIVE) + { + id = fd_def_df0; + printk(KERN_NOTICE "fd: drive 0 didn't identify, setting default %08lx\n", (ulong)fd_def_df0); + } + /* return the ID value */ + return (id); +} + +static irqreturn_t fd_block_done(int irq, void *dummy) +{ + if (block_flag) + custom.dsklen = 0x4000; + + if (block_flag == 2) { /* writing */ + writepending = 2; + post_write_timer.expires = jiffies + 1; /* at least 2 ms */ + post_write_timer.data = selected; + add_timer(&post_write_timer); + } + else { /* reading */ + block_flag = 0; + wake_up (&wait_fd_block); + } + return IRQ_HANDLED; +} + +static void raw_read(int drive) +{ + drive&=3; + get_fdc(drive); + while (block_flag) + sleep_on(&wait_fd_block); + fd_select(drive); + /* setup adkcon bits correctly */ + custom.adkcon = ADK_MSBSYNC; + custom.adkcon = ADK_SETCLR|ADK_WORDSYNC|ADK_FAST; + + custom.dsksync = MFM_SYNC; + + custom.dsklen = 0; + custom.dskptr = (u_char *)ZTWO_PADDR((u_char *)raw_buf); + custom.dsklen = unit[drive].type->read_size/sizeof(short) | DSKLEN_DMAEN; + custom.dsklen = unit[drive].type->read_size/sizeof(short) | DSKLEN_DMAEN; + + block_flag = 1; + + while (block_flag) + sleep_on (&wait_fd_block); + + custom.dsklen = 0; + fd_deselect(drive); + rel_fdc(); +} + +static int raw_write(int drive) +{ + ushort adk; + + drive&=3; + get_fdc(drive); /* corresponds to rel_fdc() in post_write() */ + if ((ciaa.pra & DSKPROT) == 0) { + rel_fdc(); + return 0; + } + while (block_flag) + sleep_on(&wait_fd_block); + fd_select(drive); + /* clear adkcon bits */ + custom.adkcon = ADK_PRECOMP1|ADK_PRECOMP0|ADK_WORDSYNC|ADK_MSBSYNC; + /* set appropriate adkcon bits */ + adk = ADK_SETCLR|ADK_FAST; + if ((ulong)unit[drive].track >= unit[drive].type->precomp2) + adk |= ADK_PRECOMP1; + else if ((ulong)unit[drive].track >= unit[drive].type->precomp1) + adk |= ADK_PRECOMP0; + custom.adkcon = adk; + + custom.dsklen = DSKLEN_WRITE; + custom.dskptr = (u_char *)ZTWO_PADDR((u_char *)raw_buf); + custom.dsklen = unit[drive].type->write_size/sizeof(short) | DSKLEN_DMAEN|DSKLEN_WRITE; + custom.dsklen = unit[drive].type->write_size/sizeof(short) | DSKLEN_DMAEN|DSKLEN_WRITE; + + block_flag = 2; + return 1; +} + +/* + * to be called at least 2ms after the write has finished but before any + * other access to the hardware. + */ +static void post_write (unsigned long drive) +{ +#ifdef DEBUG + printk("post_write for drive %ld\n",drive); +#endif + drive &= 3; + custom.dsklen = 0; + block_flag = 0; + writepending = 0; + writefromint = 0; + unit[drive].dirty = 0; + wake_up(&wait_fd_block); + fd_deselect(drive); + rel_fdc(); /* corresponds to get_fdc() in raw_write */ +} + + +/* + * The following functions are to convert the block contents into raw data + * written to disk and vice versa. 
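+ * Amiga MFM stores each data longword as two MFM longwords: first one
+ * carrying the odd-numbered data bits, then one carrying the even-numbered
+ * bits, each in the data positions of the clock/data bit pairs (mask
+ * 0x55555555).  decode() below merges the two halves back together, and
+ * checksum() XORs the longwords of a block, folding the clock positions
+ * onto the data positions.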
+ * (Add other formats here ;-)) + */ + +static unsigned long scan_sync(unsigned long raw, unsigned long end) +{ + ushort *ptr = (ushort *)raw, *endp = (ushort *)end; + + while (ptr < endp && *ptr++ != 0x4489) + ; + if (ptr < endp) { + while (*ptr == 0x4489 && ptr < endp) + ptr++; + return (ulong)ptr; + } + return 0; +} + +static inline unsigned long checksum(unsigned long *addr, int len) +{ + unsigned long csum = 0; + + len /= sizeof(*addr); + while (len-- > 0) + csum ^= *addr++; + csum = ((csum>>1) & 0x55555555) ^ (csum & 0x55555555); + + return csum; +} + +static unsigned long decode (unsigned long *data, unsigned long *raw, + int len) +{ + ulong *odd, *even; + + /* convert length from bytes to longwords */ + len >>= 2; + odd = raw; + even = odd + len; + + /* prepare return pointer */ + raw += len * 2; + + do { + *data++ = ((*odd++ & 0x55555555) << 1) | (*even++ & 0x55555555); + } while (--len != 0); + + return (ulong)raw; +} + +struct header { + unsigned char magic; + unsigned char track; + unsigned char sect; + unsigned char ord; + unsigned char labels[16]; + unsigned long hdrchk; + unsigned long datachk; +}; + +static int amiga_read(int drive) +{ + unsigned long raw; + unsigned long end; + int scnt; + unsigned long csum; + struct header hdr; + + drive&=3; + raw = (long) raw_buf; + end = raw + unit[drive].type->read_size; + + for (scnt = 0;scnt < unit[drive].dtype->sects * unit[drive].type->sect_mult; scnt++) { + if (!(raw = scan_sync(raw, end))) { + printk (KERN_INFO "can't find sync for sector %d\n", scnt); + return MFM_NOSYNC; + } + + raw = decode ((ulong *)&hdr.magic, (ulong *)raw, 4); + raw = decode ((ulong *)&hdr.labels, (ulong *)raw, 16); + raw = decode ((ulong *)&hdr.hdrchk, (ulong *)raw, 4); + raw = decode ((ulong *)&hdr.datachk, (ulong *)raw, 4); + csum = checksum((ulong *)&hdr, + (char *)&hdr.hdrchk-(char *)&hdr); + +#ifdef DEBUG + printk ("(%x,%d,%d,%d) (%lx,%lx,%lx,%lx) %lx %lx\n", + hdr.magic, hdr.track, hdr.sect, hdr.ord, + *(ulong *)&hdr.labels[0], *(ulong *)&hdr.labels[4], + *(ulong *)&hdr.labels[8], *(ulong *)&hdr.labels[12], + hdr.hdrchk, hdr.datachk); +#endif + + if (hdr.hdrchk != csum) { + printk(KERN_INFO "MFM_HEADER: %08lx,%08lx\n", hdr.hdrchk, csum); + return MFM_HEADER; + } + + /* verify track */ + if (hdr.track != unit[drive].track) { + printk(KERN_INFO "MFM_TRACK: %d, %d\n", hdr.track, unit[drive].track); + return MFM_TRACK; + } + + raw = decode ((ulong *)(unit[drive].trackbuf + hdr.sect*512), + (ulong *)raw, 512); + csum = checksum((ulong *)(unit[drive].trackbuf + hdr.sect*512), 512); + + if (hdr.datachk != csum) { + printk(KERN_INFO "MFM_DATA: (%x:%d:%d:%d) sc=%d %lx, %lx\n", + hdr.magic, hdr.track, hdr.sect, hdr.ord, scnt, + hdr.datachk, csum); + printk (KERN_INFO "data=(%lx,%lx,%lx,%lx)\n", + ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[0], + ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[1], + ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[2], + ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[3]); + return MFM_DATA; + } + } + + return 0; +} + +static void encode(unsigned long data, unsigned long *dest) +{ + unsigned long data2; + + data &= 0x55555555; + data2 = data ^ 0x55555555; + data |= ((data2 >> 1) | 0x80000000) & (data2 << 1); + + if (*(dest - 1) & 0x00000001) + data &= 0x7FFFFFFF; + + *dest = data; +} + +static void encode_block(unsigned long *dest, unsigned long *src, int len) +{ + int cnt, to_cnt = 0; + unsigned long data; + + /* odd bits */ + for (cnt = 0; cnt < len / 4; cnt++) { + data = src[cnt] >> 1; + encode(data, dest + to_cnt++); + } + 
+
+	/* even bits */
+	for (cnt = 0; cnt < len / 4; cnt++) {
+		data = src[cnt];
+		encode(data, dest + to_cnt++);
+	}
+}
+
+static unsigned long *putsec(int disk, unsigned long *raw, int cnt)
+{
+	struct header hdr;
+	int i;
+
+	disk&=3;
+	*raw = (raw[-1]&1) ? 0x2AAAAAAA : 0xAAAAAAAA;
+	raw++;
+	*raw++ = 0x44894489;
+
+	hdr.magic = 0xFF;
+	hdr.track = unit[disk].track;
+	hdr.sect = cnt;
+	hdr.ord = unit[disk].dtype->sects * unit[disk].type->sect_mult - cnt;
+	for (i = 0; i < 16; i++)
+		hdr.labels[i] = 0;
+	hdr.hdrchk = checksum((ulong *)&hdr,
+			      (char *)&hdr.hdrchk-(char *)&hdr);
+	hdr.datachk = checksum((ulong *)(unit[disk].trackbuf+cnt*512), 512);
+
+	encode_block(raw, (ulong *)&hdr.magic, 4);
+	raw += 2;
+	encode_block(raw, (ulong *)&hdr.labels, 16);
+	raw += 8;
+	encode_block(raw, (ulong *)&hdr.hdrchk, 4);
+	raw += 2;
+	encode_block(raw, (ulong *)&hdr.datachk, 4);
+	raw += 2;
+	encode_block(raw, (ulong *)(unit[disk].trackbuf+cnt*512), 512);
+	raw += 256;
+
+	return raw;
+}
+
+static void amiga_write(int disk)
+{
+	unsigned int cnt;
+	unsigned long *ptr = (unsigned long *)raw_buf;
+
+	disk&=3;
+	/* gap space */
+	for (cnt = 0; cnt < 415 * unit[disk].type->sect_mult; cnt++)
+		*ptr++ = 0xaaaaaaaa;
+
+	/* sectors */
+	for (cnt = 0; cnt < unit[disk].dtype->sects * unit[disk].type->sect_mult; cnt++)
+		ptr = putsec (disk, ptr, cnt);
+	*(ushort *)ptr = (ptr[-1]&1) ? 0x2AA8 : 0xAAA8;
+}
+
+
+struct dos_header {
+	unsigned char track,	/* 0-80 */
+		side,		/* 0-1 */
+		sec,		/* 0-... */
+		len_desc;	/* 2 */
+	unsigned short crc;	/* on 68000 we got an alignment problem,
+				   but this compiler solves it by silently
+				   adding a pad byte, so the data won't fit -
+				   and this took about 3h to discover.... */
+	unsigned char gap1[22];	/* for longword-alignedness (0x4e) */
+};
+
+/* crc routines are borrowed from the messydos-handler */
+
+/* excerpt from the messydos-device
+; The CRC is computed not only over the actual data, but including
+; the SYNC mark (3 * $a1) and the 'ID/DATA - Address Mark' ($fe/$fb).
+; As we don't read or encode these fields into our buffers, we have to
+; preload the registers containing the CRC with the values they would have
+; after stepping over these fields.
+;
+; How CRCs "really" work:
+;
+; First, you should regard a bitstring as a series of coefficients of
+; polynomials. We calculate with these polynomials in modulo-2
+; arithmetic, in which both add and subtract are done the same as
+; exclusive-or. Now, we modify our data (a very long polynomial) in
+; such a way that it becomes divisible by the CCITT-standard 16-bit
+; polynomial x^16 + x^12 + x^5 + 1, represented by $11021. The easiest
+; way to do this would be to multiply (using proper arithmetic) our
+; datablock with $11021. So we have:
+;   data * $11021 =
+;   data * ($10000 + $1021) =
+;   data * $10000 + data * $1021
+; The left part of this is simple: Just add two 0 bytes. But then
+; the right part (data * $1021) remains difficult and even could have
+; a carry into the left part. The solution is to use a modified
+; multiplication, which has a result that is not correct, but with
+; a difference of any multiple of $11021. We then only need to keep
+; the 16 least significant bits of the result.
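+;
+; (For reference: the same remainder can also be computed bit-serially; a
+;  sketch equivalent to the table lookups used in dos_crc() below is, per
+;  input byte,
+;
+;	crc ^= (*data++) << 8;
+;	for (i = 0; i < 8; i++)
+;		crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021 : crc << 1;
+;
+;  with the 16-bit crc preloaded as described above.)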
+;
+; The following algorithm does this for us:
+;
+;	unsigned char *data, c, crclo, crchi;
+;	while (not done) {
+;		c = *data++ + crchi;
+;		crchi = (@ c) >> 8 + crclo;
+;		crclo = @ c;
+;	}
+;
+; Remember, + is done with EOR, the @ operator is in two tables (high
+; and low byte separately), which is calculated as
+;
+;	    $1021 * (c & $F0)
+;	xor $1021 * (c & $0F)
+;	xor $1021 * (c >> 4)		(* is regular multiplication)
+;
+; Anyway, the end result is the same as the remainder of the division of
+; the data by $11021. I am afraid I need to study theory a bit more...
+
+My only work was to translate this from Manx assembler to C....
+
+*/
+
+static ushort dos_crc(void * data_a3, int data_d0, int data_d1, int data_d3)
+{
+	static unsigned char CRCTable1[] = {
+		0x00,0x10,0x20,0x30,0x40,0x50,0x60,0x70,0x81,0x91,0xa1,0xb1,0xc1,0xd1,0xe1,0xf1,
+		0x12,0x02,0x32,0x22,0x52,0x42,0x72,0x62,0x93,0x83,0xb3,0xa3,0xd3,0xc3,0xf3,0xe3,
+		0x24,0x34,0x04,0x14,0x64,0x74,0x44,0x54,0xa5,0xb5,0x85,0x95,0xe5,0xf5,0xc5,0xd5,
+		0x36,0x26,0x16,0x06,0x76,0x66,0x56,0x46,0xb7,0xa7,0x97,0x87,0xf7,0xe7,0xd7,0xc7,
+		0x48,0x58,0x68,0x78,0x08,0x18,0x28,0x38,0xc9,0xd9,0xe9,0xf9,0x89,0x99,0xa9,0xb9,
+		0x5a,0x4a,0x7a,0x6a,0x1a,0x0a,0x3a,0x2a,0xdb,0xcb,0xfb,0xeb,0x9b,0x8b,0xbb,0xab,
+		0x6c,0x7c,0x4c,0x5c,0x2c,0x3c,0x0c,0x1c,0xed,0xfd,0xcd,0xdd,0xad,0xbd,0x8d,0x9d,
+		0x7e,0x6e,0x5e,0x4e,0x3e,0x2e,0x1e,0x0e,0xff,0xef,0xdf,0xcf,0xbf,0xaf,0x9f,0x8f,
+		0x91,0x81,0xb1,0xa1,0xd1,0xc1,0xf1,0xe1,0x10,0x00,0x30,0x20,0x50,0x40,0x70,0x60,
+		0x83,0x93,0xa3,0xb3,0xc3,0xd3,0xe3,0xf3,0x02,0x12,0x22,0x32,0x42,0x52,0x62,0x72,
+		0xb5,0xa5,0x95,0x85,0xf5,0xe5,0xd5,0xc5,0x34,0x24,0x14,0x04,0x74,0x64,0x54,0x44,
+		0xa7,0xb7,0x87,0x97,0xe7,0xf7,0xc7,0xd7,0x26,0x36,0x06,0x16,0x66,0x76,0x46,0x56,
+		0xd9,0xc9,0xf9,0xe9,0x99,0x89,0xb9,0xa9,0x58,0x48,0x78,0x68,0x18,0x08,0x38,0x28,
+		0xcb,0xdb,0xeb,0xfb,0x8b,0x9b,0xab,0xbb,0x4a,0x5a,0x6a,0x7a,0x0a,0x1a,0x2a,0x3a,
+		0xfd,0xed,0xdd,0xcd,0xbd,0xad,0x9d,0x8d,0x7c,0x6c,0x5c,0x4c,0x3c,0x2c,0x1c,0x0c,
+		0xef,0xff,0xcf,0xdf,0xaf,0xbf,0x8f,0x9f,0x6e,0x7e,0x4e,0x5e,0x2e,0x3e,0x0e,0x1e
+	};
+
+	static unsigned char CRCTable2[] = {
+		0x00,0x21,0x42,0x63,0x84,0xa5,0xc6,0xe7,0x08,0x29,0x4a,0x6b,0x8c,0xad,0xce,0xef,
+		0x31,0x10,0x73,0x52,0xb5,0x94,0xf7,0xd6,0x39,0x18,0x7b,0x5a,0xbd,0x9c,0xff,0xde,
+		0x62,0x43,0x20,0x01,0xe6,0xc7,0xa4,0x85,0x6a,0x4b,0x28,0x09,0xee,0xcf,0xac,0x8d,
+		0x53,0x72,0x11,0x30,0xd7,0xf6,0x95,0xb4,0x5b,0x7a,0x19,0x38,0xdf,0xfe,0x9d,0xbc,
+		0xc4,0xe5,0x86,0xa7,0x40,0x61,0x02,0x23,0xcc,0xed,0x8e,0xaf,0x48,0x69,0x0a,0x2b,
+		0xf5,0xd4,0xb7,0x96,0x71,0x50,0x33,0x12,0xfd,0xdc,0xbf,0x9e,0x79,0x58,0x3b,0x1a,
+		0xa6,0x87,0xe4,0xc5,0x22,0x03,0x60,0x41,0xae,0x8f,0xec,0xcd,0x2a,0x0b,0x68,0x49,
+		0x97,0xb6,0xd5,0xf4,0x13,0x32,0x51,0x70,0x9f,0xbe,0xdd,0xfc,0x1b,0x3a,0x59,0x78,
+		0x88,0xa9,0xca,0xeb,0x0c,0x2d,0x4e,0x6f,0x80,0xa1,0xc2,0xe3,0x04,0x25,0x46,0x67,
+		0xb9,0x98,0xfb,0xda,0x3d,0x1c,0x7f,0x5e,0xb1,0x90,0xf3,0xd2,0x35,0x14,0x77,0x56,
+		0xea,0xcb,0xa8,0x89,0x6e,0x4f,0x2c,0x0d,0xe2,0xc3,0xa0,0x81,0x66,0x47,0x24,0x05,
+		0xdb,0xfa,0x99,0xb8,0x5f,0x7e,0x1d,0x3c,0xd3,0xf2,0x91,0xb0,0x57,0x76,0x15,0x34,
+		0x4c,0x6d,0x0e,0x2f,0xc8,0xe9,0x8a,0xab,0x44,0x65,0x06,0x27,0xc0,0xe1,0x82,0xa3,
+		0x7d,0x5c,0x3f,0x1e,0xf9,0xd8,0xbb,0x9a,0x75,0x54,0x37,0x16,0xf1,0xd0,0xb3,0x92,
+		0x2e,0x0f,0x6c,0x4d,0xaa,0x8b,0xe8,0xc9,0x26,0x07,0x64,0x45,0xa2,0x83,0xe0,0xc1,
+		0x1f,0x3e,0x5d,0x7c,0x9b,0xba,0xd9,0xf8,0x17,0x36,0x55,0x74,0x93,0xb2,0xd1,0xf0
+	};
+
+/* look at the asm-code - what looks in C a bit strange is almost as good as handmade */
register int i; + register unsigned char *CRCT1, *CRCT2, *data, c, crch, crcl; + + CRCT1=CRCTable1; + CRCT2=CRCTable2; + data=data_a3; + crcl=data_d1; + crch=data_d0; + for (i=data_d3; i>=0; i--) { + c = (*data++) ^ crch; + crch = CRCT1[c] ^ crcl; + crcl = CRCT2[c]; + } + return (crch<<8)|crcl; +} + +static inline ushort dos_hdr_crc (struct dos_header *hdr) +{ + return dos_crc(&(hdr->track), 0xb2, 0x30, 3); /* precomputed magic */ +} + +static inline ushort dos_data_crc(unsigned char *data) +{ + return dos_crc(data, 0xe2, 0x95 ,511); /* precomputed magic */ +} + +static inline unsigned char dos_decode_byte(ushort word) +{ + register ushort w2; + register unsigned char byte; + register unsigned char *dec = mfmdecode; + + w2=word; + w2>>=8; + w2&=127; + byte = dec[w2]; + byte <<= 4; + w2 = word & 127; + byte |= dec[w2]; + return byte; +} + +static unsigned long dos_decode(unsigned char *data, unsigned short *raw, int len) +{ + int i; + + for (i = 0; i < len; i++) + *data++=dos_decode_byte(*raw++); + return ((ulong)raw); +} + +#ifdef DEBUG +static void dbg(unsigned long ptr) +{ + printk("raw data @%08lx: %08lx, %08lx ,%08lx, %08lx\n", ptr, + ((ulong *)ptr)[0], ((ulong *)ptr)[1], + ((ulong *)ptr)[2], ((ulong *)ptr)[3]); +} +#endif + +static int dos_read(int drive) +{ + unsigned long end; + unsigned long raw; + int scnt; + unsigned short crc,data_crc[2]; + struct dos_header hdr; + + drive&=3; + raw = (long) raw_buf; + end = raw + unit[drive].type->read_size; + + for (scnt=0; scnt < unit[drive].dtype->sects * unit[drive].type->sect_mult; scnt++) { + do { /* search for the right sync of each sec-hdr */ + if (!(raw = scan_sync (raw, end))) { + printk(KERN_INFO "dos_read: no hdr sync on " + "track %d, unit %d for sector %d\n", + unit[drive].track,drive,scnt); + return MFM_NOSYNC; + } +#ifdef DEBUG + dbg(raw); +#endif + } while (*((ushort *)raw)!=0x5554); /* loop usually only once done */ + raw+=2; /* skip over headermark */ + raw = dos_decode((unsigned char *)&hdr,(ushort *) raw,8); + crc = dos_hdr_crc(&hdr); + +#ifdef DEBUG + printk("(%3d,%d,%2d,%d) %x\n", hdr.track, hdr.side, + hdr.sec, hdr.len_desc, hdr.crc); +#endif + + if (crc != hdr.crc) { + printk(KERN_INFO "dos_read: MFM_HEADER %04x,%04x\n", + hdr.crc, crc); + return MFM_HEADER; + } + if (hdr.track != unit[drive].track/unit[drive].type->heads) { + printk(KERN_INFO "dos_read: MFM_TRACK %d, %d\n", + hdr.track, + unit[drive].track/unit[drive].type->heads); + return MFM_TRACK; + } + + if (hdr.side != unit[drive].track%unit[drive].type->heads) { + printk(KERN_INFO "dos_read: MFM_SIDE %d, %d\n", + hdr.side, + unit[drive].track%unit[drive].type->heads); + return MFM_TRACK; + } + + if (hdr.len_desc != 2) { + printk(KERN_INFO "dos_read: unknown sector len " + "descriptor %d\n", hdr.len_desc); + return MFM_DATA; + } +#ifdef DEBUG + printk("hdr accepted\n"); +#endif + if (!(raw = scan_sync (raw, end))) { + printk(KERN_INFO "dos_read: no data sync on track " + "%d, unit %d for sector%d, disk sector %d\n", + unit[drive].track, drive, scnt, hdr.sec); + return MFM_NOSYNC; + } +#ifdef DEBUG + dbg(raw); +#endif + + if (*((ushort *)raw)!=0x5545) { + printk(KERN_INFO "dos_read: no data mark after " + "sync (%d,%d,%d,%d) sc=%d\n", + hdr.track,hdr.side,hdr.sec,hdr.len_desc,scnt); + return MFM_NOSYNC; + } + + raw+=2; /* skip data mark (included in checksum) */ + raw = dos_decode((unsigned char *)(unit[drive].trackbuf + (hdr.sec - 1) * 512), (ushort *) raw, 512); + raw = dos_decode((unsigned char *)data_crc,(ushort *) raw,4); + crc = 
dos_data_crc(unit[drive].trackbuf + (hdr.sec - 1) * 512); + + if (crc != data_crc[0]) { + printk(KERN_INFO "dos_read: MFM_DATA (%d,%d,%d,%d) " + "sc=%d, %x %x\n", hdr.track, hdr.side, + hdr.sec, hdr.len_desc, scnt,data_crc[0], crc); + printk(KERN_INFO "data=(%lx,%lx,%lx,%lx,...)\n", + ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[0], + ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[1], + ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[2], + ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[3]); + return MFM_DATA; + } + } + return 0; +} + +static inline ushort dos_encode_byte(unsigned char byte) +{ + register unsigned char *enc, b2, b1; + register ushort word; + + enc=mfmencode; + b1=byte; + b2=b1>>4; + b1&=15; + word=enc[b2] <<8 | enc [b1]; + return (word|((word&(256|64)) ? 0: 128)); +} + +static void dos_encode_block(ushort *dest, unsigned char *src, int len) +{ + int i; + + for (i = 0; i < len; i++) { + *dest=dos_encode_byte(*src++); + *dest|=((dest[-1]&1)||(*dest&0x4000))? 0: 0x8000; + dest++; + } +} + +static unsigned long *ms_putsec(int drive, unsigned long *raw, int cnt) +{ + static struct dos_header hdr={0,0,0,2,0, + {78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78}}; + int i; + static ushort crc[2]={0,0x4e4e}; + + drive&=3; +/* id gap 1 */ +/* the MFM word before is always 9254 */ + for(i=0;i<6;i++) + *raw++=0xaaaaaaaa; +/* 3 sync + 1 headermark */ + *raw++=0x44894489; + *raw++=0x44895554; + +/* fill in the variable parts of the header */ + hdr.track=unit[drive].track/unit[drive].type->heads; + hdr.side=unit[drive].track%unit[drive].type->heads; + hdr.sec=cnt+1; + hdr.crc=dos_hdr_crc(&hdr); + +/* header (without "magic") and id gap 2*/ + dos_encode_block((ushort *)raw,(unsigned char *) &hdr.track,28); + raw+=14; + +/*id gap 3 */ + for(i=0;i<6;i++) + *raw++=0xaaaaaaaa; + +/* 3 syncs and 1 datamark */ + *raw++=0x44894489; + *raw++=0x44895545; + +/* data */ + dos_encode_block((ushort *)raw, + (unsigned char *)unit[drive].trackbuf+cnt*512,512); + raw+=256; + +/*data crc + jd's special gap (long words :-/) */ + crc[0]=dos_data_crc(unit[drive].trackbuf+cnt*512); + dos_encode_block((ushort *) raw,(unsigned char *)crc,4); + raw+=2; + +/* data gap */ + for(i=0;i<38;i++) + *raw++=0x92549254; + + return raw; /* wrote 652 MFM words */ +} + +static void dos_write(int disk) +{ + int cnt; + unsigned long raw = (unsigned long) raw_buf; + unsigned long *ptr=(unsigned long *)raw; + + disk&=3; +/* really gap4 + indexgap , but we write it first and round it up */ + for (cnt=0;cnt<425;cnt++) + *ptr++=0x92549254; + +/* the following is just guessed */ + if (unit[disk].type->sect_mult==2) /* check for HD-Disks */ + for(cnt=0;cnt<473;cnt++) + *ptr++=0x92549254; + +/* now the index marks...*/ + for (cnt=0;cnt<20;cnt++) + *ptr++=0x92549254; + for (cnt=0;cnt<6;cnt++) + *ptr++=0xaaaaaaaa; + *ptr++=0x52245224; + *ptr++=0x52245552; + for (cnt=0;cnt<20;cnt++) + *ptr++=0x92549254; + +/* sectors */ + for(cnt = 0; cnt < unit[disk].dtype->sects * unit[disk].type->sect_mult; cnt++) + ptr=ms_putsec(disk,ptr,cnt); + + *(ushort *)ptr = 0xaaa8; /* MFM word before is always 0x9254 */ +} + +/* + * Here comes the high level stuff (i.e. the filesystem interface) + * and helper functions. + * Normally this should be the only part that has to be adapted to + * different kernel versions. 
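+ * The basic scheme: get_track() reads a complete track into the per-drive
+ * trackbuf; reads are then served by memcpy() out of it, while writes
+ * patch the buffer, set the dirty flag and (re)arm flush_track_timer, so
+ * several writes to the same track go back to disk in a single flush
+ * (see redo_fd_request() and flush_track_callback()).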
+ */ + +/* FIXME: this assumes the drive is still spinning - + * which is only true if we complete writing a track within three seconds + */ +static void flush_track_callback(unsigned long nr) +{ + nr&=3; + writefromint = 1; + if (!try_fdc(nr)) { + /* we might block in an interrupt, so try again later */ + flush_track_timer[nr].expires = jiffies + 1; + add_timer(flush_track_timer + nr); + return; + } + get_fdc(nr); + (*unit[nr].dtype->write_fkt)(nr); + if (!raw_write(nr)) { + printk (KERN_NOTICE "floppy disk write protected\n"); + writefromint = 0; + writepending = 0; + } + rel_fdc(); +} + +static int non_int_flush_track (unsigned long nr) +{ + unsigned long flags; + + nr&=3; + writefromint = 0; + del_timer(&post_write_timer); + get_fdc(nr); + if (!fd_motor_on(nr)) { + writepending = 0; + rel_fdc(); + return 0; + } + local_irq_save(flags); + if (writepending != 2) { + local_irq_restore(flags); + (*unit[nr].dtype->write_fkt)(nr); + if (!raw_write(nr)) { + printk (KERN_NOTICE "floppy disk write protected " + "in write!\n"); + writepending = 0; + return 0; + } + while (block_flag == 2) + sleep_on (&wait_fd_block); + } + else { + local_irq_restore(flags); + ms_delay(2); /* 2 ms post_write delay */ + post_write(nr); + } + rel_fdc(); + return 1; +} + +static int get_track(int drive, int track) +{ + int error, errcnt; + + drive&=3; + if (unit[drive].track == track) + return 0; + get_fdc(drive); + if (!fd_motor_on(drive)) { + rel_fdc(); + return -1; + } + + if (unit[drive].dirty == 1) { + del_timer (flush_track_timer + drive); + non_int_flush_track (drive); + } + errcnt = 0; + while (errcnt < MAX_ERRORS) { + if (!fd_seek(drive, track)) + return -1; + raw_read(drive); + error = (*unit[drive].dtype->read_fkt)(drive); + if (error == 0) { + rel_fdc(); + return 0; + } + /* Read Error Handling: recalibrate and try again */ + unit[drive].track = -1; + errcnt++; + } + rel_fdc(); + return -1; +} + +static void redo_fd_request(void) +{ + unsigned int cnt, block, track, sector; + int drive; + struct amiga_floppy_struct *floppy; + char *data; + unsigned long flags; + + repeat: + if (!CURRENT) { + /* Nothing left to do */ + return; + } + + floppy = CURRENT->rq_disk->private_data; + drive = floppy - unit; + + /* Here someone could investigate to be more efficient */ + for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) { +#ifdef DEBUG + printk("fd: sector %ld + %d requested for %s\n", + CURRENT->sector,cnt, + (rq_data_dir(CURRENT) == READ) ? 
"read" : "write"); +#endif + block = CURRENT->sector + cnt; + if ((int)block > floppy->blocks) { + end_request(CURRENT, 0); + goto repeat; + } + + track = block / (floppy->dtype->sects * floppy->type->sect_mult); + sector = block % (floppy->dtype->sects * floppy->type->sect_mult); + data = CURRENT->buffer + 512 * cnt; +#ifdef DEBUG + printk("access to track %d, sector %d, with buffer at " + "0x%08lx\n", track, sector, data); +#endif + + if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) { + printk(KERN_WARNING "do_fd_request: unknown command\n"); + end_request(CURRENT, 0); + goto repeat; + } + if (get_track(drive, track) == -1) { + end_request(CURRENT, 0); + goto repeat; + } + + switch (rq_data_dir(CURRENT)) { + case READ: + memcpy(data, floppy->trackbuf + sector * 512, 512); + break; + + case WRITE: + memcpy(floppy->trackbuf + sector * 512, data, 512); + + /* keep the drive spinning while writes are scheduled */ + if (!fd_motor_on(drive)) { + end_request(CURRENT, 0); + goto repeat; + } + /* + * setup a callback to write the track buffer + * after a short (1 tick) delay. + */ + local_irq_save(flags); + + floppy->dirty = 1; + /* reset the timer */ + mod_timer (flush_track_timer + drive, jiffies + 1); + local_irq_restore(flags); + break; + } + } + CURRENT->nr_sectors -= CURRENT->current_nr_sectors; + CURRENT->sector += CURRENT->current_nr_sectors; + + end_request(CURRENT, 1); + goto repeat; +} + +static void do_fd_request(struct request_queue * q) +{ + redo_fd_request(); +} + +static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + int drive = MINOR(bdev->bd_dev) & 3; + + geo->heads = unit[drive].type->heads; + geo->sectors = unit[drive].dtype->sects * unit[drive].type->sect_mult; + geo->cylinders = unit[drive].type->tracks; + return 0; +} + +static int fd_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long param) +{ + struct amiga_floppy_struct *p = bdev->bd_disk->private_data; + int drive = p - unit; + static struct floppy_struct getprm; + void __user *argp = (void __user *)param; + + switch(cmd){ + case FDFMTBEG: + get_fdc(drive); + if (fd_ref[drive] > 1) { + rel_fdc(); + return -EBUSY; + } + fsync_bdev(bdev); + if (fd_motor_on(drive) == 0) { + rel_fdc(); + return -ENODEV; + } + if (fd_calibrate(drive) == 0) { + rel_fdc(); + return -ENXIO; + } + floppy_off(drive); + rel_fdc(); + break; + case FDFMTTRK: + if (param < p->type->tracks * p->type->heads) + { + get_fdc(drive); + if (fd_seek(drive,param) != 0){ + memset(p->trackbuf, FD_FILL_BYTE, + p->dtype->sects * p->type->sect_mult * 512); + non_int_flush_track(drive); + } + floppy_off(drive); + rel_fdc(); + } + else + return -EINVAL; + break; + case FDFMTEND: + floppy_off(drive); + invalidate_bdev(bdev); + break; + case FDGETPRM: + memset((void *)&getprm, 0, sizeof (getprm)); + getprm.track=p->type->tracks; + getprm.head=p->type->heads; + getprm.sect=p->dtype->sects * p->type->sect_mult; + getprm.size=p->blocks; + if (copy_to_user(argp, &getprm, sizeof(struct floppy_struct))) + return -EFAULT; + break; + case FDSETPRM: + case FDDEFPRM: + return -EINVAL; + case FDFLUSH: /* unconditionally, even if not needed */ + del_timer (flush_track_timer + drive); + non_int_flush_track(drive); + break; +#ifdef RAW_IOCTL + case IOCTL_RAW_TRACK: + if (copy_to_user(argp, raw_buf, p->type->read_size)) + return -EFAULT; + else + return p->type->read_size; +#endif + default: + printk(KERN_DEBUG "fd_ioctl: unknown cmd %d for drive %d.", + cmd, drive); + return -ENOSYS; + } + return 0; +} + 
+static void fd_probe(int dev) +{ + unsigned long code; + int type; + int drive; + + drive = dev & 3; + code = fd_get_drive_id(drive); + + /* get drive type */ + for (type = 0; type < num_dr_types; type++) + if (drive_types[type].code == code) + break; + + if (type >= num_dr_types) { + printk(KERN_WARNING "fd_probe: unsupported drive type " + "%08lx found\n", code); + unit[drive].type = &drive_types[num_dr_types-1]; /* FD_NODRIVE */ + return; + } + + unit[drive].type = drive_types + type; + unit[drive].track = -1; + + unit[drive].disk = -1; + unit[drive].motor = 0; + unit[drive].busy = 0; + unit[drive].status = -1; +} + +/* + * floppy_open check for aliasing (/dev/fd0 can be the same as + * /dev/PS0 etc), and disallows simultaneous access to the same + * drive with different device numbers. + */ +static int floppy_open(struct block_device *bdev, fmode_t mode) +{ + int drive = MINOR(bdev->bd_dev) & 3; + int system = (MINOR(bdev->bd_dev) & 4) >> 2; + int old_dev; + unsigned long flags; + + old_dev = fd_device[drive]; + + if (fd_ref[drive] && old_dev != system) + return -EBUSY; + + if (mode & (FMODE_READ|FMODE_WRITE)) { + check_disk_change(bdev); + if (mode & FMODE_WRITE) { + int wrprot; + + get_fdc(drive); + fd_select (drive); + wrprot = !(ciaa.pra & DSKPROT); + fd_deselect (drive); + rel_fdc(); + + if (wrprot) + return -EROFS; + } + } + + local_irq_save(flags); + fd_ref[drive]++; + fd_device[drive] = system; + local_irq_restore(flags); + + unit[drive].dtype=&data_types[system]; + unit[drive].blocks=unit[drive].type->heads*unit[drive].type->tracks* + data_types[system].sects*unit[drive].type->sect_mult; + set_capacity(unit[drive].gendisk, unit[drive].blocks); + + printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive, + unit[drive].type->name, data_types[system].name); + + return 0; +} + +static int floppy_release(struct gendisk *disk, fmode_t mode) +{ + struct amiga_floppy_struct *p = disk->private_data; + int drive = p - unit; + + if (unit[drive].dirty == 1) { + del_timer (flush_track_timer + drive); + non_int_flush_track (drive); + } + + if (!fd_ref[drive]--) { + printk(KERN_CRIT "floppy_release with fd_ref == 0"); + fd_ref[drive] = 0; + } +#ifdef MODULE +/* the mod_use counter is handled this way */ + floppy_off (drive | 0x40000000); +#endif + return 0; +} + +/* + * floppy-change is never called from an interrupt, so we can relax a bit + * here, sleep etc. Note that floppy-on tries to set current_DOR to point + * to the desired drive, but it will probably not survive the sleep if + * several floppies are used at the same time: thus the loop. + */ +static int amiga_floppy_change(struct gendisk *disk) +{ + struct amiga_floppy_struct *p = disk->private_data; + int drive = p - unit; + int changed; + static int first_time = 1; + + if (first_time) + changed = first_time--; + else { + get_fdc(drive); + fd_select (drive); + changed = !(ciaa.pra & DSKCHANGE); + fd_deselect (drive); + rel_fdc(); + } + + if (changed) { + fd_probe(drive); + p->track = -1; + p->dirty = 0; + writepending = 0; /* if this was true before, too bad! 
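+					      A write that was still pending
+					      for the removed disk is simply
+					      dropped here.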
*/ + writefromint = 0; + return 1; + } + return 0; +} + +static struct block_device_operations floppy_fops = { + .owner = THIS_MODULE, + .open = floppy_open, + .release = floppy_release, + .locked_ioctl = fd_ioctl, + .getgeo = fd_getgeo, + .media_changed = amiga_floppy_change, +}; + +static int __init fd_probe_drives(void) +{ + int drive,drives,nomem; + + printk(KERN_INFO "FD: probing units\n" KERN_INFO "found "); + drives=0; + nomem=0; + for(drive=0;drive<FD_MAX_UNITS;drive++) { + struct gendisk *disk; + fd_probe(drive); + if (unit[drive].type->code == FD_NODRIVE) + continue; + disk = alloc_disk(1); + if (!disk) { + unit[drive].type->code = FD_NODRIVE; + continue; + } + unit[drive].gendisk = disk; + drives++; + if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) { + printk("no mem for "); + unit[drive].type = &drive_types[num_dr_types - 1]; /* FD_NODRIVE */ + drives--; + nomem = 1; + } + printk("fd%d ",drive); + disk->major = FLOPPY_MAJOR; + disk->first_minor = drive; + disk->fops = &floppy_fops; + sprintf(disk->disk_name, "fd%d", drive); + disk->private_data = &unit[drive]; + disk->queue = floppy_queue; + set_capacity(disk, 880*2); + add_disk(disk); + } + if ((drives > 0) || (nomem == 0)) { + if (drives == 0) + printk("no drives"); + printk("\n"); + return drives; + } + printk("\n"); + return -ENOMEM; +} + +static struct kobject *floppy_find(dev_t dev, int *part, void *data) +{ + int drive = *part & 3; + if (unit[drive].type->code == FD_NODRIVE) + return NULL; + *part = 0; + return get_disk(unit[drive].gendisk); +} + +static int __init amiga_floppy_init(void) +{ + int i, ret; + + if (!MACH_IS_AMIGA) + return -ENODEV; + + if (!AMIGAHW_PRESENT(AMI_FLOPPY)) + return -ENODEV; + + if (register_blkdev(FLOPPY_MAJOR,"fd")) + return -EBUSY; + + /* + * We request DSKPTR, DSKLEN and DSKDATA only, because the other + * floppy registers are too spreaded over the custom register space + */ + ret = -EBUSY; + if (!request_mem_region(CUSTOM_PHYSADDR+0x20, 8, "amiflop [Paula]")) { + printk("fd: cannot get floppy registers\n"); + goto out_blkdev; + } + + ret = -ENOMEM; + if ((raw_buf = (char *)amiga_chip_alloc (RAW_BUF_SIZE, "Floppy")) == + NULL) { + printk("fd: cannot get chip mem buffer\n"); + goto out_memregion; + } + + ret = -EBUSY; + if (request_irq(IRQ_AMIGA_DSKBLK, fd_block_done, 0, "floppy_dma", NULL)) { + printk("fd: cannot get irq for dma\n"); + goto out_irq; + } + + if (request_irq(IRQ_AMIGA_CIAA_TB, ms_isr, 0, "floppy_timer", NULL)) { + printk("fd: cannot get irq for timer\n"); + goto out_irq2; + } + + ret = -ENOMEM; + floppy_queue = blk_init_queue(do_fd_request, &amiflop_lock); + if (!floppy_queue) + goto out_queue; + + ret = -ENODEV; + if (fd_probe_drives() < 1) /* No usable drives */ + goto out_probe; + + blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE, + floppy_find, NULL, NULL); + + /* initialize variables */ + init_timer(&motor_on_timer); + motor_on_timer.expires = 0; + motor_on_timer.data = 0; + motor_on_timer.function = motor_on_callback; + for (i = 0; i < FD_MAX_UNITS; i++) { + init_timer(&motor_off_timer[i]); + motor_off_timer[i].expires = 0; + motor_off_timer[i].data = i|0x80000000; + motor_off_timer[i].function = fd_motor_off; + init_timer(&flush_track_timer[i]); + flush_track_timer[i].expires = 0; + flush_track_timer[i].data = i; + flush_track_timer[i].function = flush_track_callback; + + unit[i].track = -1; + } + + init_timer(&post_write_timer); + post_write_timer.expires = 0; + post_write_timer.data = 0; + post_write_timer.function = 
post_write; + + for (i = 0; i < 128; i++) + mfmdecode[i]=255; + for (i = 0; i < 16; i++) + mfmdecode[mfmencode[i]]=i; + + /* make sure that disk DMA is enabled */ + custom.dmacon = DMAF_SETCLR | DMAF_DISK; + + /* init ms timer */ + ciaa.crb = 8; /* one-shot, stop */ + return 0; + +out_probe: + blk_cleanup_queue(floppy_queue); +out_queue: + free_irq(IRQ_AMIGA_CIAA_TB, NULL); +out_irq2: + free_irq(IRQ_AMIGA_DSKBLK, NULL); +out_irq: + amiga_chip_free(raw_buf); +out_memregion: + release_mem_region(CUSTOM_PHYSADDR+0x20, 8); +out_blkdev: + unregister_blkdev(FLOPPY_MAJOR,"fd"); + return ret; +} + +module_init(amiga_floppy_init); +#ifdef MODULE + +#if 0 /* not safe to unload */ +void cleanup_module(void) +{ + int i; + + for( i = 0; i < FD_MAX_UNITS; i++) { + if (unit[i].type->code != FD_NODRIVE) { + del_gendisk(unit[i].gendisk); + put_disk(unit[i].gendisk); + kfree(unit[i].trackbuf); + } + } + blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); + free_irq(IRQ_AMIGA_CIAA_TB, NULL); + free_irq(IRQ_AMIGA_DSKBLK, NULL); + custom.dmacon = DMAF_DISK; /* disable DMA */ + amiga_chip_free(raw_buf); + blk_cleanup_queue(floppy_queue); + release_mem_region(CUSTOM_PHYSADDR+0x20, 8); + unregister_blkdev(FLOPPY_MAJOR, "fd"); +} +#endif + +#else +static int __init amiga_floppy_setup (char *str) +{ + int n; + if (!MACH_IS_AMIGA) + return 0; + if (!get_option(&str, &n)) + return 0; + printk (KERN_INFO "amiflop: Setting default df0 to %x\n", n); + fd_def_df0 = n; + return 1; +} + +__setup("floppy=", amiga_floppy_setup); +#endif diff --git a/drivers/block/aoe/Makefile b/drivers/block/aoe/Makefile new file mode 100644 index 0000000..e76d997 --- /dev/null +++ b/drivers/block/aoe/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for ATA over Ethernet +# + +obj-$(CONFIG_ATA_OVER_ETH) += aoe.o +aoe-objs := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h new file mode 100644 index 0000000..8e54c1f --- /dev/null +++ b/drivers/block/aoe/aoe.h @@ -0,0 +1,204 @@ +/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. 
*/ +#define VERSION "47" +#define AOE_MAJOR 152 +#define DEVICE_NAME "aoe" + +/* set AOE_PARTITIONS to 1 to use whole-disks only + * default is 16, which is 15 partitions plus the whole disk + */ +#ifndef AOE_PARTITIONS +#define AOE_PARTITIONS (16) +#endif + +#define SYSMINOR(aoemajor, aoeminor) ((aoemajor) * NPERSHELF + (aoeminor)) +#define AOEMAJOR(sysminor) ((sysminor) / NPERSHELF) +#define AOEMINOR(sysminor) ((sysminor) % NPERSHELF) +#define WHITESPACE " \t\v\f\n" + +enum { + AOECMD_ATA, + AOECMD_CFG, + AOECMD_VEND_MIN = 0xf0, + + AOEFL_RSP = (1<<3), + AOEFL_ERR = (1<<2), + + AOEAFL_EXT = (1<<6), + AOEAFL_DEV = (1<<4), + AOEAFL_ASYNC = (1<<1), + AOEAFL_WRITE = (1<<0), + + AOECCMD_READ = 0, + AOECCMD_TEST, + AOECCMD_PTEST, + AOECCMD_SET, + AOECCMD_FSET, + + AOE_HVER = 0x10, +}; + +struct aoe_hdr { + unsigned char dst[6]; + unsigned char src[6]; + __be16 type; + unsigned char verfl; + unsigned char err; + __be16 major; + unsigned char minor; + unsigned char cmd; + __be32 tag; +}; + +struct aoe_atahdr { + unsigned char aflags; + unsigned char errfeat; + unsigned char scnt; + unsigned char cmdstat; + unsigned char lba0; + unsigned char lba1; + unsigned char lba2; + unsigned char lba3; + unsigned char lba4; + unsigned char lba5; + unsigned char res[2]; +}; + +struct aoe_cfghdr { + __be16 bufcnt; + __be16 fwver; + unsigned char scnt; + unsigned char aoeccmd; + unsigned char cslen[2]; +}; + +enum { + DEVFL_UP = 1, /* device is installed in system and ready for AoE->ATA commands */ + DEVFL_TKILL = (1<<1), /* flag for timer to know when to kill self */ + DEVFL_EXT = (1<<2), /* device accepts lba48 commands */ + DEVFL_CLOSEWAIT = (1<<3), /* device is waiting for all closes to revalidate */ + DEVFL_GDALLOC = (1<<4), /* need to alloc gendisk */ + DEVFL_KICKME = (1<<5), /* slow polling network card catch */ + DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */ + + BUFFL_FAIL = 1, +}; + +enum { + DEFAULTBCNT = 2 * 512, /* 2 sectors */ + NPERSHELF = 16, /* number of slots per shelf address */ + FREETAG = -1, + MIN_BUFS = 16, + NTARGETS = 8, + NAOEIFS = 8, + NSKBPOOLMAX = 128, + + TIMERTICK = HZ / 10, + MINTIMER = HZ >> 2, + MAXTIMER = HZ << 1, + HELPWAIT = 20, +}; + +struct buf { + struct list_head bufs; + ulong stime; /* for disk stats */ + ulong flags; + ulong nframesout; + ulong resid; + ulong bv_resid; + ulong bv_off; + sector_t sector; + struct bio *bio; + struct bio_vec *bv; +}; + +struct frame { + int tag; + ulong waited; + struct buf *buf; + char *bufaddr; + ulong bcnt; + sector_t lba; + struct sk_buff *skb; +}; + +struct aoeif { + struct net_device *nd; + unsigned char lost; + unsigned char lostjumbo; + ushort maxbcnt; +}; + +struct aoetgt { + unsigned char addr[6]; + ushort nframes; + struct frame *frames; + struct aoeif ifs[NAOEIFS]; + struct aoeif *ifp; /* current aoeif in use */ + ushort nout; + ushort maxout; + u16 lasttag; /* last tag sent */ + u16 useme; + ulong lastwadj; /* last window adjustment */ + int wpkts, rpkts; + int dataref; +}; + +struct aoedev { + struct aoedev *next; + ulong sysminor; + ulong aoemajor; + u16 aoeminor; + u16 flags; + u16 nopen; /* (bd_openers isn't available without sleeping) */ + u16 rttavg; /* round trip average of requests/responses */ + u16 mintimer; + u16 fw_ver; /* version of blade's firmware */ + struct work_struct work;/* disk create work struct */ + struct gendisk *gd; + struct request_queue blkq; + struct hd_geometry geo; + sector_t ssize; + struct timer_list timer; + spinlock_t lock; + struct sk_buff_head sendq; + struct 
sk_buff_head skbpool; + mempool_t *bufpool; /* for deadlock-free Buf allocation */ + struct list_head bufq; /* queue of bios to work on */ + struct buf *inprocess; /* the one we're currently working on */ + struct aoetgt *targets[NTARGETS]; + struct aoetgt **tgt; /* target in use when working */ + struct aoetgt **htgt; /* target needing rexmit assistance */ +}; + + +int aoeblk_init(void); +void aoeblk_exit(void); +void aoeblk_gdalloc(void *); +void aoedisk_rm_sysfs(struct aoedev *d); + +int aoechr_init(void); +void aoechr_exit(void); +void aoechr_error(char *); + +void aoecmd_work(struct aoedev *d); +void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor); +void aoecmd_ata_rsp(struct sk_buff *); +void aoecmd_cfg_rsp(struct sk_buff *); +void aoecmd_sleepwork(struct work_struct *); +void aoecmd_cleanslate(struct aoedev *); +struct sk_buff *aoecmd_ata_id(struct aoedev *); + +int aoedev_init(void); +void aoedev_exit(void); +struct aoedev *aoedev_by_aoeaddr(int maj, int min); +struct aoedev *aoedev_by_sysminor_m(ulong sysminor); +void aoedev_downdev(struct aoedev *d); +int aoedev_flush(const char __user *str, size_t size); + +int aoenet_init(void); +void aoenet_exit(void); +void aoenet_xmit(struct sk_buff_head *); +int is_aoe_netif(struct net_device *ifp); +int set_aoe_iflist(const char __user *str, size_t size); + +unsigned long long mac_addr(char addr[6]); diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c new file mode 100644 index 0000000..1747dd2 --- /dev/null +++ b/drivers/block/aoe/aoeblk.c @@ -0,0 +1,317 @@ +/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ +/* + * aoeblk.c + * block device routines + */ + +#include <linux/hdreg.h> +#include <linux/blkdev.h> +#include <linux/backing-dev.h> +#include <linux/fs.h> +#include <linux/ioctl.h> +#include <linux/genhd.h> +#include <linux/netdevice.h> +#include "aoe.h" + +static struct kmem_cache *buf_pool_cache; + +static ssize_t aoedisk_show_state(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct gendisk *disk = dev_to_disk(dev); + struct aoedev *d = disk->private_data; + + return snprintf(page, PAGE_SIZE, + "%s%s\n", + (d->flags & DEVFL_UP) ? "up" : "down", + (d->flags & DEVFL_KICKME) ? ",kickme" : + (d->nopen && !(d->flags & DEVFL_UP)) ? 
",closewait" : ""); + /* I'd rather see nopen exported so we can ditch closewait */ +} +static ssize_t aoedisk_show_mac(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct gendisk *disk = dev_to_disk(dev); + struct aoedev *d = disk->private_data; + struct aoetgt *t = d->targets[0]; + + if (t == NULL) + return snprintf(page, PAGE_SIZE, "none\n"); + return snprintf(page, PAGE_SIZE, "%012llx\n", mac_addr(t->addr)); +} +static ssize_t aoedisk_show_netif(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct gendisk *disk = dev_to_disk(dev); + struct aoedev *d = disk->private_data; + struct net_device *nds[8], **nd, **nnd, **ne; + struct aoetgt **t, **te; + struct aoeif *ifp, *e; + char *p; + + memset(nds, 0, sizeof nds); + nd = nds; + ne = nd + ARRAY_SIZE(nds); + t = d->targets; + te = t + NTARGETS; + for (; t < te && *t; t++) { + ifp = (*t)->ifs; + e = ifp + NAOEIFS; + for (; ifp < e && ifp->nd; ifp++) { + for (nnd = nds; nnd < nd; nnd++) + if (*nnd == ifp->nd) + break; + if (nnd == nd && nd != ne) + *nd++ = ifp->nd; + } + } + + ne = nd; + nd = nds; + if (*nd == NULL) + return snprintf(page, PAGE_SIZE, "none\n"); + for (p = page; nd < ne; nd++) + p += snprintf(p, PAGE_SIZE - (p-page), "%s%s", + p == page ? "" : ",", (*nd)->name); + p += snprintf(p, PAGE_SIZE - (p-page), "\n"); + return p-page; +} +/* firmware version */ +static ssize_t aoedisk_show_fwver(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct gendisk *disk = dev_to_disk(dev); + struct aoedev *d = disk->private_data; + + return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver); +} + +static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL); +static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL); +static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL); +static struct device_attribute dev_attr_firmware_version = { + .attr = { .name = "firmware-version", .mode = S_IRUGO }, + .show = aoedisk_show_fwver, +}; + +static struct attribute *aoe_attrs[] = { + &dev_attr_state.attr, + &dev_attr_mac.attr, + &dev_attr_netif.attr, + &dev_attr_firmware_version.attr, + NULL, +}; + +static const struct attribute_group attr_group = { + .attrs = aoe_attrs, +}; + +static int +aoedisk_add_sysfs(struct aoedev *d) +{ + return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group); +} +void +aoedisk_rm_sysfs(struct aoedev *d) +{ + sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group); +} + +static int +aoeblk_open(struct block_device *bdev, fmode_t mode) +{ + struct aoedev *d = bdev->bd_disk->private_data; + ulong flags; + + spin_lock_irqsave(&d->lock, flags); + if (d->flags & DEVFL_UP) { + d->nopen++; + spin_unlock_irqrestore(&d->lock, flags); + return 0; + } + spin_unlock_irqrestore(&d->lock, flags); + return -ENODEV; +} + +static int +aoeblk_release(struct gendisk *disk, fmode_t mode) +{ + struct aoedev *d = disk->private_data; + ulong flags; + + spin_lock_irqsave(&d->lock, flags); + + if (--d->nopen == 0) { + spin_unlock_irqrestore(&d->lock, flags); + aoecmd_cfg(d->aoemajor, d->aoeminor); + return 0; + } + spin_unlock_irqrestore(&d->lock, flags); + + return 0; +} + +static int +aoeblk_make_request(struct request_queue *q, struct bio *bio) +{ + struct sk_buff_head queue; + struct aoedev *d; + struct buf *buf; + ulong flags; + + blk_queue_bounce(q, &bio); + + if (bio == NULL) { + printk(KERN_ERR "aoe: bio is NULL\n"); + BUG(); + return 0; + } + d = bio->bi_bdev->bd_disk->private_data; + if (d == NULL) { + printk(KERN_ERR "aoe: bd_disk->private_data 
is NULL\n"); + BUG(); + bio_endio(bio, -ENXIO); + return 0; + } else if (bio->bi_io_vec == NULL) { + printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); + BUG(); + bio_endio(bio, -ENXIO); + return 0; + } + buf = mempool_alloc(d->bufpool, GFP_NOIO); + if (buf == NULL) { + printk(KERN_INFO "aoe: buf allocation failure\n"); + bio_endio(bio, -ENOMEM); + return 0; + } + memset(buf, 0, sizeof(*buf)); + INIT_LIST_HEAD(&buf->bufs); + buf->stime = jiffies; + buf->bio = bio; + buf->resid = bio->bi_size; + buf->sector = bio->bi_sector; + buf->bv = &bio->bi_io_vec[bio->bi_idx]; + buf->bv_resid = buf->bv->bv_len; + WARN_ON(buf->bv_resid == 0); + buf->bv_off = buf->bv->bv_offset; + + spin_lock_irqsave(&d->lock, flags); + + if ((d->flags & DEVFL_UP) == 0) { + printk(KERN_INFO "aoe: device %ld.%d is not up\n", + d->aoemajor, d->aoeminor); + spin_unlock_irqrestore(&d->lock, flags); + mempool_free(buf, d->bufpool); + bio_endio(bio, -ENXIO); + return 0; + } + + list_add_tail(&buf->bufs, &d->bufq); + + aoecmd_work(d); + __skb_queue_head_init(&queue); + skb_queue_splice_init(&d->sendq, &queue); + + spin_unlock_irqrestore(&d->lock, flags); + aoenet_xmit(&queue); + + return 0; +} + +static int +aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + struct aoedev *d = bdev->bd_disk->private_data; + + if ((d->flags & DEVFL_UP) == 0) { + printk(KERN_ERR "aoe: disk not up\n"); + return -ENODEV; + } + + geo->cylinders = d->geo.cylinders; + geo->heads = d->geo.heads; + geo->sectors = d->geo.sectors; + return 0; +} + +static struct block_device_operations aoe_bdops = { + .open = aoeblk_open, + .release = aoeblk_release, + .getgeo = aoeblk_getgeo, + .owner = THIS_MODULE, +}; + +/* alloc_disk and add_disk can sleep */ +void +aoeblk_gdalloc(void *vp) +{ + struct aoedev *d = vp; + struct gendisk *gd; + ulong flags; + + gd = alloc_disk(AOE_PARTITIONS); + if (gd == NULL) { + printk(KERN_ERR + "aoe: cannot allocate disk structure for %ld.%d\n", + d->aoemajor, d->aoeminor); + goto err; + } + + d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache); + if (d->bufpool == NULL) { + printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n", + d->aoemajor, d->aoeminor); + goto err_disk; + } + + blk_queue_make_request(&d->blkq, aoeblk_make_request); + if (bdi_init(&d->blkq.backing_dev_info)) + goto err_mempool; + spin_lock_irqsave(&d->lock, flags); + gd->major = AOE_MAJOR; + gd->first_minor = d->sysminor * AOE_PARTITIONS; + gd->fops = &aoe_bdops; + gd->private_data = d; + set_capacity(gd, d->ssize); + snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d", + d->aoemajor, d->aoeminor); + + gd->queue = &d->blkq; + d->gd = gd; + d->flags &= ~DEVFL_GDALLOC; + d->flags |= DEVFL_UP; + + spin_unlock_irqrestore(&d->lock, flags); + + add_disk(gd); + aoedisk_add_sysfs(d); + return; + +err_mempool: + mempool_destroy(d->bufpool); +err_disk: + put_disk(gd); +err: + spin_lock_irqsave(&d->lock, flags); + d->flags &= ~DEVFL_GDALLOC; + spin_unlock_irqrestore(&d->lock, flags); +} + +void +aoeblk_exit(void) +{ + kmem_cache_destroy(buf_pool_cache); +} + +int __init +aoeblk_init(void) +{ + buf_pool_cache = kmem_cache_create("aoe_bufs", + sizeof(struct buf), + 0, 0, NULL); + if (buf_pool_cache == NULL) + return -ENOMEM; + + return 0; +} + diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c new file mode 100644 index 0000000..200efc4 --- /dev/null +++ b/drivers/block/aoe/aoechr.c @@ -0,0 +1,304 @@ +/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. 
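 *
 * Each minor below surfaces as a file under /dev/etherd/.  Writing to
 * "discover" broadcasts an AoE config query, "interfaces" limits the
 * driver to a list of network device names, "revalidate" re-probes a
 * single e%d.%d device, and "flush" discards devices that are down and
 * unused.  Reading "err" drains the driver's ring of error messages.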
*/ +/* + * aoechr.c + * AoE character device driver + */ + +#include <linux/hdreg.h> +#include <linux/blkdev.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/smp_lock.h> +#include <linux/skbuff.h> +#include "aoe.h" + +enum { + //MINOR_STAT = 1, (moved to sysfs) + MINOR_ERR = 2, + MINOR_DISCOVER, + MINOR_INTERFACES, + MINOR_REVALIDATE, + MINOR_FLUSH, + MSGSZ = 2048, + NMSG = 100, /* message backlog to retain */ +}; + +struct aoe_chardev { + ulong minor; + char name[32]; +}; + +enum { EMFL_VALID = 1 }; + +struct ErrMsg { + short flags; + short len; + char *msg; +}; + +static struct ErrMsg emsgs[NMSG]; +static int emsgs_head_idx, emsgs_tail_idx; +static struct completion emsgs_comp; +static spinlock_t emsgs_lock; +static int nblocked_emsgs_readers; +static struct class *aoe_class; +static struct aoe_chardev chardevs[] = { + { MINOR_ERR, "err" }, + { MINOR_DISCOVER, "discover" }, + { MINOR_INTERFACES, "interfaces" }, + { MINOR_REVALIDATE, "revalidate" }, + { MINOR_FLUSH, "flush" }, +}; + +static int +discover(void) +{ + aoecmd_cfg(0xffff, 0xff); + return 0; +} + +static int +interfaces(const char __user *str, size_t size) +{ + if (set_aoe_iflist(str, size)) { + printk(KERN_ERR + "aoe: could not set interface list: too many interfaces\n"); + return -EINVAL; + } + return 0; +} + +static int +revalidate(const char __user *str, size_t size) +{ + int major, minor, n; + ulong flags; + struct aoedev *d; + struct sk_buff *skb; + char buf[16]; + + if (size >= sizeof buf) + return -EINVAL; + buf[sizeof buf - 1] = '\0'; + if (copy_from_user(buf, str, size)) + return -EFAULT; + + /* should be e%d.%d format */ + n = sscanf(buf, "e%d.%d", &major, &minor); + if (n != 2) { + printk(KERN_ERR "aoe: invalid device specification\n"); + return -EINVAL; + } + d = aoedev_by_aoeaddr(major, minor); + if (!d) + return -EINVAL; + spin_lock_irqsave(&d->lock, flags); + aoecmd_cleanslate(d); +loop: + skb = aoecmd_ata_id(d); + spin_unlock_irqrestore(&d->lock, flags); + /* try again if we are able to sleep a bit, + * otherwise give up this revalidation + */ + if (!skb && !msleep_interruptible(200)) { + spin_lock_irqsave(&d->lock, flags); + goto loop; + } + if (skb) { + struct sk_buff_head queue; + __skb_queue_head_init(&queue); + __skb_queue_tail(&queue, skb); + aoenet_xmit(&queue); + } + aoecmd_cfg(major, minor); + return 0; +} + +void +aoechr_error(char *msg) +{ + struct ErrMsg *em; + char *mp; + ulong flags, n; + + n = strlen(msg); + + spin_lock_irqsave(&emsgs_lock, flags); + + em = emsgs + emsgs_tail_idx; + if ((em->flags & EMFL_VALID)) { +bail: spin_unlock_irqrestore(&emsgs_lock, flags); + return; + } + + mp = kmalloc(n, GFP_ATOMIC); + if (mp == NULL) { + printk(KERN_ERR "aoe: allocation failure, len=%ld\n", n); + goto bail; + } + + memcpy(mp, msg, n); + em->msg = mp; + em->flags |= EMFL_VALID; + em->len = n; + + emsgs_tail_idx++; + emsgs_tail_idx %= ARRAY_SIZE(emsgs); + + spin_unlock_irqrestore(&emsgs_lock, flags); + + if (nblocked_emsgs_readers) + complete(&emsgs_comp); +} + +static ssize_t +aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp) +{ + int ret = -EINVAL; + + switch ((unsigned long) filp->private_data) { + default: + printk(KERN_INFO "aoe: can't write to that file.\n"); + break; + case MINOR_DISCOVER: + ret = discover(); + break; + case MINOR_INTERFACES: + ret = interfaces(buf, cnt); + break; + case MINOR_REVALIDATE: + ret = revalidate(buf, cnt); + break; + case MINOR_FLUSH: + ret = aoedev_flush(buf, cnt); + } + if (ret == 0) + ret = cnt; + return 
ret; +} + +static int +aoechr_open(struct inode *inode, struct file *filp) +{ + int n, i; + + lock_kernel(); + n = iminor(inode); + filp->private_data = (void *) (unsigned long) n; + + for (i = 0; i < ARRAY_SIZE(chardevs); ++i) + if (chardevs[i].minor == n) { + unlock_kernel(); + return 0; + } + unlock_kernel(); + return -EINVAL; +} + +static int +aoechr_rel(struct inode *inode, struct file *filp) +{ + return 0; +} + +static ssize_t +aoechr_read(struct file *filp, char __user *buf, size_t cnt, loff_t *off) +{ + unsigned long n; + char *mp; + struct ErrMsg *em; + ssize_t len; + ulong flags; + + n = (unsigned long) filp->private_data; + if (n != MINOR_ERR) + return -EFAULT; + + spin_lock_irqsave(&emsgs_lock, flags); + + for (;;) { + em = emsgs + emsgs_head_idx; + if ((em->flags & EMFL_VALID) != 0) + break; + if (filp->f_flags & O_NDELAY) { + spin_unlock_irqrestore(&emsgs_lock, flags); + return -EAGAIN; + } + nblocked_emsgs_readers++; + + spin_unlock_irqrestore(&emsgs_lock, flags); + + n = wait_for_completion_interruptible(&emsgs_comp); + + spin_lock_irqsave(&emsgs_lock, flags); + + nblocked_emsgs_readers--; + + if (n) { + spin_unlock_irqrestore(&emsgs_lock, flags); + return -ERESTARTSYS; + } + } + if (em->len > cnt) { + spin_unlock_irqrestore(&emsgs_lock, flags); + return -EAGAIN; + } + mp = em->msg; + len = em->len; + em->msg = NULL; + em->flags &= ~EMFL_VALID; + + emsgs_head_idx++; + emsgs_head_idx %= ARRAY_SIZE(emsgs); + + spin_unlock_irqrestore(&emsgs_lock, flags); + + n = copy_to_user(buf, mp, len); + kfree(mp); + return n == 0 ? len : -EFAULT; +} + +static const struct file_operations aoe_fops = { + .write = aoechr_write, + .read = aoechr_read, + .open = aoechr_open, + .release = aoechr_rel, + .owner = THIS_MODULE, +}; + +int __init +aoechr_init(void) +{ + int n, i; + + n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops); + if (n < 0) { + printk(KERN_ERR "aoe: can't register char device\n"); + return n; + } + init_completion(&emsgs_comp); + spin_lock_init(&emsgs_lock); + aoe_class = class_create(THIS_MODULE, "aoe"); + if (IS_ERR(aoe_class)) { + unregister_chrdev(AOE_MAJOR, "aoechr"); + return PTR_ERR(aoe_class); + } + for (i = 0; i < ARRAY_SIZE(chardevs); ++i) + device_create(aoe_class, NULL, + MKDEV(AOE_MAJOR, chardevs[i].minor), NULL, + chardevs[i].name); + + return 0; +} + +void +aoechr_exit(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(chardevs); ++i) + device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor)); + class_destroy(aoe_class); + unregister_chrdev(AOE_MAJOR, "aoechr"); +} + diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c new file mode 100644 index 0000000..71ff78c --- /dev/null +++ b/drivers/block/aoe/aoecmd.c @@ -0,0 +1,1086 @@ +/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. 
*/ +/* + * aoecmd.c + * Filesystem request handling methods + */ + +#include <linux/hdreg.h> +#include <linux/blkdev.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/genhd.h> +#include <linux/moduleparam.h> +#include <net/net_namespace.h> +#include <asm/unaligned.h> +#include "aoe.h" + +static int aoe_deadsecs = 60 * 3; +module_param(aoe_deadsecs, int, 0644); +MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev."); + +static int aoe_maxout = 16; +module_param(aoe_maxout, int, 0644); +MODULE_PARM_DESC(aoe_maxout, + "Only aoe_maxout outstanding packets for every MAC on eX.Y."); + +static struct sk_buff * +new_skb(ulong len) +{ + struct sk_buff *skb; + + skb = alloc_skb(len, GFP_ATOMIC); + if (skb) { + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb->protocol = __constant_htons(ETH_P_AOE); + skb->priority = 0; + skb->next = skb->prev = NULL; + + /* tell the network layer not to perform IP checksums + * or to get the NIC to do it + */ + skb->ip_summed = CHECKSUM_NONE; + } + return skb; +} + +static struct frame * +getframe(struct aoetgt *t, int tag) +{ + struct frame *f, *e; + + f = t->frames; + e = f + t->nframes; + for (; f<e; f++) + if (f->tag == tag) + return f; + return NULL; +} + +/* + * Leave the top bit clear so we have tagspace for userland. + * The bottom 16 bits are the xmit tick for rexmit/rttavg processing. + * This driver reserves tag -1 to mean "unused frame." + */ +static int +newtag(struct aoetgt *t) +{ + register ulong n; + + n = jiffies & 0xffff; + return n |= (++t->lasttag & 0x7fff) << 16; +} + +static int +aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h) +{ + u32 host_tag = newtag(t); + + memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src); + memcpy(h->dst, t->addr, sizeof h->dst); + h->type = __constant_cpu_to_be16(ETH_P_AOE); + h->verfl = AOE_HVER; + h->major = cpu_to_be16(d->aoemajor); + h->minor = d->aoeminor; + h->cmd = AOECMD_ATA; + h->tag = cpu_to_be32(host_tag); + + return host_tag; +} + +static inline void +put_lba(struct aoe_atahdr *ah, sector_t lba) +{ + ah->lba0 = lba; + ah->lba1 = lba >>= 8; + ah->lba2 = lba >>= 8; + ah->lba3 = lba >>= 8; + ah->lba4 = lba >>= 8; + ah->lba5 = lba >>= 8; +} + +static void +ifrotate(struct aoetgt *t) +{ + t->ifp++; + if (t->ifp >= &t->ifs[NAOEIFS] || t->ifp->nd == NULL) + t->ifp = t->ifs; + if (t->ifp->nd == NULL) { + printk(KERN_INFO "aoe: no interface to rotate to\n"); + BUG(); + } +} + +static void +skb_pool_put(struct aoedev *d, struct sk_buff *skb) +{ + __skb_queue_tail(&d->skbpool, skb); +} + +static struct sk_buff * +skb_pool_get(struct aoedev *d) +{ + struct sk_buff *skb = skb_peek(&d->skbpool); + + if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) { + __skb_unlink(skb, &d->skbpool); + return skb; + } + if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX && + (skb = new_skb(ETH_ZLEN))) + return skb; + + return NULL; +} + +/* freeframe is where we do our load balancing so it's a little hairy. 
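 *
 * The scan resumes round-robin at the target after the last one used
 * (d->tgt), skips any target that is already at its window (nout ==
 * maxout) or is the one flagged for rexmit help (d->htgt), and returns
 * the first free frame whose skb the network layer has released.  When
 * every free frame's skb is still referenced, a fresh skb from the
 * per-device pool is swapped in instead.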
*/ +static struct frame * +freeframe(struct aoedev *d) +{ + struct frame *f, *e, *rf; + struct aoetgt **t; + struct sk_buff *skb; + + if (d->targets[0] == NULL) { /* shouldn't happen, but I'm paranoid */ + printk(KERN_ERR "aoe: NULL TARGETS!\n"); + return NULL; + } + t = d->tgt; + t++; + if (t >= &d->targets[NTARGETS] || !*t) + t = d->targets; + for (;;) { + if ((*t)->nout < (*t)->maxout + && t != d->htgt + && (*t)->ifp->nd) { + rf = NULL; + f = (*t)->frames; + e = f + (*t)->nframes; + for (; f < e; f++) { + if (f->tag != FREETAG) + continue; + skb = f->skb; + if (!skb + && !(f->skb = skb = new_skb(ETH_ZLEN))) + continue; + if (atomic_read(&skb_shinfo(skb)->dataref) + != 1) { + if (!rf) + rf = f; + continue; + } +gotone: skb_shinfo(skb)->nr_frags = skb->data_len = 0; + skb_trim(skb, 0); + d->tgt = t; + ifrotate(*t); + return f; + } + /* Work can be done, but the network layer is + holding our precious packets. Try to grab + one from the pool. */ + f = rf; + if (f == NULL) { /* more paranoia */ + printk(KERN_ERR + "aoe: freeframe: %s.\n", + "unexpected null rf"); + d->flags |= DEVFL_KICKME; + return NULL; + } + skb = skb_pool_get(d); + if (skb) { + skb_pool_put(d, f->skb); + f->skb = skb; + goto gotone; + } + (*t)->dataref++; + if ((*t)->nout == 0) + d->flags |= DEVFL_KICKME; + } + if (t == d->tgt) /* we've looped and found nada */ + break; + t++; + if (t >= &d->targets[NTARGETS] || !*t) + t = d->targets; + } + return NULL; +} + +static int +aoecmd_ata_rw(struct aoedev *d) +{ + struct frame *f; + struct aoe_hdr *h; + struct aoe_atahdr *ah; + struct buf *buf; + struct bio_vec *bv; + struct aoetgt *t; + struct sk_buff *skb; + ulong bcnt; + char writebit, extbit; + + writebit = 0x10; + extbit = 0x4; + + f = freeframe(d); + if (f == NULL) + return 0; + t = *d->tgt; + buf = d->inprocess; + bv = buf->bv; + bcnt = t->ifp->maxbcnt; + if (bcnt == 0) + bcnt = DEFAULTBCNT; + if (bcnt > buf->bv_resid) + bcnt = buf->bv_resid; + /* initialize the headers & frame */ + skb = f->skb; + h = (struct aoe_hdr *) skb_mac_header(skb); + ah = (struct aoe_atahdr *) (h+1); + skb_put(skb, sizeof *h + sizeof *ah); + memset(h, 0, skb->len); + f->tag = aoehdr_atainit(d, t, h); + t->nout++; + f->waited = 0; + f->buf = buf; + f->bufaddr = page_address(bv->bv_page) + buf->bv_off; + f->bcnt = bcnt; + f->lba = buf->sector; + + /* set up ata header */ + ah->scnt = bcnt >> 9; + put_lba(ah, buf->sector); + if (d->flags & DEVFL_EXT) { + ah->aflags |= AOEAFL_EXT; + } else { + extbit = 0; + ah->lba3 &= 0x0f; + ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */ + } + if (bio_data_dir(buf->bio) == WRITE) { + skb_fill_page_desc(skb, 0, bv->bv_page, buf->bv_off, bcnt); + ah->aflags |= AOEAFL_WRITE; + skb->len += bcnt; + skb->data_len = bcnt; + t->wpkts++; + } else { + t->rpkts++; + writebit = 0; + } + + ah->cmdstat = WIN_READ | writebit | extbit; + + /* mark all tracking fields and load out */ + buf->nframesout += 1; + buf->bv_off += bcnt; + buf->bv_resid -= bcnt; + buf->resid -= bcnt; + buf->sector += bcnt >> 9; + if (buf->resid == 0) { + d->inprocess = NULL; + } else if (buf->bv_resid == 0) { + buf->bv = ++bv; + buf->bv_resid = bv->bv_len; + WARN_ON(buf->bv_resid == 0); + buf->bv_off = bv->bv_offset; + } + + skb->dev = t->ifp->nd; + skb = skb_clone(skb, GFP_ATOMIC); + if (skb) + __skb_queue_tail(&d->sendq, skb); + return 1; +} + +/* some callers cannot sleep, and they can call this function, + * transmitting the packets later, when interrupts are on + */ +static void +aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct 
sk_buff_head *queue) +{ + struct aoe_hdr *h; + struct aoe_cfghdr *ch; + struct sk_buff *skb; + struct net_device *ifp; + + read_lock(&dev_base_lock); + for_each_netdev(&init_net, ifp) { + dev_hold(ifp); + if (!is_aoe_netif(ifp)) + goto cont; + + skb = new_skb(sizeof *h + sizeof *ch); + if (skb == NULL) { + printk(KERN_INFO "aoe: skb alloc failure\n"); + goto cont; + } + skb_put(skb, sizeof *h + sizeof *ch); + skb->dev = ifp; + __skb_queue_tail(queue, skb); + h = (struct aoe_hdr *) skb_mac_header(skb); + memset(h, 0, sizeof *h + sizeof *ch); + + memset(h->dst, 0xff, sizeof h->dst); + memcpy(h->src, ifp->dev_addr, sizeof h->src); + h->type = __constant_cpu_to_be16(ETH_P_AOE); + h->verfl = AOE_HVER; + h->major = cpu_to_be16(aoemajor); + h->minor = aoeminor; + h->cmd = AOECMD_CFG; + +cont: + dev_put(ifp); + } + read_unlock(&dev_base_lock); +} + +static void +resend(struct aoedev *d, struct aoetgt *t, struct frame *f) +{ + struct sk_buff *skb; + struct aoe_hdr *h; + struct aoe_atahdr *ah; + char buf[128]; + u32 n; + + ifrotate(t); + n = newtag(t); + skb = f->skb; + h = (struct aoe_hdr *) skb_mac_header(skb); + ah = (struct aoe_atahdr *) (h+1); + + snprintf(buf, sizeof buf, + "%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x " + "s=%012llx d=%012llx nout=%d\n", + "retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n, + mac_addr(h->src), + mac_addr(h->dst), t->nout); + aoechr_error(buf); + + f->tag = n; + h->tag = cpu_to_be32(n); + memcpy(h->dst, t->addr, sizeof h->dst); + memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src); + + switch (ah->cmdstat) { + default: + break; + case WIN_READ: + case WIN_READ_EXT: + case WIN_WRITE: + case WIN_WRITE_EXT: + put_lba(ah, f->lba); + + n = f->bcnt; + if (n > DEFAULTBCNT) + n = DEFAULTBCNT; + ah->scnt = n >> 9; + if (ah->aflags & AOEAFL_WRITE) { + skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr), + offset_in_page(f->bufaddr), n); + skb->len = sizeof *h + sizeof *ah + n; + skb->data_len = n; + } + } + skb->dev = t->ifp->nd; + skb = skb_clone(skb, GFP_ATOMIC); + if (skb == NULL) + return; + __skb_queue_tail(&d->sendq, skb); +} + +static int +tsince(int tag) +{ + int n; + + n = jiffies & 0xffff; + n -= tag & 0xffff; + if (n < 0) + n += 1<<16; + return n; +} + +static struct aoeif * +getif(struct aoetgt *t, struct net_device *nd) +{ + struct aoeif *p, *e; + + p = t->ifs; + e = p + NAOEIFS; + for (; p < e; p++) + if (p->nd == nd) + return p; + return NULL; +} + +static struct aoeif * +addif(struct aoetgt *t, struct net_device *nd) +{ + struct aoeif *p; + + p = getif(t, NULL); + if (!p) + return NULL; + p->nd = nd; + p->maxbcnt = DEFAULTBCNT; + p->lost = 0; + p->lostjumbo = 0; + return p; +} + +static void +ejectif(struct aoetgt *t, struct aoeif *ifp) +{ + struct aoeif *e; + ulong n; + + e = t->ifs + NAOEIFS - 1; + n = (e - ifp) * sizeof *ifp; + memmove(ifp, ifp+1, n); + e->nd = NULL; +} + +static int +sthtith(struct aoedev *d) +{ + struct frame *f, *e, *nf; + struct sk_buff *skb; + struct aoetgt *ht = *d->htgt; + + f = ht->frames; + e = f + ht->nframes; + for (; f < e; f++) { + if (f->tag == FREETAG) + continue; + nf = freeframe(d); + if (!nf) + return 0; + skb = nf->skb; + *nf = *f; + f->skb = skb; + f->tag = FREETAG; + nf->waited = 0; + ht->nout--; + (*d->tgt)->nout++; + resend(d, *d->tgt, nf); + } + /* he's clean, he's useless. 
take away his interfaces */ + memset(ht->ifs, 0, sizeof ht->ifs); + d->htgt = NULL; + return 1; +} + +static inline unsigned char +ata_scnt(unsigned char *packet) { + struct aoe_hdr *h; + struct aoe_atahdr *ah; + + h = (struct aoe_hdr *) packet; + ah = (struct aoe_atahdr *) (h+1); + return ah->scnt; +} + +static void +rexmit_timer(ulong vp) +{ + struct sk_buff_head queue; + struct aoedev *d; + struct aoetgt *t, **tt, **te; + struct aoeif *ifp; + struct frame *f, *e; + register long timeout; + ulong flags, n; + + d = (struct aoedev *) vp; + + /* timeout is always ~150% of the moving average */ + timeout = d->rttavg; + timeout += timeout >> 1; + + spin_lock_irqsave(&d->lock, flags); + + if (d->flags & DEVFL_TKILL) { + spin_unlock_irqrestore(&d->lock, flags); + return; + } + tt = d->targets; + te = tt + NTARGETS; + for (; tt < te && *tt; tt++) { + t = *tt; + f = t->frames; + e = f + t->nframes; + for (; f < e; f++) { + if (f->tag == FREETAG + || tsince(f->tag) < timeout) + continue; + n = f->waited += timeout; + n /= HZ; + if (n > aoe_deadsecs) { + /* waited too long. device failure. */ + aoedev_downdev(d); + break; + } + + if (n > HELPWAIT /* see if another target can help */ + && (tt != d->targets || d->targets[1])) + d->htgt = tt; + + if (t->nout == t->maxout) { + if (t->maxout > 1) + t->maxout--; + t->lastwadj = jiffies; + } + + ifp = getif(t, f->skb->dev); + if (ifp && ++ifp->lost > (t->nframes << 1) + && (ifp != t->ifs || t->ifs[1].nd)) { + ejectif(t, ifp); + ifp = NULL; + } + + if (ata_scnt(skb_mac_header(f->skb)) > DEFAULTBCNT / 512 + && ifp && ++ifp->lostjumbo > (t->nframes << 1) + && ifp->maxbcnt != DEFAULTBCNT) { + printk(KERN_INFO + "aoe: e%ld.%d: " + "too many lost jumbo on " + "%s:%012llx - " + "falling back to %d frames.\n", + d->aoemajor, d->aoeminor, + ifp->nd->name, mac_addr(t->addr), + DEFAULTBCNT); + ifp->maxbcnt = 0; + } + resend(d, t, f); + } + + /* window check */ + if (t->nout == t->maxout + && t->maxout < t->nframes + && (jiffies - t->lastwadj)/HZ > 10) { + t->maxout++; + t->lastwadj = jiffies; + } + } + + if (!skb_queue_empty(&d->sendq)) { + n = d->rttavg <<= 1; + if (n > MAXTIMER) + d->rttavg = MAXTIMER; + } + + if (d->flags & DEVFL_KICKME || d->htgt) { + d->flags &= ~DEVFL_KICKME; + aoecmd_work(d); + } + + __skb_queue_head_init(&queue); + skb_queue_splice_init(&d->sendq, &queue); + + d->timer.expires = jiffies + TIMERTICK; + add_timer(&d->timer); + + spin_unlock_irqrestore(&d->lock, flags); + + aoenet_xmit(&queue); +} + +/* enters with d->lock held */ +void +aoecmd_work(struct aoedev *d) +{ + struct buf *buf; +loop: + if (d->htgt && !sthtith(d)) + return; + if (d->inprocess == NULL) { + if (list_empty(&d->bufq)) + return; + buf = container_of(d->bufq.next, struct buf, bufs); + list_del(d->bufq.next); + d->inprocess = buf; + } + if (aoecmd_ata_rw(d)) + goto loop; +} + +/* this function performs work that has been deferred until sleeping is OK + */ +void +aoecmd_sleepwork(struct work_struct *work) +{ + struct aoedev *d = container_of(work, struct aoedev, work); + + if (d->flags & DEVFL_GDALLOC) + aoeblk_gdalloc(d); + + if (d->flags & DEVFL_NEWSIZE) { + struct block_device *bd; + unsigned long flags; + u64 ssize; + + ssize = get_capacity(d->gd); + bd = bdget_disk(d->gd, 0); + + if (bd) { + mutex_lock(&bd->bd_inode->i_mutex); + i_size_write(bd->bd_inode, (loff_t)ssize<<9); + mutex_unlock(&bd->bd_inode->i_mutex); + bdput(bd); + } + spin_lock_irqsave(&d->lock, flags); + d->flags |= DEVFL_UP; + d->flags &= ~DEVFL_NEWSIZE; + spin_unlock_irqrestore(&d->lock, flags); + } +} 
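/*
 * A standalone sketch (not part of the driver) of the tag scheme that
 * newtag() and tsince() above implement: the low 16 bits of a tag hold
 * the jiffies tick at transmit time, the upper bits hold a sequence
 * number, and tsince() recovers the elapsed ticks modulo 2^16.  The
 * names below (fake_jiffies, ex_lasttag, example_*) are invented for
 * illustration; compile it on its own with any hosted C compiler.
 */
#if 0
#include <stdio.h>

static unsigned long fake_jiffies;	/* stands in for jiffies */
static unsigned short ex_lasttag;

static int example_newtag(void)
{
	unsigned long n = fake_jiffies & 0xffff;

	return n | (++ex_lasttag & 0x7fff) << 16;
}

static int example_tsince(int tag)
{
	int n = (fake_jiffies & 0xffff) - (tag & 0xffff);

	return n < 0 ? n + (1 << 16) : n;
}

int main(void)
{
	int tag;

	fake_jiffies = 0xfff0;		/* just shy of the 16-bit wrap */
	tag = example_newtag();
	fake_jiffies += 40;		/* 40 ticks later, low word wrapped */
	printf("elapsed=%d\n", example_tsince(tag));	/* prints 40 */
	return 0;
}
#endif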
+ +static void +ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id) +{ + u64 ssize; + u16 n; + + /* word 83: command set supported */ + n = get_unaligned_le16(&id[83 << 1]); + + /* word 86: command set/feature enabled */ + n |= get_unaligned_le16(&id[86 << 1]); + + if (n & (1<<10)) { /* bit 10: LBA 48 */ + d->flags |= DEVFL_EXT; + + /* word 100: number lba48 sectors */ + ssize = get_unaligned_le64(&id[100 << 1]); + + /* set as in ide-disk.c:init_idedisk_capacity */ + d->geo.cylinders = ssize; + d->geo.cylinders /= (255 * 63); + d->geo.heads = 255; + d->geo.sectors = 63; + } else { + d->flags &= ~DEVFL_EXT; + + /* number lba28 sectors */ + ssize = get_unaligned_le32(&id[60 << 1]); + + /* NOTE: obsolete in ATA 6 */ + d->geo.cylinders = get_unaligned_le16(&id[54 << 1]); + d->geo.heads = get_unaligned_le16(&id[55 << 1]); + d->geo.sectors = get_unaligned_le16(&id[56 << 1]); + } + + if (d->ssize != ssize) + printk(KERN_INFO + "aoe: %012llx e%ld.%d v%04x has %llu sectors\n", + mac_addr(t->addr), + d->aoemajor, d->aoeminor, + d->fw_ver, (long long)ssize); + d->ssize = ssize; + d->geo.start = 0; + if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE)) + return; + if (d->gd != NULL) { + set_capacity(d->gd, ssize); + d->flags |= DEVFL_NEWSIZE; + } else + d->flags |= DEVFL_GDALLOC; + schedule_work(&d->work); +} + +static void +calc_rttavg(struct aoedev *d, int rtt) +{ + register long n; + + n = rtt; + if (n < 0) { + n = -rtt; + if (n < MINTIMER) + n = MINTIMER; + else if (n > MAXTIMER) + n = MAXTIMER; + d->mintimer += (n - d->mintimer) >> 1; + } else if (n < d->mintimer) + n = d->mintimer; + else if (n > MAXTIMER) + n = MAXTIMER; + + /* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */ + n -= d->rttavg; + d->rttavg += n >> 2; +} + +static struct aoetgt * +gettgt(struct aoedev *d, char *addr) +{ + struct aoetgt **t, **e; + + t = d->targets; + e = t + NTARGETS; + for (; t < e && *t; t++) + if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0) + return *t; + return NULL; +} + +static inline void +diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector) +{ + unsigned long n_sect = bio->bi_size >> 9; + const int rw = bio_data_dir(bio); + struct hd_struct *part; + int cpu; + + cpu = part_stat_lock(); + part = disk_map_sector_rcu(disk, sector); + + part_stat_inc(cpu, part, ios[rw]); + part_stat_add(cpu, part, ticks[rw], duration); + part_stat_add(cpu, part, sectors[rw], n_sect); + part_stat_add(cpu, part, io_ticks, duration); + + part_stat_unlock(); +} + +void +aoecmd_ata_rsp(struct sk_buff *skb) +{ + struct sk_buff_head queue; + struct aoedev *d; + struct aoe_hdr *hin, *hout; + struct aoe_atahdr *ahin, *ahout; + struct frame *f; + struct buf *buf; + struct aoetgt *t; + struct aoeif *ifp; + register long n; + ulong flags; + char ebuf[128]; + u16 aoemajor; + + hin = (struct aoe_hdr *) skb_mac_header(skb); + aoemajor = get_unaligned_be16(&hin->major); + d = aoedev_by_aoeaddr(aoemajor, hin->minor); + if (d == NULL) { + snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response " + "for unknown device %d.%d\n", + aoemajor, hin->minor); + aoechr_error(ebuf); + return; + } + + spin_lock_irqsave(&d->lock, flags); + + n = get_unaligned_be32(&hin->tag); + t = gettgt(d, hin->src); + if (t == NULL) { + printk(KERN_INFO "aoe: can't find target e%ld.%d:%012llx\n", + d->aoemajor, d->aoeminor, mac_addr(hin->src)); + spin_unlock_irqrestore(&d->lock, flags); + return; + } + f = getframe(t, n); + if (f == NULL) { + calc_rttavg(d, -tsince(n)); + 
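		/* The negated age above makes calc_rttavg() treat this
		 * unmatched (late or duplicate) response as an
		 * out-of-band sample: it nudges d->mintimer before
		 * folding the clamped value into the round-trip average.
		 */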
spin_unlock_irqrestore(&d->lock, flags); + snprintf(ebuf, sizeof ebuf, + "%15s e%d.%d tag=%08x@%08lx\n", + "unexpected rsp", + get_unaligned_be16(&hin->major), + hin->minor, + get_unaligned_be32(&hin->tag), + jiffies); + aoechr_error(ebuf); + return; + } + + calc_rttavg(d, tsince(f->tag)); + + ahin = (struct aoe_atahdr *) (hin+1); + hout = (struct aoe_hdr *) skb_mac_header(f->skb); + ahout = (struct aoe_atahdr *) (hout+1); + buf = f->buf; + + if (ahin->cmdstat & 0xa9) { /* these bits cleared on success */ + printk(KERN_ERR + "aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n", + ahout->cmdstat, ahin->cmdstat, + d->aoemajor, d->aoeminor); + if (buf) + buf->flags |= BUFFL_FAIL; + } else { + if (d->htgt && t == *d->htgt) /* I'll help myself, thank you. */ + d->htgt = NULL; + n = ahout->scnt << 9; + switch (ahout->cmdstat) { + case WIN_READ: + case WIN_READ_EXT: + if (skb->len - sizeof *hin - sizeof *ahin < n) { + printk(KERN_ERR + "aoe: %s. skb->len=%d need=%ld\n", + "runt data size in read", skb->len, n); + /* fail frame f? just returning will rexmit. */ + spin_unlock_irqrestore(&d->lock, flags); + return; + } + memcpy(f->bufaddr, ahin+1, n); + case WIN_WRITE: + case WIN_WRITE_EXT: + ifp = getif(t, skb->dev); + if (ifp) { + ifp->lost = 0; + if (n > DEFAULTBCNT) + ifp->lostjumbo = 0; + } + if (f->bcnt -= n) { + f->lba += n >> 9; + f->bufaddr += n; + resend(d, t, f); + goto xmit; + } + break; + case WIN_IDENTIFY: + if (skb->len - sizeof *hin - sizeof *ahin < 512) { + printk(KERN_INFO + "aoe: runt data size in ataid. skb->len=%d\n", + skb->len); + spin_unlock_irqrestore(&d->lock, flags); + return; + } + ataid_complete(d, t, (char *) (ahin+1)); + break; + default: + printk(KERN_INFO + "aoe: unrecognized ata command %2.2Xh for %d.%d\n", + ahout->cmdstat, + get_unaligned_be16(&hin->major), + hin->minor); + } + } + + if (buf && --buf->nframesout == 0 && buf->resid == 0) { + diskstats(d->gd, buf->bio, jiffies - buf->stime, buf->sector); + n = (buf->flags & BUFFL_FAIL) ? 
-EIO : 0; + bio_endio(buf->bio, n); + mempool_free(buf, d->bufpool); + } + + f->buf = NULL; + f->tag = FREETAG; + t->nout--; + + aoecmd_work(d); +xmit: + __skb_queue_head_init(&queue); + skb_queue_splice_init(&d->sendq, &queue); + + spin_unlock_irqrestore(&d->lock, flags); + aoenet_xmit(&queue); +} + +void +aoecmd_cfg(ushort aoemajor, unsigned char aoeminor) +{ + struct sk_buff_head queue; + + __skb_queue_head_init(&queue); + aoecmd_cfg_pkts(aoemajor, aoeminor, &queue); + aoenet_xmit(&queue); +} + +struct sk_buff * +aoecmd_ata_id(struct aoedev *d) +{ + struct aoe_hdr *h; + struct aoe_atahdr *ah; + struct frame *f; + struct sk_buff *skb; + struct aoetgt *t; + + f = freeframe(d); + if (f == NULL) + return NULL; + + t = *d->tgt; + + /* initialize the headers & frame */ + skb = f->skb; + h = (struct aoe_hdr *) skb_mac_header(skb); + ah = (struct aoe_atahdr *) (h+1); + skb_put(skb, sizeof *h + sizeof *ah); + memset(h, 0, skb->len); + f->tag = aoehdr_atainit(d, t, h); + t->nout++; + f->waited = 0; + + /* set up ata header */ + ah->scnt = 1; + ah->cmdstat = WIN_IDENTIFY; + ah->lba3 = 0xa0; + + skb->dev = t->ifp->nd; + + d->rttavg = MAXTIMER; + d->timer.function = rexmit_timer; + + return skb_clone(skb, GFP_ATOMIC); +} + +static struct aoetgt * +addtgt(struct aoedev *d, char *addr, ulong nframes) +{ + struct aoetgt *t, **tt, **te; + struct frame *f, *e; + + tt = d->targets; + te = tt + NTARGETS; + for (; tt < te && *tt; tt++) + ; + + if (tt == te) { + printk(KERN_INFO + "aoe: device addtgt failure; too many targets\n"); + return NULL; + } + t = kcalloc(1, sizeof *t, GFP_ATOMIC); + f = kcalloc(nframes, sizeof *f, GFP_ATOMIC); + if (!t || !f) { + kfree(f); + kfree(t); + printk(KERN_INFO "aoe: cannot allocate memory to add target\n"); + return NULL; + } + + t->nframes = nframes; + t->frames = f; + e = f + nframes; + for (; f < e; f++) + f->tag = FREETAG; + memcpy(t->addr, addr, sizeof t->addr); + t->ifp = t->ifs; + t->maxout = t->nframes; + return *tt = t; +} + +void +aoecmd_cfg_rsp(struct sk_buff *skb) +{ + struct aoedev *d; + struct aoe_hdr *h; + struct aoe_cfghdr *ch; + struct aoetgt *t; + struct aoeif *ifp; + ulong flags, sysminor, aoemajor; + struct sk_buff *sl; + u16 n; + + h = (struct aoe_hdr *) skb_mac_header(skb); + ch = (struct aoe_cfghdr *) (h+1); + + /* + * Enough people have their dip switches set backwards to + * warrant a loud message for this special case. + */ + aoemajor = get_unaligned_be16(&h->major); + if (aoemajor == 0xfff) { + printk(KERN_ERR "aoe: Warning: shelf address is all ones. 
" + "Check shelf dip switches.\n"); + return; + } + + sysminor = SYSMINOR(aoemajor, h->minor); + if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) { + printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n", + aoemajor, (int) h->minor); + return; + } + + n = be16_to_cpu(ch->bufcnt); + if (n > aoe_maxout) /* keep it reasonable */ + n = aoe_maxout; + + d = aoedev_by_sysminor_m(sysminor); + if (d == NULL) { + printk(KERN_INFO "aoe: device sysminor_m failure\n"); + return; + } + + spin_lock_irqsave(&d->lock, flags); + + t = gettgt(d, h->src); + if (!t) { + t = addtgt(d, h->src, n); + if (!t) { + spin_unlock_irqrestore(&d->lock, flags); + return; + } + } + ifp = getif(t, skb->dev); + if (!ifp) { + ifp = addif(t, skb->dev); + if (!ifp) { + printk(KERN_INFO + "aoe: device addif failure; " + "too many interfaces?\n"); + spin_unlock_irqrestore(&d->lock, flags); + return; + } + } + if (ifp->maxbcnt) { + n = ifp->nd->mtu; + n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr); + n /= 512; + if (n > ch->scnt) + n = ch->scnt; + n = n ? n * 512 : DEFAULTBCNT; + if (n != ifp->maxbcnt) { + printk(KERN_INFO + "aoe: e%ld.%d: setting %d%s%s:%012llx\n", + d->aoemajor, d->aoeminor, n, + " byte data frames on ", ifp->nd->name, + mac_addr(t->addr)); + ifp->maxbcnt = n; + } + } + + /* don't change users' perspective */ + if (d->nopen) { + spin_unlock_irqrestore(&d->lock, flags); + return; + } + d->fw_ver = be16_to_cpu(ch->fwver); + + sl = aoecmd_ata_id(d); + + spin_unlock_irqrestore(&d->lock, flags); + + if (sl) { + struct sk_buff_head queue; + __skb_queue_head_init(&queue); + __skb_queue_tail(&queue, sl); + aoenet_xmit(&queue); + } +} + +void +aoecmd_cleanslate(struct aoedev *d) +{ + struct aoetgt **t, **te; + struct aoeif *p, *e; + + d->mintimer = MINTIMER; + + t = d->targets; + te = t + NTARGETS; + for (; t < te && *t; t++) { + (*t)->maxout = (*t)->nframes; + p = (*t)->ifs; + e = p + NAOEIFS; + for (; p < e; p++) { + p->lostjumbo = 0; + p->lost = 0; + p->maxbcnt = DEFAULTBCNT; + } + } +} diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c new file mode 100644 index 0000000..cc25057 --- /dev/null +++ b/drivers/block/aoe/aoedev.c @@ -0,0 +1,277 @@ +/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ +/* + * aoedev.c + * AoE device utility functions; maintains device list. 
+ */ + +#include <linux/hdreg.h> +#include <linux/blkdev.h> +#include <linux/netdevice.h> +#include <linux/delay.h> +#include "aoe.h" + +static void dummy_timer(ulong); +static void aoedev_freedev(struct aoedev *); +static void freetgt(struct aoedev *d, struct aoetgt *t); +static void skbpoolfree(struct aoedev *d); + +static struct aoedev *devlist; +static DEFINE_SPINLOCK(devlist_lock); + +struct aoedev * +aoedev_by_aoeaddr(int maj, int min) +{ + struct aoedev *d; + ulong flags; + + spin_lock_irqsave(&devlist_lock, flags); + + for (d=devlist; d; d=d->next) + if (d->aoemajor == maj && d->aoeminor == min) + break; + + spin_unlock_irqrestore(&devlist_lock, flags); + return d; +} + +static void +dummy_timer(ulong vp) +{ + struct aoedev *d; + + d = (struct aoedev *)vp; + if (d->flags & DEVFL_TKILL) + return; + d->timer.expires = jiffies + HZ; + add_timer(&d->timer); +} + +void +aoedev_downdev(struct aoedev *d) +{ + struct aoetgt **t, **te; + struct frame *f, *e; + struct buf *buf; + struct bio *bio; + + t = d->targets; + te = t + NTARGETS; + for (; t < te && *t; t++) { + f = (*t)->frames; + e = f + (*t)->nframes; + for (; f < e; f->tag = FREETAG, f->buf = NULL, f++) { + if (f->tag == FREETAG || f->buf == NULL) + continue; + buf = f->buf; + bio = buf->bio; + if (--buf->nframesout == 0 + && buf != d->inprocess) { + mempool_free(buf, d->bufpool); + bio_endio(bio, -EIO); + } + } + (*t)->maxout = (*t)->nframes; + (*t)->nout = 0; + } + buf = d->inprocess; + if (buf) { + bio = buf->bio; + mempool_free(buf, d->bufpool); + bio_endio(bio, -EIO); + } + d->inprocess = NULL; + d->htgt = NULL; + + while (!list_empty(&d->bufq)) { + buf = container_of(d->bufq.next, struct buf, bufs); + list_del(d->bufq.next); + bio = buf->bio; + mempool_free(buf, d->bufpool); + bio_endio(bio, -EIO); + } + + if (d->gd) + set_capacity(d->gd, 0); + + d->flags &= ~DEVFL_UP; +} + +static void +aoedev_freedev(struct aoedev *d) +{ + struct aoetgt **t, **e; + + if (d->gd) { + aoedisk_rm_sysfs(d); + del_gendisk(d->gd); + put_disk(d->gd); + } + t = d->targets; + e = t + NTARGETS; + for (; t < e && *t; t++) + freetgt(d, *t); + if (d->bufpool) + mempool_destroy(d->bufpool); + skbpoolfree(d); + kfree(d); +} + +int +aoedev_flush(const char __user *str, size_t cnt) +{ + ulong flags; + struct aoedev *d, **dd; + struct aoedev *rmd = NULL; + char buf[16]; + int all = 0; + + if (cnt >= 3) { + if (cnt > sizeof buf) + cnt = sizeof buf; + if (copy_from_user(buf, str, cnt)) + return -EFAULT; + all = !strncmp(buf, "all", 3); + } + + flush_scheduled_work(); + spin_lock_irqsave(&devlist_lock, flags); + dd = &devlist; + while ((d = *dd)) { + spin_lock(&d->lock); + if ((!all && (d->flags & DEVFL_UP)) + || (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE)) + || d->nopen) { + spin_unlock(&d->lock); + dd = &d->next; + continue; + } + *dd = d->next; + aoedev_downdev(d); + d->flags |= DEVFL_TKILL; + spin_unlock(&d->lock); + d->next = rmd; + rmd = d; + } + spin_unlock_irqrestore(&devlist_lock, flags); + while ((d = rmd)) { + rmd = d->next; + del_timer_sync(&d->timer); + aoedev_freedev(d); /* must be able to sleep */ + } + return 0; +} + +/* I'm not really sure that this is a realistic problem, but if the +network driver goes gonzo let's just leak memory after complaining. */ +static void +skbfree(struct sk_buff *skb) +{ + enum { Sms = 100, Tms = 3*1000}; + int i = Tms / Sms; + + if (skb == NULL) + return; + while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0) + msleep(Sms); + if (i <= 0) { + printk(KERN_ERR + "aoe: %s holds ref: %s\n", + skb->dev ? 
skb->dev->name : "netif", + "cannot free skb -- memory leaked."); + return; + } + skb_shinfo(skb)->nr_frags = skb->data_len = 0; + skb_trim(skb, 0); + dev_kfree_skb(skb); +} + +static void +skbpoolfree(struct aoedev *d) +{ + struct sk_buff *skb, *tmp; + + skb_queue_walk_safe(&d->skbpool, skb, tmp) + skbfree(skb); + + __skb_queue_head_init(&d->skbpool); +} + +/* find it or malloc it */ +struct aoedev * +aoedev_by_sysminor_m(ulong sysminor) +{ + struct aoedev *d; + ulong flags; + + spin_lock_irqsave(&devlist_lock, flags); + + for (d=devlist; d; d=d->next) + if (d->sysminor == sysminor) + break; + if (d) + goto out; + d = kcalloc(1, sizeof *d, GFP_ATOMIC); + if (!d) + goto out; + INIT_WORK(&d->work, aoecmd_sleepwork); + spin_lock_init(&d->lock); + skb_queue_head_init(&d->sendq); + skb_queue_head_init(&d->skbpool); + init_timer(&d->timer); + d->timer.data = (ulong) d; + d->timer.function = dummy_timer; + d->timer.expires = jiffies + HZ; + add_timer(&d->timer); + d->bufpool = NULL; /* defer to aoeblk_gdalloc */ + d->tgt = d->targets; + INIT_LIST_HEAD(&d->bufq); + d->sysminor = sysminor; + d->aoemajor = AOEMAJOR(sysminor); + d->aoeminor = AOEMINOR(sysminor); + d->mintimer = MINTIMER; + d->next = devlist; + devlist = d; + out: + spin_unlock_irqrestore(&devlist_lock, flags); + return d; +} + +static void +freetgt(struct aoedev *d, struct aoetgt *t) +{ + struct frame *f, *e; + + f = t->frames; + e = f + t->nframes; + for (; f < e; f++) + skbfree(f->skb); + kfree(t->frames); + kfree(t); +} + +void +aoedev_exit(void) +{ + struct aoedev *d; + ulong flags; + + flush_scheduled_work(); + + while ((d = devlist)) { + devlist = d->next; + + spin_lock_irqsave(&d->lock, flags); + aoedev_downdev(d); + d->flags |= DEVFL_TKILL; + spin_unlock_irqrestore(&d->lock, flags); + + del_timer_sync(&d->timer); + aoedev_freedev(d); + } +} + +int __init +aoedev_init(void) +{ + return 0; +} diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c new file mode 100644 index 0000000..7f83ad9 --- /dev/null +++ b/drivers/block/aoe/aoemain.c @@ -0,0 +1,111 @@ +/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. 
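 *
 * Bring-up order matters: device list, then the character control
 * nodes, then the block-layer buf cache, then the network hook, and
 * finally the block-major registration.  aoe_exit() tears these down
 * again, leaving aoeblk_exit() for last so the buf slab cache outlives
 * every allocated buf.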
*/ +/* + * aoemain.c + * Module initialization routines, discover timer + */ + +#include <linux/hdreg.h> +#include <linux/blkdev.h> +#include <linux/module.h> +#include <linux/skbuff.h> +#include "aoe.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Sam Hopkins <sah@coraid.com>"); +MODULE_DESCRIPTION("AoE block/char driver for 2.6.2 and newer 2.6 kernels"); +MODULE_VERSION(VERSION); + +enum { TINIT, TRUN, TKILL }; + +static void +discover_timer(ulong vp) +{ + static struct timer_list t; + static volatile ulong die; + static spinlock_t lock; + ulong flags; + enum { DTIMERTICK = HZ * 60 }; /* one minute */ + + switch (vp) { + case TINIT: + init_timer(&t); + spin_lock_init(&lock); + t.data = TRUN; + t.function = discover_timer; + die = 0; + case TRUN: + spin_lock_irqsave(&lock, flags); + if (!die) { + t.expires = jiffies + DTIMERTICK; + add_timer(&t); + } + spin_unlock_irqrestore(&lock, flags); + + aoecmd_cfg(0xffff, 0xff); + return; + case TKILL: + spin_lock_irqsave(&lock, flags); + die = 1; + spin_unlock_irqrestore(&lock, flags); + + del_timer_sync(&t); + default: + return; + } +} + +static void +aoe_exit(void) +{ + discover_timer(TKILL); + + aoenet_exit(); + unregister_blkdev(AOE_MAJOR, DEVICE_NAME); + aoechr_exit(); + aoedev_exit(); + aoeblk_exit(); /* free cache after de-allocating bufs */ +} + +static int __init +aoe_init(void) +{ + int ret; + + ret = aoedev_init(); + if (ret) + return ret; + ret = aoechr_init(); + if (ret) + goto chr_fail; + ret = aoeblk_init(); + if (ret) + goto blk_fail; + ret = aoenet_init(); + if (ret) + goto net_fail; + ret = register_blkdev(AOE_MAJOR, DEVICE_NAME); + if (ret < 0) { + printk(KERN_ERR "aoe: can't register major\n"); + goto blkreg_fail; + } + + printk(KERN_INFO "aoe: AoE v%s initialised.\n", VERSION); + discover_timer(TINIT); + return 0; + + blkreg_fail: + aoenet_exit(); + net_fail: + aoeblk_exit(); + blk_fail: + aoechr_exit(); + chr_fail: + aoedev_exit(); + + printk(KERN_INFO "aoe: initialisation failure.\n"); + return ret; +} + +module_init(aoe_init); +module_exit(aoe_exit); + diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c new file mode 100644 index 0000000..b67629a --- /dev/null +++ b/drivers/block/aoe/aoenet.c @@ -0,0 +1,182 @@ +/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. 
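 *
 * Nothing here is AoE-specific beyond the ethertype: aoenet_rcv() below
 * is an ordinary packet_type handler that filters on aoe_iflist,
 * insists on the response flag, and hands ATA and config responses to
 * aoecmd.c.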
*/ +/* + * aoenet.c + * Ethernet portion of AoE driver + */ + +#include <linux/hdreg.h> +#include <linux/blkdev.h> +#include <linux/netdevice.h> +#include <linux/moduleparam.h> +#include <net/net_namespace.h> +#include <asm/unaligned.h> +#include "aoe.h" + +#define NECODES 5 + +static char *aoe_errlist[] = +{ + "no such error", + "unrecognized command code", + "bad argument parameter", + "device unavailable", + "config string present", + "unsupported version" +}; + +enum { + IFLISTSZ = 1024, +}; + +static char aoe_iflist[IFLISTSZ]; +module_param_string(aoe_iflist, aoe_iflist, IFLISTSZ, 0600); +MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=\"dev1 [dev2 ...]\""); + +#ifndef MODULE +static int __init aoe_iflist_setup(char *str) +{ + strncpy(aoe_iflist, str, IFLISTSZ); + aoe_iflist[IFLISTSZ - 1] = '\0'; + return 1; +} + +__setup("aoe_iflist=", aoe_iflist_setup); +#endif + +int +is_aoe_netif(struct net_device *ifp) +{ + register char *p, *q; + register int len; + + if (aoe_iflist[0] == '\0') + return 1; + + p = aoe_iflist + strspn(aoe_iflist, WHITESPACE); + for (; *p; p = q + strspn(q, WHITESPACE)) { + q = p + strcspn(p, WHITESPACE); + if (q != p) + len = q - p; + else + len = strlen(p); /* last token in aoe_iflist */ + + if (strlen(ifp->name) == len && !strncmp(ifp->name, p, len)) + return 1; + if (q == p) + break; + } + + return 0; +} + +int +set_aoe_iflist(const char __user *user_str, size_t size) +{ + if (size >= IFLISTSZ) + return -EINVAL; + + if (copy_from_user(aoe_iflist, user_str, size)) { + printk(KERN_INFO "aoe: copy from user failed\n"); + return -EFAULT; + } + aoe_iflist[size] = 0x00; + return 0; +} + +unsigned long long +mac_addr(char addr[6]) +{ + __be64 n = 0; + char *p = (char *) &n; + + memcpy(p + 2, addr, 6); /* (sizeof addr != 6) */ + + return (unsigned long long) __be64_to_cpu(n); +} + +void +aoenet_xmit(struct sk_buff_head *queue) +{ + struct sk_buff *skb, *tmp; + + skb_queue_walk_safe(queue, skb, tmp) { + __skb_unlink(skb, queue); + dev_queue_xmit(skb); + } +} + +/* + * (1) len doesn't include the header by default. I want this. 
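 *     aoenet_rcv() pushes the Ethernet header back with skb_push(skb,
 *     ETH_HLEN) so that skb->len covers it again; struct aoe_hdr
 *     overlays the dst/src/type fields, and the length checks in
 *     aoecmd.c count the header in.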
+ */ +static int +aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct net_device *orig_dev) +{ + struct aoe_hdr *h; + u32 n; + + if (dev_net(ifp) != &init_net) + goto exit; + + skb = skb_share_check(skb, GFP_ATOMIC); + if (skb == NULL) + return 0; + if (skb_linearize(skb)) + goto exit; + if (!is_aoe_netif(ifp)) + goto exit; + skb_push(skb, ETH_HLEN); /* (1) */ + + h = (struct aoe_hdr *) skb_mac_header(skb); + n = get_unaligned_be32(&h->tag); + if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31)) + goto exit; + + if (h->verfl & AOEFL_ERR) { + n = h->err; + if (n > NECODES) + n = 0; + if (net_ratelimit()) + printk(KERN_ERR + "%s%d.%d@%s; ecode=%d '%s'\n", + "aoe: error packet from ", + get_unaligned_be16(&h->major), + h->minor, skb->dev->name, + h->err, aoe_errlist[n]); + goto exit; + } + + switch (h->cmd) { + case AOECMD_ATA: + aoecmd_ata_rsp(skb); + break; + case AOECMD_CFG: + aoecmd_cfg_rsp(skb); + break; + default: + if (h->cmd >= AOECMD_VEND_MIN) + break; /* don't complain about vendor commands */ + printk(KERN_INFO "aoe: unknown cmd %d\n", h->cmd); + } +exit: + dev_kfree_skb(skb); + return 0; +} + +static struct packet_type aoe_pt = { + .type = __constant_htons(ETH_P_AOE), + .func = aoenet_rcv, +}; + +int __init +aoenet_init(void) +{ + dev_add_pack(&aoe_pt); + return 0; +} + +void +aoenet_exit(void) +{ + dev_remove_pack(&aoe_pt); +} + diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c new file mode 100644 index 0000000..69e1df7 --- /dev/null +++ b/drivers/block/ataflop.c @@ -0,0 +1,2006 @@ +/* + * drivers/block/ataflop.c + * + * Copyright (C) 1993 Greg Harp + * Atari Support by Bjoern Brauel, Roman Hodek + * + * Big cleanup Sep 11..14 1994 Roman Hodek: + * - Driver now works interrupt driven + * - Support for two drives; should work, but I cannot test that :-( + * - Reading is done in whole tracks and buffered to speed up things + * - Disk change detection and drive deselecting after motor-off + * similar to TOS + * - Autodetection of disk format (DD/HD); untested yet, because I + * don't have an HD drive :-( + * + * Fixes Nov 13 1994 Martin Schaller: + * - Autodetection works now + * - Support for 5 1/4'' disks + * - Removed drive type (unknown on atari) + * - Do seeks with 8 Mhz + * + * Changes by Andreas Schwab: + * - After errors in multiple read mode try again reading single sectors + * (Feb 1995): + * - Clean up error handling + * - Set blk_size for proper size checking + * - Initialize track register when testing presence of floppy + * - Implement some ioctl's + * + * Changes by Torsten Lang: + * - When probing the floppies we should add the FDCCMDADD_H flag since + * the FDC will otherwise wait forever when no disk is inserted... + * + * ++ Freddi Aschwanden (fa) 20.9.95 fixes for medusa: + * - MFPDELAY() after each FDC access -> atari + * - more/other disk formats + * - DMA to the block buffer directly if we have a 32bit DMA + * - for medusa, the step rate is always 3ms + * - on medusa, use only cache_push() + * Roman: + * - Make disk format numbering independent from minors + * - Let user set max. 
supported drive type (speeds up format + * detection, saves buffer space) + * + * Roman 10/15/95: + * - implement some more ioctls + * - disk formatting + * + * Andreas 95/12/12: + * - increase gap size at start of track for HD/ED disks + * + * Michael (MSch) 11/07/96: + * - implemented FDSETPRM and FDDEFPRM ioctl + * + * Andreas (97/03/19): + * - implemented missing BLK* ioctls + * + * Things left to do: + * - Formatting + * - Maybe a better strategy for disk change detection (does anyone + * know one?) + */ + +#include <linux/module.h> + +#include <linux/fd.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/blkdev.h> + +#include <asm/atafd.h> +#include <asm/atafdreg.h> +#include <asm/atariints.h> +#include <asm/atari_stdma.h> +#include <asm/atari_stram.h> + +#define FD_MAX_UNITS 2 + +#undef DEBUG + +static struct request_queue *floppy_queue; + +#define QUEUE (floppy_queue) +#define CURRENT elv_next_request(floppy_queue) + +/* Disk types: DD, HD, ED */ +static struct atari_disk_type { + const char *name; + unsigned spt; /* sectors per track */ + unsigned blocks; /* total number of blocks */ + unsigned fdc_speed; /* fdc_speed setting */ + unsigned stretch; /* track doubling ? */ +} atari_disk_type[] = { + { "d360", 9, 720, 0, 0}, /* 0: 360kB diskette */ + { "D360", 9, 720, 0, 1}, /* 1: 360kb in 720k or 1.2MB drive */ + { "D720", 9,1440, 0, 0}, /* 2: 720kb in 720k or 1.2MB drive */ + { "D820", 10,1640, 0, 0}, /* 3: DD disk with 82 tracks/10 sectors */ +/* formats above are probed for type DD */ +#define MAX_TYPE_DD 3 + { "h1200",15,2400, 3, 0}, /* 4: 1.2MB diskette */ + { "H1440",18,2880, 3, 0}, /* 5: 1.4 MB diskette (HD) */ + { "H1640",20,3280, 3, 0}, /* 6: 1.64MB diskette (fat HD) 82 tr 20 sec */ +/* formats above are probed for types DD and HD */ +#define MAX_TYPE_HD 6 + { "E2880",36,5760, 3, 0}, /* 7: 2.8 MB diskette (ED) */ + { "E3280",40,6560, 3, 0}, /* 8: 3.2 MB diskette (fat ED) 82 tr 40 sec */ +/* formats above are probed for types DD, HD and ED */ +#define MAX_TYPE_ED 8 +/* types below are never autoprobed */ + { "H1680",21,3360, 3, 0}, /* 9: 1.68MB diskette (fat HD) 80 tr 21 sec */ + { "h410",10,820, 0, 1}, /* 10: 410k diskette 41 tr 10 sec, stretch */ + { "h1476",18,2952, 3, 0}, /* 11: 1.48MB diskette 82 tr 18 sec */ + { "H1722",21,3444, 3, 0}, /* 12: 1.72MB diskette 82 tr 21 sec */ + { "h420",10,840, 0, 1}, /* 13: 420k diskette 42 tr 10 sec, stretch */ + { "H830",10,1660, 0, 0}, /* 14: 820k diskette 83 tr 10 sec */ + { "h1494",18,2952, 3, 0}, /* 15: 1.49MB diskette 83 tr 18 sec */ + { "H1743",21,3486, 3, 0}, /* 16: 1.74MB diskette 83 tr 21 sec */ + { "h880",11,1760, 0, 0}, /* 17: 880k diskette 80 tr 11 sec */ + { "D1040",13,2080, 0, 0}, /* 18: 1.04MB diskette 80 tr 13 sec */ + { "D1120",14,2240, 0, 0}, /* 19: 1.12MB diskette 80 tr 14 sec */ + { "h1600",20,3200, 3, 0}, /* 20: 1.60MB diskette 80 tr 20 sec */ + { "H1760",22,3520, 3, 0}, /* 21: 1.76MB diskette 80 tr 22 sec */ + { "H1920",24,3840, 3, 0}, /* 22: 1.92MB diskette 80 tr 24 sec */ + { "E3200",40,6400, 3, 0}, /* 23: 3.2MB diskette 80 tr 40 sec */ + { "E3520",44,7040, 3, 0}, /* 24: 3.52MB diskette 80 tr 44 sec */ + { "E3840",48,7680, 3, 0}, /* 25: 3.84MB diskette 80 tr 48 sec */ + { "H1840",23,3680, 3, 0}, /* 26: 1.84MB diskette 80 tr 23 sec */ + { "D800",10,1600, 0, 0}, /* 27: 800k diskette 80 tr 10 sec */ +}; + +static int StartDiskType[] = { + MAX_TYPE_DD, + MAX_TYPE_HD, + MAX_TYPE_ED +}; + +#define TYPE_DD 0 +#define TYPE_HD 1 +#define TYPE_ED 2 + +static int DriveType = TYPE_HD; + +static 
DEFINE_SPINLOCK(ataflop_lock); + +/* Array for translating minors into disk formats */ +static struct { + int index; + unsigned drive_types; +} minor2disktype[] = { + { 0, TYPE_DD }, /* 1: d360 */ + { 4, TYPE_HD }, /* 2: h1200 */ + { 1, TYPE_DD }, /* 3: D360 */ + { 2, TYPE_DD }, /* 4: D720 */ + { 1, TYPE_DD }, /* 5: h360 = D360 */ + { 2, TYPE_DD }, /* 6: h720 = D720 */ + { 5, TYPE_HD }, /* 7: H1440 */ + { 7, TYPE_ED }, /* 8: E2880 */ +/* some PC formats :-) */ + { 8, TYPE_ED }, /* 9: E3280 <- was "CompaQ" == E2880 for PC */ + { 5, TYPE_HD }, /* 10: h1440 = H1440 */ + { 9, TYPE_HD }, /* 11: H1680 */ + { 10, TYPE_DD }, /* 12: h410 */ + { 3, TYPE_DD }, /* 13: H820 <- == D820, 82x10 */ + { 11, TYPE_HD }, /* 14: h1476 */ + { 12, TYPE_HD }, /* 15: H1722 */ + { 13, TYPE_DD }, /* 16: h420 */ + { 14, TYPE_DD }, /* 17: H830 */ + { 15, TYPE_HD }, /* 18: h1494 */ + { 16, TYPE_HD }, /* 19: H1743 */ + { 17, TYPE_DD }, /* 20: h880 */ + { 18, TYPE_DD }, /* 21: D1040 */ + { 19, TYPE_DD }, /* 22: D1120 */ + { 20, TYPE_HD }, /* 23: h1600 */ + { 21, TYPE_HD }, /* 24: H1760 */ + { 22, TYPE_HD }, /* 25: H1920 */ + { 23, TYPE_ED }, /* 26: E3200 */ + { 24, TYPE_ED }, /* 27: E3520 */ + { 25, TYPE_ED }, /* 28: E3840 */ + { 26, TYPE_HD }, /* 29: H1840 */ + { 27, TYPE_DD }, /* 30: D800 */ + { 6, TYPE_HD }, /* 31: H1640 <- was H1600 == h1600 for PC */ +}; + +#define NUM_DISK_MINORS ARRAY_SIZE(minor2disktype) + +/* + * Maximum disk size (in kilobytes). This default is used whenever the + * current disk size is unknown. + */ +#define MAX_DISK_SIZE 3280 + +/* + * MSch: User-provided type information. 'drive' points to + * the respective entry of this array. Set by FDSETPRM ioctls. + */ +static struct atari_disk_type user_params[FD_MAX_UNITS]; + +/* + * User-provided permanent type information. 'drive' points to + * the respective entry of this array. Set by FDDEFPRM ioctls, + * restored upon disk change by floppy_revalidate() if valid (as seen by + * default_params[].blocks > 0 - a bit in unit[].flags might be used for this?) + */ +static struct atari_disk_type default_params[FD_MAX_UNITS]; + +/* current info on each unit */ +static struct atari_floppy_struct { + int connected; /* !=0 : drive is connected */ + int autoprobe; /* !=0 : do autoprobe */ + + struct atari_disk_type *disktype; /* current type of disk */ + + int track; /* current head position or -1 if + unknown */ + unsigned int steprate; /* steprate setting */ + unsigned int wpstat; /* current state of WP signal (for + disk change detection) */ + int flags; /* flags */ + struct gendisk *disk; + int ref; + int type; +} unit[FD_MAX_UNITS]; + +#define UD unit[drive] +#define UDT unit[drive].disktype +#define SUD unit[SelectedDrive] +#define SUDT unit[SelectedDrive].disktype + + +#define FDC_READ(reg) ({ \ + /* unsigned long __flags; */ \ + unsigned short __val; \ + /* local_irq_save(__flags); */ \ + dma_wd.dma_mode_status = 0x80 | (reg); \ + udelay(25); \ + __val = dma_wd.fdc_acces_seccount; \ + MFPDELAY(); \ + /* local_irq_restore(__flags); */ \ + __val & 0xff; \ +}) + +#define FDC_WRITE(reg,val) \ + do { \ + /* unsigned long __flags; */ \ + /* local_irq_save(__flags); */ \ + dma_wd.dma_mode_status = 0x80 | (reg); \ + udelay(25); \ + dma_wd.fdc_acces_seccount = (val); \ + MFPDELAY(); \ + /* local_irq_restore(__flags); */ \ + } while(0) + + +/* Buffering variables: + * First, there is a DMA buffer in ST-RAM that is used for floppy DMA + * operations. Second, a track buffer is used to cache a whole track + * of the disk to save read operations. 
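The DMA buffer must live in ST-RAM, since the Atari floppy
 * DMA cannot reach TT-RAM; the track buffer is kept in ST-RAM too, so
 * whole tracks can be transferred into it directly.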
These are two separate buffers + * because that allows write operations without clearing the track buffer. + */ + +static int MaxSectors[] = { + 11, 22, 44 +}; +static int BufferSize[] = { + 15*512, 30*512, 60*512 +}; + +#define BUFFER_SIZE (BufferSize[DriveType]) + +unsigned char *DMABuffer; /* buffer for writes */ +static unsigned long PhysDMABuffer; /* physical address */ + +static int UseTrackbuffer = -1; /* Do track buffering? */ +module_param(UseTrackbuffer, int, 0); + +unsigned char *TrackBuffer; /* buffer for reads */ +static unsigned long PhysTrackBuffer; /* physical address */ +static int BufferDrive, BufferSide, BufferTrack; +static int read_track; /* non-zero if we are reading whole tracks */ + +#define SECTOR_BUFFER(sec) (TrackBuffer + ((sec)-1)*512) +#define IS_BUFFERED(drive,side,track) \ + (BufferDrive == (drive) && BufferSide == (side) && BufferTrack == (track)) + +/* + * These are global variables, as that's the easiest way to give + * information to interrupts. They are the data used for the current + * request. + */ +static int SelectedDrive = 0; +static int ReqCmd, ReqBlock; +static int ReqSide, ReqTrack, ReqSector, ReqCnt; +static int HeadSettleFlag = 0; +static unsigned char *ReqData, *ReqBuffer; +static int MotorOn = 0, MotorOffTrys; +static int IsFormatting = 0, FormatError; + +static int UserSteprate[FD_MAX_UNITS] = { -1, -1 }; +module_param_array(UserSteprate, int, NULL, 0); + +/* Synchronization of FDC access. */ +static volatile int fdc_busy = 0; +static DECLARE_WAIT_QUEUE_HEAD(fdc_wait); +static DECLARE_WAIT_QUEUE_HEAD(format_wait); + +static unsigned long changed_floppies = 0xff, fake_change = 0; +#define CHECK_CHANGE_DELAY HZ/2 + +#define FD_MOTOR_OFF_DELAY (3*HZ) +#define FD_MOTOR_OFF_MAXTRY (10*20) + +#define FLOPPY_TIMEOUT (6*HZ) +#define RECALIBRATE_ERRORS 4 /* After this many errors the drive + * will be recalibrated. */ +#define MAX_ERRORS 8 /* After this many errors the driver + * will give up. */ + + +/* + * The driver is trying to determine the correct media format + * while Probing is set. fd_rwsec_done() clears it after a + * successful access. + */ +static int Probing = 0; + +/* This flag is set when a dummy seek is necessary to make the WP + * status bit accessible. 
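+ * (The FDC reports the state of the WP line in its status register
+ * only after a type-I command such as seek or restore; after a plain
+ * read the bit would always read as 0 -- see also the comment above
+ * finish_fdc() further down.)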
+ */ +static int NeedSeek = 0; + + +#ifdef DEBUG +#define DPRINT(a) printk a +#else +#define DPRINT(a) +#endif + +/***************************** Prototypes *****************************/ + +static void fd_select_side( int side ); +static void fd_select_drive( int drive ); +static void fd_deselect( void ); +static void fd_motor_off_timer( unsigned long dummy ); +static void check_change( unsigned long dummy ); +static irqreturn_t floppy_irq (int irq, void *dummy); +static void fd_error( void ); +static int do_format(int drive, int type, struct atari_format_descr *desc); +static void do_fd_action( int drive ); +static void fd_calibrate( void ); +static void fd_calibrate_done( int status ); +static void fd_seek( void ); +static void fd_seek_done( int status ); +static void fd_rwsec( void ); +static void fd_readtrack_check( unsigned long dummy ); +static void fd_rwsec_done( int status ); +static void fd_rwsec_done1(int status); +static void fd_writetrack( void ); +static void fd_writetrack_done( int status ); +static void fd_times_out( unsigned long dummy ); +static void finish_fdc( void ); +static void finish_fdc_done( int dummy ); +static void setup_req_params( int drive ); +static void redo_fd_request( void); +static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int + cmd, unsigned long param); +static void fd_probe( int drive ); +static int fd_test_drive_present( int drive ); +static void config_types( void ); +static int floppy_open(struct block_device *bdev, fmode_t mode); +static int floppy_release(struct gendisk *disk, fmode_t mode); + +/************************* End of Prototypes **************************/ + +static DEFINE_TIMER(motor_off_timer, fd_motor_off_timer, 0, 0); +static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0); +static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0); +static DEFINE_TIMER(fd_timer, check_change, 0, 0); + +static inline void start_motor_off_timer(void) +{ + mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY); + MotorOffTrys = 0; +} + +static inline void start_check_change_timer( void ) +{ + mod_timer(&fd_timer, jiffies + CHECK_CHANGE_DELAY); +} + +static inline void start_timeout(void) +{ + mod_timer(&timeout_timer, jiffies + FLOPPY_TIMEOUT); +} + +static inline void stop_timeout(void) +{ + del_timer(&timeout_timer); +} + +/* Select the side to use. */ + +static void fd_select_side( int side ) +{ + unsigned long flags; + + /* protect against various other ints mucking around with the PSG */ + local_irq_save(flags); + + sound_ym.rd_data_reg_sel = 14; /* Select PSG Port A */ + sound_ym.wd_data = (side == 0) ? sound_ym.rd_data_reg_sel | 0x01 : + sound_ym.rd_data_reg_sel & 0xfe; + + local_irq_restore(flags); +} + + +/* Select a drive, update the FDC's track register and set the correct + * clock speed for this disk's type. + */ + +static void fd_select_drive( int drive ) +{ + unsigned long flags; + unsigned char tmp; + + if (drive == SelectedDrive) + return; + + /* protect against various other ints mucking around with the PSG */ + local_irq_save(flags); + sound_ym.rd_data_reg_sel = 14; /* Select PSG Port A */ + tmp = sound_ym.rd_data_reg_sel; + sound_ym.wd_data = (tmp | DSKDRVNONE) & ~(drive == 0 ? 
DSKDRV0 : DSKDRV1); + atari_dont_touch_floppy_select = 1; + local_irq_restore(flags); + + /* restore track register to saved value */ + FDC_WRITE( FDCREG_TRACK, UD.track ); + udelay(25); + + /* select 8/16 MHz */ + if (UDT) + if (ATARIHW_PRESENT(FDCSPEED)) + dma_wd.fdc_speed = UDT->fdc_speed; + + SelectedDrive = drive; +} + + +/* Deselect both drives. */ + +static void fd_deselect( void ) +{ + unsigned long flags; + + /* protect against various other ints mucking around with the PSG */ + local_irq_save(flags); + atari_dont_touch_floppy_select = 0; + sound_ym.rd_data_reg_sel=14; /* Select PSG Port A */ + sound_ym.wd_data = (sound_ym.rd_data_reg_sel | + (MACH_IS_FALCON ? 3 : 7)); /* no drives selected */ + /* On Falcon, the drive B select line is used on the printer port, so + * leave it alone... */ + SelectedDrive = -1; + local_irq_restore(flags); +} + + +/* This timer function deselects the drives when the FDC switched the + * motor off. The deselection cannot happen earlier because the FDC + * counts the index signals, which arrive only if one drive is selected. + */ + +static void fd_motor_off_timer( unsigned long dummy ) +{ + unsigned char status; + + if (SelectedDrive < 0) + /* no drive selected, needn't deselect anyone */ + return; + + if (stdma_islocked()) + goto retry; + + status = FDC_READ( FDCREG_STATUS ); + + if (!(status & 0x80)) { + /* motor already turned off by FDC -> deselect drives */ + MotorOn = 0; + fd_deselect(); + return; + } + /* not yet off, try again */ + + retry: + /* Test again later; if tested too often, it seems there is no disk + * in the drive and the FDC will leave the motor on forever (or, + * at least until a disk is inserted). So we'll test only twice + * per second from then on... + */ + mod_timer(&motor_off_timer, + jiffies + (MotorOffTrys++ < FD_MOTOR_OFF_MAXTRY ? HZ/20 : HZ/2)); +} + + +/* This function is repeatedly called to detect disk changes (as good + * as possible) and keep track of the current state of the write protection. + */ + +static void check_change( unsigned long dummy ) +{ + static int drive = 0; + + unsigned long flags; + unsigned char old_porta; + int stat; + + if (++drive > 1 || !UD.connected) + drive = 0; + + /* protect against various other ints mucking around with the PSG */ + local_irq_save(flags); + + if (!stdma_islocked()) { + sound_ym.rd_data_reg_sel = 14; + old_porta = sound_ym.rd_data_reg_sel; + sound_ym.wd_data = (old_porta | DSKDRVNONE) & + ~(drive == 0 ? DSKDRV0 : DSKDRV1); + stat = !!(FDC_READ( FDCREG_STATUS ) & FDCSTAT_WPROT); + sound_ym.wd_data = old_porta; + + if (stat != UD.wpstat) { + DPRINT(( "wpstat[%d] = %d\n", drive, stat )); + UD.wpstat = stat; + set_bit (drive, &changed_floppies); + } + } + local_irq_restore(flags); + + start_check_change_timer(); +} + + +/* Handling of the Head Settling Flag: This flag should be set after each + * seek operation, because we don't use seeks with verify. 
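+ * The flag is consumed exactly once: get_head_settle_flag() below
+ * returns FDCCMDADD_E (the "add head-settling delay" option bit that
+ * fd_rwsec() ORs into the type-II read/write command) and then
+ * resets itself to 0.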
+ */ + +static inline void set_head_settle_flag(void) +{ + HeadSettleFlag = FDCCMDADD_E; +} + +static inline int get_head_settle_flag(void) +{ + int tmp = HeadSettleFlag; + HeadSettleFlag = 0; + return( tmp ); +} + +static inline void copy_buffer(void *from, void *to) +{ + ulong *p1 = (ulong *)from, *p2 = (ulong *)to; + int cnt; + + for (cnt = 512/4; cnt; cnt--) + *p2++ = *p1++; +} + + + + +/* General Interrupt Handling */ + +static void (*FloppyIRQHandler)( int status ) = NULL; + +static irqreturn_t floppy_irq (int irq, void *dummy) +{ + unsigned char status; + void (*handler)( int ); + + handler = xchg(&FloppyIRQHandler, NULL); + + if (handler) { + nop(); + status = FDC_READ( FDCREG_STATUS ); + DPRINT(("FDC irq, status = %02x handler = %08lx\n",status,(unsigned long)handler)); + handler( status ); + } + else { + DPRINT(("FDC irq, no handler\n")); + } + return IRQ_HANDLED; +} + + +/* Error handling: If some error happened, retry some times, then + * recalibrate, then try again, and fail after MAX_ERRORS. + */ + +static void fd_error( void ) +{ + if (IsFormatting) { + IsFormatting = 0; + FormatError = 1; + wake_up( &format_wait ); + return; + } + + if (!CURRENT) + return; + + CURRENT->errors++; + if (CURRENT->errors >= MAX_ERRORS) { + printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive ); + end_request(CURRENT, 0); + } + else if (CURRENT->errors == RECALIBRATE_ERRORS) { + printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive ); + if (SelectedDrive != -1) + SUD.track = -1; + } + redo_fd_request(); +} + + + +#define SET_IRQ_HANDLER(proc) do { FloppyIRQHandler = (proc); } while(0) + + +/* ---------- Formatting ---------- */ + +#define FILL(n,val) \ + do { \ + memset( p, val, n ); \ + p += n; \ + } while(0) + +static int do_format(int drive, int type, struct atari_format_descr *desc) +{ + unsigned char *p; + int sect, nsect; + unsigned long flags; + + DPRINT(("do_format( dr=%d tr=%d he=%d offs=%d )\n", + drive, desc->track, desc->head, desc->sect_offset )); + + local_irq_save(flags); + while( fdc_busy ) sleep_on( &fdc_wait ); + fdc_busy = 1; + stdma_lock(floppy_irq, NULL); + atari_turnon_irq( IRQ_MFP_FDC ); /* should be already, just to be sure */ + local_irq_restore(flags); + + if (type) { + if (--type >= NUM_DISK_MINORS || + minor2disktype[type].drive_types > DriveType) { + redo_fd_request(); + return -EINVAL; + } + type = minor2disktype[type].index; + UDT = &atari_disk_type[type]; + } + + if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) { + redo_fd_request(); + return -EINVAL; + } + + nsect = UDT->spt; + p = TrackBuffer; + /* The track buffer is used for the raw track data, so its + contents become invalid! */ + BufferDrive = -1; + /* stop deselect timer */ + del_timer( &motor_off_timer ); + + FILL( 60 * (nsect / 9), 0x4e ); + for( sect = 0; sect < nsect; ++sect ) { + FILL( 12, 0 ); + FILL( 3, 0xf5 ); + *p++ = 0xfe; + *p++ = desc->track; + *p++ = desc->head; + *p++ = (nsect + sect - desc->sect_offset) % nsect + 1; + *p++ = 2; + *p++ = 0xf7; + FILL( 22, 0x4e ); + FILL( 12, 0 ); + FILL( 3, 0xf5 ); + *p++ = 0xfb; + FILL( 512, 0xe5 ); + *p++ = 0xf7; + FILL( 40, 0x4e ); + } + FILL( TrackBuffer+BUFFER_SIZE-p, 0x4e ); + + IsFormatting = 1; + FormatError = 0; + ReqTrack = desc->track; + ReqSide = desc->head; + do_fd_action( drive ); + + sleep_on( &format_wait ); + + redo_fd_request(); + return( FormatError ? 
-EIO : 0 ); +} + + +/* do_fd_action() is the general procedure for a fd request: All + * required parameter settings (drive select, side select, track + * position) are checked and set if needed. For each of these + * parameters and the actual reading or writing exist two functions: + * one that starts the setting (or skips it if possible) and one + * callback for the "done" interrupt. Each done func calls the next + * set function to propagate the request down to fd_rwsec_done(). + */ + +static void do_fd_action( int drive ) +{ + DPRINT(("do_fd_action\n")); + + if (UseTrackbuffer && !IsFormatting) { + repeat: + if (IS_BUFFERED( drive, ReqSide, ReqTrack )) { + if (ReqCmd == READ) { + copy_buffer( SECTOR_BUFFER(ReqSector), ReqData ); + if (++ReqCnt < CURRENT->current_nr_sectors) { + /* read next sector */ + setup_req_params( drive ); + goto repeat; + } + else { + /* all sectors finished */ + CURRENT->nr_sectors -= CURRENT->current_nr_sectors; + CURRENT->sector += CURRENT->current_nr_sectors; + end_request(CURRENT, 1); + redo_fd_request(); + return; + } + } + else { + /* cmd == WRITE, pay attention to track buffer + * consistency! */ + copy_buffer( ReqData, SECTOR_BUFFER(ReqSector) ); + } + } + } + + if (SelectedDrive != drive) + fd_select_drive( drive ); + + if (UD.track == -1) + fd_calibrate(); + else if (UD.track != ReqTrack << UDT->stretch) + fd_seek(); + else if (IsFormatting) + fd_writetrack(); + else + fd_rwsec(); +} + + +/* Seek to track 0 if the current track is unknown */ + +static void fd_calibrate( void ) +{ + if (SUD.track >= 0) { + fd_calibrate_done( 0 ); + return; + } + + if (ATARIHW_PRESENT(FDCSPEED)) + dma_wd.fdc_speed = 0; /* always seek with 8 MHz */ + DPRINT(("fd_calibrate\n")); + SET_IRQ_HANDLER( fd_calibrate_done ); + /* we can't verify, since the speed may be incorrect */ + FDC_WRITE( FDCREG_CMD, FDCCMD_RESTORE | SUD.steprate ); + + NeedSeek = 1; + MotorOn = 1; + start_timeout(); + /* wait for IRQ */ +} + + +static void fd_calibrate_done( int status ) +{ + DPRINT(("fd_calibrate_done()\n")); + stop_timeout(); + + /* set the correct speed now */ + if (ATARIHW_PRESENT(FDCSPEED)) + dma_wd.fdc_speed = SUDT->fdc_speed; + if (status & FDCSTAT_RECNF) { + printk(KERN_ERR "fd%d: restore failed\n", SelectedDrive ); + fd_error(); + } + else { + SUD.track = 0; + fd_seek(); + } +} + + +/* Seek the drive to the requested track. The drive must have been + * calibrated at some point before this. + */ + +static void fd_seek( void ) +{ + if (SUD.track == ReqTrack << SUDT->stretch) { + fd_seek_done( 0 ); + return; + } + + if (ATARIHW_PRESENT(FDCSPEED)) { + dma_wd.fdc_speed = 0; /* always seek with 8 MHz */ + MFPDELAY(); + } + + DPRINT(("fd_seek() to track %d\n",ReqTrack)); + FDC_WRITE( FDCREG_DATA, ReqTrack << SUDT->stretch); + udelay(25); + SET_IRQ_HANDLER( fd_seek_done ); + FDC_WRITE( FDCREG_CMD, FDCCMD_SEEK | SUD.steprate ); + + MotorOn = 1; + set_head_settle_flag(); + start_timeout(); + /* wait for IRQ */ +} + + +static void fd_seek_done( int status ) +{ + DPRINT(("fd_seek_done()\n")); + stop_timeout(); + + /* set the correct speed */ + if (ATARIHW_PRESENT(FDCSPEED)) + dma_wd.fdc_speed = SUDT->fdc_speed; + if (status & FDCSTAT_RECNF) { + printk(KERN_ERR "fd%d: seek error (to track %d)\n", + SelectedDrive, ReqTrack ); + /* we don't know exactly which track we are on now!
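+ * Setting SUD.track to -1 below forces a recalibrate (a restore to
+ * track 0, see fd_calibrate() above) before the request is retried,
+ * re-synchronising the driver's notion of the head position.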
*/ + SUD.track = -1; + fd_error(); + } + else { + SUD.track = ReqTrack << SUDT->stretch; + NeedSeek = 0; + if (IsFormatting) + fd_writetrack(); + else + fd_rwsec(); + } +} + + +/* This does the actual reading/writing after positioning the head + * over the correct track. + */ + +static int MultReadInProgress = 0; + + +static void fd_rwsec( void ) +{ + unsigned long paddr, flags; + unsigned int rwflag, old_motoron; + unsigned int track; + + DPRINT(("fd_rwsec(), Sec=%d, Access=%c\n",ReqSector, ReqCmd == WRITE ? 'w' : 'r' )); + if (ReqCmd == WRITE) { + if (ATARIHW_PRESENT(EXTD_DMA)) { + paddr = virt_to_phys(ReqData); + } + else { + copy_buffer( ReqData, DMABuffer ); + paddr = PhysDMABuffer; + } + dma_cache_maintenance( paddr, 512, 1 ); + rwflag = 0x100; + } + else { + if (read_track) + paddr = PhysTrackBuffer; + else + paddr = ATARIHW_PRESENT(EXTD_DMA) ? + virt_to_phys(ReqData) : PhysDMABuffer; + rwflag = 0; + } + + fd_select_side( ReqSide ); + + /* Start sector of this operation */ + FDC_WRITE( FDCREG_SECTOR, read_track ? 1 : ReqSector ); + MFPDELAY(); + /* Cheat for track if stretch != 0 */ + if (SUDT->stretch) { + track = FDC_READ( FDCREG_TRACK); + MFPDELAY(); + FDC_WRITE( FDCREG_TRACK, track >> SUDT->stretch); + } + udelay(25); + + /* Setup DMA */ + local_irq_save(flags); + dma_wd.dma_lo = (unsigned char)paddr; + MFPDELAY(); + paddr >>= 8; + dma_wd.dma_md = (unsigned char)paddr; + MFPDELAY(); + paddr >>= 8; + if (ATARIHW_PRESENT(EXTD_DMA)) + st_dma_ext_dmahi = (unsigned short)paddr; + else + dma_wd.dma_hi = (unsigned char)paddr; + MFPDELAY(); + local_irq_restore(flags); + + /* Clear FIFO and switch DMA to correct mode */ + dma_wd.dma_mode_status = 0x90 | rwflag; + MFPDELAY(); + dma_wd.dma_mode_status = 0x90 | (rwflag ^ 0x100); + MFPDELAY(); + dma_wd.dma_mode_status = 0x90 | rwflag; + MFPDELAY(); + + /* How many sectors for DMA */ + dma_wd.fdc_acces_seccount = read_track ? SUDT->spt : 1; + + udelay(25); + + /* Start operation */ + dma_wd.dma_mode_status = FDCSELREG_STP | rwflag; + udelay(25); + SET_IRQ_HANDLER( fd_rwsec_done ); + dma_wd.fdc_acces_seccount = + (get_head_settle_flag() | + (rwflag ? FDCCMD_WRSEC : (FDCCMD_RDSEC | (read_track ? FDCCMDADD_M : 0)))); + + old_motoron = MotorOn; + MotorOn = 1; + NeedSeek = 1; + /* wait for interrupt */ + + if (read_track) { + /* If reading a whole track, wait about one disk rotation and + * then check if all sectors are read. The FDC will even + * search for the first non-existent sector and need 1 sec to + * recognise that it isn't present :-( + */ + MultReadInProgress = 1; + mod_timer(&readtrack_timer, + /* 1 rot. + 5 rot.s if motor was off */ + jiffies + HZ/5 + (old_motoron ? 0 : HZ)); + } + start_timeout(); +} + + +static void fd_readtrack_check( unsigned long dummy ) +{ + unsigned long flags, addr, addr2; + + local_irq_save(flags); + + if (!MultReadInProgress) { + /* This prevents a race condition that could arise if the + * interrupt is triggered while the calling of this timer + * callback function takes place. The IRQ function then has + * already cleared 'MultReadInProgress' when flow of control + * gets here. + */ + local_irq_restore(flags); + return; + } + + /* get the current DMA address */ + /* ++ f.a. 
read twice to avoid being fooled by switcher */ + addr = 0; + do { + addr2 = addr; + addr = dma_wd.dma_lo & 0xff; + MFPDELAY(); + addr |= (dma_wd.dma_md & 0xff) << 8; + MFPDELAY(); + if (ATARIHW_PRESENT( EXTD_DMA )) + addr |= (st_dma_ext_dmahi & 0xffff) << 16; + else + addr |= (dma_wd.dma_hi & 0xff) << 16; + MFPDELAY(); + } while(addr != addr2); + + if (addr >= PhysTrackBuffer + SUDT->spt*512) { + /* already read enough data, force an FDC interrupt to stop + * the read operation + */ + SET_IRQ_HANDLER( NULL ); + MultReadInProgress = 0; + local_irq_restore(flags); + DPRINT(("fd_readtrack_check(): done\n")); + FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI ); + udelay(25); + + /* No error until now -- the FDC would have interrupted + * otherwise! + */ + fd_rwsec_done1(0); + } + else { + /* not yet finished, wait another tenth rotation */ + local_irq_restore(flags); + DPRINT(("fd_readtrack_check(): not yet finished\n")); + mod_timer(&readtrack_timer, jiffies + HZ/5/10); + } +} + + +static void fd_rwsec_done( int status ) +{ + DPRINT(("fd_rwsec_done()\n")); + + if (read_track) { + del_timer(&readtrack_timer); + if (!MultReadInProgress) + return; + MultReadInProgress = 0; + } + fd_rwsec_done1(status); +} + +static void fd_rwsec_done1(int status) +{ + unsigned int track; + + stop_timeout(); + + /* Correct the track if stretch != 0 */ + if (SUDT->stretch) { + track = FDC_READ( FDCREG_TRACK); + MFPDELAY(); + FDC_WRITE( FDCREG_TRACK, track << SUDT->stretch); + } + + if (!UseTrackbuffer) { + dma_wd.dma_mode_status = 0x90; + MFPDELAY(); + if (!(dma_wd.dma_mode_status & 0x01)) { + printk(KERN_ERR "fd%d: DMA error\n", SelectedDrive ); + goto err_end; + } + } + MFPDELAY(); + + if (ReqCmd == WRITE && (status & FDCSTAT_WPROT)) { + printk(KERN_NOTICE "fd%d: is write protected\n", SelectedDrive ); + goto err_end; + } + if ((status & FDCSTAT_RECNF) && + /* RECNF is no error after a multiple read when the FDC + searched for a non-existent sector! */ + !(read_track && FDC_READ(FDCREG_SECTOR) > SUDT->spt)) { + if (Probing) { + if (SUDT > atari_disk_type) { + if (SUDT[-1].blocks > ReqBlock) { + /* try another disk type */ + SUDT--; + set_capacity(unit[SelectedDrive].disk, + SUDT->blocks); + } else + Probing = 0; + } + else { + if (SUD.flags & FTD_MSG) + printk(KERN_INFO "fd%d: Auto-detected floppy type %s\n", + SelectedDrive, SUDT->name ); + Probing=0; + } + } else { +/* record not found, but not probing. Maybe stretch wrong ? Restart probing */ + if (SUD.autoprobe) { + SUDT = atari_disk_type + StartDiskType[DriveType]; + set_capacity(unit[SelectedDrive].disk, + SUDT->blocks); + Probing = 1; + } + } + if (Probing) { + if (ATARIHW_PRESENT(FDCSPEED)) { + dma_wd.fdc_speed = SUDT->fdc_speed; + MFPDELAY(); + } + setup_req_params( SelectedDrive ); + BufferDrive = -1; + do_fd_action( SelectedDrive ); + return; + } + + printk(KERN_ERR "fd%d: sector %d not found (side %d, track %d)\n", + SelectedDrive, FDC_READ (FDCREG_SECTOR), ReqSide, ReqTrack ); + goto err_end; + } + if (status & FDCSTAT_CRC) { + printk(KERN_ERR "fd%d: CRC error (side %d, track %d, sector %d)\n", + SelectedDrive, ReqSide, ReqTrack, FDC_READ (FDCREG_SECTOR) ); + goto err_end; + } + if (status & FDCSTAT_LOST) { + printk(KERN_ERR "fd%d: lost data (side %d, track %d, sector %d)\n", + SelectedDrive, ReqSide, ReqTrack, FDC_READ (FDCREG_SECTOR) ); + goto err_end; + } + + Probing = 0; + + if (ReqCmd == READ) { + if (!read_track) { + void *addr; + addr = ATARIHW_PRESENT( EXTD_DMA ) ? 
ReqData : DMABuffer; + dma_cache_maintenance( virt_to_phys(addr), 512, 0 ); + if (!ATARIHW_PRESENT( EXTD_DMA )) + copy_buffer (addr, ReqData); + } else { + dma_cache_maintenance( PhysTrackBuffer, MaxSectors[DriveType] * 512, 0 ); + BufferDrive = SelectedDrive; + BufferSide = ReqSide; + BufferTrack = ReqTrack; + copy_buffer (SECTOR_BUFFER (ReqSector), ReqData); + } + } + + if (++ReqCnt < CURRENT->current_nr_sectors) { + /* read next sector */ + setup_req_params( SelectedDrive ); + do_fd_action( SelectedDrive ); + } + else { + /* all sectors finished */ + CURRENT->nr_sectors -= CURRENT->current_nr_sectors; + CURRENT->sector += CURRENT->current_nr_sectors; + end_request(CURRENT, 1); + redo_fd_request(); + } + return; + + err_end: + BufferDrive = -1; + fd_error(); +} + + +static void fd_writetrack( void ) +{ + unsigned long paddr, flags; + unsigned int track; + + DPRINT(("fd_writetrack() Tr=%d Si=%d\n", ReqTrack, ReqSide )); + + paddr = PhysTrackBuffer; + dma_cache_maintenance( paddr, BUFFER_SIZE, 1 ); + + fd_select_side( ReqSide ); + + /* Cheat for track if stretch != 0 */ + if (SUDT->stretch) { + track = FDC_READ( FDCREG_TRACK); + MFPDELAY(); + FDC_WRITE(FDCREG_TRACK,track >> SUDT->stretch); + } + udelay(40); + + /* Setup DMA */ + local_irq_save(flags); + dma_wd.dma_lo = (unsigned char)paddr; + MFPDELAY(); + paddr >>= 8; + dma_wd.dma_md = (unsigned char)paddr; + MFPDELAY(); + paddr >>= 8; + if (ATARIHW_PRESENT( EXTD_DMA )) + st_dma_ext_dmahi = (unsigned short)paddr; + else + dma_wd.dma_hi = (unsigned char)paddr; + MFPDELAY(); + local_irq_restore(flags); + + /* Clear FIFO and switch DMA to correct mode */ + dma_wd.dma_mode_status = 0x190; + MFPDELAY(); + dma_wd.dma_mode_status = 0x90; + MFPDELAY(); + dma_wd.dma_mode_status = 0x190; + MFPDELAY(); + + /* How many sectors for DMA */ + dma_wd.fdc_acces_seccount = BUFFER_SIZE/512; + udelay(40); + + /* Start operation */ + dma_wd.dma_mode_status = FDCSELREG_STP | 0x100; + udelay(40); + SET_IRQ_HANDLER( fd_writetrack_done ); + dma_wd.fdc_acces_seccount = FDCCMD_WRTRA | get_head_settle_flag(); + + MotorOn = 1; + start_timeout(); + /* wait for interrupt */ +} + + +static void fd_writetrack_done( int status ) +{ + DPRINT(("fd_writetrack_done()\n")); + + stop_timeout(); + + if (status & FDCSTAT_WPROT) { + printk(KERN_NOTICE "fd%d: is write protected\n", SelectedDrive ); + goto err_end; + } + if (status & FDCSTAT_LOST) { + printk(KERN_ERR "fd%d: lost data (side %d, track %d)\n", + SelectedDrive, ReqSide, ReqTrack ); + goto err_end; + } + + wake_up( &format_wait ); + return; + + err_end: + fd_error(); +} + +static void fd_times_out( unsigned long dummy ) +{ + atari_disable_irq( IRQ_MFP_FDC ); + if (!FloppyIRQHandler) goto end; /* int occurred after timer was fired, but + * before we came here... */ + + SET_IRQ_HANDLER( NULL ); + /* If the timeout occurred while the readtrack_check timer was + * active, we need to cancel it, else bad things will happen */ + if (UseTrackbuffer) + del_timer( &readtrack_timer ); + FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI ); + udelay( 25 ); + + printk(KERN_ERR "floppy timeout\n" ); + fd_error(); + end: + atari_enable_irq( IRQ_MFP_FDC ); +} + + +/* The (noop) seek operation here is needed to make the WP bit in the + * FDC status register accessible for check_change. If the last disk + * operation would have been a RDSEC, this bit would always read as 0 + * no matter what :-( To save time, the seek goes to the track we're + * already on. 
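+ * Seeking to the track the head already is on generates no step
+ * pulses, so the dummy command costs little more than the command
+ * overhead and the interrupt round trip.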
+ */ + +static void finish_fdc( void ) +{ + if (!NeedSeek) { + finish_fdc_done( 0 ); + } + else { + DPRINT(("finish_fdc: dummy seek started\n")); + FDC_WRITE (FDCREG_DATA, SUD.track); + SET_IRQ_HANDLER( finish_fdc_done ); + FDC_WRITE (FDCREG_CMD, FDCCMD_SEEK); + MotorOn = 1; + start_timeout(); + /* we must wait for the IRQ here, because the ST-DMA + is released immediately afterwards and the interrupt + may be delivered to the wrong driver. */ + } +} + + +static void finish_fdc_done( int dummy ) +{ + unsigned long flags; + + DPRINT(("finish_fdc_done entered\n")); + stop_timeout(); + NeedSeek = 0; + + if (timer_pending(&fd_timer) && time_before(fd_timer.expires, jiffies + 5)) + /* If the check for a disk change is done too early after this + * last seek command, the WP bit still reads wrong :-(( + */ + mod_timer(&fd_timer, jiffies + 5); + else + start_check_change_timer(); + start_motor_off_timer(); + + local_irq_save(flags); + stdma_release(); + fdc_busy = 0; + wake_up( &fdc_wait ); + local_irq_restore(flags); + + DPRINT(("finish_fdc() finished\n")); +} + +/* The detection of disk changes is a dark chapter in Atari history :-( + * Because the "Drive ready" signal isn't present in the Atari + * hardware, one has to rely on the "Write Protect". This works fine, + * as long as no write protected disks are used. TOS solves this + * problem by introducing tri-state logic ("maybe changed") and + * looking at the serial number in block 0. This isn't possible for + * Linux, since the floppy driver can't make assumptions about the + * filesystem used on the disk and thus the contents of block 0. I've + * chosen the method to always say "The disk was changed" if it is + * unsure whether it was. This implies that every open or mount + * invalidates the disk buffers if you work with write protected + * disks. But at least this is better than working with incorrect data + * due to unrecognised disk changes. + */ + +static int check_floppy_change(struct gendisk *disk) +{ + struct atari_floppy_struct *p = disk->private_data; + unsigned int drive = p - unit; + if (test_bit (drive, &fake_change)) { + /* simulated change (e.g. after formatting) */ + return 1; + } + if (test_bit (drive, &changed_floppies)) { + /* surely changed (the WP signal changed at least once) */ + return 1; + } + if (UD.wpstat) { + /* WP is on -> could be changed: to be sure, buffers should be + * invalidated... + */ + return 1; + } + + return 0; +} + +static int floppy_revalidate(struct gendisk *disk) +{ + struct atari_floppy_struct *p = disk->private_data; + unsigned int drive = p - unit; + + if (test_bit(drive, &changed_floppies) || + test_bit(drive, &fake_change) || + p->disktype == 0) { + if (UD.flags & FTD_MSG) + printk(KERN_ERR "floppy: clear format %p!\n", UDT); + BufferDrive = -1; + clear_bit(drive, &fake_change); + clear_bit(drive, &changed_floppies); + /* MSch: clearing geometry makes sense only for autoprobe + formats, for 'permanent user-defined' parameter: + restore default_params[] here if flagged valid! */ + if (default_params[drive].blocks == 0) + UDT = NULL; + else + UDT = &default_params[drive]; + } + return 0; +} + + +/* This sets up the global variables describing the current request. 
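+ * The linear block number is split up as: track = block / spt,
+ * sector = block % spt + 1 (sector numbers are 1-based), side =
+ * track & 1, cylinder = track >> 1, i.e. the two sides of a cylinder
+ * hold consecutive logical tracks. Worked example for spt == 9 (DD):
+ * block 95 -> sector 6, side 0, cylinder 5.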
*/ + +static void setup_req_params( int drive ) +{ + int block = ReqBlock + ReqCnt; + + ReqTrack = block / UDT->spt; + ReqSector = block - ReqTrack * UDT->spt + 1; + ReqSide = ReqTrack & 1; + ReqTrack >>= 1; + ReqData = ReqBuffer + 512 * ReqCnt; + + if (UseTrackbuffer) + read_track = (ReqCmd == READ && CURRENT->errors == 0); + else + read_track = 0; + + DPRINT(("Request params: Si=%d Tr=%d Se=%d Data=%08lx\n",ReqSide, + ReqTrack, ReqSector, (unsigned long)ReqData )); +} + + +static void redo_fd_request(void) +{ + int drive, type; + struct atari_floppy_struct *floppy; + + DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n", + CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "", + CURRENT ? CURRENT->sector : 0 )); + + IsFormatting = 0; + +repeat: + + if (!CURRENT) + goto the_end; + + floppy = CURRENT->rq_disk->private_data; + drive = floppy - unit; + type = floppy->type; + + if (!UD.connected) { + /* drive not connected */ + printk(KERN_ERR "Unknown Device: fd%d\n", drive ); + end_request(CURRENT, 0); + goto repeat; + } + + if (type == 0) { + if (!UDT) { + Probing = 1; + UDT = atari_disk_type + StartDiskType[DriveType]; + set_capacity(floppy->disk, UDT->blocks); + UD.autoprobe = 1; + } + } + else { + /* user supplied disk type */ + if (--type >= NUM_DISK_MINORS) { + printk(KERN_WARNING "fd%d: invalid disk format", drive ); + end_request(CURRENT, 0); + goto repeat; + } + if (minor2disktype[type].drive_types > DriveType) { + printk(KERN_WARNING "fd%d: unsupported disk format", drive ); + end_request(CURRENT, 0); + goto repeat; + } + type = minor2disktype[type].index; + UDT = &atari_disk_type[type]; + set_capacity(floppy->disk, UDT->blocks); + UD.autoprobe = 0; + } + + if (CURRENT->sector + 1 > UDT->blocks) { + end_request(CURRENT, 0); + goto repeat; + } + + /* stop deselect timer */ + del_timer( &motor_off_timer ); + + ReqCnt = 0; + ReqCmd = rq_data_dir(CURRENT); + ReqBlock = CURRENT->sector; + ReqBuffer = CURRENT->buffer; + setup_req_params( drive ); + do_fd_action( drive ); + + return; + + the_end: + finish_fdc(); +} + + +void do_fd_request(struct request_queue * q) +{ + unsigned long flags; + + DPRINT(("do_fd_request for pid %d\n",current->pid)); + while( fdc_busy ) sleep_on( &fdc_wait ); + fdc_busy = 1; + stdma_lock(floppy_irq, NULL); + + atari_disable_irq( IRQ_MFP_FDC ); + local_save_flags(flags); /* The request function is called with ints + local_irq_disable(); * disabled... 
so must save the IPL for later */ + redo_fd_request(); + local_irq_restore(flags); + atari_enable_irq( IRQ_MFP_FDC ); +} + +static int fd_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long param) +{ + struct gendisk *disk = bdev->bd_disk; + struct atari_floppy_struct *floppy = disk->private_data; + int drive = floppy - unit; + int type = floppy->type; + struct atari_format_descr fmt_desc; + struct atari_disk_type *dtp; + struct floppy_struct getprm; + int settype; + struct floppy_struct setprm; + void __user *argp = (void __user *)param; + + switch (cmd) { + case FDGETPRM: + if (type) { + if (--type >= NUM_DISK_MINORS) + return -ENODEV; + if (minor2disktype[type].drive_types > DriveType) + return -ENODEV; + type = minor2disktype[type].index; + dtp = &atari_disk_type[type]; + if (UD.flags & FTD_MSG) + printk (KERN_ERR "floppy%d: found dtp %p name %s!\n", + drive, dtp, dtp->name); + } + else { + if (!UDT) + return -ENXIO; + else + dtp = UDT; + } + memset((void *)&getprm, 0, sizeof(getprm)); + getprm.size = dtp->blocks; + getprm.sect = dtp->spt; + getprm.head = 2; + getprm.track = dtp->blocks/dtp->spt/2; + getprm.stretch = dtp->stretch; + if (copy_to_user(argp, &getprm, sizeof(getprm))) + return -EFAULT; + return 0; + } + switch (cmd) { + case FDSETPRM: + case FDDEFPRM: + /* + * MSch 7/96: simple 'set geometry' case: just set the + * 'default' device params (minor == 0). + * Currently, the drive geometry is cleared after each + * disk change and subsequent revalidate()! simple + * implementation of FDDEFPRM: save geometry from a + * FDDEFPRM call and restore it in floppy_revalidate() ! + */ + + /* get the parameters from user space */ + if (floppy->ref != 1 && floppy->ref != -1) + return -EBUSY; + if (copy_from_user(&setprm, argp, sizeof(setprm))) + return -EFAULT; + /* + * first of all: check for floppy change and revalidate, + * or the next access will revalidate - and clear UDT :-( + */ + + if (check_floppy_change(disk)) + floppy_revalidate(disk); + + if (UD.flags & FTD_MSG) + printk (KERN_INFO "floppy%d: setting size %d spt %d str %d!\n", + drive, setprm.size, setprm.sect, setprm.stretch); + + /* what if type > 0 here? Overwrite specified entry ? */ + if (type) { + /* refuse to re-set a predefined type for now */ + redo_fd_request(); + return -EINVAL; + } + + /* + * type == 0: first look for a matching entry in the type list, + * and set the UD.disktype field to use the perdefined entry. + * TODO: add user-defined format to head of autoprobe list ? + * Useful to include the user-type for future autodetection! + */ + + for (settype = 0; settype < NUM_DISK_MINORS; settype++) { + int setidx = 0; + if (minor2disktype[settype].drive_types > DriveType) { + /* skip this one, invalid for drive ... */ + continue; + } + setidx = minor2disktype[settype].index; + dtp = &atari_disk_type[setidx]; + + /* found matching entry ?? 
*/ + if ( dtp->blocks == setprm.size + && dtp->spt == setprm.sect + && dtp->stretch == setprm.stretch ) { + if (UD.flags & FTD_MSG) + printk (KERN_INFO "floppy%d: setting %s %p!\n", + drive, dtp->name, dtp); + UDT = dtp; + set_capacity(floppy->disk, UDT->blocks); + + if (cmd == FDDEFPRM) { + /* save settings as permanent default type */ + default_params[drive].name = dtp->name; + default_params[drive].spt = dtp->spt; + default_params[drive].blocks = dtp->blocks; + default_params[drive].fdc_speed = dtp->fdc_speed; + default_params[drive].stretch = dtp->stretch; + } + + return 0; + } + + } + + /* no matching disk type found above - setting user_params */ + + if (cmd == FDDEFPRM) { + /* set permanent type */ + dtp = &default_params[drive]; + } else + /* set user type (reset by disk change!) */ + dtp = &user_params[drive]; + + dtp->name = "user format"; + dtp->blocks = setprm.size; + dtp->spt = setprm.sect; + if (setprm.sect > 14) + dtp->fdc_speed = 3; + else + dtp->fdc_speed = 0; + dtp->stretch = setprm.stretch; + + if (UD.flags & FTD_MSG) + printk (KERN_INFO "floppy%d: blk %d spt %d str %d!\n", + drive, dtp->blocks, dtp->spt, dtp->stretch); + + /* sanity check */ + if (!dtp || setprm.track != dtp->blocks/dtp->spt/2 || + setprm.head != 2) { + redo_fd_request(); + return -EINVAL; + } + + UDT = dtp; + set_capacity(floppy->disk, UDT->blocks); + + return 0; + case FDMSGON: + UD.flags |= FTD_MSG; + return 0; + case FDMSGOFF: + UD.flags &= ~FTD_MSG; + return 0; + case FDSETEMSGTRESH: + return -EINVAL; + case FDFMTBEG: + return 0; + case FDFMTTRK: + if (floppy->ref != 1 && floppy->ref != -1) + return -EBUSY; + if (copy_from_user(&fmt_desc, argp, sizeof(fmt_desc))) + return -EFAULT; + return do_format(drive, type, &fmt_desc); + case FDCLRPRM: + UDT = NULL; + /* MSch: invalidate default_params */ + default_params[drive].blocks = 0; + set_capacity(floppy->disk, MAX_DISK_SIZE * 2); + case FDFMTEND: + case FDFLUSH: + /* invalidate the buffer track to force a reread */ + BufferDrive = -1; + set_bit(drive, &fake_change); + check_disk_change(bdev); + return 0; + default: + return -EINVAL; + } +} + + +/* Initialize the 'unit' variable for drive 'drive' */ + +static void __init fd_probe( int drive ) +{ + UD.connected = 0; + UDT = NULL; + + if (!fd_test_drive_present( drive )) + return; + + UD.connected = 1; + UD.track = 0; + switch( UserSteprate[drive] ) { + case 2: + UD.steprate = FDCSTEP_2; + break; + case 3: + UD.steprate = FDCSTEP_3; + break; + case 6: + UD.steprate = FDCSTEP_6; + break; + case 12: + UD.steprate = FDCSTEP_12; + break; + default: /* should be -1 for "not set by user" */ + if (ATARIHW_PRESENT( FDCSPEED ) || MACH_IS_MEDUSA) + UD.steprate = FDCSTEP_3; + else + UD.steprate = FDCSTEP_6; + break; + } + MotorOn = 1; /* from probe restore operation! */ +} + + +/* This function tests the physical presence of a floppy drive (not + * whether a disk is inserted). This is done by issuing a restore + * command, waiting max. 2 seconds (that should be enough to move the + * head across the whole disk) and looking at the state of the "TR00" + * signal. This should now be raised if there is a drive connected + * (and there is no hardware failure :-) Otherwise, the drive is + * declared absent. + */ + +static int __init fd_test_drive_present( int drive ) +{ + unsigned long timeout; + unsigned char status; + int ok; + + if (drive >= (MACH_IS_FALCON ? 
1 : 2)) return( 0 ); + fd_select_drive( drive ); + + /* disable interrupt temporarily */ + atari_turnoff_irq( IRQ_MFP_FDC ); + FDC_WRITE (FDCREG_TRACK, 0xff00); + FDC_WRITE( FDCREG_CMD, FDCCMD_RESTORE | FDCCMDADD_H | FDCSTEP_6 ); + + timeout = jiffies + 2*HZ+HZ/2; + while (time_before(jiffies, timeout)) + if (!(mfp.par_dt_reg & 0x20)) + break; + + status = FDC_READ( FDCREG_STATUS ); + ok = (status & FDCSTAT_TR00) != 0; + + /* force interrupt to abort restore operation (FDC would try + * about 50 seconds!) */ + FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI ); + udelay(500); + status = FDC_READ( FDCREG_STATUS ); + udelay(20); + + if (ok) { + /* dummy seek command to make WP bit accessible */ + FDC_WRITE( FDCREG_DATA, 0 ); + FDC_WRITE( FDCREG_CMD, FDCCMD_SEEK ); + while( mfp.par_dt_reg & 0x20 ) + ; + status = FDC_READ( FDCREG_STATUS ); + } + + atari_turnon_irq( IRQ_MFP_FDC ); + return( ok ); +} + + +/* Look how many and which kind of drives are connected. If there are + * floppies, additionally start the disk-change and motor-off timers. + */ + +static void __init config_types( void ) +{ + int drive, cnt = 0; + + /* for probing drives, set the FDC speed to 8 MHz */ + if (ATARIHW_PRESENT(FDCSPEED)) + dma_wd.fdc_speed = 0; + + printk(KERN_INFO "Probing floppy drive(s):\n"); + for( drive = 0; drive < FD_MAX_UNITS; drive++ ) { + fd_probe( drive ); + if (UD.connected) { + printk(KERN_INFO "fd%d\n", drive); + ++cnt; + } + } + + if (FDC_READ( FDCREG_STATUS ) & FDCSTAT_BUSY) { + /* If FDC is still busy from probing, give it another FORCI + * command to abort the operation. If this isn't done, the FDC + * will interrupt later and its IRQ line stays low, because + * the status register isn't read. And this will block any + * interrupts on this IRQ line :-( + */ + FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI ); + udelay(500); + FDC_READ( FDCREG_STATUS ); + udelay(20); + } + + if (cnt > 0) { + start_motor_off_timer(); + if (cnt == 1) fd_select_drive( 0 ); + start_check_change_timer(); + } +} + +/* + * floppy_open check for aliasing (/dev/fd0 can be the same as + * /dev/PS0 etc), and disallows simultaneous access to the same + * drive with different device numbers. 
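+ * Minors encode both: drive == minor & 3 and format type ==
+ * minor >> 2 (cf. floppy_find() below). E.g. minor 9 is drive 1
+ * opened with fixed type 2, which maps through minor2disktype[]
+ * to the "h1200" entry; type 0 means autoprobe.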
+ */ + +static int floppy_open(struct block_device *bdev, fmode_t mode) +{ + struct atari_floppy_struct *p = bdev->bd_disk->private_data; + int type = MINOR(bdev->bd_dev) >> 2; + + DPRINT(("fd_open: type=%d\n",type)); + if (p->ref && p->type != type) + return -EBUSY; + + if (p->ref == -1 || (p->ref && mode & FMODE_EXCL)) + return -EBUSY; + + if (mode & FMODE_EXCL) + p->ref = -1; + else + p->ref++; + + p->type = type; + + if (mode & FMODE_NDELAY) + return 0; + + if (mode & (FMODE_READ|FMODE_WRITE)) { + check_disk_change(bdev); + if (mode & FMODE_WRITE) { + if (p->wpstat) { + if (p->ref < 0) + p->ref = 0; + else + p->ref--; + return -EROFS; + } + } + } + return 0; +} + + +static int floppy_release(struct gendisk *disk, fmode_t mode) +{ + struct atari_floppy_struct *p = disk->private_data; + if (p->ref < 0) + p->ref = 0; + else if (!p->ref--) { + printk(KERN_ERR "floppy_release with fd_ref == 0"); + p->ref = 0; + } + return 0; +} + +static struct block_device_operations floppy_fops = { + .owner = THIS_MODULE, + .open = floppy_open, + .release = floppy_release, + .locked_ioctl = fd_ioctl, + .media_changed = check_floppy_change, + .revalidate_disk= floppy_revalidate, +}; + +static struct kobject *floppy_find(dev_t dev, int *part, void *data) +{ + int drive = *part & 3; + int type = *part >> 2; + if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS) + return NULL; + *part = 0; + return get_disk(unit[drive].disk); +} + +static int __init atari_floppy_init (void) +{ + int i; + + if (!MACH_IS_ATARI) + /* Amiga, Mac, ... don't have Atari-compatible floppy :-) */ + return -ENODEV; + + if (register_blkdev(FLOPPY_MAJOR,"fd")) + return -EBUSY; + + for (i = 0; i < FD_MAX_UNITS; i++) { + unit[i].disk = alloc_disk(1); + if (!unit[i].disk) + goto Enomem; + } + + if (UseTrackbuffer < 0) + /* not set by user -> use default: for now, we turn + track buffering off for all Medusas, though it + could be used with ones that have a counter + card. But the test is too hard :-( */ + UseTrackbuffer = !MACH_IS_MEDUSA; + + /* initialize variables */ + SelectedDrive = -1; + BufferDrive = -1; + + DMABuffer = atari_stram_alloc(BUFFER_SIZE+512, "ataflop"); + if (!DMABuffer) { + printk(KERN_ERR "atari_floppy_init: cannot get dma buffer\n"); + goto Enomem; + } + TrackBuffer = DMABuffer + 512; + PhysDMABuffer = virt_to_phys(DMABuffer); + PhysTrackBuffer = virt_to_phys(TrackBuffer); + BufferDrive = BufferSide = BufferTrack = -1; + + floppy_queue = blk_init_queue(do_fd_request, &ataflop_lock); + if (!floppy_queue) + goto Enomem; + + for (i = 0; i < FD_MAX_UNITS; i++) { + unit[i].track = -1; + unit[i].flags = 0; + unit[i].disk->major = FLOPPY_MAJOR; + unit[i].disk->first_minor = i; + sprintf(unit[i].disk->disk_name, "fd%d", i); + unit[i].disk->fops = &floppy_fops; + unit[i].disk->private_data = &unit[i]; + unit[i].disk->queue = floppy_queue; + set_capacity(unit[i].disk, MAX_DISK_SIZE * 2); + add_disk(unit[i].disk); + } + + blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE, + floppy_find, NULL, NULL); + + printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n", + DriveType == 0 ? 'D' : DriveType == 1 ? 'H' : 'E', + UseTrackbuffer ? 
"" : "no "); + config_types(); + + return 0; +Enomem: + while (i--) + put_disk(unit[i].disk); + if (floppy_queue) + blk_cleanup_queue(floppy_queue); + unregister_blkdev(FLOPPY_MAJOR, "fd"); + return -ENOMEM; +} + +#ifndef MODULE +static int __init atari_floppy_setup(char *str) +{ + int ints[3 + FD_MAX_UNITS]; + int i; + + if (!MACH_IS_ATARI) + return 0; + + str = get_options(str, 3 + FD_MAX_UNITS, ints); + + if (ints[0] < 1) { + printk(KERN_ERR "ataflop_setup: no arguments!\n" ); + return 0; + } + else if (ints[0] > 2+FD_MAX_UNITS) { + printk(KERN_ERR "ataflop_setup: too many arguments\n" ); + } + + if (ints[1] < 0 || ints[1] > 2) + printk(KERN_ERR "ataflop_setup: bad drive type\n" ); + else + DriveType = ints[1]; + + if (ints[0] >= 2) + UseTrackbuffer = (ints[2] > 0); + + for( i = 3; i <= ints[0] && i-3 < FD_MAX_UNITS; ++i ) { + if (ints[i] != 2 && ints[i] != 3 && ints[i] != 6 && ints[i] != 12) + printk(KERN_ERR "ataflop_setup: bad steprate\n" ); + else + UserSteprate[i-3] = ints[i]; + } + return 1; +} + +__setup("floppy=", atari_floppy_setup); +#endif + +static void __exit atari_floppy_exit(void) +{ + int i; + blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); + for (i = 0; i < FD_MAX_UNITS; i++) { + del_gendisk(unit[i].disk); + put_disk(unit[i].disk); + } + unregister_blkdev(FLOPPY_MAJOR, "fd"); + + blk_cleanup_queue(floppy_queue); + del_timer_sync(&fd_timer); + atari_stram_free( DMABuffer ); +} + +module_init(atari_floppy_init) +module_exit(atari_floppy_exit) + +MODULE_LICENSE("GPL"); diff --git a/drivers/block/brd.c b/drivers/block/brd.c new file mode 100644 index 0000000..bdd4f5f --- /dev/null +++ b/drivers/block/brd.c @@ -0,0 +1,594 @@ +/* + * Ram backed block device driver. + * + * Copyright (C) 2007 Nick Piggin + * Copyright (C) 2007 Novell Inc. + * + * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright + * of their respective owners. + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/major.h> +#include <linux/blkdev.h> +#include <linux/bio.h> +#include <linux/highmem.h> +#include <linux/gfp.h> +#include <linux/radix-tree.h> +#include <linux/buffer_head.h> /* invalidate_bh_lrus() */ + +#include <asm/uaccess.h> + +#define SECTOR_SHIFT 9 +#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) +#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) + +/* + * Each block ramdisk device has a radix_tree brd_pages of pages that stores + * the pages containing the block device's contents. A brd page's ->index is + * its offset in PAGE_SIZE units. This is similar to, but in no way connected + * with, the kernel's pagecache or buffer cache (which sit above our block + * device). + */ +struct brd_device { + int brd_number; + int brd_refcnt; + loff_t brd_offset; + loff_t brd_sizelimit; + unsigned brd_blocksize; + + struct request_queue *brd_queue; + struct gendisk *brd_disk; + struct list_head brd_list; + + /* + * Backing store of pages and lock to protect it. This is the contents + * of the block device. + */ + spinlock_t brd_lock; + struct radix_tree_root brd_pages; +}; + +/* + * Look up and return a brd's page for a given sector. + */ +static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) +{ + pgoff_t idx; + struct page *page; + + /* + * The page lifetime is protected by the fact that we have opened the + * device node -- brd pages will never be deleted under us, so we + * don't need any further locking or refcounting. + * + * This is strictly true for the radix-tree nodes as well (ie. 
we + * don't actually need the rcu_read_lock()), however that is not a + * documented feature of the radix-tree API so it is better to be + * safe here (we don't have total exclusion from radix tree updates + * here, only deletes). + */ + rcu_read_lock(); + idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */ + page = radix_tree_lookup(&brd->brd_pages, idx); + rcu_read_unlock(); + + BUG_ON(page && page->index != idx); + + return page; +} + +/* + * Look up and return a brd's page for a given sector. + * If one does not exist, allocate an empty page, and insert that. Then + * return it. + */ +static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) +{ + pgoff_t idx; + struct page *page; + gfp_t gfp_flags; + + page = brd_lookup_page(brd, sector); + if (page) + return page; + + /* + * Must use NOIO because we don't want to recurse back into the + * block or filesystem layers from page reclaim. + * + * Cannot support XIP and highmem, because our ->direct_access + * routine for XIP must return memory that is always addressable. + * If XIP was reworked to use pfns and kmap throughout, this + * restriction might be able to be lifted. + */ + gfp_flags = GFP_NOIO | __GFP_ZERO; +#ifndef CONFIG_BLK_DEV_XIP + gfp_flags |= __GFP_HIGHMEM; +#endif + page = alloc_page(gfp_flags); + if (!page) + return NULL; + + if (radix_tree_preload(GFP_NOIO)) { + __free_page(page); + return NULL; + } + + spin_lock(&brd->brd_lock); + idx = sector >> PAGE_SECTORS_SHIFT; + if (radix_tree_insert(&brd->brd_pages, idx, page)) { + __free_page(page); + page = radix_tree_lookup(&brd->brd_pages, idx); + BUG_ON(!page); + BUG_ON(page->index != idx); + } else + page->index = idx; + spin_unlock(&brd->brd_lock); + + radix_tree_preload_end(); + + return page; +} + +/* + * Free all backing store pages and radix tree. This must only be called when + * there are no other users of the device. + */ +#define FREE_BATCH 16 +static void brd_free_pages(struct brd_device *brd) +{ + unsigned long pos = 0; + struct page *pages[FREE_BATCH]; + int nr_pages; + + do { + int i; + + nr_pages = radix_tree_gang_lookup(&brd->brd_pages, + (void **)pages, pos, FREE_BATCH); + + for (i = 0; i < nr_pages; i++) { + void *ret; + + BUG_ON(pages[i]->index < pos); + pos = pages[i]->index; + ret = radix_tree_delete(&brd->brd_pages, pos); + BUG_ON(!ret || ret != pages[i]); + __free_page(pages[i]); + } + + pos++; + + /* + * This assumes radix_tree_gang_lookup always returns as + * many pages as possible. If the radix-tree code changes, + * so will this have to. + */ + } while (nr_pages == FREE_BATCH); +} + +/* + * copy_to_brd_setup must be called before copy_to_brd. It may sleep. + */ +static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n) +{ + unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; + size_t copy; + + copy = min_t(size_t, n, PAGE_SIZE - offset); + if (!brd_insert_page(brd, sector)) + return -ENOMEM; + if (copy < n) { + sector += copy >> SECTOR_SHIFT; + if (!brd_insert_page(brd, sector)) + return -ENOMEM; + } + return 0; +} + +/* + * Copy n bytes from src to the brd starting at sector. Does not sleep. 
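+ * Page arithmetic (cf. brd_lookup_page()): with 4K pages,
+ * PAGE_SECTORS is 8, so sector s lives in the page with index
+ * s >> 3, at byte offset (s & 7) << 9. A span that crosses a page
+ * boundary is copied in two pieces; copy_to_brd_setup() has already
+ * instantiated both pages.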
+ */ +static void copy_to_brd(struct brd_device *brd, const void *src, + sector_t sector, size_t n) +{ + struct page *page; + void *dst; + unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; + size_t copy; + + copy = min_t(size_t, n, PAGE_SIZE - offset); + page = brd_lookup_page(brd, sector); + BUG_ON(!page); + + dst = kmap_atomic(page, KM_USER1); + memcpy(dst + offset, src, copy); + kunmap_atomic(dst, KM_USER1); + + if (copy < n) { + src += copy; + sector += copy >> SECTOR_SHIFT; + copy = n - copy; + page = brd_lookup_page(brd, sector); + BUG_ON(!page); + + dst = kmap_atomic(page, KM_USER1); + memcpy(dst, src, copy); + kunmap_atomic(dst, KM_USER1); + } +} + +/* + * Copy n bytes to dst from the brd starting at sector. Does not sleep. + */ +static void copy_from_brd(void *dst, struct brd_device *brd, + sector_t sector, size_t n) +{ + struct page *page; + void *src; + unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; + size_t copy; + + copy = min_t(size_t, n, PAGE_SIZE - offset); + page = brd_lookup_page(brd, sector); + if (page) { + src = kmap_atomic(page, KM_USER1); + memcpy(dst, src + offset, copy); + kunmap_atomic(src, KM_USER1); + } else + memset(dst, 0, copy); + + if (copy < n) { + dst += copy; + sector += copy >> SECTOR_SHIFT; + copy = n - copy; + page = brd_lookup_page(brd, sector); + if (page) { + src = kmap_atomic(page, KM_USER1); + memcpy(dst, src, copy); + kunmap_atomic(src, KM_USER1); + } else + memset(dst, 0, copy); + } +} + +/* + * Process a single bvec of a bio. + */ +static int brd_do_bvec(struct brd_device *brd, struct page *page, + unsigned int len, unsigned int off, int rw, + sector_t sector) +{ + void *mem; + int err = 0; + + if (rw != READ) { + err = copy_to_brd_setup(brd, sector, len); + if (err) + goto out; + } + + mem = kmap_atomic(page, KM_USER0); + if (rw == READ) { + copy_from_brd(mem + off, brd, sector, len); + flush_dcache_page(page); + } else + copy_to_brd(brd, mem + off, sector, len); + kunmap_atomic(mem, KM_USER0); + +out: + return err; +} + +static int brd_make_request(struct request_queue *q, struct bio *bio) +{ + struct block_device *bdev = bio->bi_bdev; + struct brd_device *brd = bdev->bd_disk->private_data; + int rw; + struct bio_vec *bvec; + sector_t sector; + int i; + int err = -EIO; + + sector = bio->bi_sector; + if (sector + (bio->bi_size >> SECTOR_SHIFT) > + get_capacity(bdev->bd_disk)) + goto out; + + rw = bio_rw(bio); + if (rw == READA) + rw = READ; + + bio_for_each_segment(bvec, bio, i) { + unsigned int len = bvec->bv_len; + err = brd_do_bvec(brd, bvec->bv_page, len, + bvec->bv_offset, rw, sector); + if (err) + break; + sector += len >> SECTOR_SHIFT; + } + +out: + bio_endio(bio, err); + + return 0; +} + +#ifdef CONFIG_BLK_DEV_XIP +static int brd_direct_access (struct block_device *bdev, sector_t sector, + void **kaddr, unsigned long *pfn) +{ + struct brd_device *brd = bdev->bd_disk->private_data; + struct page *page; + + if (!brd) + return -ENODEV; + if (sector & (PAGE_SECTORS-1)) + return -EINVAL; + if (sector + PAGE_SECTORS > get_capacity(bdev->bd_disk)) + return -ERANGE; + page = brd_insert_page(brd, sector); + if (!page) + return -ENOMEM; + *kaddr = page_address(page); + *pfn = page_to_pfn(page); + + return 0; +} +#endif + +static int brd_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + int error; + struct brd_device *brd = bdev->bd_disk->private_data; + + if (cmd != BLKFLSBUF) + return -ENOTTY; + + /* + * ram device BLKFLSBUF has special semantics, we want to actually 
+ * release and destroy the ramdisk data. + */ + mutex_lock(&bdev->bd_mutex); + error = -EBUSY; + if (bdev->bd_openers <= 1) { + /* + * Invalidate the cache first, so it isn't written + * back to the device. + * + * Another thread might instantiate more buffercache here, + * but there is not much we can do to close that race. + */ + invalidate_bh_lrus(); + truncate_inode_pages(bdev->bd_inode->i_mapping, 0); + brd_free_pages(brd); + error = 0; + } + mutex_unlock(&bdev->bd_mutex); + + return error; +} + +static struct block_device_operations brd_fops = { + .owner = THIS_MODULE, + .locked_ioctl = brd_ioctl, +#ifdef CONFIG_BLK_DEV_XIP + .direct_access = brd_direct_access, +#endif +}; + +/* + * And now the modules code and kernel interface. + */ +static int rd_nr; +int rd_size = CONFIG_BLK_DEV_RAM_SIZE; +static int max_part; +static int part_shift; +module_param(rd_nr, int, 0); +MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices"); +module_param(rd_size, int, 0); +MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes."); +module_param(max_part, int, 0); +MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR); +MODULE_ALIAS("rd"); + +#ifndef MODULE +/* Legacy boot options - nonmodular */ +static int __init ramdisk_size(char *str) +{ + rd_size = simple_strtol(str, NULL, 0); + return 1; +} +static int __init ramdisk_size2(char *str) +{ + return ramdisk_size(str); +} +__setup("ramdisk=", ramdisk_size); +__setup("ramdisk_size=", ramdisk_size2); +#endif + +/* + * The device scheme is derived from loop.c. Keep them in synch where possible + * (should share code eventually). + */ +static LIST_HEAD(brd_devices); +static DEFINE_MUTEX(brd_devices_mutex); + +static struct brd_device *brd_alloc(int i) +{ + struct brd_device *brd; + struct gendisk *disk; + + brd = kzalloc(sizeof(*brd), GFP_KERNEL); + if (!brd) + goto out; + brd->brd_number = i; + spin_lock_init(&brd->brd_lock); + INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC); + + brd->brd_queue = blk_alloc_queue(GFP_KERNEL); + if (!brd->brd_queue) + goto out_free_dev; + blk_queue_make_request(brd->brd_queue, brd_make_request); + blk_queue_max_sectors(brd->brd_queue, 1024); + blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); + + disk = brd->brd_disk = alloc_disk(1 << part_shift); + if (!disk) + goto out_free_queue; + disk->major = RAMDISK_MAJOR; + disk->first_minor = i << part_shift; + disk->fops = &brd_fops; + disk->private_data = brd; + disk->queue = brd->brd_queue; + disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO; + sprintf(disk->disk_name, "ram%d", i); + set_capacity(disk, rd_size * 2); + + return brd; + +out_free_queue: + blk_cleanup_queue(brd->brd_queue); +out_free_dev: + kfree(brd); +out: + return NULL; +} + +static void brd_free(struct brd_device *brd) +{ + put_disk(brd->brd_disk); + blk_cleanup_queue(brd->brd_queue); + brd_free_pages(brd); + kfree(brd); +} + +static struct brd_device *brd_init_one(int i) +{ + struct brd_device *brd; + + list_for_each_entry(brd, &brd_devices, brd_list) { + if (brd->brd_number == i) + goto out; + } + + brd = brd_alloc(i); + if (brd) { + add_disk(brd->brd_disk); + list_add_tail(&brd->brd_list, &brd_devices); + } +out: + return brd; +} + +static void brd_del_one(struct brd_device *brd) +{ + list_del(&brd->brd_list); + del_gendisk(brd->brd_disk); + brd_free(brd); +} + +static struct kobject *brd_probe(dev_t dev, int *part, void *data) +{ + struct brd_device *brd; + struct kobject *kobj; + + mutex_lock(&brd_devices_mutex); 
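+	/* First open of an unused minor instantiates the device on
+	 * demand (see the comment in brd_init() below); an allocation
+	 * failure is reported to the caller as ERR_PTR(-ENOMEM). */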
+ brd = brd_init_one(dev & MINORMASK); + kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM); + mutex_unlock(&brd_devices_mutex); + + *part = 0; + return kobj; +} + +static int __init brd_init(void) +{ + int i, nr; + unsigned long range; + struct brd_device *brd, *next; + + /* + * brd module now has a feature to instantiate underlying device + * structure on-demand, provided that there is an access dev node. + * However, this will not work well with user space tool that doesn't + * know about such "feature". In order to not break any existing + * tool, we do the following: + * + * (1) if rd_nr is specified, create that many upfront, and this + * also becomes a hard limit. + * (2) if rd_nr is not specified, create 1 rd device on module + * load, user can further extend brd device by create dev node + * themselves and have kernel automatically instantiate actual + * device on-demand. + */ + + part_shift = 0; + if (max_part > 0) + part_shift = fls(max_part); + + if (rd_nr > 1UL << (MINORBITS - part_shift)) + return -EINVAL; + + if (rd_nr) { + nr = rd_nr; + range = rd_nr; + } else { + nr = CONFIG_BLK_DEV_RAM_COUNT; + range = 1UL << (MINORBITS - part_shift); + } + + if (register_blkdev(RAMDISK_MAJOR, "ramdisk")) + return -EIO; + + for (i = 0; i < nr; i++) { + brd = brd_alloc(i); + if (!brd) + goto out_free; + list_add_tail(&brd->brd_list, &brd_devices); + } + + /* point of no return */ + + list_for_each_entry(brd, &brd_devices, brd_list) + add_disk(brd->brd_disk); + + blk_register_region(MKDEV(RAMDISK_MAJOR, 0), range, + THIS_MODULE, brd_probe, NULL, NULL); + + printk(KERN_INFO "brd: module loaded\n"); + return 0; + +out_free: + list_for_each_entry_safe(brd, next, &brd_devices, brd_list) { + list_del(&brd->brd_list); + brd_free(brd); + } + unregister_blkdev(RAMDISK_MAJOR, "ramdisk"); + + return -ENOMEM; +} + +static void __exit brd_exit(void) +{ + unsigned long range; + struct brd_device *brd, *next; + + range = rd_nr ? rd_nr : 1UL << (MINORBITS - part_shift); + + list_for_each_entry_safe(brd, next, &brd_devices, brd_list) + brd_del_one(brd); + + blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), range); + unregister_blkdev(RAMDISK_MAJOR, "ramdisk"); +} + +module_init(brd_init); +module_exit(brd_exit); + diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c new file mode 100644 index 0000000..9f7c543 --- /dev/null +++ b/drivers/block/cciss.c @@ -0,0 +1,3757 @@ +/* + * Disk Array driver for HP Smart Array controllers. + * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA + * 02111-1307, USA. 
+ * + * Questions/Comments/Bugfixes to iss_storagedev@hp.com + * + */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/major.h> +#include <linux/fs.h> +#include <linux/bio.h> +#include <linux/blkpg.h> +#include <linux/timer.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/init.h> +#include <linux/hdreg.h> +#include <linux/spinlock.h> +#include <linux/compat.h> +#include <linux/blktrace_api.h> +#include <asm/uaccess.h> +#include <asm/io.h> + +#include <linux/dma-mapping.h> +#include <linux/blkdev.h> +#include <linux/genhd.h> +#include <linux/completion.h> +#include <scsi/scsi.h> +#include <scsi/sg.h> +#include <scsi/scsi_ioctl.h> +#include <linux/cdrom.h> +#include <linux/scatterlist.h> + +#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin)) +#define DRIVER_NAME "HP CISS Driver (v 3.6.20)" +#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20) + +/* Embedded module documentation macros - see modules.h */ +MODULE_AUTHOR("Hewlett-Packard Company"); +MODULE_DESCRIPTION("Driver for HP Smart Array Controllers"); +MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400" + " SA6i P600 P800 P400 P400i E200 E200i E500 P700m" + " Smart Array G2 Series SAS/SATA Controllers"); +MODULE_VERSION("3.6.20"); +MODULE_LICENSE("GPL"); + +#include "cciss_cmd.h" +#include "cciss.h" +#include <linux/cciss_ioctl.h> + +/* define the PCI info for the cards we can control */ +static const struct pci_device_id cciss_pci_device_id[] = { + {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070}, + {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080}, + {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082}, + {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083}, + {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091}, + {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A}, + {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B}, + {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C}, + {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, + {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, + {0,} +}; + +MODULE_DEVICE_TABLE(pci, 
cciss_pci_device_id); + +/* board_id = Subsystem Device ID & Vendor ID + * product = Marketing Name for the board + * access = Address of the struct of function pointers + */ +static struct board_type products[] = { + {0x40700E11, "Smart Array 5300", &SA5_access}, + {0x40800E11, "Smart Array 5i", &SA5B_access}, + {0x40820E11, "Smart Array 532", &SA5B_access}, + {0x40830E11, "Smart Array 5312", &SA5B_access}, + {0x409A0E11, "Smart Array 641", &SA5_access}, + {0x409B0E11, "Smart Array 642", &SA5_access}, + {0x409C0E11, "Smart Array 6400", &SA5_access}, + {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, + {0x40910E11, "Smart Array 6i", &SA5_access}, + {0x3225103C, "Smart Array P600", &SA5_access}, + {0x3223103C, "Smart Array P800", &SA5_access}, + {0x3234103C, "Smart Array P400", &SA5_access}, + {0x3235103C, "Smart Array P400i", &SA5_access}, + {0x3211103C, "Smart Array E200i", &SA5_access}, + {0x3212103C, "Smart Array E200", &SA5_access}, + {0x3213103C, "Smart Array E200i", &SA5_access}, + {0x3214103C, "Smart Array E200i", &SA5_access}, + {0x3215103C, "Smart Array E200i", &SA5_access}, + {0x3237103C, "Smart Array E500", &SA5_access}, + {0x323D103C, "Smart Array P700m", &SA5_access}, + {0x3241103C, "Smart Array P212", &SA5_access}, + {0x3243103C, "Smart Array P410", &SA5_access}, + {0x3245103C, "Smart Array P410i", &SA5_access}, + {0x3247103C, "Smart Array P411", &SA5_access}, + {0x3249103C, "Smart Array P812", &SA5_access}, + {0x324A103C, "Smart Array P712m", &SA5_access}, + {0x324B103C, "Smart Array P711m", &SA5_access}, + {0xFFFF103C, "Unknown Smart Array", &SA5_access}, +}; + +/* How long to wait (in milliseconds) for board to go into simple mode */ +#define MAX_CONFIG_WAIT 30000 +#define MAX_IOCTL_CONFIG_WAIT 1000 + +/*define how many times we will try a command because of bus resets */ +#define MAX_CMD_RETRIES 3 + +#define MAX_CTLR 32 + +/* Originally cciss driver only supports 8 major numbers */ +#define MAX_CTLR_ORIG 8 + +static ctlr_info_t *hba[MAX_CTLR]; + +static void do_cciss_request(struct request_queue *q); +static irqreturn_t do_cciss_intr(int irq, void *dev_id); +static int cciss_open(struct block_device *bdev, fmode_t mode); +static int cciss_release(struct gendisk *disk, fmode_t mode); +static int cciss_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg); +static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); + +static int cciss_revalidate(struct gendisk *disk); +static int rebuild_lun_table(ctlr_info_t *h, int first_time); +static int deregister_disk(struct gendisk *disk, drive_info_struct *drv, + int clear_all); + +static void cciss_read_capacity(int ctlr, int logvol, int withirq, + sector_t *total_size, unsigned int *block_size); +static void cciss_read_capacity_16(int ctlr, int logvol, int withirq, + sector_t *total_size, unsigned int *block_size); +static void cciss_geometry_inquiry(int ctlr, int logvol, + int withirq, sector_t total_size, + unsigned int block_size, InquiryData_struct *inq_buff, + drive_info_struct *drv); +static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *, + __u32); +static void start_io(ctlr_info_t *h); +static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, + unsigned int use_unit_num, unsigned int log_unit, + __u8 page_code, unsigned char *scsi3addr, int cmd_type); +static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, + unsigned int use_unit_num, unsigned int log_unit, + __u8 page_code, int cmd_type); + +static void fail_all_cmds(unsigned 
long ctlr); + +#ifdef CONFIG_PROC_FS +static void cciss_procinit(int i); +#else +static void cciss_procinit(int i) +{ +} +#endif /* CONFIG_PROC_FS */ + +#ifdef CONFIG_COMPAT +static int cciss_compat_ioctl(struct block_device *, fmode_t, + unsigned, unsigned long); +#endif + +static struct block_device_operations cciss_fops = { + .owner = THIS_MODULE, + .open = cciss_open, + .release = cciss_release, + .locked_ioctl = cciss_ioctl, + .getgeo = cciss_getgeo, +#ifdef CONFIG_COMPAT + .compat_ioctl = cciss_compat_ioctl, +#endif + .revalidate_disk = cciss_revalidate, +}; + +/* + * Enqueuing and dequeuing functions for cmdlists. + */ +static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c) +{ + if (*Qptr == NULL) { + *Qptr = c; + c->next = c->prev = c; + } else { + c->prev = (*Qptr)->prev; + c->next = (*Qptr); + (*Qptr)->prev->next = c; + (*Qptr)->prev = c; + } +} + +static inline CommandList_struct *removeQ(CommandList_struct **Qptr, + CommandList_struct *c) +{ + if (c && c->next != c) { + if (*Qptr == c) + *Qptr = c->next; + c->prev->next = c->next; + c->next->prev = c->prev; + } else { + *Qptr = NULL; + } + return c; +} + +#include "cciss_scsi.c" /* For SCSI tape support */ + +#define RAID_UNKNOWN 6 + +#ifdef CONFIG_PROC_FS + +/* + * Report information about this controller. + */ +#define ENG_GIG 1000000000 +#define ENG_GIG_FACTOR (ENG_GIG/512) +#define ENGAGE_SCSI "engage scsi" +static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", + "UNKNOWN" +}; + +static struct proc_dir_entry *proc_cciss; + +static void cciss_seq_show_header(struct seq_file *seq) +{ + ctlr_info_t *h = seq->private; + + seq_printf(seq, "%s: HP %s Controller\n" + "Board ID: 0x%08lx\n" + "Firmware Version: %c%c%c%c\n" + "IRQ: %d\n" + "Logical drives: %d\n" + "Current Q depth: %d\n" + "Current # commands on controller: %d\n" + "Max Q depth since init: %d\n" + "Max # commands on controller since init: %d\n" + "Max SG entries since init: %d\n", + h->devname, + h->product_name, + (unsigned long)h->board_id, + h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], + h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], + h->num_luns, + h->Qdepth, h->commands_outstanding, + h->maxQsinceinit, h->max_outstanding, h->maxSG); + +#ifdef CONFIG_CISS_SCSI_TAPE + cciss_seq_tape_report(seq, h->ctlr); +#endif /* CONFIG_CISS_SCSI_TAPE */ +} + +static void *cciss_seq_start(struct seq_file *seq, loff_t *pos) +{ + ctlr_info_t *h = seq->private; + unsigned ctlr = h->ctlr; + unsigned long flags; + + /* prevent displaying bogus info during configuration + * or deconfiguration of a logical volume + */ + spin_lock_irqsave(CCISS_LOCK(ctlr), flags); + if (h->busy_configuring) { + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + return ERR_PTR(-EBUSY); + } + h->busy_configuring = 1; + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + + if (*pos == 0) + cciss_seq_show_header(seq); + + return pos; +} + +static int cciss_seq_show(struct seq_file *seq, void *v) +{ + sector_t vol_sz, vol_sz_frac; + ctlr_info_t *h = seq->private; + unsigned ctlr = h->ctlr; + loff_t *pos = v; + drive_info_struct *drv = &h->drv[*pos]; + + if (*pos > h->highest_lun) + return 0; + + if (drv->heads == 0) + return 0; + + vol_sz = drv->nr_blocks; + vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); + vol_sz_frac *= 100; + sector_div(vol_sz_frac, ENG_GIG_FACTOR); + + if (drv->raid_level > 5) + drv->raid_level = RAID_UNKNOWN; + seq_printf(seq, "cciss/c%dd%d:" + "\t%4u.%02uGB\tRAID %s\n", + ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac, + 
raid_label[drv->raid_level]); + return 0; +} + +static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + ctlr_info_t *h = seq->private; + + if (*pos > h->highest_lun) + return NULL; + *pos += 1; + + return pos; +} + +static void cciss_seq_stop(struct seq_file *seq, void *v) +{ + ctlr_info_t *h = seq->private; + + /* Only reset h->busy_configuring if we succeeded in setting + * it during cciss_seq_start. */ + if (v == ERR_PTR(-EBUSY)) + return; + + h->busy_configuring = 0; +} + +static struct seq_operations cciss_seq_ops = { + .start = cciss_seq_start, + .show = cciss_seq_show, + .next = cciss_seq_next, + .stop = cciss_seq_stop, +}; + +static int cciss_seq_open(struct inode *inode, struct file *file) +{ + int ret = seq_open(file, &cciss_seq_ops); + struct seq_file *seq = file->private_data; + + if (!ret) + seq->private = PDE(inode)->data; + + return ret; +} + +static ssize_t +cciss_proc_write(struct file *file, const char __user *buf, + size_t length, loff_t *ppos) +{ + int err; + char *buffer; + +#ifndef CONFIG_CISS_SCSI_TAPE + return -EINVAL; +#endif + + if (!buf || length > PAGE_SIZE - 1) + return -EINVAL; + + buffer = (char *)__get_free_page(GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + err = -EFAULT; + if (copy_from_user(buffer, buf, length)) + goto out; + buffer[length] = '\0'; + +#ifdef CONFIG_CISS_SCSI_TAPE + if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) { + struct seq_file *seq = file->private_data; + ctlr_info_t *h = seq->private; + int rc; + + rc = cciss_engage_scsi(h->ctlr); + if (rc != 0) + err = -rc; + else + err = length; + } else +#endif /* CONFIG_CISS_SCSI_TAPE */ + err = -EINVAL; + /* might be nice to have "disengage" too, but it's not + safely possible. (only 1 module use count, lock issues.) */ + +out: + free_page((unsigned long)buffer); + return err; +} + +static struct file_operations cciss_proc_fops = { + .owner = THIS_MODULE, + .open = cciss_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, + .write = cciss_proc_write, +}; + +static void __devinit cciss_procinit(int i) +{ + struct proc_dir_entry *pde; + + if (proc_cciss == NULL) + proc_cciss = proc_mkdir("driver/cciss", NULL); + if (!proc_cciss) + return; + pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP | + S_IROTH, proc_cciss, + &cciss_proc_fops, hba[i]); +} +#endif /* CONFIG_PROC_FS */ + +/* + * For operations that cannot sleep, a command block is allocated at init, + * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track + * which ones are free or in use. For operations that can wait for kmalloc + * to possible sleep, this routine can be called with get_from_pool set to 0. + * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was. 
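+ *
+ * A minimal usage sketch of the pool path (error handling elided;
+ * 'h' is the controller state the caller already holds):
+ *
+ *	c = cmd_alloc(h, 1);	(1 = take a slot from the pool)
+ *	... fill in c, submit it, wait for completion ...
+ *	cmd_free(h, c, 1);	(must match the value given to cmd_alloc)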
+ */ +static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool) +{ + CommandList_struct *c; + int i; + u64bit temp64; + dma_addr_t cmd_dma_handle, err_dma_handle; + + if (!get_from_pool) { + c = (CommandList_struct *) pci_alloc_consistent(h->pdev, + sizeof(CommandList_struct), &cmd_dma_handle); + if (c == NULL) + return NULL; + memset(c, 0, sizeof(CommandList_struct)); + + c->cmdindex = -1; + + c->err_info = (ErrorInfo_struct *) + pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct), + &err_dma_handle); + + if (c->err_info == NULL) { + pci_free_consistent(h->pdev, + sizeof(CommandList_struct), c, cmd_dma_handle); + return NULL; + } + memset(c->err_info, 0, sizeof(ErrorInfo_struct)); + } else { /* get it out of the controllers pool */ + + do { + i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); + if (i == h->nr_cmds) + return NULL; + } while (test_and_set_bit + (i & (BITS_PER_LONG - 1), + h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); +#ifdef CCISS_DEBUG + printk(KERN_DEBUG "cciss: using command buffer %d\n", i); +#endif + c = h->cmd_pool + i; + memset(c, 0, sizeof(CommandList_struct)); + cmd_dma_handle = h->cmd_pool_dhandle + + i * sizeof(CommandList_struct); + c->err_info = h->errinfo_pool + i; + memset(c->err_info, 0, sizeof(ErrorInfo_struct)); + err_dma_handle = h->errinfo_pool_dhandle + + i * sizeof(ErrorInfo_struct); + h->nr_allocs++; + + c->cmdindex = i; + } + + c->busaddr = (__u32) cmd_dma_handle; + temp64.val = (__u64) err_dma_handle; + c->ErrDesc.Addr.lower = temp64.val32.lower; + c->ErrDesc.Addr.upper = temp64.val32.upper; + c->ErrDesc.Len = sizeof(ErrorInfo_struct); + + c->ctlr = h->ctlr; + return c; +} + +/* + * Frees a command block that was previously allocated with cmd_alloc(). + */ +static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool) +{ + int i; + u64bit temp64; + + if (!got_from_pool) { + temp64.val32.lower = c->ErrDesc.Addr.lower; + temp64.val32.upper = c->ErrDesc.Addr.upper; + pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct), + c->err_info, (dma_addr_t) temp64.val); + pci_free_consistent(h->pdev, sizeof(CommandList_struct), + c, (dma_addr_t) c->busaddr); + } else { + i = c - h->cmd_pool; + clear_bit(i & (BITS_PER_LONG - 1), + h->cmd_pool_bits + (i / BITS_PER_LONG)); + h->nr_frees++; + } +} + +static inline ctlr_info_t *get_host(struct gendisk *disk) +{ + return disk->queue->queuedata; +} + +static inline drive_info_struct *get_drv(struct gendisk *disk) +{ + return disk->private_data; +} + +/* + * Open. Make sure the device is really there. + */ +static int cciss_open(struct block_device *bdev, fmode_t mode) +{ + ctlr_info_t *host = get_host(bdev->bd_disk); + drive_info_struct *drv = get_drv(bdev->bd_disk); + +#ifdef CCISS_DEBUG + printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name); +#endif /* CCISS_DEBUG */ + + if (host->busy_initializing || drv->busy_configuring) + return -EBUSY; + /* + * Root is allowed to open raw volume zero even if it's not configured + * so array config can still work. Root is also allowed to open any + * volume that has a LUN ID, so it can issue IOCTL to reread the + * disk information. I don't think I really like this + * but I'm already using way to many device nodes to claim another one + * for "raw controller". + */ + if (drv->heads == 0) { + if (MINOR(bdev->bd_dev) != 0) { /* not node 0? 
*/ + /* if not node 0 make sure it is a partition = 0 */ + if (MINOR(bdev->bd_dev) & 0x0f) { + return -ENXIO; + /* if it is, make sure we have a LUN ID */ + } else if (drv->LunID == 0) { + return -ENXIO; + } + } + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + } + drv->usage_count++; + host->usage_count++; + return 0; +} + +/* + * Close. Sync first. + */ +static int cciss_release(struct gendisk *disk, fmode_t mode) +{ + ctlr_info_t *host = get_host(disk); + drive_info_struct *drv = get_drv(disk); + +#ifdef CCISS_DEBUG + printk(KERN_DEBUG "cciss_release %s\n", disk->disk_name); +#endif /* CCISS_DEBUG */ + + drv->usage_count--; + host->usage_count--; + return 0; +} + +#ifdef CONFIG_COMPAT + +static int do_ioctl(struct block_device *bdev, fmode_t mode, + unsigned cmd, unsigned long arg) +{ + int ret; + lock_kernel(); + ret = cciss_ioctl(bdev, mode, cmd, arg); + unlock_kernel(); + return ret; +} + +static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, + unsigned cmd, unsigned long arg); +static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode, + unsigned cmd, unsigned long arg); + +static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode, + unsigned cmd, unsigned long arg) +{ + switch (cmd) { + case CCISS_GETPCIINFO: + case CCISS_GETINTINFO: + case CCISS_SETINTINFO: + case CCISS_GETNODENAME: + case CCISS_SETNODENAME: + case CCISS_GETHEARTBEAT: + case CCISS_GETBUSTYPES: + case CCISS_GETFIRMVER: + case CCISS_GETDRIVVER: + case CCISS_REVALIDVOLS: + case CCISS_DEREGDISK: + case CCISS_REGNEWDISK: + case CCISS_REGNEWD: + case CCISS_RESCANDISK: + case CCISS_GETLUNINFO: + return do_ioctl(bdev, mode, cmd, arg); + + case CCISS_PASSTHRU32: + return cciss_ioctl32_passthru(bdev, mode, cmd, arg); + case CCISS_BIG_PASSTHRU32: + return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg); + + default: + return -ENOIOCTLCMD; + } +} + +static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, + unsigned cmd, unsigned long arg) +{ + IOCTL32_Command_struct __user *arg32 = + (IOCTL32_Command_struct __user *) arg; + IOCTL_Command_struct arg64; + IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); + int err; + u32 cp; + + err = 0; + err |= + copy_from_user(&arg64.LUN_info, &arg32->LUN_info, + sizeof(arg64.LUN_info)); + err |= + copy_from_user(&arg64.Request, &arg32->Request, + sizeof(arg64.Request)); + err |= + copy_from_user(&arg64.error_info, &arg32->error_info, + sizeof(arg64.error_info)); + err |= get_user(arg64.buf_size, &arg32->buf_size); + err |= get_user(cp, &arg32->buf); + arg64.buf = compat_ptr(cp); + err |= copy_to_user(p, &arg64, sizeof(arg64)); + + if (err) + return -EFAULT; + + err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p); + if (err) + return err; + err |= + copy_in_user(&arg32->error_info, &p->error_info, + sizeof(arg32->error_info)); + if (err) + return -EFAULT; + return err; +} + +static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode, + unsigned cmd, unsigned long arg) +{ + BIG_IOCTL32_Command_struct __user *arg32 = + (BIG_IOCTL32_Command_struct __user *) arg; + BIG_IOCTL_Command_struct arg64; + BIG_IOCTL_Command_struct __user *p = + compat_alloc_user_space(sizeof(arg64)); + int err; + u32 cp; + + err = 0; + err |= + copy_from_user(&arg64.LUN_info, &arg32->LUN_info, + sizeof(arg64.LUN_info)); + err |= + copy_from_user(&arg64.Request, &arg32->Request, + sizeof(arg64.Request)); + err |= + copy_from_user(&arg64.error_info, &arg32->error_info, + sizeof(arg64.error_info)); + 
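/*
+	 * Each field of the 32-bit user layout is copied into a native
+	 * struct staged in compat_alloc_user_space() memory; the 32-bit
+	 * buf pointer is widened with compat_ptr() below, so the native
+	 * CCISS_BIG_PASSTHRU handler can then be reused unchanged.
+	 */
+	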
err |= get_user(arg64.buf_size, &arg32->buf_size); + err |= get_user(arg64.malloc_size, &arg32->malloc_size); + err |= get_user(cp, &arg32->buf); + arg64.buf = compat_ptr(cp); + err |= copy_to_user(p, &arg64, sizeof(arg64)); + + if (err) + return -EFAULT; + + err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p); + if (err) + return err; + err |= + copy_in_user(&arg32->error_info, &p->error_info, + sizeof(arg32->error_info)); + if (err) + return -EFAULT; + return err; +} +#endif + +static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + drive_info_struct *drv = get_drv(bdev->bd_disk); + + if (!drv->cylinders) + return -ENXIO; + + geo->heads = drv->heads; + geo->sectors = drv->sectors; + geo->cylinders = drv->cylinders; + return 0; +} + +/* + * ioctl + */ +static int cciss_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct gendisk *disk = bdev->bd_disk; + ctlr_info_t *host = get_host(disk); + drive_info_struct *drv = get_drv(disk); + int ctlr = host->ctlr; + void __user *argp = (void __user *)arg; + +#ifdef CCISS_DEBUG + printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg); +#endif /* CCISS_DEBUG */ + + switch (cmd) { + case CCISS_GETPCIINFO: + { + cciss_pci_info_struct pciinfo; + + if (!arg) + return -EINVAL; + pciinfo.domain = pci_domain_nr(host->pdev->bus); + pciinfo.bus = host->pdev->bus->number; + pciinfo.dev_fn = host->pdev->devfn; + pciinfo.board_id = host->board_id; + if (copy_to_user + (argp, &pciinfo, sizeof(cciss_pci_info_struct))) + return -EFAULT; + return 0; + } + case CCISS_GETINTINFO: + { + cciss_coalint_struct intinfo; + if (!arg) + return -EINVAL; + intinfo.delay = + readl(&host->cfgtable->HostWrite.CoalIntDelay); + intinfo.count = + readl(&host->cfgtable->HostWrite.CoalIntCount); + if (copy_to_user + (argp, &intinfo, sizeof(cciss_coalint_struct))) + return -EFAULT; + return 0; + } + case CCISS_SETINTINFO: + { + cciss_coalint_struct intinfo; + unsigned long flags; + int i; + + if (!arg) + return -EINVAL; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (copy_from_user + (&intinfo, argp, sizeof(cciss_coalint_struct))) + return -EFAULT; + if ((intinfo.delay == 0) && (intinfo.count == 0)) + { +// printk("cciss_ioctl: delay and count cannot be 0\n"); + return -EINVAL; + } + spin_lock_irqsave(CCISS_LOCK(ctlr), flags); + /* Update the field, and then ring the doorbell */ + writel(intinfo.delay, + &(host->cfgtable->HostWrite.CoalIntDelay)); + writel(intinfo.count, + &(host->cfgtable->HostWrite.CoalIntCount)); + writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL); + + for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { + if (!(readl(host->vaddr + SA5_DOORBELL) + & CFGTBL_ChangeReq)) + break; + /* delay and try again */ + udelay(1000); + } + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + if (i >= MAX_IOCTL_CONFIG_WAIT) + return -EAGAIN; + return 0; + } + case CCISS_GETNODENAME: + { + NodeName_type NodeName; + int i; + + if (!arg) + return -EINVAL; + for (i = 0; i < 16; i++) + NodeName[i] = + readb(&host->cfgtable->ServerName[i]); + if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) + return -EFAULT; + return 0; + } + case CCISS_SETNODENAME: + { + NodeName_type NodeName; + unsigned long flags; + int i; + + if (!arg) + return -EINVAL; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user + (NodeName, argp, sizeof(NodeName_type))) + return -EFAULT; + + spin_lock_irqsave(CCISS_LOCK(ctlr), flags); + + /* Update the field, and then ring the doorbell */ + for (i = 
0; i < 16; i++) + writeb(NodeName[i], + &host->cfgtable->ServerName[i]); + + writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL); + + for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { + if (!(readl(host->vaddr + SA5_DOORBELL) + & CFGTBL_ChangeReq)) + break; + /* delay and try again */ + udelay(1000); + } + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + if (i >= MAX_IOCTL_CONFIG_WAIT) + return -EAGAIN; + return 0; + } + + case CCISS_GETHEARTBEAT: + { + Heartbeat_type heartbeat; + + if (!arg) + return -EINVAL; + heartbeat = readl(&host->cfgtable->HeartBeat); + if (copy_to_user + (argp, &heartbeat, sizeof(Heartbeat_type))) + return -EFAULT; + return 0; + } + case CCISS_GETBUSTYPES: + { + BusTypes_type BusTypes; + + if (!arg) + return -EINVAL; + BusTypes = readl(&host->cfgtable->BusTypes); + if (copy_to_user + (argp, &BusTypes, sizeof(BusTypes_type))) + return -EFAULT; + return 0; + } + case CCISS_GETFIRMVER: + { + FirmwareVer_type firmware; + + if (!arg) + return -EINVAL; + memcpy(firmware, host->firm_ver, 4); + + if (copy_to_user + (argp, firmware, sizeof(FirmwareVer_type))) + return -EFAULT; + return 0; + } + case CCISS_GETDRIVVER: + { + DriverVer_type DriverVer = DRIVER_VERSION; + + if (!arg) + return -EINVAL; + + if (copy_to_user + (argp, &DriverVer, sizeof(DriverVer_type))) + return -EFAULT; + return 0; + } + + case CCISS_DEREGDISK: + case CCISS_REGNEWD: + case CCISS_REVALIDVOLS: + return rebuild_lun_table(host, 0); + + case CCISS_GETLUNINFO:{ + LogvolInfo_struct luninfo; + + luninfo.LunID = drv->LunID; + luninfo.num_opens = drv->usage_count; + luninfo.num_parts = 0; + if (copy_to_user(argp, &luninfo, + sizeof(LogvolInfo_struct))) + return -EFAULT; + return 0; + } + case CCISS_PASSTHRU: + { + IOCTL_Command_struct iocommand; + CommandList_struct *c; + char *buff = NULL; + u64bit temp64; + unsigned long flags; + DECLARE_COMPLETION_ONSTACK(wait); + + if (!arg) + return -EINVAL; + + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + if (copy_from_user + (&iocommand, argp, sizeof(IOCTL_Command_struct))) + return -EFAULT; + if ((iocommand.buf_size < 1) && + (iocommand.Request.Type.Direction != XFER_NONE)) { + return -EINVAL; + } +#if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */ + /* Check kmalloc limits */ + if (iocommand.buf_size > 128000) + return -EINVAL; +#endif + if (iocommand.buf_size > 0) { + buff = kmalloc(iocommand.buf_size, GFP_KERNEL); + if (buff == NULL) + return -EFAULT; + } + if (iocommand.Request.Type.Direction == XFER_WRITE) { + /* Copy the data into the buffer we created */ + if (copy_from_user + (buff, iocommand.buf, iocommand.buf_size)) { + kfree(buff); + return -EFAULT; + } + } else { + memset(buff, 0, iocommand.buf_size); + } + if ((c = cmd_alloc(host, 0)) == NULL) { + kfree(buff); + return -ENOMEM; + } + // Fill in the command type + c->cmd_type = CMD_IOCTL_PEND; + // Fill in Command Header + c->Header.ReplyQueue = 0; // unused in simple mode + if (iocommand.buf_size > 0) // buffer to fill + { + c->Header.SGList = 1; + c->Header.SGTotal = 1; + } else // no buffers to fill + { + c->Header.SGList = 0; + c->Header.SGTotal = 0; + } + c->Header.LUN = iocommand.LUN_info; + c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag + + // Fill in Request block + c->Request = iocommand.Request; + + // Fill in the scatter gather information + if (iocommand.buf_size > 0) { + temp64.val = pci_map_single(host->pdev, buff, + iocommand.buf_size, + PCI_DMA_BIDIRECTIONAL); + c->SG[0].Addr.lower = temp64.val32.lower; + 
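/* The controller takes the buffer's bus address as two
+			 * 32-bit halves; the u64bit union splits the
+			 * dma_addr_t from pci_map_single() accordingly. */
+			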
c->SG[0].Addr.upper = temp64.val32.upper; + c->SG[0].Len = iocommand.buf_size; + c->SG[0].Ext = 0; // we are not chaining + } + c->waiting = &wait; + + /* Put the request on the tail of the request queue */ + spin_lock_irqsave(CCISS_LOCK(ctlr), flags); + addQ(&host->reqQ, c); + host->Qdepth++; + start_io(host); + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + + wait_for_completion(&wait); + + /* unlock the buffers from DMA */ + temp64.val32.lower = c->SG[0].Addr.lower; + temp64.val32.upper = c->SG[0].Addr.upper; + pci_unmap_single(host->pdev, (dma_addr_t) temp64.val, + iocommand.buf_size, + PCI_DMA_BIDIRECTIONAL); + + /* Copy the error information out */ + iocommand.error_info = *(c->err_info); + if (copy_to_user + (argp, &iocommand, sizeof(IOCTL_Command_struct))) { + kfree(buff); + cmd_free(host, c, 0); + return -EFAULT; + } + + if (iocommand.Request.Type.Direction == XFER_READ) { + /* Copy the data out of the buffer we created */ + if (copy_to_user + (iocommand.buf, buff, iocommand.buf_size)) { + kfree(buff); + cmd_free(host, c, 0); + return -EFAULT; + } + } + kfree(buff); + cmd_free(host, c, 0); + return 0; + } + case CCISS_BIG_PASSTHRU:{ + BIG_IOCTL_Command_struct *ioc; + CommandList_struct *c; + unsigned char **buff = NULL; + int *buff_size = NULL; + u64bit temp64; + unsigned long flags; + BYTE sg_used = 0; + int status = 0; + int i; + DECLARE_COMPLETION_ONSTACK(wait); + __u32 left; + __u32 sz; + BYTE __user *data_ptr; + + if (!arg) + return -EINVAL; + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + ioc = (BIG_IOCTL_Command_struct *) + kmalloc(sizeof(*ioc), GFP_KERNEL); + if (!ioc) { + status = -ENOMEM; + goto cleanup1; + } + if (copy_from_user(ioc, argp, sizeof(*ioc))) { + status = -EFAULT; + goto cleanup1; + } + if ((ioc->buf_size < 1) && + (ioc->Request.Type.Direction != XFER_NONE)) { + status = -EINVAL; + goto cleanup1; + } + /* Check kmalloc limits using all SGs */ + if (ioc->malloc_size > MAX_KMALLOC_SIZE) { + status = -EINVAL; + goto cleanup1; + } + if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { + status = -EINVAL; + goto cleanup1; + } + buff = + kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); + if (!buff) { + status = -ENOMEM; + goto cleanup1; + } + buff_size = kmalloc(MAXSGENTRIES * sizeof(int), + GFP_KERNEL); + if (!buff_size) { + status = -ENOMEM; + goto cleanup1; + } + left = ioc->buf_size; + data_ptr = ioc->buf; + while (left) { + sz = (left > + ioc->malloc_size) ? 
ioc-> + malloc_size : left; + buff_size[sg_used] = sz; + buff[sg_used] = kmalloc(sz, GFP_KERNEL); + if (buff[sg_used] == NULL) { + status = -ENOMEM; + goto cleanup1; + } + if (ioc->Request.Type.Direction == XFER_WRITE) { + if (copy_from_user + (buff[sg_used], data_ptr, sz)) { + status = -EFAULT; + goto cleanup1; + } + } else { + memset(buff[sg_used], 0, sz); + } + left -= sz; + data_ptr += sz; + sg_used++; + } + if ((c = cmd_alloc(host, 0)) == NULL) { + status = -ENOMEM; + goto cleanup1; + } + c->cmd_type = CMD_IOCTL_PEND; + c->Header.ReplyQueue = 0; + + if (ioc->buf_size > 0) { + c->Header.SGList = sg_used; + c->Header.SGTotal = sg_used; + } else { + c->Header.SGList = 0; + c->Header.SGTotal = 0; + } + c->Header.LUN = ioc->LUN_info; + c->Header.Tag.lower = c->busaddr; + + c->Request = ioc->Request; + if (ioc->buf_size > 0) { + int i; + for (i = 0; i < sg_used; i++) { + temp64.val = + pci_map_single(host->pdev, buff[i], + buff_size[i], + PCI_DMA_BIDIRECTIONAL); + c->SG[i].Addr.lower = + temp64.val32.lower; + c->SG[i].Addr.upper = + temp64.val32.upper; + c->SG[i].Len = buff_size[i]; + c->SG[i].Ext = 0; /* we are not chaining */ + } + } + c->waiting = &wait; + /* Put the request on the tail of the request queue */ + spin_lock_irqsave(CCISS_LOCK(ctlr), flags); + addQ(&host->reqQ, c); + host->Qdepth++; + start_io(host); + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + wait_for_completion(&wait); + /* unlock the buffers from DMA */ + for (i = 0; i < sg_used; i++) { + temp64.val32.lower = c->SG[i].Addr.lower; + temp64.val32.upper = c->SG[i].Addr.upper; + pci_unmap_single(host->pdev, + (dma_addr_t) temp64.val, buff_size[i], + PCI_DMA_BIDIRECTIONAL); + } + /* Copy the error information out */ + ioc->error_info = *(c->err_info); + if (copy_to_user(argp, ioc, sizeof(*ioc))) { + cmd_free(host, c, 0); + status = -EFAULT; + goto cleanup1; + } + if (ioc->Request.Type.Direction == XFER_READ) { + /* Copy the data out of the buffer we created */ + BYTE __user *ptr = ioc->buf; + for (i = 0; i < sg_used; i++) { + if (copy_to_user + (ptr, buff[i], buff_size[i])) { + cmd_free(host, c, 0); + status = -EFAULT; + goto cleanup1; + } + ptr += buff_size[i]; + } + } + cmd_free(host, c, 0); + status = 0; + cleanup1: + if (buff) { + for (i = 0; i < sg_used; i++) + kfree(buff[i]); + kfree(buff); + } + kfree(buff_size); + kfree(ioc); + return status; + } + + /* scsi_cmd_ioctl handles these, below, though some are not */ + /* very meaningful for cciss. SG_IO is the main one people want. */ + + case SG_GET_VERSION_NUM: + case SG_SET_TIMEOUT: + case SG_GET_TIMEOUT: + case SG_GET_RESERVED_SIZE: + case SG_SET_RESERVED_SIZE: + case SG_EMULATED_HOST: + case SG_IO: + case SCSI_IOCTL_SEND_COMMAND: + return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp); + + /* scsi_cmd_ioctl would normally handle these, below, but */ + /* they aren't a good fit for cciss, as CD-ROMs are */ + /* not supported, and we don't have any bus/target/lun */ + /* which we present to the kernel. */ + + case CDROM_SEND_PACKET: + case CDROMCLOSETRAY: + case CDROMEJECT: + case SCSI_IOCTL_GET_IDLUN: + case SCSI_IOCTL_GET_BUS_NUMBER: + default: + return -ENOTTY; + } +} + +static void cciss_check_queues(ctlr_info_t *h) +{ + int start_queue = h->next_to_run; + int i; + + /* check to see if we have maxed out the number of commands that can + * be placed on the queue. If so then exit. We do this check here + * in case the interrupt we serviced was from an ioctl and did not + * free any new commands. 
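+	 * The scan below then restarts the per-drive request queues in
+	 * round-robin order starting at h->next_to_run, so that no single
+	 * logical drive can starve the others of command slots.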
+ */ + if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) + return; + + /* We have room on the queue for more commands. Now we need to queue + * them up. We will also keep track of the next queue to run so + * that every queue gets a chance to be started first. + */ + for (i = 0; i < h->highest_lun + 1; i++) { + int curr_queue = (start_queue + i) % (h->highest_lun + 1); + /* make sure the disk has been added and the drive is real + * because this can be called from the middle of init_one. + */ + if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads)) + continue; + blk_start_queue(h->gendisk[curr_queue]->queue); + + /* check to see if we have maxed out the number of commands + * that can be placed on the queue. + */ + if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) { + if (curr_queue == start_queue) { + h->next_to_run = + (start_queue + 1) % (h->highest_lun + 1); + break; + } else { + h->next_to_run = curr_queue; + break; + } + } + } +} + +static void cciss_softirq_done(struct request *rq) +{ + CommandList_struct *cmd = rq->completion_data; + ctlr_info_t *h = hba[cmd->ctlr]; + unsigned long flags; + u64bit temp64; + int i, ddir; + + if (cmd->Request.Type.Direction == XFER_READ) + ddir = PCI_DMA_FROMDEVICE; + else + ddir = PCI_DMA_TODEVICE; + + /* command did not need to be retried */ + /* unmap the DMA mapping for all the scatter gather elements */ + for (i = 0; i < cmd->Header.SGList; i++) { + temp64.val32.lower = cmd->SG[i].Addr.lower; + temp64.val32.upper = cmd->SG[i].Addr.upper; + pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir); + } + +#ifdef CCISS_DEBUG + printk("Done with %p\n", rq); +#endif /* CCISS_DEBUG */ + + if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, blk_rq_bytes(rq))) + BUG(); + + spin_lock_irqsave(&h->lock, flags); + cmd_free(h, cmd, 1); + cciss_check_queues(h); + spin_unlock_irqrestore(&h->lock, flags); +} + +/* This function gets the serial number of a logical drive via + * inquiry page 0x83. Serial no. is 16 bytes. If the serial + * number cannot be had, for whatever reason, 16 bytes of 0xff + * are returned instead. + */ +static void cciss_get_serial_no(int ctlr, int logvol, int withirq, + unsigned char *serial_no, int buflen) +{ +#define PAGE_83_INQ_BYTES 64 + int rc; + unsigned char *buf; + + if (buflen > 16) + buflen = 16; + memset(serial_no, 0xff, buflen); + buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL); + if (!buf) + return; + memset(serial_no, 0, buflen); + if (withirq) + rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf, + PAGE_83_INQ_BYTES, 1, logvol, 0x83, TYPE_CMD); + else + rc = sendcmd(CISS_INQUIRY, ctlr, buf, + PAGE_83_INQ_BYTES, 1, logvol, 0x83, NULL, TYPE_CMD); + if (rc == IO_OK) + memcpy(serial_no, &buf[8], buflen); + kfree(buf); + return; +} + +static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, + int drv_index) +{ + disk->queue = blk_init_queue(do_cciss_request, &h->lock); + sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); + disk->major = h->major; + disk->first_minor = drv_index << NWD_SHIFT; + disk->fops = &cciss_fops; + disk->private_data = &h->drv[drv_index]; + disk->driverfs_dev = &h->pdev->dev; + + /* Set up queue information */ + blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); + + /* This is a hardware imposed limit. */ + blk_queue_max_hw_segments(disk->queue, MAXSGENTRIES); + + /* This is a limit in the driver and could be eliminated. 
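Chaining
+	 * additional scatter-gather segments would be one way to lift it. 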
*/
+	blk_queue_max_phys_segments(disk->queue, MAXSGENTRIES);
+
+	blk_queue_max_sectors(disk->queue, h->cciss_max_sectors);
+
+	blk_queue_softirq_done(disk->queue, cciss_softirq_done);
+
+	disk->queue->queuedata = h;
+
+	blk_queue_hardsect_size(disk->queue,
+			h->drv[drv_index].block_size);
+
+	/* Make sure all queue data is written out before */
+	/* setting h->drv[drv_index].queue, as setting this */
+	/* allows the interrupt handler to start the queue */
+	wmb();
+	h->drv[drv_index].queue = disk->queue;
+	add_disk(disk);
+}
+
+/* This function will check the usage_count of the drive to be updated/added.
+ * If the usage_count is zero and it is a heretofore unknown drive, or
+ * the drive's capacity, geometry, or serial number has changed,
+ * then the drive information will be updated and the disk will be
+ * re-registered with the kernel.  If these conditions don't hold,
+ * then it will be left alone for the next reboot.  The exception to this
+ * is disk 0 which will always be left registered with the kernel since it
+ * is also the controller node.  Any changes to disk 0 will show up on
+ * the next reboot.
+ */
+static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
+{
+	ctlr_info_t *h = hba[ctlr];
+	struct gendisk *disk;
+	InquiryData_struct *inq_buff = NULL;
+	unsigned int block_size;
+	sector_t total_size;
+	unsigned long flags = 0;
+	int ret = 0;
+	drive_info_struct *drvinfo;
+	int was_only_controller_node;
+
+	/* Get information about the disk and modify the driver structure */
+	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
+	drvinfo = kmalloc(sizeof(*drvinfo), GFP_KERNEL);
+	if (inq_buff == NULL || drvinfo == NULL)
+		goto mem_msg;
+
+	/* See if we're trying to update the "controller node";
+	 * this will happen when the first logical drive gets
+	 * created by the ACU.
+	 */
+	was_only_controller_node = (drv_index == 0 &&
+			h->drv[0].raid_level == -1);
+
+	/* testing to see if 16-byte CDBs are already being used */
+	if (h->cciss_read == CCISS_READ_16) {
+		cciss_read_capacity_16(h->ctlr, drv_index, 1,
+				&total_size, &block_size);
+
+	} else {
+		cciss_read_capacity(ctlr, drv_index, 1,
+				&total_size, &block_size);
+
+		/* if read_capacity returns all F's, this volume is >2TB */
+		/* in size, so we switch to 16-byte CDBs for all */
+		/* read/write ops */
+		if (total_size == 0xFFFFFFFFULL) {
+			cciss_read_capacity_16(ctlr, drv_index, 1,
+			&total_size, &block_size);
+			h->cciss_read = CCISS_READ_16;
+			h->cciss_write = CCISS_WRITE_16;
+		} else {
+			h->cciss_read = CCISS_READ_10;
+			h->cciss_write = CCISS_WRITE_10;
+		}
+	}
+
+	cciss_geometry_inquiry(ctlr, drv_index, 1, total_size, block_size,
+			       inq_buff, drvinfo);
+	drvinfo->block_size = block_size;
+	drvinfo->nr_blocks = total_size + 1;
+
+	cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
+			sizeof(drvinfo->serial_no));
+
+	/* Is it the same disk we already know, and nothing's changed? */
+	if (h->drv[drv_index].raid_level != -1 &&
+		((memcmp(drvinfo->serial_no,
+				h->drv[drv_index].serial_no, 16) == 0) &&
+		drvinfo->block_size == h->drv[drv_index].block_size &&
+		drvinfo->nr_blocks == h->drv[drv_index].nr_blocks &&
+		drvinfo->heads == h->drv[drv_index].heads &&
+		drvinfo->sectors == h->drv[drv_index].sectors &&
+		drvinfo->cylinders == h->drv[drv_index].cylinders))
+		/* The disk is unchanged, nothing to update */
+		goto freeret;
+
+	/* If we get here it's not the same disk, or something's changed,
+	 * so we need to deregister it, and re-register it, if it's not
+	 * in use.
+	 * If the disk already exists then deregister it before proceeding
+	 * (unless it's the first disk, for the controller node).
+	 */
+	if (h->drv[drv_index].raid_level != -1 && drv_index != 0) {
+		printk(KERN_WARNING "disk %d has changed.\n", drv_index);
+		spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+		h->drv[drv_index].busy_configuring = 1;
+		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+
+		/* deregister_disk sets h->drv[drv_index].queue = NULL
+		 * which keeps the interrupt handler from starting
+		 * the queue.
+		 */
+		ret = deregister_disk(h->gendisk[drv_index],
+				      &h->drv[drv_index], 0);
+		h->drv[drv_index].busy_configuring = 0;
+	}
+
+	/* If the disk is in use return */
+	if (ret)
+		goto freeret;
+
+	/* Save the new information from cciss_geometry_inquiry
+	 * and serial number inquiry.
+	 */
+	h->drv[drv_index].block_size = drvinfo->block_size;
+	h->drv[drv_index].nr_blocks = drvinfo->nr_blocks;
+	h->drv[drv_index].heads = drvinfo->heads;
+	h->drv[drv_index].sectors = drvinfo->sectors;
+	h->drv[drv_index].cylinders = drvinfo->cylinders;
+	h->drv[drv_index].raid_level = drvinfo->raid_level;
+	memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16);
+
+	++h->num_luns;
+	disk = h->gendisk[drv_index];
+	set_capacity(disk, h->drv[drv_index].nr_blocks);
+
+	/* If it's not disk 0 (drv_index != 0)
+	 * or if it was disk 0, but there was previously
+	 * no actual corresponding configured logical drive
+	 * (raid_level == -1) then we want to update the
+	 * logical drive's information.
+	 */
+	if (drv_index || first_time)
+		cciss_add_disk(h, disk, drv_index);
+
+freeret:
+	kfree(inq_buff);
+	kfree(drvinfo);
+	return;
+mem_msg:
+	printk(KERN_ERR "cciss: out of memory\n");
+	goto freeret;
+}
+
+/* This function will find the first index of the controller's drive array
+ * that has a -1 for the raid_level and will return that index.  This is
+ * where new drives will be added.  If the index to be returned is greater
+ * than the highest_lun index for the controller then highest_lun is set
+ * to this new index.  If there are no available indexes then -1 is returned.
+ * "controller_node" is used to know if this is a real logical drive, or just
+ * the controller node, which determines if this counts towards highest_lun.
+ */
+static int cciss_find_free_drive_index(int ctlr, int controller_node)
+{
+	int i;
+
+	for (i = 0; i < CISS_MAX_LUN; i++) {
+		if (hba[ctlr]->drv[i].raid_level == -1) {
+			if (i > hba[ctlr]->highest_lun)
+				if (!controller_node)
+					hba[ctlr]->highest_lun = i;
+			return i;
+		}
+	}
+	return -1;
+}
+
+/* cciss_add_gendisk finds a free hba[]->drv structure
+ * and allocates a gendisk if needed, and sets the lunid
+ * in the drvinfo structure.  It returns the index into
+ * the ->drv[] array, or -1 if none are free.
+ * controller_node indicates whether highest_lun should
+ * count this disk, or if it's only being added to provide
+ * a means to talk to the controller in case no logical
+ * drives have yet been configured.
+ */
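+
+/*
+ * A minimal sketch (illustrative only) of how the helpers below chain
+ * together for a logical drive id 'lunid' reported by the controller:
+ *
+ *	drv_index = cciss_add_gendisk(h, lunid, 0);
+ *	if (drv_index != -1)
+ *		cciss_update_drive_info(h->ctlr, drv_index, 0);
+ *
+ * rebuild_lun_table() below follows exactly this sequence for each
+ * LUN it has not seen before.
+ */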
+static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
+{
+	int drv_index;
+
+	drv_index = cciss_find_free_drive_index(h->ctlr, controller_node);
+	if (drv_index == -1)
+		return -1;
+	/* Check if the gendisk needs to be allocated */
+	if (!h->gendisk[drv_index]) {
+		h->gendisk[drv_index] =
+			alloc_disk(1 << NWD_SHIFT);
+		if (!h->gendisk[drv_index]) {
+			printk(KERN_ERR "cciss%d: could not "
+				"allocate a new disk %d\n",
+				h->ctlr, drv_index);
+			return -1;
+		}
+	}
+	h->drv[drv_index].LunID = lunid;
+
+	/* Don't need to mark this busy because nobody */
+	/* else knows about this disk yet to contend */
+	/* for access to it. */
+	h->drv[drv_index].busy_configuring = 0;
+	wmb();
+	return drv_index;
+}
+
+/* This is for the special case of a controller which
+ * has no logical drives.  In this case, we still need
+ * to register a disk so the controller can be accessed
+ * by the Array Config Utility.
+ */
+static void cciss_add_controller_node(ctlr_info_t *h)
+{
+	struct gendisk *disk;
+	int drv_index;
+
+	if (h->gendisk[0] != NULL)	/* already did this?  Then bail. */
+		return;
+
+	drv_index = cciss_add_gendisk(h, 0, 1);
+	if (drv_index == -1) {
+		printk(KERN_WARNING "cciss%d: could not "
+			"add disk 0.\n", h->ctlr);
+		return;
+	}
+	h->drv[drv_index].block_size = 512;
+	h->drv[drv_index].nr_blocks = 0;
+	h->drv[drv_index].heads = 0;
+	h->drv[drv_index].sectors = 0;
+	h->drv[drv_index].cylinders = 0;
+	h->drv[drv_index].raid_level = -1;
+	memset(h->drv[drv_index].serial_no, 0, 16);
+	disk = h->gendisk[drv_index];
+	cciss_add_disk(h, disk, drv_index);
+}
+
+/* This function will add and remove logical drives from the Logical
+ * drive array of the controller and maintain persistence of ordering
+ * so that mount points are preserved until the next reboot.  This allows
+ * for the removal of logical drives in the middle of the drive array
+ * without a re-ordering of those drives.
+ * INPUT + * h = The controller to perform the operations on + */ +static int rebuild_lun_table(ctlr_info_t *h, int first_time) +{ + int ctlr = h->ctlr; + int num_luns; + ReportLunData_struct *ld_buff = NULL; + int return_code; + int listlength = 0; + int i; + int drv_found; + int drv_index = 0; + __u32 lunid = 0; + unsigned long flags; + + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + /* Set busy_configuring flag for this operation */ + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); + if (h->busy_configuring) { + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + return -EBUSY; + } + h->busy_configuring = 1; + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + + ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL); + if (ld_buff == NULL) + goto mem_msg; + + return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff, + sizeof(ReportLunData_struct), 0, + 0, 0, TYPE_CMD); + + if (return_code == IO_OK) + listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength); + else { /* reading number of logical volumes failed */ + printk(KERN_WARNING "cciss: report logical volume" + " command failed\n"); + listlength = 0; + goto freeret; + } + + num_luns = listlength / 8; /* 8 bytes per entry */ + if (num_luns > CISS_MAX_LUN) { + num_luns = CISS_MAX_LUN; + printk(KERN_WARNING "cciss: more luns configured" + " on controller than can be handled by" + " this driver.\n"); + } + + if (num_luns == 0) + cciss_add_controller_node(h); + + /* Compare controller drive array to driver's drive array + * to see if any drives are missing on the controller due + * to action of Array Config Utility (user deletes drive) + * and deregister logical drives which have disappeared. + */ + for (i = 0; i <= h->highest_lun; i++) { + int j; + drv_found = 0; + + /* skip holes in the array from already deleted drives */ + if (h->drv[i].raid_level == -1) + continue; + + for (j = 0; j < num_luns; j++) { + memcpy(&lunid, &ld_buff->LUN[j][0], 4); + lunid = le32_to_cpu(lunid); + if (h->drv[i].LunID == lunid) { + drv_found = 1; + break; + } + } + if (!drv_found) { + /* Deregister it from the OS, it's gone. */ + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); + h->drv[i].busy_configuring = 1; + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + return_code = deregister_disk(h->gendisk[i], + &h->drv[i], 1); + h->drv[i].busy_configuring = 0; + } + } + + /* Compare controller drive array to driver's drive array. + * Check for updates in the drive information and any new drives + * on the controller due to ACU adding logical drives, or changing + * a logical drive's size, etc. Reregister any new/changed drives + */ + for (i = 0; i < num_luns; i++) { + int j; + + drv_found = 0; + + memcpy(&lunid, &ld_buff->LUN[i][0], 4); + lunid = le32_to_cpu(lunid); + + /* Find if the LUN is already in the drive array + * of the driver. If so then update its info + * if not in use. If it does not exist then find + * the first free index and add it. 
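+	 * Matching is done on the 32-bit LunID; slots whose raid_level
+	 * is still -1 are holes left by deleted drives and are skipped.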
+ */
+		for (j = 0; j <= h->highest_lun; j++) {
+			if (h->drv[j].raid_level != -1 &&
+				h->drv[j].LunID == lunid) {
+				drv_index = j;
+				drv_found = 1;
+				break;
+			}
+		}
+
+		/* check if the drive was found already in the array */
+		if (!drv_found) {
+			drv_index = cciss_add_gendisk(h, lunid, 0);
+			if (drv_index == -1)
+				goto freeret;
+		}
+		cciss_update_drive_info(ctlr, drv_index, first_time);
+	}		/* end for */
+
+freeret:
+	kfree(ld_buff);
+	h->busy_configuring = 0;
+	/* We return -1 here to tell the ACU that we have registered/updated
+	 * all of the drives that we can and to keep it from calling us
+	 * additional times.
+	 */
+	return -1;
+mem_msg:
+	printk(KERN_ERR "cciss: out of memory\n");
+	h->busy_configuring = 0;
+	goto freeret;
+}
+
+/* This function will deregister the disk and its queue from the
+ * kernel.  It must be called with the controller lock held and the
+ * drv structure's busy_configuring flag set.  Its parameters are:
+ *
+ * disk = This is the disk to be deregistered
+ * drv  = This is the drive_info_struct associated with the disk to be
+ *        deregistered.  It contains information about the disk used
+ *        by the driver.
+ * clear_all = This flag determines whether or not the disk information
+ *             is going to be completely cleared out and the highest_lun
+ *             reset.  Sometimes we want to clear out information about
+ *             the disk in preparation for re-adding it.  In this case
+ *             the highest_lun should be left unchanged and the LunID
+ *             should not be cleared.
+ */
+static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
+			   int clear_all)
+{
+	int i;
+	ctlr_info_t *h = get_host(disk);
+
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	/* make sure logical volume is NOT in use */
+	if (clear_all || (h->gendisk[0] == disk)) {
+		if (drv->usage_count > 1)
+			return -EBUSY;
+	} else if (drv->usage_count > 0)
+		return -EBUSY;
+
+	/* invalidate the devices and deregister the disk.  If it is disk
+	 * zero do not deregister it but just zero out its values.  This
+	 * allows us to delete disk zero but keep the controller registered.
+	 */
+	if (h->gendisk[0] != disk) {
+		struct request_queue *q = disk->queue;
+		if (disk->flags & GENHD_FL_UP)
+			del_gendisk(disk);
+		if (q) {
+			blk_cleanup_queue(q);
+			/* Set drv->queue to NULL so that we do not try
+			 * to call blk_start_queue on this queue in the
+			 * interrupt handler
+			 */
+			drv->queue = NULL;
+		}
+		/* If clear_all is set then we are deleting the logical
+		 * drive, not just refreshing its info.  For drives
+		 * other than disk 0 we will call put_disk.  We do not
+		 * do this for disk 0 as we need it to be able to
+		 * configure the controller.
+		 */
+		if (clear_all) {
+			/* This isn't pretty, but we need to find the
+			 * disk in our array and NULL out the pointer.
+			 * This is so that we will call alloc_disk if
+			 * this index is used again later.
+			 */
+			for (i = 0; i < CISS_MAX_LUN; i++) {
+				if (h->gendisk[i] == disk) {
+					h->gendisk[i] = NULL;
+					break;
+				}
+			}
+			put_disk(disk);
+		}
+	} else {
+		set_capacity(disk, 0);
+	}
+
+	--h->num_luns;
+	/* zero out the disk size info */
+	drv->nr_blocks = 0;
+	drv->block_size = 0;
+	drv->heads = 0;
+	drv->sectors = 0;
+	drv->cylinders = 0;
+	drv->raid_level = -1;	/* This can be used as a flag variable to
+				 * indicate that this element of the drive
+				 * array is free.
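+				 * cciss_find_free_drive_index() scans for
+				 * exactly this value when handing out free
+				 * slots.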
+ */
+
+	if (clear_all) {
+		/* check to see if it was the last disk */
+		if (drv == h->drv + h->highest_lun) {
+			/* if so, find the new highest lun */
+			int i, newhighest = -1;
+			for (i = 0; i <= h->highest_lun; i++) {
+				/* if the disk has size > 0, it is available */
+				if (h->drv[i].heads)
+					newhighest = i;
+			}
+			h->highest_lun = newhighest;
+		}
+
+		drv->LunID = 0;
+	}
+	return 0;
+}
+
+static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num,	/* 0: address the controller,
+				1: address logical volume log_unit,
+				2: periph device address is scsi3addr */
+	unsigned int log_unit, __u8 page_code,
+	unsigned char *scsi3addr, int cmd_type)
+{
+	ctlr_info_t *h = hba[ctlr];
+	u64bit buff_dma_handle;
+	int status = IO_OK;
+
+	c->cmd_type = CMD_IOCTL_PEND;
+	c->Header.ReplyQueue = 0;
+	if (buff != NULL) {
+		c->Header.SGList = 1;
+		c->Header.SGTotal = 1;
+	} else {
+		c->Header.SGList = 0;
+		c->Header.SGTotal = 0;
+	}
+	c->Header.Tag.lower = c->busaddr;
+
+	c->Request.Type.Type = cmd_type;
+	if (cmd_type == TYPE_CMD) {
+		switch (cmd) {
+		case CISS_INQUIRY:
+			/* If the logical unit number is 0 then this is going
+			   to the controller, so it's a physical command:
+			   mode = 0, target = 0, and we have nothing to write.
+			   Otherwise, if use_unit_num == 1,
+			   mode = 1 (volume set addressing), target = LUNID;
+			   if use_unit_num == 2,
+			   mode = 0 (periph dev addr), target = scsi3addr */
+			if (use_unit_num == 1) {
+				c->Header.LUN.LogDev.VolId =
+					h->drv[log_unit].LunID;
+				c->Header.LUN.LogDev.Mode = 1;
+			} else if (use_unit_num == 2) {
+				memcpy(c->Header.LUN.LunAddrBytes, scsi3addr,
+				       8);
+				c->Header.LUN.LogDev.Mode = 0;
+			}
+			/* are we trying to read a vital product page? */
+			if (page_code != 0) {
+				c->Request.CDB[1] = 0x01;
+				c->Request.CDB[2] = page_code;
+			}
+			c->Request.CDBLen = 6;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_READ;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = CISS_INQUIRY;
+			c->Request.CDB[4] = size & 0xFF;
+			break;
+		case CISS_REPORT_LOG:
+		case CISS_REPORT_PHYS:
+			/* Talking to the controller, so it's a physical
+			   command: mode = 00, target = 0.  Nothing to write.
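+			   The allocation length is packed big-endian into
+			   CDB[6]..CDB[9] below, so the controller returns at
+			   most 'size' bytes.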
+ */
+			c->Request.CDBLen = 12;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_READ;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = cmd;
+			c->Request.CDB[6] = (size >> 24) & 0xFF;	/* MSB */
+			c->Request.CDB[7] = (size >> 16) & 0xFF;
+			c->Request.CDB[8] = (size >> 8) & 0xFF;
+			c->Request.CDB[9] = size & 0xFF;
+			break;
+
+		case CCISS_READ_CAPACITY:
+			c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
+			c->Header.LUN.LogDev.Mode = 1;
+			c->Request.CDBLen = 10;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_READ;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = cmd;
+			break;
+		case CCISS_READ_CAPACITY_16:
+			c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
+			c->Header.LUN.LogDev.Mode = 1;
+			c->Request.CDBLen = 16;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_READ;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = cmd;
+			c->Request.CDB[1] = 0x10;
+			c->Request.CDB[10] = (size >> 24) & 0xFF;
+			c->Request.CDB[11] = (size >> 16) & 0xFF;
+			c->Request.CDB[12] = (size >> 8) & 0xFF;
+			c->Request.CDB[13] = size & 0xFF;
+			break;
+		case CCISS_CACHE_FLUSH:
+			c->Request.CDBLen = 12;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_WRITE;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = BMIC_WRITE;
+			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+			break;
+		default:
+			printk(KERN_WARNING
+			       "cciss%d: Unknown Command 0x%02x\n", ctlr, cmd);
+			return IO_ERROR;
+		}
+	} else if (cmd_type == TYPE_MSG) {
+		switch (cmd) {
+		case 0:	/* ABORT message */
+			c->Request.CDBLen = 12;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_WRITE;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = cmd;	/* abort */
+			c->Request.CDB[1] = 0;	/* abort a command */
+			/* buff contains the tag of the command to abort */
+			memcpy(&c->Request.CDB[4], buff, 8);
+			break;
+		case 1:	/* RESET message */
+			c->Request.CDBLen = 12;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_WRITE;
+			c->Request.Timeout = 0;
+			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
+			c->Request.CDB[0] = cmd;	/* reset */
+			c->Request.CDB[1] = 0x04;	/* reset a LUN */
+			break;
+		case 3:	/* No-Op message */
+			c->Request.CDBLen = 1;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_WRITE;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = cmd;
+			break;
+		default:
+			printk(KERN_WARNING
+			       "cciss%d: unknown message type %d\n", ctlr, cmd);
+			return IO_ERROR;
+		}
+	} else {
+		printk(KERN_WARNING
+		       "cciss%d: unknown command type %d\n", ctlr, cmd_type);
+		return IO_ERROR;
+	}
+	/* Fill in the scatter gather information */
+	if (size > 0) {
+		buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
+							     buff, size,
+							     PCI_DMA_BIDIRECTIONAL);
+		c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
+		c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
+		c->SG[0].Len = size;
+		c->SG[0].Ext = 0;	/* we are not chaining */
+	}
+	return status;
+}
+
+static int sendcmd_withirq(__u8 cmd,
+			   int ctlr,
+			   void *buff,
+			   size_t size,
+			   unsigned int use_unit_num,
+			   unsigned int log_unit, __u8 page_code, int cmd_type)
+{
+	ctlr_info_t *h = hba[ctlr];
+	CommandList_struct *c;
+	u64bit buff_dma_handle;
+	unsigned long flags;
+	int return_status;
+	DECLARE_COMPLETION_ONSTACK(wait);
+
+	if ((c = cmd_alloc(h, 0)) == NULL)
+		return -ENOMEM;
+	return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
+				 log_unit, page_code, NULL, cmd_type);
+	if (return_status !=
IO_OK) { + cmd_free(h, c, 0); + return return_status; + } + resend_cmd2: + c->waiting = &wait; + + /* Put the request on the tail of the queue and send it */ + spin_lock_irqsave(CCISS_LOCK(ctlr), flags); + addQ(&h->reqQ, c); + h->Qdepth++; + start_io(h); + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + + wait_for_completion(&wait); + + if (c->err_info->CommandStatus != 0) { /* an error has occurred */ + switch (c->err_info->CommandStatus) { + case CMD_TARGET_STATUS: + printk(KERN_WARNING "cciss: cmd %p has " + " completed with errors\n", c); + if (c->err_info->ScsiStatus) { + printk(KERN_WARNING "cciss: cmd %p " + "has SCSI Status = %x\n", + c, c->err_info->ScsiStatus); + } + + break; + case CMD_DATA_UNDERRUN: + case CMD_DATA_OVERRUN: + /* expected for inquire and report lun commands */ + break; + case CMD_INVALID: + printk(KERN_WARNING "cciss: Cmd %p is " + "reported invalid\n", c); + return_status = IO_ERROR; + break; + case CMD_PROTOCOL_ERR: + printk(KERN_WARNING "cciss: cmd %p has " + "protocol error \n", c); + return_status = IO_ERROR; + break; + case CMD_HARDWARE_ERR: + printk(KERN_WARNING "cciss: cmd %p had " + " hardware error\n", c); + return_status = IO_ERROR; + break; + case CMD_CONNECTION_LOST: + printk(KERN_WARNING "cciss: cmd %p had " + "connection lost\n", c); + return_status = IO_ERROR; + break; + case CMD_ABORTED: + printk(KERN_WARNING "cciss: cmd %p was " + "aborted\n", c); + return_status = IO_ERROR; + break; + case CMD_ABORT_FAILED: + printk(KERN_WARNING "cciss: cmd %p reports " + "abort failed\n", c); + return_status = IO_ERROR; + break; + case CMD_UNSOLICITED_ABORT: + printk(KERN_WARNING + "cciss%d: unsolicited abort %p\n", ctlr, c); + if (c->retry_count < MAX_CMD_RETRIES) { + printk(KERN_WARNING + "cciss%d: retrying %p\n", ctlr, c); + c->retry_count++; + /* erase the old error information */ + memset(c->err_info, 0, + sizeof(ErrorInfo_struct)); + return_status = IO_OK; + INIT_COMPLETION(wait); + goto resend_cmd2; + } + return_status = IO_ERROR; + break; + default: + printk(KERN_WARNING "cciss: cmd %p returned " + "unknown status %x\n", c, + c->err_info->CommandStatus); + return_status = IO_ERROR; + } + } + /* unlock the buffers from DMA */ + buff_dma_handle.val32.lower = c->SG[0].Addr.lower; + buff_dma_handle.val32.upper = c->SG[0].Addr.upper; + pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val, + c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); + cmd_free(h, c, 0); + return return_status; +} + +static void cciss_geometry_inquiry(int ctlr, int logvol, + int withirq, sector_t total_size, + unsigned int block_size, + InquiryData_struct *inq_buff, + drive_info_struct *drv) +{ + int return_code; + unsigned long t; + + memset(inq_buff, 0, sizeof(InquiryData_struct)); + if (withirq) + return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, + inq_buff, sizeof(*inq_buff), 1, + logvol, 0xC1, TYPE_CMD); + else + return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff, + sizeof(*inq_buff), 1, logvol, 0xC1, NULL, + TYPE_CMD); + if (return_code == IO_OK) { + if (inq_buff->data_byte[8] == 0xFF) { + printk(KERN_WARNING + "cciss: reading geometry failed, volume " + "does not support reading geometry\n"); + drv->heads = 255; + drv->sectors = 32; // Sectors per track + drv->cylinders = total_size + 1; + drv->raid_level = RAID_UNKNOWN; + } else { + drv->heads = inq_buff->data_byte[6]; + drv->sectors = inq_buff->data_byte[7]; + drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8; + drv->cylinders += inq_buff->data_byte[5]; + drv->raid_level = inq_buff->data_byte[8]; + } + drv->block_size = 
block_size; + drv->nr_blocks = total_size + 1; + t = drv->heads * drv->sectors; + if (t > 1) { + sector_t real_size = total_size + 1; + unsigned long rem = sector_div(real_size, t); + if (rem) + real_size++; + drv->cylinders = real_size; + } + } else { /* Get geometry failed */ + printk(KERN_WARNING "cciss: reading geometry failed\n"); + } + printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n", + drv->heads, drv->sectors, drv->cylinders); +} + +static void +cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size, + unsigned int *block_size) +{ + ReadCapdata_struct *buf; + int return_code; + + buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL); + if (!buf) { + printk(KERN_WARNING "cciss: out of memory\n"); + return; + } + + if (withirq) + return_code = sendcmd_withirq(CCISS_READ_CAPACITY, + ctlr, buf, sizeof(ReadCapdata_struct), + 1, logvol, 0, TYPE_CMD); + else + return_code = sendcmd(CCISS_READ_CAPACITY, + ctlr, buf, sizeof(ReadCapdata_struct), + 1, logvol, 0, NULL, TYPE_CMD); + if (return_code == IO_OK) { + *total_size = be32_to_cpu(*(__be32 *) buf->total_size); + *block_size = be32_to_cpu(*(__be32 *) buf->block_size); + } else { /* read capacity command failed */ + printk(KERN_WARNING "cciss: read capacity failed\n"); + *total_size = 0; + *block_size = BLOCK_SIZE; + } + if (*total_size != 0) + printk(KERN_INFO " blocks= %llu block_size= %d\n", + (unsigned long long)*total_size+1, *block_size); + kfree(buf); +} + +static void +cciss_read_capacity_16(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size) +{ + ReadCapdata_struct_16 *buf; + int return_code; + + buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL); + if (!buf) { + printk(KERN_WARNING "cciss: out of memory\n"); + return; + } + + if (withirq) { + return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16, + ctlr, buf, sizeof(ReadCapdata_struct_16), + 1, logvol, 0, TYPE_CMD); + } + else { + return_code = sendcmd(CCISS_READ_CAPACITY_16, + ctlr, buf, sizeof(ReadCapdata_struct_16), + 1, logvol, 0, NULL, TYPE_CMD); + } + if (return_code == IO_OK) { + *total_size = be64_to_cpu(*(__be64 *) buf->total_size); + *block_size = be32_to_cpu(*(__be32 *) buf->block_size); + } else { /* read capacity command failed */ + printk(KERN_WARNING "cciss: read capacity failed\n"); + *total_size = 0; + *block_size = BLOCK_SIZE; + } + printk(KERN_INFO " blocks= %llu block_size= %d\n", + (unsigned long long)*total_size+1, *block_size); + kfree(buf); +} + +static int cciss_revalidate(struct gendisk *disk) +{ + ctlr_info_t *h = get_host(disk); + drive_info_struct *drv = get_drv(disk); + int logvol; + int FOUND = 0; + unsigned int block_size; + sector_t total_size; + InquiryData_struct *inq_buff = NULL; + + for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { + if (h->drv[logvol].LunID == drv->LunID) { + FOUND = 1; + break; + } + } + + if (!FOUND) + return 1; + + inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); + if (inq_buff == NULL) { + printk(KERN_WARNING "cciss: out of memory\n"); + return 1; + } + if (h->cciss_read == CCISS_READ_10) { + cciss_read_capacity(h->ctlr, logvol, 1, + &total_size, &block_size); + } else { + cciss_read_capacity_16(h->ctlr, logvol, 1, + &total_size, &block_size); + } + cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, + inq_buff, drv); + + blk_queue_hardsect_size(drv->queue, drv->block_size); + set_capacity(disk, drv->nr_blocks); + + kfree(inq_buff); + return 0; +} + +/* + * Wait polling for a command to complete. 
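+ * Returns the tag of the completed command, or 1 on timeout.
+ *
+ * (Illustrative aside, not driver code: 1 is assumed safe as a timeout
+ * sentinel because a real completion is either the dma bus address of
+ * an aligned CommandList_struct, possibly with CISS_ERROR_BIT (0x02)
+ * set, or a direct-lookup value built as
+ *
+ *	tag = (c->cmdindex << 3) | 0x04;
+ *
+ * so bit 0 of a genuine completion is never set.)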
+ * The memory mapped FIFO is polled for the completion. + * Used only at init time, interrupts from the HBA are disabled. + */ +static unsigned long pollcomplete(int ctlr) +{ + unsigned long done; + int i; + + /* Wait (up to 20 seconds) for a command to complete */ + + for (i = 20 * HZ; i > 0; i--) { + done = hba[ctlr]->access.command_completed(hba[ctlr]); + if (done == FIFO_EMPTY) + schedule_timeout_uninterruptible(1); + else + return done; + } + /* Invalid address to tell caller we ran out of time */ + return 1; +} + +static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete) +{ + /* We get in here if sendcmd() is polling for completions + and gets some command back that it wasn't expecting -- + something other than that which it just sent down. + Ordinarily, that shouldn't happen, but it can happen when + the scsi tape stuff gets into error handling mode, and + starts using sendcmd() to try to abort commands and + reset tape drives. In that case, sendcmd may pick up + completions of commands that were sent to logical drives + through the block i/o system, or cciss ioctls completing, etc. + In that case, we need to save those completions for later + processing by the interrupt handler. + */ + +#ifdef CONFIG_CISS_SCSI_TAPE + struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects; + + /* If it's not the scsi tape stuff doing error handling, (abort */ + /* or reset) then we don't expect anything weird. */ + if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) { +#endif + printk(KERN_WARNING "cciss cciss%d: SendCmd " + "Invalid command list address returned! (%lx)\n", + ctlr, complete); + /* not much we can do. */ +#ifdef CONFIG_CISS_SCSI_TAPE + return 1; + } + + /* We've sent down an abort or reset, but something else + has completed */ + if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) { + /* Uh oh. No room to save it for later... */ + printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, " + "reject list overflow, command lost!\n", ctlr); + return 1; + } + /* Save it for later */ + srl->complete[srl->ncompletions] = complete; + srl->ncompletions++; +#endif + return 0; +} + +/* + * Send a command to the controller, and wait for it to complete. + * Only used at init time. + */ +static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller, + 1: address logical volume log_unit, + 2: periph device address is scsi3addr */ + unsigned int log_unit, + __u8 page_code, unsigned char *scsi3addr, int cmd_type) +{ + CommandList_struct *c; + int i; + unsigned long complete; + ctlr_info_t *info_p = hba[ctlr]; + u64bit buff_dma_handle; + int status, done = 0; + + if ((c = cmd_alloc(info_p, 1)) == NULL) { + printk(KERN_WARNING "cciss: unable to get memory"); + return IO_ERROR; + } + status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num, + log_unit, page_code, scsi3addr, cmd_type); + if (status != IO_OK) { + cmd_free(info_p, c, 1); + return status; + } + resend_cmd1: + /* + * Disable interrupt + */ +#ifdef CCISS_DEBUG + printk(KERN_DEBUG "cciss: turning intr off\n"); +#endif /* CCISS_DEBUG */ + info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF); + + /* Make sure there is room in the command FIFO */ + /* Actually it should be completely empty at this time */ + /* unless we are in here doing error handling for the scsi */ + /* tape side of the driver. 
+	 */
+	for (i = 200000; i > 0; i--) {
+		/* if fifo isn't full go */
+		if (!(info_p->access.fifo_full(info_p))) {
+
+			break;
+		}
+		udelay(10);
+		printk(KERN_WARNING "cciss%d: SendCmd FIFO full,"
+		       " waiting!\n", ctlr);
+	}
+	/*
+	 * Send the cmd
+	 */
+	info_p->access.submit_command(info_p, c);
+	done = 0;
+	do {
+		complete = pollcomplete(ctlr);
+
+#ifdef CCISS_DEBUG
+		printk(KERN_DEBUG "cciss: command completed\n");
+#endif				/* CCISS_DEBUG */
+
+		if (complete == 1) {
+			printk(KERN_WARNING
+			       "cciss%d: SendCmd timed out, "
+			       "no command list address returned!\n", ctlr);
+			status = IO_ERROR;
+			done = 1;
+			break;
+		}
+
+		/* This will need to change for direct lookup completions */
+		if ((complete & CISS_ERROR_BIT)
+		    && (complete & ~CISS_ERROR_BIT) == c->busaddr) {
+			/* if data overrun or underrun on Report command
+			   ignore it
+			 */
+			if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
+			     (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
+			     (c->Request.CDB[0] == CISS_INQUIRY)) &&
+			    ((c->err_info->CommandStatus ==
+			      CMD_DATA_OVERRUN) ||
+			     (c->err_info->CommandStatus == CMD_DATA_UNDERRUN)
+			    )) {
+				complete = c->busaddr;
+			} else {
+				if (c->err_info->CommandStatus ==
+				    CMD_UNSOLICITED_ABORT) {
+					printk(KERN_WARNING "cciss%d: "
+					       "unsolicited abort %p\n",
+					       ctlr, c);
+					if (c->retry_count < MAX_CMD_RETRIES) {
+						printk(KERN_WARNING
+						       "cciss%d: retrying %p\n",
+						       ctlr, c);
+						c->retry_count++;
+						/* erase the old error */
+						/* information */
+						memset(c->err_info, 0,
+						       sizeof
+						       (ErrorInfo_struct));
+						goto resend_cmd1;
+					} else {
+						printk(KERN_WARNING
+						       "cciss%d: retried %p too "
+						       "many times\n", ctlr, c);
+						status = IO_ERROR;
+						goto cleanup1;
+					}
+				} else if (c->err_info->CommandStatus ==
+					   CMD_UNABORTABLE) {
+					printk(KERN_WARNING
+					       "cciss%d: command could not be aborted.\n",
+					       ctlr);
+					status = IO_ERROR;
+					goto cleanup1;
+				}
+				printk(KERN_WARNING "cciss%d: sendcmd"
+				       " error %x\n", ctlr,
+				       c->err_info->CommandStatus);
+				printk(KERN_WARNING "cciss%d: sendcmd"
+				       " offensive info\n"
+				       "  size %x\n   num %x   value %x\n",
+				       ctlr,
+				       c->err_info->MoreErrInfo.Invalid_Cmd.
+				       offense_size,
+				       c->err_info->MoreErrInfo.Invalid_Cmd.
+				       offense_num,
+				       c->err_info->MoreErrInfo.Invalid_Cmd.
+				       offense_value);
+				status = IO_ERROR;
+				goto cleanup1;
+			}
+		}
+		/* This will need changing for direct lookup completions */
+		if (complete != c->busaddr) {
+			if (add_sendcmd_reject(cmd, ctlr, complete) != 0) {
+				BUG();	/* we are pretty much hosed if we get here. */
+			}
+			continue;
+		} else
+			done = 1;
+	} while (!done);
+
+      cleanup1:
+	/* unlock the data buffer from DMA */
+	buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
+	buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
+	pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
+			 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
+#ifdef CONFIG_CISS_SCSI_TAPE
+	/* if we saved some commands for later, process them now. */
+	if (info_p->scsi_rejects.ncompletions > 0)
+		do_cciss_intr(0, info_p);
+#endif
+	cmd_free(info_p, c, 1);
+	return status;
+}
+
+/*
+ * Map (physical) PCI mem into (virtual) kernel space
+ */
+static void __iomem *remap_pci_mem(ulong base, ulong size)
+{
+	ulong page_base = ((ulong) base) & PAGE_MASK;
+	ulong page_offs = ((ulong) base) - page_base;
+	void __iomem *page_remapped = ioremap(page_base, page_offs + size);
+
+	return page_remapped ? (page_remapped + page_offs) : NULL;
+}
+
+/*
+ * Takes jobs off the request Q and sends them to the hardware, then
+ * puts them on the Q to wait for completion.
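+ *
+ * A sketch of the overall command flow, assuming the addQ()/removeQ()
+ * list helpers used throughout this file:
+ *
+ *	addQ(&h->reqQ, c);       callers queue work
+ *	start_io(h);             reqQ -> controller FIFO, then onto cmpQ
+ *	do_cciss_intr(...);      completions are taken back off cmpQ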
+ */ +static void start_io(ctlr_info_t *h) +{ + CommandList_struct *c; + + while ((c = h->reqQ) != NULL) { + /* can't do anything if fifo is full */ + if ((h->access.fifo_full(h))) { + printk(KERN_WARNING "cciss: fifo full\n"); + break; + } + + /* Get the first entry from the Request Q */ + removeQ(&(h->reqQ), c); + h->Qdepth--; + + /* Tell the controller execute command */ + h->access.submit_command(h, c); + + /* Put job onto the completed Q */ + addQ(&(h->cmpQ), c); + } +} + +/* Assumes that CCISS_LOCK(h->ctlr) is held. */ +/* Zeros out the error record and then resends the command back */ +/* to the controller */ +static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c) +{ + /* erase the old error information */ + memset(c->err_info, 0, sizeof(ErrorInfo_struct)); + + /* add it to software queue and then send it to the controller */ + addQ(&(h->reqQ), c); + h->Qdepth++; + if (h->Qdepth > h->maxQsinceinit) + h->maxQsinceinit = h->Qdepth; + + start_io(h); +} + +static inline unsigned int make_status_bytes(unsigned int scsi_status_byte, + unsigned int msg_byte, unsigned int host_byte, + unsigned int driver_byte) +{ + /* inverse of macros in scsi.h */ + return (scsi_status_byte & 0xff) | + ((msg_byte & 0xff) << 8) | + ((host_byte & 0xff) << 16) | + ((driver_byte & 0xff) << 24); +} + +static inline int evaluate_target_status(CommandList_struct *cmd) +{ + unsigned char sense_key; + unsigned char status_byte, msg_byte, host_byte, driver_byte; + int error_value; + + /* If we get in here, it means we got "target status", that is, scsi status */ + status_byte = cmd->err_info->ScsiStatus; + driver_byte = DRIVER_OK; + msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */ + + if (blk_pc_request(cmd->rq)) + host_byte = DID_PASSTHROUGH; + else + host_byte = DID_OK; + + error_value = make_status_bytes(status_byte, msg_byte, + host_byte, driver_byte); + + if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) { + if (!blk_pc_request(cmd->rq)) + printk(KERN_WARNING "cciss: cmd %p " + "has SCSI Status 0x%x\n", + cmd, cmd->err_info->ScsiStatus); + return error_value; + } + + /* check the sense key */ + sense_key = 0xf & cmd->err_info->SenseInfo[2]; + /* no status or recovered error */ + if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq)) + error_value = 0; + + if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */ + if (error_value != 0) + printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION" + " sense key = 0x%x\n", cmd, sense_key); + return error_value; + } + + /* SG_IO or similar, copy sense data back */ + if (cmd->rq->sense) { + if (cmd->rq->sense_len > cmd->err_info->SenseLen) + cmd->rq->sense_len = cmd->err_info->SenseLen; + memcpy(cmd->rq->sense, cmd->err_info->SenseInfo, + cmd->rq->sense_len); + } else + cmd->rq->sense_len = 0; + + return error_value; +} + +/* checks the status of the job and calls complete buffers to mark all + * buffers for the completed job. Note that this function does not need + * to hold the hba/queue lock. 
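+ *
+ * (Worked example of the make_status_bytes() packing above, for
+ * illustration: a check condition reported as a host error packs as
+ *
+ *	make_status_bytes(SAM_STAT_CHECK_CONDITION, 0, DID_ERROR, 0)
+ *	    == 0x02 | (0x00 << 8) | (0x07 << 16) | (0x00 << 24)
+ *	    == 0x00070002
+ *
+ * i.e. SCSI status 0x02 in the low byte, DID_ERROR in the host byte.)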
+ */ +static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, + int timeout) +{ + int retry_cmd = 0; + struct request *rq = cmd->rq; + + rq->errors = 0; + + if (timeout) + rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT); + + if (cmd->err_info->CommandStatus == 0) /* no error has occurred */ + goto after_error_processing; + + switch (cmd->err_info->CommandStatus) { + case CMD_TARGET_STATUS: + rq->errors = evaluate_target_status(cmd); + break; + case CMD_DATA_UNDERRUN: + if (blk_fs_request(cmd->rq)) { + printk(KERN_WARNING "cciss: cmd %p has" + " completed with data underrun " + "reported\n", cmd); + cmd->rq->data_len = cmd->err_info->ResidualCnt; + } + break; + case CMD_DATA_OVERRUN: + if (blk_fs_request(cmd->rq)) + printk(KERN_WARNING "cciss: cmd %p has" + " completed with data overrun " + "reported\n", cmd); + break; + case CMD_INVALID: + printk(KERN_WARNING "cciss: cmd %p is " + "reported invalid\n", cmd); + rq->errors = make_status_bytes(SAM_STAT_GOOD, + cmd->err_info->CommandStatus, DRIVER_OK, + blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); + break; + case CMD_PROTOCOL_ERR: + printk(KERN_WARNING "cciss: cmd %p has " + "protocol error \n", cmd); + rq->errors = make_status_bytes(SAM_STAT_GOOD, + cmd->err_info->CommandStatus, DRIVER_OK, + blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); + break; + case CMD_HARDWARE_ERR: + printk(KERN_WARNING "cciss: cmd %p had " + " hardware error\n", cmd); + rq->errors = make_status_bytes(SAM_STAT_GOOD, + cmd->err_info->CommandStatus, DRIVER_OK, + blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); + break; + case CMD_CONNECTION_LOST: + printk(KERN_WARNING "cciss: cmd %p had " + "connection lost\n", cmd); + rq->errors = make_status_bytes(SAM_STAT_GOOD, + cmd->err_info->CommandStatus, DRIVER_OK, + blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); + break; + case CMD_ABORTED: + printk(KERN_WARNING "cciss: cmd %p was " + "aborted\n", cmd); + rq->errors = make_status_bytes(SAM_STAT_GOOD, + cmd->err_info->CommandStatus, DRIVER_OK, + blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT); + break; + case CMD_ABORT_FAILED: + printk(KERN_WARNING "cciss: cmd %p reports " + "abort failed\n", cmd); + rq->errors = make_status_bytes(SAM_STAT_GOOD, + cmd->err_info->CommandStatus, DRIVER_OK, + blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); + break; + case CMD_UNSOLICITED_ABORT: + printk(KERN_WARNING "cciss%d: unsolicited " + "abort %p\n", h->ctlr, cmd); + if (cmd->retry_count < MAX_CMD_RETRIES) { + retry_cmd = 1; + printk(KERN_WARNING + "cciss%d: retrying %p\n", h->ctlr, cmd); + cmd->retry_count++; + } else + printk(KERN_WARNING + "cciss%d: %p retried too " + "many times\n", h->ctlr, cmd); + rq->errors = make_status_bytes(SAM_STAT_GOOD, + cmd->err_info->CommandStatus, DRIVER_OK, + blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT); + break; + case CMD_TIMEOUT: + printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd); + rq->errors = make_status_bytes(SAM_STAT_GOOD, + cmd->err_info->CommandStatus, DRIVER_OK, + blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); + break; + default: + printk(KERN_WARNING "cciss: cmd %p returned " + "unknown status %x\n", cmd, + cmd->err_info->CommandStatus); + rq->errors = make_status_bytes(SAM_STAT_GOOD, + cmd->err_info->CommandStatus, DRIVER_OK, + blk_pc_request(cmd->rq) ? 
DID_PASSTHROUGH : DID_ERROR); + } + +after_error_processing: + + /* We need to return this command */ + if (retry_cmd) { + resend_cciss_cmd(h, cmd); + return; + } + cmd->rq->completion_data = cmd; + blk_complete_request(cmd->rq); +} + +/* + * Get a request and submit it to the controller. + */ +static void do_cciss_request(struct request_queue *q) +{ + ctlr_info_t *h = q->queuedata; + CommandList_struct *c; + sector_t start_blk; + int seg; + struct request *creq; + u64bit temp64; + struct scatterlist tmp_sg[MAXSGENTRIES]; + drive_info_struct *drv; + int i, dir; + + /* We call start_io here in case there is a command waiting on the + * queue that has not been sent. + */ + if (blk_queue_plugged(q)) + goto startio; + + queue: + creq = elv_next_request(q); + if (!creq) + goto startio; + + BUG_ON(creq->nr_phys_segments > MAXSGENTRIES); + + if ((c = cmd_alloc(h, 1)) == NULL) + goto full; + + blkdev_dequeue_request(creq); + + spin_unlock_irq(q->queue_lock); + + c->cmd_type = CMD_RWREQ; + c->rq = creq; + + /* fill in the request */ + drv = creq->rq_disk->private_data; + c->Header.ReplyQueue = 0; // unused in simple mode + /* got command from pool, so use the command block index instead */ + /* for direct lookups. */ + /* The first 2 bits are reserved for controller error reporting. */ + c->Header.Tag.lower = (c->cmdindex << 3); + c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ + c->Header.LUN.LogDev.VolId = drv->LunID; + c->Header.LUN.LogDev.Mode = 1; + c->Request.CDBLen = 10; // 12 byte commands not in FW yet; + c->Request.Type.Type = TYPE_CMD; // It is a command. + c->Request.Type.Attribute = ATTR_SIMPLE; + c->Request.Type.Direction = + (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE; + c->Request.Timeout = 0; // Don't time out + c->Request.CDB[0] = + (rq_data_dir(creq) == READ) ? 
h->cciss_read : h->cciss_write; + start_blk = creq->sector; +#ifdef CCISS_DEBUG + printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector, + (int)creq->nr_sectors); +#endif /* CCISS_DEBUG */ + + sg_init_table(tmp_sg, MAXSGENTRIES); + seg = blk_rq_map_sg(q, creq, tmp_sg); + + /* get the DMA records for the setup */ + if (c->Request.Type.Direction == XFER_READ) + dir = PCI_DMA_FROMDEVICE; + else + dir = PCI_DMA_TODEVICE; + + for (i = 0; i < seg; i++) { + c->SG[i].Len = tmp_sg[i].length; + temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]), + tmp_sg[i].offset, + tmp_sg[i].length, dir); + c->SG[i].Addr.lower = temp64.val32.lower; + c->SG[i].Addr.upper = temp64.val32.upper; + c->SG[i].Ext = 0; // we are not chaining + } + /* track how many SG entries we are using */ + if (seg > h->maxSG) + h->maxSG = seg; + +#ifdef CCISS_DEBUG + printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n", + creq->nr_sectors, seg); +#endif /* CCISS_DEBUG */ + + c->Header.SGList = c->Header.SGTotal = seg; + if (likely(blk_fs_request(creq))) { + if(h->cciss_read == CCISS_READ_10) { + c->Request.CDB[1] = 0; + c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB + c->Request.CDB[3] = (start_blk >> 16) & 0xff; + c->Request.CDB[4] = (start_blk >> 8) & 0xff; + c->Request.CDB[5] = start_blk & 0xff; + c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB + c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff; + c->Request.CDB[8] = creq->nr_sectors & 0xff; + c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0; + } else { + u32 upper32 = upper_32_bits(start_blk); + + c->Request.CDBLen = 16; + c->Request.CDB[1]= 0; + c->Request.CDB[2]= (upper32 >> 24) & 0xff; //MSB + c->Request.CDB[3]= (upper32 >> 16) & 0xff; + c->Request.CDB[4]= (upper32 >> 8) & 0xff; + c->Request.CDB[5]= upper32 & 0xff; + c->Request.CDB[6]= (start_blk >> 24) & 0xff; + c->Request.CDB[7]= (start_blk >> 16) & 0xff; + c->Request.CDB[8]= (start_blk >> 8) & 0xff; + c->Request.CDB[9]= start_blk & 0xff; + c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff; + c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff; + c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff; + c->Request.CDB[13]= creq->nr_sectors & 0xff; + c->Request.CDB[14] = c->Request.CDB[15] = 0; + } + } else if (blk_pc_request(creq)) { + c->Request.CDBLen = creq->cmd_len; + memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB); + } else { + printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type); + BUG(); + } + + spin_lock_irq(q->queue_lock); + + addQ(&(h->reqQ), c); + h->Qdepth++; + if (h->Qdepth > h->maxQsinceinit) + h->maxQsinceinit = h->Qdepth; + + goto queue; +full: + blk_stop_queue(q); +startio: + /* We will already have the driver lock here so not need + * to lock it. + */ + start_io(h); +} + +static inline unsigned long get_next_completion(ctlr_info_t *h) +{ +#ifdef CONFIG_CISS_SCSI_TAPE + /* Any rejects from sendcmd() lying around? 
Process them first */ + if (h->scsi_rejects.ncompletions == 0) + return h->access.command_completed(h); + else { + struct sendcmd_reject_list *srl; + int n; + srl = &h->scsi_rejects; + n = --srl->ncompletions; + /* printk("cciss%d: processing saved reject\n", h->ctlr); */ + printk("p"); + return srl->complete[n]; + } +#else + return h->access.command_completed(h); +#endif +} + +static inline int interrupt_pending(ctlr_info_t *h) +{ +#ifdef CONFIG_CISS_SCSI_TAPE + return (h->access.intr_pending(h) + || (h->scsi_rejects.ncompletions > 0)); +#else + return h->access.intr_pending(h); +#endif +} + +static inline long interrupt_not_for_us(ctlr_info_t *h) +{ +#ifdef CONFIG_CISS_SCSI_TAPE + return (((h->access.intr_pending(h) == 0) || + (h->interrupts_enabled == 0)) + && (h->scsi_rejects.ncompletions == 0)); +#else + return (((h->access.intr_pending(h) == 0) || + (h->interrupts_enabled == 0))); +#endif +} + +static irqreturn_t do_cciss_intr(int irq, void *dev_id) +{ + ctlr_info_t *h = dev_id; + CommandList_struct *c; + unsigned long flags; + __u32 a, a1, a2; + + if (interrupt_not_for_us(h)) + return IRQ_NONE; + /* + * If there are completed commands in the completion queue, + * we had better do something about it. + */ + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); + while (interrupt_pending(h)) { + while ((a = get_next_completion(h)) != FIFO_EMPTY) { + a1 = a; + if ((a & 0x04)) { + a2 = (a >> 3); + if (a2 >= h->nr_cmds) { + printk(KERN_WARNING + "cciss: controller cciss%d failed, stopping.\n", + h->ctlr); + fail_all_cmds(h->ctlr); + return IRQ_HANDLED; + } + + c = h->cmd_pool + a2; + a = c->busaddr; + + } else { + a &= ~3; + if ((c = h->cmpQ) == NULL) { + printk(KERN_WARNING + "cciss: Completion of %08x ignored\n", + a1); + continue; + } + while (c->busaddr != a) { + c = c->next; + if (c == h->cmpQ) + break; + } + } + /* + * If we've found the command, take it off the + * completion Q and free it + */ + if (c->busaddr == a) { + removeQ(&h->cmpQ, c); + if (c->cmd_type == CMD_RWREQ) { + complete_command(h, c, 0); + } else if (c->cmd_type == CMD_IOCTL_PEND) { + complete(c->waiting); + } +# ifdef CONFIG_CISS_SCSI_TAPE + else if (c->cmd_type == CMD_SCSI) + complete_scsi_command(c, 0, a1); +# endif + continue; + } + } + } + + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); + return IRQ_HANDLED; +} + +/* + * We cannot read the structure directly, for portability we must use + * the io functions. + * This is for debug only. 
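+ *
+ * i.e., illustratively:
+ *
+ *	u32 v = readl(&(tb->TransportSupport));	correct: io accessor
+ *	u32 v = tb->TransportSupport;		wrong: direct dereference
+ *						of ioremap()ed memory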
+ */ +#ifdef CCISS_DEBUG +static void print_cfg_table(CfgTable_struct *tb) +{ + int i; + char temp_name[17]; + + printk("Controller Configuration information\n"); + printk("------------------------------------\n"); + for (i = 0; i < 4; i++) + temp_name[i] = readb(&(tb->Signature[i])); + temp_name[4] = '\0'; + printk(" Signature = %s\n", temp_name); + printk(" Spec Number = %d\n", readl(&(tb->SpecValence))); + printk(" Transport methods supported = 0x%x\n", + readl(&(tb->TransportSupport))); + printk(" Transport methods active = 0x%x\n", + readl(&(tb->TransportActive))); + printk(" Requested transport Method = 0x%x\n", + readl(&(tb->HostWrite.TransportRequest))); + printk(" Coalesce Interrupt Delay = 0x%x\n", + readl(&(tb->HostWrite.CoalIntDelay))); + printk(" Coalesce Interrupt Count = 0x%x\n", + readl(&(tb->HostWrite.CoalIntCount))); + printk(" Max outstanding commands = 0x%d\n", + readl(&(tb->CmdsOutMax))); + printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes))); + for (i = 0; i < 16; i++) + temp_name[i] = readb(&(tb->ServerName[i])); + temp_name[16] = '\0'; + printk(" Server Name = %s\n", temp_name); + printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat))); +} +#endif /* CCISS_DEBUG */ + +static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) +{ + int i, offset, mem_type, bar_type; + if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ + return 0; + offset = 0; + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; + if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) + offset += 4; + else { + mem_type = pci_resource_flags(pdev, i) & + PCI_BASE_ADDRESS_MEM_TYPE_MASK; + switch (mem_type) { + case PCI_BASE_ADDRESS_MEM_TYPE_32: + case PCI_BASE_ADDRESS_MEM_TYPE_1M: + offset += 4; /* 32 bit */ + break; + case PCI_BASE_ADDRESS_MEM_TYPE_64: + offset += 8; + break; + default: /* reserved in PCI 2.2 */ + printk(KERN_WARNING + "Base address is invalid\n"); + return -1; + break; + } + } + if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) + return i + 1; + } + return -1; +} + +/* If MSI/MSI-X is supported by the kernel we will try to enable it on + * controllers that are capable. If not, we use IO-APIC mode. 
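+ *
+ * (The fallback logic below leans on the pci_enable_msix() return
+ * convention:
+ *
+ *	err = pci_enable_msix(pdev, entries, 4);
+ *	err == 0  ->  all 4 requested vectors granted
+ *	err > 0   ->  only `err' vectors available, fall back
+ *	err < 0   ->  hard failure, fall back
+ * )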
+ */ + +static void __devinit cciss_interrupt_mode(ctlr_info_t *c, + struct pci_dev *pdev, __u32 board_id) +{ +#ifdef CONFIG_PCI_MSI + int err; + struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1}, + {0, 2}, {0, 3} + }; + + /* Some boards advertise MSI but don't really support it */ + if ((board_id == 0x40700E11) || + (board_id == 0x40800E11) || + (board_id == 0x40820E11) || (board_id == 0x40830E11)) + goto default_int_mode; + + if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) { + err = pci_enable_msix(pdev, cciss_msix_entries, 4); + if (!err) { + c->intr[0] = cciss_msix_entries[0].vector; + c->intr[1] = cciss_msix_entries[1].vector; + c->intr[2] = cciss_msix_entries[2].vector; + c->intr[3] = cciss_msix_entries[3].vector; + c->msix_vector = 1; + return; + } + if (err > 0) { + printk(KERN_WARNING "cciss: only %d MSI-X vectors " + "available\n", err); + goto default_int_mode; + } else { + printk(KERN_WARNING "cciss: MSI-X init failed %d\n", + err); + goto default_int_mode; + } + } + if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) { + if (!pci_enable_msi(pdev)) { + c->msi_vector = 1; + } else { + printk(KERN_WARNING "cciss: MSI init failed\n"); + } + } +default_int_mode: +#endif /* CONFIG_PCI_MSI */ + /* if we get here we're going to use the default interrupt mode */ + c->intr[SIMPLE_MODE_INT] = pdev->irq; + return; +} + +static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) +{ + ushort subsystem_vendor_id, subsystem_device_id, command; + __u32 board_id, scratchpad = 0; + __u64 cfg_offset; + __u32 cfg_base_addr; + __u64 cfg_base_addr_index; + int i, err; + + /* check to see if controller has been disabled */ + /* BEFORE trying to enable it */ + (void)pci_read_config_word(pdev, PCI_COMMAND, &command); + if (!(command & 0x02)) { + printk(KERN_WARNING + "cciss: controller appears to be disabled\n"); + return -ENODEV; + } + + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR "cciss: Unable to Enable PCI device\n"); + return err; + } + + err = pci_request_regions(pdev, "cciss"); + if (err) { + printk(KERN_ERR "cciss: Cannot obtain PCI resources, " + "aborting\n"); + return err; + } + + subsystem_vendor_id = pdev->subsystem_vendor; + subsystem_device_id = pdev->subsystem_device; + board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) | + subsystem_vendor_id); + +#ifdef CCISS_DEBUG + printk("command = %x\n", command); + printk("irq = %x\n", pdev->irq); + printk("board_id = %x\n", board_id); +#endif /* CCISS_DEBUG */ + +/* If the kernel supports MSI/MSI-X we will try to enable that functionality, + * else we use the IO-APIC interrupt assigned to us by system ROM. + */ + cciss_interrupt_mode(c, pdev, board_id); + + /* + * Memory base addr is first addr , the second points to the config + * table + */ + + c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */ +#ifdef CCISS_DEBUG + printk("address 0 = %lx\n", c->paddr); +#endif /* CCISS_DEBUG */ + c->vaddr = remap_pci_mem(c->paddr, 0x250); + + /* Wait for the board to become ready. (PCI hotplug needs this.) + * We poll for up to 120 secs, once per 100ms. */ + for (i = 0; i < 1200; i++) { + scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET); + if (scratchpad == CCISS_FIRMWARE_READY) + break; + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ / 10); /* wait 100ms */ + } + if (scratchpad != CCISS_FIRMWARE_READY) { + printk(KERN_WARNING "cciss: Board not ready. 
Timed out.\n"); + err = -ENODEV; + goto err_out_free_res; + } + + /* get the address index number */ + cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET); + cfg_base_addr &= (__u32) 0x0000ffff; +#ifdef CCISS_DEBUG + printk("cfg base address = %x\n", cfg_base_addr); +#endif /* CCISS_DEBUG */ + cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr); +#ifdef CCISS_DEBUG + printk("cfg base address index = %llx\n", + (unsigned long long)cfg_base_addr_index); +#endif /* CCISS_DEBUG */ + if (cfg_base_addr_index == -1) { + printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n"); + err = -ENODEV; + goto err_out_free_res; + } + + cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET); +#ifdef CCISS_DEBUG + printk("cfg offset = %llx\n", (unsigned long long)cfg_offset); +#endif /* CCISS_DEBUG */ + c->cfgtable = remap_pci_mem(pci_resource_start(pdev, + cfg_base_addr_index) + + cfg_offset, sizeof(CfgTable_struct)); + c->board_id = board_id; + +#ifdef CCISS_DEBUG + print_cfg_table(c->cfgtable); +#endif /* CCISS_DEBUG */ + + /* Some controllers support Zero Memory Raid (ZMR). + * When configured in ZMR mode the number of supported + * commands drops to 64. So instead of just setting an + * arbitrary value we make the driver a little smarter. + * We read the config table to tell us how many commands + * are supported on the controller then subtract 4 to + * leave a little room for ioctl calls. + */ + c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); + for (i = 0; i < ARRAY_SIZE(products); i++) { + if (board_id == products[i].board_id) { + c->product_name = products[i].product_name; + c->access = *(products[i].access); + c->nr_cmds = c->max_commands - 4; + break; + } + } + if ((readb(&c->cfgtable->Signature[0]) != 'C') || + (readb(&c->cfgtable->Signature[1]) != 'I') || + (readb(&c->cfgtable->Signature[2]) != 'S') || + (readb(&c->cfgtable->Signature[3]) != 'S')) { + printk("Does not appear to be a valid CISS config table\n"); + err = -ENODEV; + goto err_out_free_res; + } + /* We didn't find the controller in our list. We know the + * signature is valid. If it's an HP device let's try to + * bind to the device and fire it up. Otherwise we bail. + */ + if (i == ARRAY_SIZE(products)) { + if (subsystem_vendor_id == PCI_VENDOR_ID_HP) { + c->product_name = products[i-1].product_name; + c->access = *(products[i-1].access); + c->nr_cmds = c->max_commands - 4; + printk(KERN_WARNING "cciss: This is an unknown " + "Smart Array controller.\n" + "cciss: Please update to the latest driver " + "available from www.hp.com.\n"); + } else { + printk(KERN_WARNING "cciss: Sorry, I don't know how" + " to access the Smart Array controller %08lx\n" + , (unsigned long)board_id); + err = -ENODEV; + goto err_out_free_res; + } + } +#ifdef CONFIG_X86 + { + /* Need to enable prefetch in the SCSI core for 6400 in x86 */ + __u32 prefetch; + prefetch = readl(&(c->cfgtable->SCSI_Prefetch)); + prefetch |= 0x100; + writel(prefetch, &(c->cfgtable->SCSI_Prefetch)); + } +#endif + + /* Disabling DMA prefetch and refetch for the P600. + * An ASIC bug may result in accesses to invalid memory addresses. + * We've disabled prefetch for some time now. Testing with XEN + * kernels revealed a bug in the refetch if dom0 resides on a P600. 
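+	 *
+	 * (The 0x3225103C test below follows the board_id packing done
+	 * earlier in cciss_pci_init():
+	 *
+	 *	board_id = (0x3225 << 16) | 0x103C;	P600
+	 *
+	 * i.e. subsystem device id 0x3225, HP subsystem vendor 0x103C.)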
+ */ + if(board_id == 0x3225103C) { + __u32 dma_prefetch; + __u32 dma_refetch; + dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG); + dma_prefetch |= 0x8000; + writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG); + pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch); + dma_refetch |= 0x1; + pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch); + } + +#ifdef CCISS_DEBUG + printk("Trying to put board into Simple mode\n"); +#endif /* CCISS_DEBUG */ + c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); + /* Update the field, and then ring the doorbell */ + writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest)); + writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL); + + /* under certain very rare conditions, this can take awhile. + * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right + * as we enter this code.) */ + for (i = 0; i < MAX_CONFIG_WAIT; i++) { + if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) + break; + /* delay and try again */ + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(10); + } + +#ifdef CCISS_DEBUG + printk(KERN_DEBUG "I counter got to %d %x\n", i, + readl(c->vaddr + SA5_DOORBELL)); +#endif /* CCISS_DEBUG */ +#ifdef CCISS_DEBUG + print_cfg_table(c->cfgtable); +#endif /* CCISS_DEBUG */ + + if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { + printk(KERN_WARNING "cciss: unable to get board into" + " simple mode\n"); + err = -ENODEV; + goto err_out_free_res; + } + return 0; + +err_out_free_res: + /* + * Deliberately omit pci_disable_device(): it does something nasty to + * Smart Array controllers that pci_enable_device does not undo + */ + pci_release_regions(pdev); + return err; +} + +/* Function to find the first free pointer into our hba[] array + * Returns -1 if no free entries are left. + */ +static int alloc_cciss_hba(void) +{ + int i; + + for (i = 0; i < MAX_CTLR; i++) { + if (!hba[i]) { + ctlr_info_t *p; + + p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); + if (!p) + goto Enomem; + hba[i] = p; + return i; + } + } + printk(KERN_WARNING "cciss: This driver supports a maximum" + " of %d controllers.\n", MAX_CTLR); + return -1; +Enomem: + printk(KERN_ERR "cciss: out of memory.\n"); + return -1; +} + +static void free_hba(int i) +{ + ctlr_info_t *p = hba[i]; + int n; + + hba[i] = NULL; + for (n = 0; n < CISS_MAX_LUN; n++) + put_disk(p->gendisk[n]); + kfree(p); +} + +/* + * This is it. Find all the controllers and register them. I really hate + * stealing all these major device numbers. + * returns the number of block devices registered. + */ +static int __devinit cciss_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int i; + int j = 0; + int rc; + int dac, return_code; + InquiryData_struct *inq_buff = NULL; + + i = alloc_cciss_hba(); + if (i < 0) + return -1; + + hba[i]->busy_initializing = 1; + + if (cciss_pci_init(hba[i], pdev) != 0) + goto clean1; + + sprintf(hba[i]->devname, "cciss%d", i); + hba[i]->ctlr = i; + hba[i]->pdev = pdev; + + /* configure PCI DMA stuff */ + if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) + dac = 1; + else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) + dac = 0; + else { + printk(KERN_ERR "cciss: no suitable DMA available\n"); + goto clean1; + } + + /* + * register with the major number, or get a dynamic major number + * by passing 0 as argument. This is done for greater than + * 8 controller support. 
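+	 *
+	 * e.g., illustratively:
+	 *
+	 *	rc = register_blkdev(0, "cciss9");	0 => dynamic major
+	 *	rc < 0	->  registration failed
+	 *	rc > 0	->  rc is the major to use (the i >= MAX_CTLR_ORIG
+	 *		    case handled below)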
+ */ + if (i < MAX_CTLR_ORIG) + hba[i]->major = COMPAQ_CISS_MAJOR + i; + rc = register_blkdev(hba[i]->major, hba[i]->devname); + if (rc == -EBUSY || rc == -EINVAL) { + printk(KERN_ERR + "cciss: Unable to get major number %d for %s " + "on hba %d\n", hba[i]->major, hba[i]->devname, i); + goto clean1; + } else { + if (i >= MAX_CTLR_ORIG) + hba[i]->major = rc; + } + + /* make sure the board interrupts are off */ + hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF); + if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr, + IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) { + printk(KERN_ERR "cciss: Unable to get irq %d for %s\n", + hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname); + goto clean2; + } + + printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", + hba[i]->devname, pdev->device, pci_name(pdev), + hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not"); + + hba[i]->cmd_pool_bits = + kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG) + * sizeof(unsigned long), GFP_KERNEL); + hba[i]->cmd_pool = (CommandList_struct *) + pci_alloc_consistent(hba[i]->pdev, + hba[i]->nr_cmds * sizeof(CommandList_struct), + &(hba[i]->cmd_pool_dhandle)); + hba[i]->errinfo_pool = (ErrorInfo_struct *) + pci_alloc_consistent(hba[i]->pdev, + hba[i]->nr_cmds * sizeof(ErrorInfo_struct), + &(hba[i]->errinfo_pool_dhandle)); + if ((hba[i]->cmd_pool_bits == NULL) + || (hba[i]->cmd_pool == NULL) + || (hba[i]->errinfo_pool == NULL)) { + printk(KERN_ERR "cciss: out of memory"); + goto clean4; + } +#ifdef CONFIG_CISS_SCSI_TAPE + hba[i]->scsi_rejects.complete = + kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) * + (hba[i]->nr_cmds + 5), GFP_KERNEL); + if (hba[i]->scsi_rejects.complete == NULL) { + printk(KERN_ERR "cciss: out of memory"); + goto clean4; + } +#endif + spin_lock_init(&hba[i]->lock); + + /* Initialize the pdev driver private data. + have it point to hba[i]. 
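+
+	   (Paired with the lookup on the remove/shutdown path, e.g.:
+
+	       ctlr_info_t *h = pci_get_drvdata(pdev);
+
+	   as done in cciss_remove_one() and cciss_shutdown().)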
*/ + pci_set_drvdata(pdev, hba[i]); + /* command and error info recs zeroed out before + they are used */ + memset(hba[i]->cmd_pool_bits, 0, + DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG) + * sizeof(unsigned long)); + + hba[i]->num_luns = 0; + hba[i]->highest_lun = -1; + for (j = 0; j < CISS_MAX_LUN; j++) { + hba[i]->drv[j].raid_level = -1; + hba[i]->drv[j].queue = NULL; + hba[i]->gendisk[j] = NULL; + } + + cciss_scsi_setup(i); + + /* Turn the interrupts on so we can service requests */ + hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON); + + /* Get the firmware version */ + inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); + if (inq_buff == NULL) { + printk(KERN_ERR "cciss: out of memory\n"); + goto clean4; + } + + return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff, + sizeof(InquiryData_struct), 0, 0 , 0, TYPE_CMD); + if (return_code == IO_OK) { + hba[i]->firm_ver[0] = inq_buff->data_byte[32]; + hba[i]->firm_ver[1] = inq_buff->data_byte[33]; + hba[i]->firm_ver[2] = inq_buff->data_byte[34]; + hba[i]->firm_ver[3] = inq_buff->data_byte[35]; + } else { /* send command failed */ + printk(KERN_WARNING "cciss: unable to determine firmware" + " version of controller\n"); + } + + cciss_procinit(i); + + hba[i]->cciss_max_sectors = 2048; + + hba[i]->busy_initializing = 0; + + rebuild_lun_table(hba[i], 1); + return 1; + +clean4: + kfree(inq_buff); +#ifdef CONFIG_CISS_SCSI_TAPE + kfree(hba[i]->scsi_rejects.complete); +#endif + kfree(hba[i]->cmd_pool_bits); + if (hba[i]->cmd_pool) + pci_free_consistent(hba[i]->pdev, + hba[i]->nr_cmds * sizeof(CommandList_struct), + hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); + if (hba[i]->errinfo_pool) + pci_free_consistent(hba[i]->pdev, + hba[i]->nr_cmds * sizeof(ErrorInfo_struct), + hba[i]->errinfo_pool, + hba[i]->errinfo_pool_dhandle); + free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]); +clean2: + unregister_blkdev(hba[i]->major, hba[i]->devname); +clean1: + hba[i]->busy_initializing = 0; + /* cleanup any queues that may have been initialized */ + for (j=0; j <= hba[i]->highest_lun; j++){ + drive_info_struct *drv = &(hba[i]->drv[j]); + if (drv->queue) + blk_cleanup_queue(drv->queue); + } + /* + * Deliberately omit pci_disable_device(): it does something nasty to + * Smart Array controllers that pci_enable_device does not undo + */ + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); + free_hba(i); + return -1; +} + +static void cciss_shutdown(struct pci_dev *pdev) +{ + ctlr_info_t *tmp_ptr; + int i; + char flush_buf[4]; + int return_code; + + tmp_ptr = pci_get_drvdata(pdev); + if (tmp_ptr == NULL) + return; + i = tmp_ptr->ctlr; + if (hba[i] == NULL) + return; + + /* Turn board interrupts off and send the flush cache command */ + /* sendcmd will turn off interrupt, and send the flush... 
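+	 * (per fill_cmd() above, this goes out as a 12-byte BMIC write:
+	 * CDB[0] = BMIC_WRITE (0x27), CDB[6] = BMIC_CACHE_FLUSH (0xc2),
+	 * direction XFER_WRITE)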
+ * To write all data in the battery backed cache to disks */ + memset(flush_buf, 0, 4); + return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL, + TYPE_CMD); + if (return_code == IO_OK) { + printk(KERN_INFO "Completed flushing cache on controller %d\n", i); + } else { + printk(KERN_WARNING "Error flushing cache on controller %d\n", i); + } + free_irq(hba[i]->intr[2], hba[i]); +} + +static void __devexit cciss_remove_one(struct pci_dev *pdev) +{ + ctlr_info_t *tmp_ptr; + int i, j; + + if (pci_get_drvdata(pdev) == NULL) { + printk(KERN_ERR "cciss: Unable to remove device \n"); + return; + } + tmp_ptr = pci_get_drvdata(pdev); + i = tmp_ptr->ctlr; + if (hba[i] == NULL) { + printk(KERN_ERR "cciss: device appears to " + "already be removed \n"); + return; + } + + remove_proc_entry(hba[i]->devname, proc_cciss); + unregister_blkdev(hba[i]->major, hba[i]->devname); + + /* remove it from the disk list */ + for (j = 0; j < CISS_MAX_LUN; j++) { + struct gendisk *disk = hba[i]->gendisk[j]; + if (disk) { + struct request_queue *q = disk->queue; + + if (disk->flags & GENHD_FL_UP) + del_gendisk(disk); + if (q) + blk_cleanup_queue(q); + } + } + +#ifdef CONFIG_CISS_SCSI_TAPE + cciss_unregister_scsi(i); /* unhook from SCSI subsystem */ +#endif + + cciss_shutdown(pdev); + +#ifdef CONFIG_PCI_MSI + if (hba[i]->msix_vector) + pci_disable_msix(hba[i]->pdev); + else if (hba[i]->msi_vector) + pci_disable_msi(hba[i]->pdev); +#endif /* CONFIG_PCI_MSI */ + + iounmap(hba[i]->vaddr); + + pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct), + hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); + pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct), + hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle); + kfree(hba[i]->cmd_pool_bits); +#ifdef CONFIG_CISS_SCSI_TAPE + kfree(hba[i]->scsi_rejects.complete); +#endif + /* + * Deliberately omit pci_disable_device(): it does something nasty to + * Smart Array controllers that pci_enable_device does not undo + */ + pci_release_regions(pdev); + pci_set_drvdata(pdev, NULL); + free_hba(i); +} + +static struct pci_driver cciss_pci_driver = { + .name = "cciss", + .probe = cciss_init_one, + .remove = __devexit_p(cciss_remove_one), + .id_table = cciss_pci_device_id, /* id_table */ + .shutdown = cciss_shutdown, +}; + +/* + * This is it. Register the PCI driver information for the cards we control + * the OS will call our registered routines when it finds one of our cards. + */ +static int __init cciss_init(void) +{ + printk(KERN_INFO DRIVER_NAME "\n"); + + /* Register for our PCI devices */ + return pci_register_driver(&cciss_pci_driver); +} + +static void __exit cciss_cleanup(void) +{ + int i; + + pci_unregister_driver(&cciss_pci_driver); + /* double check that all controller entrys have been removed */ + for (i = 0; i < MAX_CTLR; i++) { + if (hba[i] != NULL) { + printk(KERN_WARNING "cciss: had to remove" + " controller %d\n", i); + cciss_remove_one(hba[i]->pdev); + } + } + remove_proc_entry("driver/cciss", NULL); +} + +static void fail_all_cmds(unsigned long ctlr) +{ + /* If we get here, the board is apparently dead. */ + ctlr_info_t *h = hba[ctlr]; + CommandList_struct *c; + unsigned long flags; + + printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr); + h->alive = 0; /* the controller apparently died... */ + + spin_lock_irqsave(CCISS_LOCK(ctlr), flags); + + pci_disable_device(h->pdev); /* Make sure it is really dead. 
*/ + + /* move everything off the request queue onto the completed queue */ + while ((c = h->reqQ) != NULL) { + removeQ(&(h->reqQ), c); + h->Qdepth--; + addQ(&(h->cmpQ), c); + } + + /* Now, fail everything on the completed queue with a HW error */ + while ((c = h->cmpQ) != NULL) { + removeQ(&h->cmpQ, c); + c->err_info->CommandStatus = CMD_HARDWARE_ERR; + if (c->cmd_type == CMD_RWREQ) { + complete_command(h, c, 0); + } else if (c->cmd_type == CMD_IOCTL_PEND) + complete(c->waiting); +#ifdef CONFIG_CISS_SCSI_TAPE + else if (c->cmd_type == CMD_SCSI) + complete_scsi_command(c, 0, 0); +#endif + } + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + return; +} + +module_init(cciss_init); +module_exit(cciss_cleanup); diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h new file mode 100644 index 0000000..24a7efa --- /dev/null +++ b/drivers/block/cciss.h @@ -0,0 +1,294 @@ +#ifndef CCISS_H +#define CCISS_H + +#include <linux/genhd.h> + +#include "cciss_cmd.h" + + +#define NWD_SHIFT 4 +#define MAX_PART (1 << NWD_SHIFT) + +#define IO_OK 0 +#define IO_ERROR 1 + +struct ctlr_info; +typedef struct ctlr_info ctlr_info_t; + +struct access_method { + void (*submit_command)(ctlr_info_t *h, CommandList_struct *c); + void (*set_intr_mask)(ctlr_info_t *h, unsigned long val); + unsigned long (*fifo_full)(ctlr_info_t *h); + unsigned long (*intr_pending)(ctlr_info_t *h); + unsigned long (*command_completed)(ctlr_info_t *h); +}; +typedef struct _drive_info_struct +{ + __u32 LunID; + int usage_count; + struct request_queue *queue; + sector_t nr_blocks; + int block_size; + int heads; + int sectors; + int cylinders; + int raid_level; /* set to -1 to indicate that + * the drive is not in use/configured + */ + int busy_configuring; /*This is set when the drive is being removed + *to prevent it from being opened or it's queue + *from being started. + */ + __u8 serial_no[16]; /* from inquiry page 0x83, */ + /* not necc. null terminated. 
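+				   (page 0x83 is the standard SCSI Device
+				   Identification VPD page; such pages are
+				   fetched via fill_cmd()'s CISS_INQUIRY
+				   case, illustratively with CDB[1] = 0x01
+				   and CDB[2] = 0x83)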
*/ +} drive_info_struct; + +#ifdef CONFIG_CISS_SCSI_TAPE + +struct sendcmd_reject_list { + int ncompletions; + unsigned long *complete; /* array of NR_CMDS tags */ +}; + +#endif +struct ctlr_info +{ + int ctlr; + char devname[8]; + char *product_name; + char firm_ver[4]; // Firmware version + struct pci_dev *pdev; + __u32 board_id; + void __iomem *vaddr; + unsigned long paddr; + int nr_cmds; /* Number of commands allowed on this controller */ + CfgTable_struct __iomem *cfgtable; + int interrupts_enabled; + int major; + int max_commands; + int commands_outstanding; + int max_outstanding; /* Debug */ + int num_luns; + int highest_lun; + int usage_count; /* number of opens all all minor devices */ +# define DOORBELL_INT 0 +# define PERF_MODE_INT 1 +# define SIMPLE_MODE_INT 2 +# define MEMQ_MODE_INT 3 + unsigned int intr[4]; + unsigned int msix_vector; + unsigned int msi_vector; + int cciss_max_sectors; + BYTE cciss_read; + BYTE cciss_write; + BYTE cciss_read_capacity; + + // information about each logical volume + drive_info_struct drv[CISS_MAX_LUN]; + + struct access_method access; + + /* queue and queue Info */ + CommandList_struct *reqQ; + CommandList_struct *cmpQ; + unsigned int Qdepth; + unsigned int maxQsinceinit; + unsigned int maxSG; + spinlock_t lock; + + //* pointers to command and error info pool */ + CommandList_struct *cmd_pool; + dma_addr_t cmd_pool_dhandle; + ErrorInfo_struct *errinfo_pool; + dma_addr_t errinfo_pool_dhandle; + unsigned long *cmd_pool_bits; + int nr_allocs; + int nr_frees; + int busy_configuring; + int busy_initializing; + + /* This element holds the zero based queue number of the last + * queue to be started. It is used for fairness. + */ + int next_to_run; + + // Disk structures we need to pass back + struct gendisk *gendisk[CISS_MAX_LUN]; +#ifdef CONFIG_CISS_SCSI_TAPE + void *scsi_ctlr; /* ptr to structure containing scsi related stuff */ + /* list of block side commands the scsi error handling sucked up */ + /* and saved for later processing */ + struct sendcmd_reject_list scsi_rejects; +#endif + unsigned char alive; +}; + +/* Defining the diffent access_menthods */ +/* + * Memory mapped FIFO interface (SMART 53xx cards) + */ +#define SA5_DOORBELL 0x20 +#define SA5_REQUEST_PORT_OFFSET 0x40 +#define SA5_REPLY_INTR_MASK_OFFSET 0x34 +#define SA5_REPLY_PORT_OFFSET 0x44 +#define SA5_INTR_STATUS 0x30 +#define SA5_SCRATCHPAD_OFFSET 0xB0 + +#define SA5_CTCFG_OFFSET 0xB4 +#define SA5_CTMEM_OFFSET 0xB8 + +#define SA5_INTR_OFF 0x08 +#define SA5B_INTR_OFF 0x04 +#define SA5_INTR_PENDING 0x08 +#define SA5B_INTR_PENDING 0x04 +#define FIFO_EMPTY 0xffffffff +#define CCISS_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */ + +#define CISS_ERROR_BIT 0x02 + +#define CCISS_INTR_ON 1 +#define CCISS_INTR_OFF 0 +/* + Send the command to the hardware +*/ +static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) +{ +#ifdef CCISS_DEBUG + printk("Sending %x - down to controller\n", c->busaddr ); +#endif /* CCISS_DEBUG */ + writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); + h->commands_outstanding++; + if ( h->commands_outstanding > h->max_outstanding) + h->max_outstanding = h->commands_outstanding; +} + +/* + * This card is the opposite of the other cards. + * 0 turns interrupts on... + * 0x08 turns them off... 
+ */ +static void SA5_intr_mask(ctlr_info_t *h, unsigned long val) +{ + if (val) + { /* Turn interrupts on */ + h->interrupts_enabled = 1; + writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + } else /* Turn them off */ + { + h->interrupts_enabled = 0; + writel( SA5_INTR_OFF, + h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + } +} +/* + * This card is the opposite of the other cards. + * 0 turns interrupts on... + * 0x04 turns them off... + */ +static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val) +{ + if (val) + { /* Turn interrupts on */ + h->interrupts_enabled = 1; + writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + } else /* Turn them off */ + { + h->interrupts_enabled = 0; + writel( SA5B_INTR_OFF, + h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + } +} +/* + * Returns true if fifo is full. + * + */ +static unsigned long SA5_fifo_full(ctlr_info_t *h) +{ + if( h->commands_outstanding >= h->max_commands) + return(1); + else + return(0); + +} +/* + * returns value read from hardware. + * returns FIFO_EMPTY if there is nothing to read + */ +static unsigned long SA5_completed(ctlr_info_t *h) +{ + unsigned long register_value + = readl(h->vaddr + SA5_REPLY_PORT_OFFSET); + if(register_value != FIFO_EMPTY) + { + h->commands_outstanding--; +#ifdef CCISS_DEBUG + printk("cciss: Read %lx back from board\n", register_value); +#endif /* CCISS_DEBUG */ + } +#ifdef CCISS_DEBUG + else + { + printk("cciss: FIFO Empty read\n"); + } +#endif + return ( register_value); + +} +/* + * Returns true if an interrupt is pending.. + */ +static unsigned long SA5_intr_pending(ctlr_info_t *h) +{ + unsigned long register_value = + readl(h->vaddr + SA5_INTR_STATUS); +#ifdef CCISS_DEBUG + printk("cciss: intr_pending %lx\n", register_value); +#endif /* CCISS_DEBUG */ + if( register_value & SA5_INTR_PENDING) + return 1; + return 0 ; +} + +/* + * Returns true if an interrupt is pending.. + */ +static unsigned long SA5B_intr_pending(ctlr_info_t *h) +{ + unsigned long register_value = + readl(h->vaddr + SA5_INTR_STATUS); +#ifdef CCISS_DEBUG + printk("cciss: intr_pending %lx\n", register_value); +#endif /* CCISS_DEBUG */ + if( register_value & SA5B_INTR_PENDING) + return 1; + return 0 ; +} + + +static struct access_method SA5_access = { + SA5_submit_command, + SA5_intr_mask, + SA5_fifo_full, + SA5_intr_pending, + SA5_completed, +}; + +static struct access_method SA5B_access = { + SA5_submit_command, + SA5B_intr_mask, + SA5_fifo_full, + SA5B_intr_pending, + SA5_completed, +}; + +struct board_type { + __u32 board_id; + char *product_name; + struct access_method *access; + int nr_cmds; /* Max cmds this kind of ctlr can handle. 
+ */
+};
+
+#define CCISS_LOCK(i)	(&hba[i]->lock)
+
+#endif				/* CCISS_H */
+
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
new file mode 100644
index 0000000..43bf559
--- /dev/null
+++ b/drivers/block/cciss_cmd.h
@@ -0,0 +1,299 @@
+#ifndef CCISS_CMD_H
+#define CCISS_CMD_H
+//###########################################################################
+//DEFINES
+//###########################################################################
+#define CISS_VERSION "1.00"
+
+//general boundary definitions
+#define SENSEINFOBYTES          32	//note that this value may vary between host implementations
+#define MAXSGENTRIES            31
+#define MAXREPLYQS              256
+
+//Command Status value
+#define CMD_SUCCESS             0x0000
+#define CMD_TARGET_STATUS       0x0001
+#define CMD_DATA_UNDERRUN       0x0002
+#define CMD_DATA_OVERRUN        0x0003
+#define CMD_INVALID             0x0004
+#define CMD_PROTOCOL_ERR        0x0005
+#define CMD_HARDWARE_ERR        0x0006
+#define CMD_CONNECTION_LOST     0x0007
+#define CMD_ABORTED             0x0008
+#define CMD_ABORT_FAILED        0x0009
+#define CMD_UNSOLICITED_ABORT   0x000A
+#define CMD_TIMEOUT             0x000B
+#define CMD_UNABORTABLE         0x000C
+
+//transfer direction
+#define XFER_NONE               0x00
+#define XFER_WRITE              0x01
+#define XFER_READ               0x02
+#define XFER_RSVD               0x03
+
+//task attribute
+#define ATTR_UNTAGGED           0x00
+#define ATTR_SIMPLE             0x04
+#define ATTR_HEADOFQUEUE        0x05
+#define ATTR_ORDERED            0x06
+#define ATTR_ACA                0x07
+
+//cdb type
+#define TYPE_CMD                0x00
+#define TYPE_MSG                0x01
+
+//config space register offsets
+#define CFG_VENDORID            0x00
+#define CFG_DEVICEID            0x02
+#define CFG_I2OBAR              0x10
+#define CFG_MEM1BAR             0x14
+
+//i2o space register offsets
+#define I2O_IBDB_SET            0x20
+#define I2O_IBDB_CLEAR          0x70
+#define I2O_INT_STATUS          0x30
+#define I2O_INT_MASK            0x34
+#define I2O_IBPOST_Q            0x40
+#define I2O_OBPOST_Q            0x44
+#define I2O_DMA1_CFG            0x214
+
+//Configuration Table
+#define CFGTBL_ChangeReq        0x00000001l
+#define CFGTBL_AccCmds          0x00000001l
+
+#define CFGTBL_Trans_Simple     0x00000002l
+
+#define CFGTBL_BusType_Ultra2   0x00000001l
+#define CFGTBL_BusType_Ultra3   0x00000002l
+#define CFGTBL_BusType_Fibre1G  0x00000100l
+#define CFGTBL_BusType_Fibre2G  0x00000200l
+typedef struct _vals32
+{
+	__u32	lower;
+	__u32	upper;
+} vals32;
+
+typedef union _u64bit
+{
+	vals32	val32;
+	__u64	val;
+} u64bit;
+
+// Type defs used in the following structs
+#define BYTE __u8
+#define WORD __u16
+#define HWORD __u16
+#define DWORD __u32
+#define QWORD vals32
+
+//###########################################################################
+//STRUCTURES
+//###########################################################################
+#define CISS_MAX_LUN 1024
+#define CISS_MAX_PHYS_LUN 1024
+// SCSI-3 Commands
+
+#pragma pack(1)
+
+#define CISS_INQUIRY 0x12
+//Data returned
+typedef struct _InquiryData_struct
+{
+	BYTE data_byte[36];
+} InquiryData_struct;
+
+#define CISS_REPORT_LOG 0xc2	/* Report Logical LUNs */
+#define CISS_REPORT_PHYS 0xc3	/* Report Physical LUNs */
+// Data returned
+typedef struct _ReportLUNdata_struct
+{
+	BYTE LUNListLength[4];
+	DWORD reserved;
+	BYTE LUN[CISS_MAX_LUN][8];
+} ReportLunData_struct;
+
+#define CCISS_READ_CAPACITY 0x25	/* Read Capacity */
+typedef struct _ReadCapdata_struct
+{
+	BYTE total_size[4];	// Total size in blocks
+	BYTE block_size[4];	// Size of blocks in bytes
+} ReadCapdata_struct;
+
+#define CCISS_READ_CAPACITY_16 0x9e	/* Read Capacity 16 */
+
+/* service action to differentiate a 16 byte read capacity from
+   other commands that use the 0x9e SCSI op code */
+
+#define CCISS_READ_CAPACITY_16_SERVICE_ACT 0x10
+
+typedef struct
_ReadCapdata_struct_16 +{ + BYTE total_size[8]; /* Total size in blocks */ + BYTE block_size[4]; /* Size of blocks in bytes */ + BYTE prot_en:1; /* protection enable bit */ + BYTE rto_en:1; /* reference tag own enable bit */ + BYTE reserved:6; /* reserved bits */ + BYTE reserved2[18]; /* reserved bytes per spec */ +} ReadCapdata_struct_16; + +/* Define the supported read/write commands for cciss based controllers */ + +#define CCISS_READ_10 0x28 /* Read(10) */ +#define CCISS_WRITE_10 0x2a /* Write(10) */ +#define CCISS_READ_16 0x88 /* Read(16) */ +#define CCISS_WRITE_16 0x8a /* Write(16) */ + +/* Define the CDB lengths supported by cciss based controllers */ + +#define CDB_LEN10 10 +#define CDB_LEN16 16 + +// BMIC commands +#define BMIC_READ 0x26 +#define BMIC_WRITE 0x27 +#define BMIC_CACHE_FLUSH 0xc2 +#define CCISS_CACHE_FLUSH 0x01 //C2 was already being used by CCISS + +//Command List Structure +typedef union _SCSI3Addr_struct { + struct { + BYTE Dev; + BYTE Bus:6; + BYTE Mode:2; // b00 + } PeripDev; + struct { + BYTE DevLSB; + BYTE DevMSB:6; + BYTE Mode:2; // b01 + } LogDev; + struct { + BYTE Dev:5; + BYTE Bus:3; + BYTE Targ:6; + BYTE Mode:2; // b10 + } LogUnit; +} SCSI3Addr_struct; + +typedef struct _PhysDevAddr_struct { + DWORD TargetId:24; + DWORD Bus:6; + DWORD Mode:2; + SCSI3Addr_struct Target[2]; //2 level target device addr +} PhysDevAddr_struct; + +typedef struct _LogDevAddr_struct { + DWORD VolId:30; + DWORD Mode:2; + BYTE reserved[4]; +} LogDevAddr_struct; + +typedef union _LUNAddr_struct { + BYTE LunAddrBytes[8]; + SCSI3Addr_struct SCSI3Lun[4]; + PhysDevAddr_struct PhysDev; + LogDevAddr_struct LogDev; +} LUNAddr_struct; + +typedef struct _CommandListHeader_struct { + BYTE ReplyQueue; + BYTE SGList; + HWORD SGTotal; + QWORD Tag; + LUNAddr_struct LUN; +} CommandListHeader_struct; +typedef struct _RequestBlock_struct { + BYTE CDBLen; + struct { + BYTE Type:3; + BYTE Attribute:3; + BYTE Direction:2; + } Type; + HWORD Timeout; + BYTE CDB[16]; +} RequestBlock_struct; +typedef struct _ErrDescriptor_struct { + QWORD Addr; + DWORD Len; +} ErrDescriptor_struct; +typedef struct _SGDescriptor_struct { + QWORD Addr; + DWORD Len; + DWORD Ext; +} SGDescriptor_struct; + +typedef union _MoreErrInfo_struct{ + struct { + BYTE Reserved[3]; + BYTE Type; + DWORD ErrorInfo; + }Common_Info; + struct{ + BYTE Reserved[2]; + BYTE offense_size;//size of offending entry + BYTE offense_num; //byte # of offense 0-base + DWORD offense_value; + }Invalid_Cmd; +}MoreErrInfo_struct; +typedef struct _ErrorInfo_struct { + BYTE ScsiStatus; + BYTE SenseLen; + HWORD CommandStatus; + DWORD ResidualCnt; + MoreErrInfo_struct MoreErrInfo; + BYTE SenseInfo[SENSEINFOBYTES]; +} ErrorInfo_struct; + +/* Command types */ +#define CMD_RWREQ 0x00 +#define CMD_IOCTL_PEND 0x01 +#define CMD_SCSI 0x03 +#define CMD_MSG_DONE 0x04 +#define CMD_MSG_TIMEOUT 0x05 + +/* This structure needs to be divisible by 8 for new + * indexing method. 
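+ * That is, sizeof(CommandList_struct) must be a multiple of 8; the
+ * pad[] member below exists only to keep it that way.  A build-time
+ * check in that spirit (illustrative only, and assuming BUILD_BUG_ON
+ * is usable at this point) could read:
+ *
+ *	BUILD_BUG_ON(sizeof(CommandList_struct) % 8 != 0);
+ *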
+ */ +#define PADSIZE (sizeof(long) - 4) +typedef struct _CommandList_struct { + CommandListHeader_struct Header; + RequestBlock_struct Request; + ErrDescriptor_struct ErrDesc; + SGDescriptor_struct SG[MAXSGENTRIES]; + /* information associated with the command */ + __u32 busaddr; /* physical address of this record */ + ErrorInfo_struct * err_info; /* pointer to the allocated mem */ + int ctlr; + int cmd_type; + long cmdindex; + struct _CommandList_struct *prev; + struct _CommandList_struct *next; + struct request * rq; + struct completion *waiting; + int retry_count; + void * scsi_cmd; + char pad[PADSIZE]; +} CommandList_struct; + +//Configuration Table Structure +typedef struct _HostWrite_struct { + DWORD TransportRequest; + DWORD Reserved; + DWORD CoalIntDelay; + DWORD CoalIntCount; +} HostWrite_struct; + +typedef struct _CfgTable_struct { + BYTE Signature[4]; + DWORD SpecValence; + DWORD TransportSupport; + DWORD TransportActive; + HostWrite_struct HostWrite; + DWORD CmdsOutMax; + DWORD BusTypes; + DWORD Reserved; + BYTE ServerName[16]; + DWORD HeartBeat; + DWORD SCSI_Prefetch; +} CfgTable_struct; +#pragma pack() +#endif // CCISS_CMD_H diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c new file mode 100644 index 0000000..a3fd87b --- /dev/null +++ b/drivers/block/cciss_scsi.c @@ -0,0 +1,1657 @@ +/* + * Disk Array driver for HP Smart Array controllers, SCSI Tape module. + * (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA + * 02111-1307, USA. + * + * Questions/Comments/Bugfixes to iss_storagedev@hp.com + * + * Author: Stephen M. Cameron + */ +#ifdef CONFIG_CISS_SCSI_TAPE + +/* Here we have code to present the driver as a scsi driver + as it is simultaneously presented as a block driver. The + reason for doing this is to allow access to SCSI tape drives + through the array controller. Note in particular, neither + physical nor logical disks are presented through the scsi layer. */ + +#include <linux/timer.h> +#include <linux/completion.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include <asm/atomic.h> + +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_host.h> + +#include "cciss_scsi.h" + +#define CCISS_ABORT_MSG 0x00 +#define CCISS_RESET_MSG 0x01 + +/* some prototypes... 
*/ +static int sendcmd( + __u8 cmd, + int ctlr, + void *buff, + size_t size, + unsigned int use_unit_num, /* 0: address the controller, + 1: address logical volume log_unit, + 2: address is in scsi3addr */ + unsigned int log_unit, + __u8 page_code, + unsigned char *scsi3addr, + int cmd_type); + + +static int cciss_scsi_proc_info( + struct Scsi_Host *sh, + char *buffer, /* data buffer */ + char **start, /* where data in buffer starts */ + off_t offset, /* offset from start of imaginary file */ + int length, /* length of data in buffer */ + int func); /* 0 == read, 1 == write */ + +static int cciss_scsi_queue_command (struct scsi_cmnd *cmd, + void (* done)(struct scsi_cmnd *)); +static int cciss_eh_device_reset_handler(struct scsi_cmnd *); +static int cciss_eh_abort_handler(struct scsi_cmnd *); + +static struct cciss_scsi_hba_t ccissscsi[MAX_CTLR] = { + { .name = "cciss0", .ndevices = 0 }, + { .name = "cciss1", .ndevices = 0 }, + { .name = "cciss2", .ndevices = 0 }, + { .name = "cciss3", .ndevices = 0 }, + { .name = "cciss4", .ndevices = 0 }, + { .name = "cciss5", .ndevices = 0 }, + { .name = "cciss6", .ndevices = 0 }, + { .name = "cciss7", .ndevices = 0 }, +}; + +static struct scsi_host_template cciss_driver_template = { + .module = THIS_MODULE, + .name = "cciss", + .proc_name = "cciss", + .proc_info = cciss_scsi_proc_info, + .queuecommand = cciss_scsi_queue_command, + .can_queue = SCSI_CCISS_CAN_QUEUE, + .this_id = 7, + .sg_tablesize = MAXSGENTRIES, + .cmd_per_lun = 1, + .use_clustering = DISABLE_CLUSTERING, + /* Can't have eh_bus_reset_handler or eh_host_reset_handler for cciss */ + .eh_device_reset_handler= cciss_eh_device_reset_handler, + .eh_abort_handler = cciss_eh_abort_handler, +}; + +#pragma pack(1) +struct cciss_scsi_cmd_stack_elem_t { + CommandList_struct cmd; + ErrorInfo_struct Err; + __u32 busaddr; + __u32 pad; +}; + +#pragma pack() + +#define CMD_STACK_SIZE (SCSI_CCISS_CAN_QUEUE * \ + CCISS_MAX_SCSI_DEVS_PER_HBA + 2) + // plus two for init time usage + +#pragma pack(1) +struct cciss_scsi_cmd_stack_t { + struct cciss_scsi_cmd_stack_elem_t *pool; + struct cciss_scsi_cmd_stack_elem_t *elem[CMD_STACK_SIZE]; + dma_addr_t cmd_pool_handle; + int top; +}; +#pragma pack() + +struct cciss_scsi_adapter_data_t { + struct Scsi_Host *scsi_host; + struct cciss_scsi_cmd_stack_t cmd_stack; + int registered; + spinlock_t lock; // to protect ccissscsi[ctlr]; +}; + +#define CPQ_TAPE_LOCK(ctlr, flags) spin_lock_irqsave( \ + &(((struct cciss_scsi_adapter_data_t *) \ + hba[ctlr]->scsi_ctlr)->lock), flags); +#define CPQ_TAPE_UNLOCK(ctlr, flags) spin_unlock_irqrestore( \ + &(((struct cciss_scsi_adapter_data_t *) \ + hba[ctlr]->scsi_ctlr)->lock), flags); + +static CommandList_struct * +scsi_cmd_alloc(ctlr_info_t *h) +{ + /* assume only one process in here at a time, locking done by caller. */ + /* use CCISS_LOCK(ctlr) */ + /* might be better to rewrite how we allocate scsi commands in a way that */ + /* needs no locking at all. */ + + /* take the top memory chunk off the stack and return it, if any. 
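+	   The pool is a simple LIFO: stk->top indexes the most recently
+	   freed element, top == -1 means the pool is empty, and
+	   top == CMD_STACK_SIZE-1 means every element is free.  Each
+	   element carries its own bus address (busaddr), so the error
+	   descriptor can be aimed just past the embedded
+	   CommandList_struct without any extra mapping.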
*/ + struct cciss_scsi_cmd_stack_elem_t *c; + struct cciss_scsi_adapter_data_t *sa; + struct cciss_scsi_cmd_stack_t *stk; + u64bit temp64; + + sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr; + stk = &sa->cmd_stack; + + if (stk->top < 0) + return NULL; + c = stk->elem[stk->top]; + /* memset(c, 0, sizeof(*c)); */ + memset(&c->cmd, 0, sizeof(c->cmd)); + memset(&c->Err, 0, sizeof(c->Err)); + /* set physical addr of cmd and addr of scsi parameters */ + c->cmd.busaddr = c->busaddr; + /* (__u32) (stk->cmd_pool_handle + + (sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top)); */ + + temp64.val = (__u64) (c->busaddr + sizeof(CommandList_struct)); + /* (__u64) (stk->cmd_pool_handle + + (sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top) + + sizeof(CommandList_struct)); */ + stk->top--; + c->cmd.ErrDesc.Addr.lower = temp64.val32.lower; + c->cmd.ErrDesc.Addr.upper = temp64.val32.upper; + c->cmd.ErrDesc.Len = sizeof(ErrorInfo_struct); + + c->cmd.ctlr = h->ctlr; + c->cmd.err_info = &c->Err; + + return (CommandList_struct *) c; +} + +static void +scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd) +{ + /* assume only one process in here at a time, locking done by caller. */ + /* use CCISS_LOCK(ctlr) */ + /* drop the free memory chunk on top of the stack. */ + + struct cciss_scsi_adapter_data_t *sa; + struct cciss_scsi_cmd_stack_t *stk; + + sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr; + stk = &sa->cmd_stack; + if (stk->top >= CMD_STACK_SIZE) { + printk("cciss: scsi_cmd_free called too many times.\n"); + BUG(); + } + stk->top++; + stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) cmd; +} + +static int +scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa) +{ + int i; + struct cciss_scsi_cmd_stack_t *stk; + size_t size; + + stk = &sa->cmd_stack; + size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE; + + // pci_alloc_consistent guarantees 32-bit DMA address will + // be used + + stk->pool = (struct cciss_scsi_cmd_stack_elem_t *) + pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle); + + if (stk->pool == NULL) { + printk("stk->pool is null\n"); + return -1; + } + + for (i=0; i<CMD_STACK_SIZE; i++) { + stk->elem[i] = &stk->pool[i]; + stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle + + (sizeof(struct cciss_scsi_cmd_stack_elem_t) * i)); + } + stk->top = CMD_STACK_SIZE-1; + return 0; +} + +static void +scsi_cmd_stack_free(int ctlr) +{ + struct cciss_scsi_adapter_data_t *sa; + struct cciss_scsi_cmd_stack_t *stk; + size_t size; + + sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr; + stk = &sa->cmd_stack; + if (stk->top != CMD_STACK_SIZE-1) { + printk( "cciss: %d scsi commands are still outstanding.\n", + CMD_STACK_SIZE - stk->top); + // BUG(); + printk("WE HAVE A BUG HERE!!! 
stk=0x%p\n", stk); + } + size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE; + + pci_free_consistent(hba[ctlr]->pdev, size, stk->pool, stk->cmd_pool_handle); + stk->pool = NULL; +} + +#if 0 +static int xmargin=8; +static int amargin=60; + +static void +print_bytes (unsigned char *c, int len, int hex, int ascii) +{ + + int i; + unsigned char *x; + + if (hex) + { + x = c; + for (i=0;i<len;i++) + { + if ((i % xmargin) == 0 && i>0) printk("\n"); + if ((i % xmargin) == 0) printk("0x%04x:", i); + printk(" %02x", *x); + x++; + } + printk("\n"); + } + if (ascii) + { + x = c; + for (i=0;i<len;i++) + { + if ((i % amargin) == 0 && i>0) printk("\n"); + if ((i % amargin) == 0) printk("0x%04x:", i); + if (*x > 26 && *x < 128) printk("%c", *x); + else printk("."); + x++; + } + printk("\n"); + } +} + +static void +print_cmd(CommandList_struct *cp) +{ + printk("queue:%d\n", cp->Header.ReplyQueue); + printk("sglist:%d\n", cp->Header.SGList); + printk("sgtot:%d\n", cp->Header.SGTotal); + printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper, + cp->Header.Tag.lower); + printk("LUN:0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + cp->Header.LUN.LunAddrBytes[0], + cp->Header.LUN.LunAddrBytes[1], + cp->Header.LUN.LunAddrBytes[2], + cp->Header.LUN.LunAddrBytes[3], + cp->Header.LUN.LunAddrBytes[4], + cp->Header.LUN.LunAddrBytes[5], + cp->Header.LUN.LunAddrBytes[6], + cp->Header.LUN.LunAddrBytes[7]); + printk("CDBLen:%d\n", cp->Request.CDBLen); + printk("Type:%d\n",cp->Request.Type.Type); + printk("Attr:%d\n",cp->Request.Type.Attribute); + printk(" Dir:%d\n",cp->Request.Type.Direction); + printk("Timeout:%d\n",cp->Request.Timeout); + printk( "CDB: %02x %02x %02x %02x %02x %02x %02x %02x" + " %02x %02x %02x %02x %02x %02x %02x %02x\n", + cp->Request.CDB[0], cp->Request.CDB[1], + cp->Request.CDB[2], cp->Request.CDB[3], + cp->Request.CDB[4], cp->Request.CDB[5], + cp->Request.CDB[6], cp->Request.CDB[7], + cp->Request.CDB[8], cp->Request.CDB[9], + cp->Request.CDB[10], cp->Request.CDB[11], + cp->Request.CDB[12], cp->Request.CDB[13], + cp->Request.CDB[14], cp->Request.CDB[15]), + printk("edesc.Addr: 0x%08x/0%08x, Len = %d\n", + cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower, + cp->ErrDesc.Len); + printk("sgs..........Errorinfo:\n"); + printk("scsistatus:%d\n", cp->err_info->ScsiStatus); + printk("senselen:%d\n", cp->err_info->SenseLen); + printk("cmd status:%d\n", cp->err_info->CommandStatus); + printk("resid cnt:%d\n", cp->err_info->ResidualCnt); + printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size); + printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num); + printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value); + +} + +#endif + +static int +find_bus_target_lun(int ctlr, int *bus, int *target, int *lun) +{ + /* finds an unused bus, target, lun for a new device */ + /* assumes hba[ctlr]->scsi_ctlr->lock is held */ + int i, found=0; + unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA]; + + memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA); + + target_taken[SELF_SCSI_ID] = 1; + for (i=0;i<ccissscsi[ctlr].ndevices;i++) + target_taken[ccissscsi[ctlr].dev[i].target] = 1; + + for (i=0;i<CCISS_MAX_SCSI_DEVS_PER_HBA;i++) { + if (!target_taken[i]) { + *bus = 0; *target=i; *lun = 0; found=1; + break; + } + } + return (!found); +} +struct scsi2map { + char scsi3addr[8]; + int bus, target, lun; +}; + +static int +cciss_scsi_add_entry(int ctlr, int hostno, + struct cciss_scsi_dev_t *device, + struct scsi2map *added, int *nadded) +{ + /* 
assumes hba[ctlr]->scsi_ctlr->lock is held */ + int n = ccissscsi[ctlr].ndevices; + struct cciss_scsi_dev_t *sd; + int i, bus, target, lun; + unsigned char addr1[8], addr2[8]; + + if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) { + printk("cciss%d: Too many devices, " + "some will be inaccessible.\n", ctlr); + return -1; + } + + bus = target = -1; + lun = 0; + /* Is this device a non-zero lun of a multi-lun device */ + /* byte 4 of the 8-byte LUN addr will contain the logical unit no. */ + if (device->scsi3addr[4] != 0) { + /* Search through our list and find the device which */ + /* has the same 8 byte LUN address, excepting byte 4. */ + /* Assign the same bus and target for this new LUN. */ + /* Use the logical unit number from the firmware. */ + memcpy(addr1, device->scsi3addr, 8); + addr1[4] = 0; + for (i = 0; i < n; i++) { + sd = &ccissscsi[ctlr].dev[i]; + memcpy(addr2, sd->scsi3addr, 8); + addr2[4] = 0; + /* differ only in byte 4? */ + if (memcmp(addr1, addr2, 8) == 0) { + bus = sd->bus; + target = sd->target; + lun = device->scsi3addr[4]; + break; + } + } + } + + sd = &ccissscsi[ctlr].dev[n]; + if (lun == 0) { + if (find_bus_target_lun(ctlr, + &sd->bus, &sd->target, &sd->lun) != 0) + return -1; + } else { + sd->bus = bus; + sd->target = target; + sd->lun = lun; + } + added[*nadded].bus = sd->bus; + added[*nadded].target = sd->target; + added[*nadded].lun = sd->lun; + (*nadded)++; + + memcpy(sd->scsi3addr, device->scsi3addr, 8); + memcpy(sd->vendor, device->vendor, sizeof(sd->vendor)); + memcpy(sd->revision, device->revision, sizeof(sd->revision)); + memcpy(sd->device_id, device->device_id, sizeof(sd->device_id)); + sd->devtype = device->devtype; + + ccissscsi[ctlr].ndevices++; + + /* initially, (before registering with scsi layer) we don't + know our hostno and we don't want to print anything first + time anyway (the scsi layer's inquiries will show that info) */ + if (hostno != -1) + printk("cciss%d: %s device c%db%dt%dl%d added.\n", + ctlr, scsi_device_type(sd->devtype), hostno, + sd->bus, sd->target, sd->lun); + return 0; +} + +static void +cciss_scsi_remove_entry(int ctlr, int hostno, int entry, + struct scsi2map *removed, int *nremoved) +{ + /* assumes hba[ctlr]->scsi_ctlr->lock is held */ + int i; + struct cciss_scsi_dev_t sd; + + if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return; + sd = ccissscsi[ctlr].dev[entry]; + removed[*nremoved].bus = sd.bus; + removed[*nremoved].target = sd.target; + removed[*nremoved].lun = sd.lun; + (*nremoved)++; + for (i=entry;i<ccissscsi[ctlr].ndevices-1;i++) + ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1]; + ccissscsi[ctlr].ndevices--; + printk("cciss%d: %s device c%db%dt%dl%d removed.\n", + ctlr, scsi_device_type(sd.devtype), hostno, + sd.bus, sd.target, sd.lun); +} + + +#define SCSI3ADDR_EQ(a,b) ( \ + (a)[7] == (b)[7] && \ + (a)[6] == (b)[6] && \ + (a)[5] == (b)[5] && \ + (a)[4] == (b)[4] && \ + (a)[3] == (b)[3] && \ + (a)[2] == (b)[2] && \ + (a)[1] == (b)[1] && \ + (a)[0] == (b)[0]) + +static void fixup_botched_add(int ctlr, char *scsi3addr) +{ + /* called when scsi_add_device fails in order to re-adjust */ + /* ccissscsi[] to match the mid layer's view. 
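+	   The entry is deleted by shifting the tail of dev[] down one
+	   slot under the tape lock, just as cciss_scsi_remove_entry does.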
+ */
+	unsigned long flags;
+	int i, j;
+	CPQ_TAPE_LOCK(ctlr, flags);
+	for (i = 0; i < ccissscsi[ctlr].ndevices; i++) {
+		if (memcmp(scsi3addr,
+				ccissscsi[ctlr].dev[i].scsi3addr, 8) == 0) {
+			for (j = i; j < ccissscsi[ctlr].ndevices-1; j++)
+				ccissscsi[ctlr].dev[j] =
+					ccissscsi[ctlr].dev[j+1];
+			ccissscsi[ctlr].ndevices--;
+			break;
+		}
+	}
+	CPQ_TAPE_UNLOCK(ctlr, flags);
+}
+
+static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
+	struct cciss_scsi_dev_t *dev2)
+{
+	return dev1->devtype == dev2->devtype &&
+		memcmp(dev1->scsi3addr, dev2->scsi3addr,
+			sizeof(dev1->scsi3addr)) == 0 &&
+		memcmp(dev1->device_id, dev2->device_id,
+			sizeof(dev1->device_id)) == 0 &&
+		memcmp(dev1->vendor, dev2->vendor,
+			sizeof(dev1->vendor)) == 0 &&
+		memcmp(dev1->model, dev2->model,
+			sizeof(dev1->model)) == 0 &&
+		memcmp(dev1->revision, dev2->revision,
+			sizeof(dev1->revision)) == 0;
+}
+
+static int
+adjust_cciss_scsi_table(int ctlr, int hostno,
+	struct cciss_scsi_dev_t sd[], int nsds)
+{
+	/* sd contains scsi3 addresses and devtypes, but
+	   bus, target and lun are not filled in.  This function
+	   takes what's in sd to be the current state and adjusts
+	   ccissscsi[] to be in line with what's in sd. */
+
+	int i,j, found, changes=0;
+	struct cciss_scsi_dev_t *csd;
+	unsigned long flags;
+	struct scsi2map *added, *removed;
+	int nadded, nremoved;
+	struct Scsi_Host *sh = NULL;
+
+	added = kzalloc(sizeof(*added) * CCISS_MAX_SCSI_DEVS_PER_HBA,
+			GFP_KERNEL);
+	removed = kzalloc(sizeof(*removed) * CCISS_MAX_SCSI_DEVS_PER_HBA,
+			GFP_KERNEL);
+
+	if (!added || !removed) {
+		printk(KERN_WARNING "cciss%d: Out of memory in "
+			"adjust_cciss_scsi_table\n", ctlr);
+		goto free_and_out;
+	}
+
+	CPQ_TAPE_LOCK(ctlr, flags);
+
+	if (hostno != -1)  /* if it's not the first time... */
+		sh = ((struct cciss_scsi_adapter_data_t *)
+			hba[ctlr]->scsi_ctlr)->scsi_host;
+
+	/* find any devices in ccissscsi[] that are not in
+	   sd[] and remove them from ccissscsi[] */
+
+	i = 0;
+	nremoved = 0;
+	nadded = 0;
+	while (i < ccissscsi[ctlr].ndevices) {
+		csd = &ccissscsi[ctlr].dev[i];
+		found=0;
+		for (j=0;j<nsds;j++) {
+			if (SCSI3ADDR_EQ(sd[j].scsi3addr,
+				csd->scsi3addr)) {
+				if (device_is_the_same(&sd[j], csd))
+					found=2;
+				else
+					found=1;
+				break;
+			}
+		}
+
+		if (found == 0) { /* device no longer present. */
+			changes++;
+			/* printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
+				ctlr, scsi_device_type(csd->devtype), hostno,
+					csd->bus, csd->target, csd->lun); */
+			cciss_scsi_remove_entry(ctlr, hostno, i,
+				removed, &nremoved);
+			/* remove ^^^, hence i not incremented */
+		} else if (found == 1) { /* device is different in some way */
+			changes++;
+			printk("cciss%d: device c%db%dt%dl%d has changed.\n",
+				ctlr, hostno, csd->bus, csd->target, csd->lun);
+			cciss_scsi_remove_entry(ctlr, hostno, i,
+				removed, &nremoved);
+			/* remove ^^^, hence i not incremented */
+			if (cciss_scsi_add_entry(ctlr, hostno, &sd[j],
+				added, &nadded) != 0)
+				/* we just removed one, so add can't fail. */
+				BUG();
+			csd->devtype = sd[j].devtype;
+			memcpy(csd->device_id, sd[j].device_id,
+				sizeof(csd->device_id));
+			memcpy(csd->vendor, sd[j].vendor,
+				sizeof(csd->vendor));
+			memcpy(csd->model, sd[j].model,
+				sizeof(csd->model));
+			memcpy(csd->revision, sd[j].revision,
+				sizeof(csd->revision));
+		} else		/* device is same as it ever was, */
+			i++;	/* so just move along. 
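+				   (found == 2 means both the address and
+				   the identity matched, so the table entry
+				   is already in sync with sd[] and nothing
+				   needs doing.)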
+ */
+	}
+
+	/* Now, make sure every device listed in sd[] is also
+	   listed in ccissscsi[], adding them if they aren't found */
+
+	for (i=0;i<nsds;i++) {
+		found=0;
+		for (j=0;j<ccissscsi[ctlr].ndevices;j++) {
+			csd = &ccissscsi[ctlr].dev[j];
+			if (SCSI3ADDR_EQ(sd[i].scsi3addr,
+				csd->scsi3addr)) {
+				if (device_is_the_same(&sd[i], csd))
+					found=2;	/* found device */
+				else
+					found=1;	/* found a bug. */
+				break;
+			}
+		}
+		if (!found) {
+			changes++;
+			if (cciss_scsi_add_entry(ctlr, hostno, &sd[i],
+				added, &nadded) != 0)
+				break;
+		} else if (found == 1) {
+			/* should never happen... */
+			changes++;
+			printk(KERN_WARNING "cciss%d: device "
+				"unexpectedly changed\n", ctlr);
+			/* but if it does happen, we just ignore that device */
+		}
+	}
+	CPQ_TAPE_UNLOCK(ctlr, flags);
+
+	/* Don't notify the scsi mid layer of any changes the first time */
+	/* through (or if there are no changes); scsi_scan_host will do */
+	/* it later, the first time through. */
+	if (hostno == -1 || !changes)
+		goto free_and_out;
+
+	/* Notify scsi mid layer of any removed devices */
+	for (i = 0; i < nremoved; i++) {
+		struct scsi_device *sdev =
+			scsi_device_lookup(sh, removed[i].bus,
+				removed[i].target, removed[i].lun);
+		if (sdev != NULL) {
+			scsi_remove_device(sdev);
+			scsi_device_put(sdev);
+		} else {
+			/* We don't expect to get here. */
+			/* future cmds to this device will get selection */
+			/* timeout as if the device was gone. */
+			printk(KERN_WARNING "cciss%d: didn't find "
+				"c%db%dt%dl%d for removal.\n",
+				ctlr, hostno, removed[i].bus,
+				removed[i].target, removed[i].lun);
+		}
+	}
+
+	/* Notify scsi mid layer of any added devices */
+	for (i = 0; i < nadded; i++) {
+		int rc;
+		rc = scsi_add_device(sh, added[i].bus,
+			added[i].target, added[i].lun);
+		if (rc == 0)
+			continue;
+		printk(KERN_WARNING "cciss%d: scsi_add_device "
+			"c%db%dt%dl%d failed, device not added.\n",
+			ctlr, hostno,
+			added[i].bus, added[i].target, added[i].lun);
+		/* now we have to remove it from ccissscsi, */
+		/* since it didn't get added to scsi mid layer */
+		fixup_botched_add(ctlr, added[i].scsi3addr);
+	}
+
+free_and_out:
+	kfree(added);
+	kfree(removed);
+	return 0;
+}
+
+static int
+lookup_scsi3addr(int ctlr, int bus, int target, int lun, char *scsi3addr)
+{
+	int i;
+	struct cciss_scsi_dev_t *sd;
+	unsigned long flags;
+
+	CPQ_TAPE_LOCK(ctlr, flags);
+	for (i=0;i<ccissscsi[ctlr].ndevices;i++) {
+		sd = &ccissscsi[ctlr].dev[i];
+		if (sd->bus == bus &&
+		    sd->target == target &&
+		    sd->lun == lun) {
+			memcpy(scsi3addr, &sd->scsi3addr[0], 8);
+			CPQ_TAPE_UNLOCK(ctlr, flags);
+			return 0;
+		}
+	}
+	CPQ_TAPE_UNLOCK(ctlr, flags);
+	return -1;
+}
+
+static void
+cciss_scsi_setup(int cntl_num)
+{
+	struct cciss_scsi_adapter_data_t * shba;
+
+	ccissscsi[cntl_num].ndevices = 0;
+	shba = (struct cciss_scsi_adapter_data_t *)
+		kmalloc(sizeof(*shba), GFP_KERNEL);
+	if (shba == NULL)
+		return;
+	shba->scsi_host = NULL;
+	spin_lock_init(&shba->lock);
+	shba->registered = 0;
+	if (scsi_cmd_stack_setup(cntl_num, shba) != 0) {
+		kfree(shba);
+		shba = NULL;
+	}
+	hba[cntl_num]->scsi_ctlr = (void *) shba;
+	return;
+}
+
+static void
+complete_scsi_command(CommandList_struct *cp, int timeout, __u32 tag)
+{
+	struct scsi_cmnd *cmd;
+	ctlr_info_t *ctlr;
+	ErrorInfo_struct *ei;
+
+	ei = cp->err_info;
+
+	/* First, see if it was a message rather than a command */
+	if (cp->Request.Type.Type == TYPE_MSG) {
+		cp->cmd_type = CMD_MSG_DONE;
+		return;
+	}
+
+	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
+	ctlr = hba[cp->ctlr];
+
+	scsi_dma_unmap(cmd);
+
+	cmd->result = (DID_OK << 16);		/* host byte */
+	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
+	/* cmd->result |= (GOOD < 1); */	/* status byte */
+
+	cmd->result |= (ei->ScsiStatus);
+	/* printk("Scsistatus is 0x%02x\n", ei->ScsiStatus); */
+
+	/* copy the sense data whether we need to or not. */
+
+	memcpy(cmd->sense_buffer, ei->SenseInfo,
+		ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
+			SCSI_SENSE_BUFFERSIZE :
+			ei->SenseLen);
+	scsi_set_resid(cmd, ei->ResidualCnt);
+
+	if(ei->CommandStatus != 0)
+	{ /* an error has occurred */
+		switch(ei->CommandStatus)
+		{
+			case CMD_TARGET_STATUS:
+				/* Pass it up to the upper layers... */
+				if( ei->ScsiStatus)
+				{
+#if 0
+					printk(KERN_WARNING "cciss: cmd %p "
+						"has SCSI Status = %x\n",
+						cp,
+						ei->ScsiStatus);
+#endif
+					cmd->result |= (ei->ScsiStatus);
+				}
+				else {  /* scsi status is zero??? How??? */
+
+	/* Ordinarily, this case should never happen, but there is a bug
+	   in some released firmware revisions that allows it to happen
+	   if, for example, a 4100 backplane loses power and the tape
+	   drive is in it.  We assume that it's a fatal error of some
+	   kind because we can't show that it wasn't.  We will make it
+	   look like selection timeout since that is the most common
+	   reason for this to occur, and it's severe enough. */
+
+					cmd->result = DID_NO_CONNECT << 16;
+				}
+			break;
+			case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+			break;
+			case CMD_DATA_OVERRUN:
+				printk(KERN_WARNING "cciss: cp %p has"
+					" completed with data overrun "
+					"reported\n", cp);
+			break;
+			case CMD_INVALID: {
+				/* print_bytes(cp, sizeof(*cp), 1, 0);
+				print_cmd(cp); */
+	/* We get CMD_INVALID if you address a non-existent tape drive instead
+	   of a selection timeout (no response).  You will see this if you yank
+	   out a tape drive, then try to access it.  This is kind of a shame
+	   because it means that any other CMD_INVALID (e.g. driver bug) will
+	   get interpreted as a missing target. */
+				cmd->result = DID_NO_CONNECT << 16;
+				}
+			break;
+			case CMD_PROTOCOL_ERR:
+				printk(KERN_WARNING "cciss: cp %p has "
+					"protocol error\n", cp);
+			break;
+			case CMD_HARDWARE_ERR:
+				cmd->result = DID_ERROR << 16;
+				printk(KERN_WARNING "cciss: cp %p had "
+					"hardware error\n", cp);
+			break;
+			case CMD_CONNECTION_LOST:
+				cmd->result = DID_ERROR << 16;
+				printk(KERN_WARNING "cciss: cp %p had "
+					"connection lost\n", cp);
+			break;
+			case CMD_ABORTED:
+				cmd->result = DID_ABORT << 16;
+				printk(KERN_WARNING "cciss: cp %p was "
+					"aborted\n", cp);
+			break;
+			case CMD_ABORT_FAILED:
+				cmd->result = DID_ERROR << 16;
+				printk(KERN_WARNING "cciss: cp %p reports "
+					"abort failed\n", cp);
+			break;
+			case CMD_UNSOLICITED_ABORT:
+				cmd->result = DID_ABORT << 16;
+				printk(KERN_WARNING "cciss: cp %p aborted "
+					"due to an unsolicited abort\n", cp);
+			break;
+			case CMD_TIMEOUT:
+				cmd->result = DID_TIME_OUT << 16;
+				printk(KERN_WARNING "cciss: cp %p timed out\n",
+					cp);
+			break;
+			default:
+				cmd->result = DID_ERROR << 16;
+				printk(KERN_WARNING "cciss: cp %p returned "
+					"unknown status %x\n", cp,
+						ei->CommandStatus);
+		}
+	}
+	// printk("c:%p:c%db%dt%dl%d ", cmd, ctlr->ctlr, cmd->channel,
+	//	cmd->target, cmd->lun);
+	cmd->scsi_done(cmd);
+	scsi_cmd_free(ctlr, cp);
+}
+
+static int
+cciss_scsi_detect(int ctlr)
+{
+	struct Scsi_Host *sh;
+	int error;
+
+	sh = scsi_host_alloc(&cciss_driver_template, sizeof(struct ctlr_info *));
+	if (sh == NULL)
+		goto fail;
+	sh->io_port = 0;	// good enough? FIXME,
+	sh->n_io_port = 0;	// I don't think we use these two...
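+	/* The adapter itself claims target 15 (SELF_SCSI_ID); the bus
+	   presented to the mid layer is fabricated anyway, see the
+	   comments in cciss_scsi.h. */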
+ sh->this_id = SELF_SCSI_ID; + + ((struct cciss_scsi_adapter_data_t *) + hba[ctlr]->scsi_ctlr)->scsi_host = (void *) sh; + sh->hostdata[0] = (unsigned long) hba[ctlr]; + sh->irq = hba[ctlr]->intr[SIMPLE_MODE_INT]; + sh->unique_id = sh->irq; + error = scsi_add_host(sh, &hba[ctlr]->pdev->dev); + if (error) + goto fail_host_put; + scsi_scan_host(sh); + return 1; + + fail_host_put: + scsi_host_put(sh); + fail: + return 0; +} + +static void +cciss_unmap_one(struct pci_dev *pdev, + CommandList_struct *cp, + size_t buflen, + int data_direction) +{ + u64bit addr64; + + addr64.val32.lower = cp->SG[0].Addr.lower; + addr64.val32.upper = cp->SG[0].Addr.upper; + pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction); +} + +static void +cciss_map_one(struct pci_dev *pdev, + CommandList_struct *cp, + unsigned char *buf, + size_t buflen, + int data_direction) +{ + __u64 addr64; + + addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction); + cp->SG[0].Addr.lower = + (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); + cp->SG[0].Addr.upper = + (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); + cp->SG[0].Len = buflen; + cp->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */ + cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */ +} + +static int +cciss_scsi_do_simple_cmd(ctlr_info_t *c, + CommandList_struct *cp, + unsigned char *scsi3addr, + unsigned char *cdb, + unsigned char cdblen, + unsigned char *buf, int bufsize, + int direction) +{ + unsigned long flags; + DECLARE_COMPLETION_ONSTACK(wait); + + cp->cmd_type = CMD_IOCTL_PEND; // treat this like an ioctl + cp->scsi_cmd = NULL; + cp->Header.ReplyQueue = 0; // unused in simple mode + memcpy(&cp->Header.LUN, scsi3addr, sizeof(cp->Header.LUN)); + cp->Header.Tag.lower = cp->busaddr; // Use k. address of cmd as tag + // Fill in the request block... + + /* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n", + scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], + scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */ + + memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB)); + memcpy(cp->Request.CDB, cdb, cdblen); + cp->Request.Timeout = 0; + cp->Request.CDBLen = cdblen; + cp->Request.Type.Type = TYPE_CMD; + cp->Request.Type.Attribute = ATTR_SIMPLE; + cp->Request.Type.Direction = direction; + + /* Fill in the SG list and do dma mapping */ + cciss_map_one(c->pdev, cp, (unsigned char *) buf, + bufsize, DMA_FROM_DEVICE); + + cp->waiting = &wait; + + /* Put the request on the tail of the request queue */ + spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags); + addQ(&c->reqQ, cp); + c->Qdepth++; + start_io(c); + spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags); + + wait_for_completion(&wait); + + /* undo the dma mapping */ + cciss_unmap_one(c->pdev, cp, bufsize, DMA_FROM_DEVICE); + return(0); +} + +static void +cciss_scsi_interpret_error(CommandList_struct *cp) +{ + ErrorInfo_struct *ei; + + ei = cp->err_info; + switch(ei->CommandStatus) + { + case CMD_TARGET_STATUS: + printk(KERN_WARNING "cciss: cmd %p has " + "completed with errors\n", cp); + printk(KERN_WARNING "cciss: cmd %p " + "has SCSI Status = %x\n", + cp, + ei->ScsiStatus); + if (ei->ScsiStatus == 0) + printk(KERN_WARNING + "cciss:SCSI status is abnormally zero. " + "(probably indicates selection timeout " + "reported incorrectly due to a known " + "firmware bug, circa July, 2001.)\n"); + break; + case CMD_DATA_UNDERRUN: /* let mid layer handle it. 
+ */
+			printk("UNDERRUN\n");
+		break;
+		case CMD_DATA_OVERRUN:
+			printk(KERN_WARNING "cciss: cp %p has"
+				" completed with data overrun "
+				"reported\n", cp);
+		break;
+		case CMD_INVALID: {
+			/* controller unfortunately reports SCSI passthru's */
+			/* to non-existent targets as invalid commands. */
+			printk(KERN_WARNING "cciss: cp %p is "
+				"reported invalid (probably means "
+				"target device no longer present)\n",
+				cp);
+			/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
+			print_cmd(cp);  */
+			}
+		break;
+		case CMD_PROTOCOL_ERR:
+			printk(KERN_WARNING "cciss: cp %p has "
+				"protocol error\n", cp);
+		break;
+		case CMD_HARDWARE_ERR:
+			/* cmd->result = DID_ERROR << 16; */
+			printk(KERN_WARNING "cciss: cp %p had "
+				"hardware error\n", cp);
+		break;
+		case CMD_CONNECTION_LOST:
+			printk(KERN_WARNING "cciss: cp %p had "
+				"connection lost\n", cp);
+		break;
+		case CMD_ABORTED:
+			printk(KERN_WARNING "cciss: cp %p was "
+				"aborted\n", cp);
+		break;
+		case CMD_ABORT_FAILED:
+			printk(KERN_WARNING "cciss: cp %p reports "
+				"abort failed\n", cp);
+		break;
+		case CMD_UNSOLICITED_ABORT:
+			printk(KERN_WARNING "cciss: cp %p aborted "
+				"due to an unsolicited abort\n", cp);
+		break;
+		case CMD_TIMEOUT:
+			printk(KERN_WARNING "cciss: cp %p timed out\n",
+				cp);
+		break;
+		default:
+			printk(KERN_WARNING "cciss: cp %p returned "
+				"unknown status %x\n", cp,
+					ei->CommandStatus);
+	}
+}
+
+static int
+cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
+	unsigned char page, unsigned char *buf,
+	unsigned char bufsize)
+{
+	int rc;
+	CommandList_struct *cp;
+	char cdb[6];
+	ErrorInfo_struct *ei;
+	unsigned long flags;
+
+	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+	cp = scsi_cmd_alloc(c);
+	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+
+	if (cp == NULL) {			/* trouble... */
+		printk("cmd_alloc returned NULL!\n");
+		return -1;
+	}
+
+	ei = cp->err_info;
+
+	cdb[0] = CISS_INQUIRY;
+	cdb[1] = (page != 0);
+	cdb[2] = page;
+	cdb[3] = 0;
+	cdb[4] = bufsize;
+	cdb[5] = 0;
+	rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, cdb,
+				6, buf, bufsize, XFER_READ);
+
+	if (rc != 0) return rc; /* something went wrong */
+
+	if (ei->CommandStatus != 0 &&
+	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
+		cciss_scsi_interpret_error(cp);
+		rc = -1;
+	}
+	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+	scsi_cmd_free(c, cp);
+	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+	return rc;
+}
+
+/* Get the device id from inquiry page 0x83 */
+static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
+	unsigned char *device_id, int buflen)
+{
+	int rc;
+	unsigned char *buf;
+
+	if (buflen > 16)
+		buflen = 16;
+	buf = kzalloc(64, GFP_KERNEL);
+	if (!buf)
+		return -1;
+	rc = cciss_scsi_do_inquiry(c, scsi3addr, 0x83, buf, 64);
+	if (rc == 0)
+		memcpy(device_id, &buf[8], buflen);
+	kfree(buf);
+	return rc != 0;
+}
+
+static int
+cciss_scsi_do_report_phys_luns(ctlr_info_t *c,
+		ReportLunData_struct *buf, int bufsize)
+{
+	int rc;
+	CommandList_struct *cp;
+	unsigned char cdb[12];
+	unsigned char scsi3addr[8];
+	ErrorInfo_struct *ei;
+	unsigned long flags;
+
+	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+	cp = scsi_cmd_alloc(c);
+	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+	if (cp == NULL) {			/* trouble... 
+ */
+		printk("cmd_alloc returned NULL!\n");
+		return -1;
+	}
+
+	memset(&scsi3addr[0], 0, 8); /* address the controller */
+	cdb[0] = CISS_REPORT_PHYS;
+	cdb[1] = 0;
+	cdb[2] = 0;
+	cdb[3] = 0;
+	cdb[4] = 0;
+	cdb[5] = 0;
+	cdb[6] = (bufsize >> 24) & 0xFF;  //MSB
+	cdb[7] = (bufsize >> 16) & 0xFF;
+	cdb[8] = (bufsize >> 8) & 0xFF;
+	cdb[9] = bufsize & 0xFF;
+	cdb[10] = 0;
+	cdb[11] = 0;
+
+	rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr,
+				cdb, 12,
+				(unsigned char *) buf,
+				bufsize, XFER_READ);
+
+	if (rc != 0) return rc; /* something went wrong */
+
+	ei = cp->err_info;
+	if (ei->CommandStatus != 0 &&
+	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
+		cciss_scsi_interpret_error(cp);
+		rc = -1;
+	}
+	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+	scsi_cmd_free(c, cp);
+	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+	return rc;
+}
+
+static void
+cciss_update_non_disk_devices(int cntl_num, int hostno)
+{
+	/* the idea here is we could get notified from /proc
+	   that some devices have changed, so we do a report
+	   physical luns cmd, and adjust our list of devices
+	   accordingly.  (We can't rely on the scsi mid-layer just
+	   doing inquiries, because the "busses" that the scsi
+	   mid-layer probes are totally fabricated by this driver,
+	   so new devices wouldn't show up.)
+
+	   the scsi3addr's of devices won't change so long as the
+	   adapter is not reset.  That means we can rescan and
+	   tell which devices we already know about, vs. new
+	   devices, vs. disappearing devices.
+
+	   Also, if you yank out a tape drive, then put in a disk
+	   in its place, (say, a configured volume from another
+	   array controller for instance) _don't_ poke this driver
+	   (so it thinks it's still a tape), but _do_ poke the scsi
+	   mid layer, so it does an inquiry... the scsi mid layer
+	   will see the physical disk.  This would be bad.  Need to
+	   think about how to prevent that.  One idea would be to
+	   snoop all scsi responses and if an inquiry response comes
+	   back that reports a disk, chuck it and return selection
+	   timeout instead and adjust our table...  Not sure I like
+	   that though.
+
+	*/
+#define OBDR_TAPE_INQ_SIZE 49
+#define OBDR_TAPE_SIG "$DR-10"
+	ReportLunData_struct *ld_buff;
+	unsigned char *inq_buff;
+	unsigned char scsi3addr[8];
+	ctlr_info_t *c;
+	__u32 num_luns=0;
+	unsigned char *ch;
+	struct cciss_scsi_dev_t *currentsd, *this_device;
+	int ncurrent=0;
+	int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
+	int i;
+
+	c = (ctlr_info_t *) hba[cntl_num];
+	ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
+	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
+	currentsd = kzalloc(sizeof(*currentsd) *
+			(CCISS_MAX_SCSI_DEVS_PER_HBA+1), GFP_KERNEL);
+	if (ld_buff == NULL || inq_buff == NULL || currentsd == NULL) {
+		printk(KERN_ERR "cciss: out of memory\n");
+		goto out;
+	}
+	this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
+	if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) {
+		ch = &ld_buff->LUNListLength[0];
+		num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
+		if (num_luns > CISS_MAX_PHYS_LUN) {
+			printk(KERN_WARNING
+				"cciss: Maximum physical LUNs (%d) exceeded. 
" + "%d LUNs ignored.\n", CISS_MAX_PHYS_LUN, + num_luns - CISS_MAX_PHYS_LUN); + num_luns = CISS_MAX_PHYS_LUN; + } + } + else { + printk(KERN_ERR "cciss: Report physical LUNs failed.\n"); + goto out; + } + + + /* adjust our table of devices */ + for (i = 0; i < num_luns; i++) { + /* for each physical lun, do an inquiry */ + if (ld_buff->LUN[i][3] & 0xC0) continue; + memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE); + memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8); + + if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, 0, inq_buff, + (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) + /* Inquiry failed (msg printed already) */ + continue; /* so we will skip this device. */ + + this_device->devtype = (inq_buff[0] & 0x1f); + this_device->bus = -1; + this_device->target = -1; + this_device->lun = -1; + memcpy(this_device->scsi3addr, scsi3addr, 8); + memcpy(this_device->vendor, &inq_buff[8], + sizeof(this_device->vendor)); + memcpy(this_device->model, &inq_buff[16], + sizeof(this_device->model)); + memcpy(this_device->revision, &inq_buff[32], + sizeof(this_device->revision)); + memset(this_device->device_id, 0, + sizeof(this_device->device_id)); + cciss_scsi_get_device_id(hba[cntl_num], scsi3addr, + this_device->device_id, sizeof(this_device->device_id)); + + switch (this_device->devtype) + { + case 0x05: /* CD-ROM */ { + + /* We don't *really* support actual CD-ROM devices, + * just this "One Button Disaster Recovery" tape drive + * which temporarily pretends to be a CD-ROM drive. + * So we check that the device is really an OBDR tape + * device by checking for "$DR-10" in bytes 43-48 of + * the inquiry data. + */ + char obdr_sig[7]; + + strncpy(obdr_sig, &inq_buff[43], 6); + obdr_sig[6] = '\0'; + if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) + /* Not OBDR device, ignore it. */ + break; + } + /* fall through . . . */ + case 0x01: /* sequential access, (tape) */ + case 0x08: /* medium changer */ + if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) { + printk(KERN_INFO "cciss%d: %s ignored, " + "too many devices.\n", cntl_num, + scsi_device_type(this_device->devtype)); + break; + } + currentsd[ncurrent] = *this_device; + ncurrent++; + break; + default: + break; + } + } + + adjust_cciss_scsi_table(cntl_num, hostno, currentsd, ncurrent); +out: + kfree(inq_buff); + kfree(ld_buff); + kfree(currentsd); + return; +} + +static int +is_keyword(char *ptr, int len, char *verb) // Thanks to ncr53c8xx.c +{ + int verb_len = strlen(verb); + if (len >= verb_len && !memcmp(verb,ptr,verb_len)) + return verb_len; + else + return 0; +} + +static int +cciss_scsi_user_command(int ctlr, int hostno, char *buffer, int length) +{ + int arg_len; + + if ((arg_len = is_keyword(buffer, length, "rescan")) != 0) + cciss_update_non_disk_devices(ctlr, hostno); + else + return -EINVAL; + return length; +} + + +static int +cciss_scsi_proc_info(struct Scsi_Host *sh, + char *buffer, /* data buffer */ + char **start, /* where data in buffer starts */ + off_t offset, /* offset from start of imaginary file */ + int length, /* length of data in buffer */ + int func) /* 0 == read, 1 == write */ +{ + + int buflen, datalen; + ctlr_info_t *ci; + int i; + int cntl_num; + + + ci = (ctlr_info_t *) sh->hostdata[0]; + if (ci == NULL) /* This really shouldn't ever happen. 
*/ + return -EINVAL; + + cntl_num = ci->ctlr; /* Get our index into the hba[] array */ + + if (func == 0) { /* User is reading from /proc/scsi/ciss*?/?* */ + buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n", + cntl_num, sh->host_no); + + /* this information is needed by apps to know which cciss + device corresponds to which scsi host number without + having to open a scsi target device node. The device + information is not a duplicate of /proc/scsi/scsi because + the two may be out of sync due to scsi hotplug, rather + this info is for an app to be able to use to know how to + get them back in sync. */ + + for (i=0;i<ccissscsi[cntl_num].ndevices;i++) { + struct cciss_scsi_dev_t *sd = &ccissscsi[cntl_num].dev[i]; + buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d " + "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + sh->host_no, sd->bus, sd->target, sd->lun, + sd->devtype, + sd->scsi3addr[0], sd->scsi3addr[1], + sd->scsi3addr[2], sd->scsi3addr[3], + sd->scsi3addr[4], sd->scsi3addr[5], + sd->scsi3addr[6], sd->scsi3addr[7]); + } + datalen = buflen - offset; + if (datalen < 0) { /* they're reading past EOF. */ + datalen = 0; + *start = buffer+buflen; + } else + *start = buffer + offset; + return(datalen); + } else /* User is writing to /proc/scsi/cciss*?/?* ... */ + return cciss_scsi_user_command(cntl_num, sh->host_no, + buffer, length); +} + +/* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci + dma mapping and fills in the scatter gather entries of the + cciss command, cp. */ + +static void +cciss_scatter_gather(struct pci_dev *pdev, + CommandList_struct *cp, + struct scsi_cmnd *cmd) +{ + unsigned int len; + struct scatterlist *sg; + __u64 addr64; + int use_sg, i; + + BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES); + + use_sg = scsi_dma_map(cmd); + if (use_sg) { /* not too many addrs? */ + scsi_for_each_sg(cmd, sg, use_sg, i) { + addr64 = (__u64) sg_dma_address(sg); + len = sg_dma_len(sg); + cp->SG[i].Addr.lower = + (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); + cp->SG[i].Addr.upper = + (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); + cp->SG[i].Len = len; + cp->SG[i].Ext = 0; // we are not chaining + } + } + + cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */ + cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */ + return; +} + + +static int +cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) +{ + ctlr_info_t **c; + int ctlr, rc; + unsigned char scsi3addr[8]; + CommandList_struct *cp; + unsigned long flags; + + // Get the ptr to our adapter structure (hba[i]) out of cmd->host. + // We violate cmd->host privacy here. (Is there another way?) + c = (ctlr_info_t **) &cmd->device->host->hostdata[0]; + ctlr = (*c)->ctlr; + + rc = lookup_scsi3addr(ctlr, cmd->device->channel, cmd->device->id, + cmd->device->lun, scsi3addr); + if (rc != 0) { + /* the scsi nexus does not match any that we presented... */ + /* pretend to mid layer that we got selection timeout */ + cmd->result = DID_NO_CONNECT << 16; + done(cmd); + /* we might want to think about registering controller itself + as a processor device on the bus so sg binds to it. */ + return 0; + } + + /* printk("cciss_queue_command, p=%p, cmd=0x%02x, c%db%dt%dl%d\n", + cmd, cmd->cmnd[0], ctlr, cmd->channel, cmd->target, cmd->lun);*/ + // printk("q:%p:c%db%dt%dl%d ", cmd, ctlr, cmd->channel, + // cmd->target, cmd->lun); + + /* Ok, we have a reasonable scsi nexus, so send the cmd down, and + see what the device thinks of it. 
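+	   The command's bus address will double as its completion tag
+	   (Header.Tag below), which is how the completion path matches
+	   the hardware's reply back to this cp.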
*/ + + spin_lock_irqsave(CCISS_LOCK(ctlr), flags); + cp = scsi_cmd_alloc(*c); + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + if (cp == NULL) { /* trouble... */ + printk("scsi_cmd_alloc returned NULL!\n"); + /* FIXME: next 3 lines are -> BAD! <- */ + cmd->result = DID_NO_CONNECT << 16; + done(cmd); + return 0; + } + + // Fill in the command list header + + cmd->scsi_done = done; // save this for use by completion code + + // save cp in case we have to abort it + cmd->host_scribble = (unsigned char *) cp; + + cp->cmd_type = CMD_SCSI; + cp->scsi_cmd = cmd; + cp->Header.ReplyQueue = 0; // unused in simple mode + memcpy(&cp->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); + cp->Header.Tag.lower = cp->busaddr; // Use k. address of cmd as tag + + // Fill in the request block... + + cp->Request.Timeout = 0; + memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB)); + BUG_ON(cmd->cmd_len > sizeof(cp->Request.CDB)); + cp->Request.CDBLen = cmd->cmd_len; + memcpy(cp->Request.CDB, cmd->cmnd, cmd->cmd_len); + cp->Request.Type.Type = TYPE_CMD; + cp->Request.Type.Attribute = ATTR_SIMPLE; + switch(cmd->sc_data_direction) + { + case DMA_TO_DEVICE: cp->Request.Type.Direction = XFER_WRITE; break; + case DMA_FROM_DEVICE: cp->Request.Type.Direction = XFER_READ; break; + case DMA_NONE: cp->Request.Type.Direction = XFER_NONE; break; + case DMA_BIDIRECTIONAL: + // This can happen if a buggy application does a scsi passthru + // and sets both inlen and outlen to non-zero. ( see + // ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) + + cp->Request.Type.Direction = XFER_RSVD; + // This is technically wrong, and cciss controllers should + // reject it with CMD_INVALID, which is the most correct + // response, but non-fibre backends appear to let it + // slide by, and give the same results as if this field + // were set correctly. Either way is acceptable for + // our purposes here. + + break; + + default: + printk("cciss: unknown data direction: %d\n", + cmd->sc_data_direction); + BUG(); + break; + } + + cciss_scatter_gather((*c)->pdev, cp, cmd); // Fill the SG list + + /* Put the request on the tail of the request queue */ + + spin_lock_irqsave(CCISS_LOCK(ctlr), flags); + addQ(&(*c)->reqQ, cp); + (*c)->Qdepth++; + start_io(*c); + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + + /* the cmd'll come back via intr handler in complete_scsi_command() */ + return 0; +} + +static void +cciss_unregister_scsi(int ctlr) +{ + struct cciss_scsi_adapter_data_t *sa; + struct cciss_scsi_cmd_stack_t *stk; + unsigned long flags; + + /* we are being forcibly unloaded, and may not refuse. 
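+	   Note that the lock is dropped around scsi_remove_host() below,
+	   because that call can sleep and must not be made while holding
+	   a spinlock.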
*/ + + spin_lock_irqsave(CCISS_LOCK(ctlr), flags); + sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr; + stk = &sa->cmd_stack; + + /* if we weren't ever actually registered, don't unregister */ + if (sa->registered) { + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + scsi_remove_host(sa->scsi_host); + scsi_host_put(sa->scsi_host); + spin_lock_irqsave(CCISS_LOCK(ctlr), flags); + } + + /* set scsi_host to NULL so our detect routine will + find us on register */ + sa->scsi_host = NULL; + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + scsi_cmd_stack_free(ctlr); + kfree(sa); +} + +static int +cciss_engage_scsi(int ctlr) +{ + struct cciss_scsi_adapter_data_t *sa; + struct cciss_scsi_cmd_stack_t *stk; + unsigned long flags; + + spin_lock_irqsave(CCISS_LOCK(ctlr), flags); + sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr; + stk = &sa->cmd_stack; + + if (sa->registered) { + printk("cciss%d: SCSI subsystem already engaged.\n", ctlr); + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + return ENXIO; + } + sa->registered = 1; + spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); + cciss_update_non_disk_devices(ctlr, -1); + cciss_scsi_detect(ctlr); + return 0; +} + +static void +cciss_seq_tape_report(struct seq_file *seq, int ctlr) +{ + unsigned long flags; + + CPQ_TAPE_LOCK(ctlr, flags); + seq_printf(seq, + "Sequential access devices: %d\n\n", + ccissscsi[ctlr].ndevices); + CPQ_TAPE_UNLOCK(ctlr, flags); +} + + +/* Need at least one of these error handlers to keep ../scsi/hosts.c from + * complaining. Doing a host- or bus-reset can't do anything good here. + * Despite what it might say in scsi_error.c, there may well be commands + * on the controller, as the cciss driver registers twice, once as a block + * device for the logical drives, and once as a scsi device, for any tape + * drives. So we know there are no commands out on the tape drives, but we + * don't know there are no commands on the controller, and it is likely + * that there probably are, as the cciss block device is most commonly used + * as a boot device (embedded controller on HP/Compaq systems.) +*/ + +static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd) +{ + int rc; + CommandList_struct *cmd_in_trouble; + ctlr_info_t **c; + int ctlr; + + /* find the controller to which the command to be aborted was sent */ + c = (ctlr_info_t **) &scsicmd->device->host->hostdata[0]; + if (c == NULL) /* paranoia */ + return FAILED; + ctlr = (*c)->ctlr; + printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr); + + /* find the command that's giving us trouble */ + cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble; + if (cmd_in_trouble == NULL) { /* paranoia */ + return FAILED; + } + /* send a reset to the SCSI LUN which the command was sent to */ + rc = sendcmd(CCISS_RESET_MSG, ctlr, NULL, 0, 2, 0, 0, + (unsigned char *) &cmd_in_trouble->Header.LUN.LunAddrBytes[0], + TYPE_MSG); + /* sendcmd turned off interrupts on the board, turn 'em back on. 
+ */
+	(*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
+	if (rc == 0)
+		return SUCCESS;
+	printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr);
+	return FAILED;
+}
+
+static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
+{
+	int rc;
+	CommandList_struct *cmd_to_abort;
+	ctlr_info_t **c;
+	int ctlr;
+
+	/* find the controller to which the command to be aborted was sent */
+	c = (ctlr_info_t **) &scsicmd->device->host->hostdata[0];
+	if (c == NULL) /* paranoia */
+		return FAILED;
+	ctlr = (*c)->ctlr;
+	printk(KERN_WARNING "cciss%d: aborting tardy SCSI cmd\n", ctlr);
+
+	/* find the command to be aborted */
+	cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble;
+	if (cmd_to_abort == NULL) /* paranoia */
+		return FAILED;
+	rc = sendcmd(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag,
+		0, 2, 0, 0,
+		(unsigned char *) &cmd_to_abort->Header.LUN.LunAddrBytes[0],
+		TYPE_MSG);
+	/* sendcmd turned off interrupts on the board, turn 'em back on. */
+	(*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
+	if (rc == 0)
+		return SUCCESS;
+	return FAILED;
+
+}
+
+#else /* no CONFIG_CISS_SCSI_TAPE */
+
+/* If no tape support, then these become defined out of existence */
+
+#define cciss_scsi_setup(cntl_num)
+
+#endif /* CONFIG_CISS_SCSI_TAPE */
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h
new file mode 100644
index 0000000..7b75024
--- /dev/null
+++ b/drivers/block/cciss_scsi.h
@@ -0,0 +1,83 @@
+/*
+ * Disk Array driver for HP Smart Array controllers, SCSI Tape module.
+ * (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA
+ * 02111-1307, USA.
+ *
+ * Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+#ifdef CONFIG_CISS_SCSI_TAPE
+#ifndef _CCISS_SCSI_H_
+#define _CCISS_SCSI_H_
+
+#include <scsi/scsicam.h> /* possibly irrelevant, since we don't show disks */
+
+	// the scsi id of the adapter...
+#define SELF_SCSI_ID 15
+	// 15 is somewhat arbitrary, since the scsi-2 bus
+	// that's presented by the driver to the OS is
+	// fabricated.  The "real" scsi-3 bus the
+	// hardware presents is fabricated too.
+	// The actual, honest-to-goodness physical
+	// bus that the devices are attached to is not
+	// addressable natively, and may in fact turn
+	// out to be not scsi at all.
+
+#define SCSI_CCISS_CAN_QUEUE 2
+
+/*
+
+Note, cmd_per_lun could give us some trouble, so I'm setting it very low.
+Likewise, SCSI_CCISS_CAN_QUEUE is set very conservatively.
+
+If the upper scsi layer tries to track how many commands we have
+outstanding, it will be operating under the misapprehension that it is
+the only one sending us requests.  We also have the block interface,
+which is where most requests must surely come from, so the upper layer's
+notion of how many requests we have outstanding will be wrong most or
+all of the time.
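+
+(With SCSI_CCISS_CAN_QUEUE = 2 and CCISS_MAX_SCSI_DEVS_PER_HBA = 16, the
+private command stack in cciss_scsi.c works out to 2 * 16 + 2 = 34
+elements, where the "+ 2" covers init-time usage, so even with every
+device maximally busy this interface cannot exhaust the pool.)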
+ +Note, the normal SCSI mid-layer error handling doesn't work well +for this driver because 1) it takes the io_request_lock before +calling error handlers and uses a local variable to store flags, +so the io_request_lock cannot be released and interrupts enabled +inside the error handlers, and, the error handlers cannot poll +for command completion because they might get commands from the +block half of the driver completing, and not know what to do +with them. That's what we get for making a hybrid scsi/block +driver, I suppose. + +*/ + +struct cciss_scsi_dev_t { + int devtype; + int bus, target, lun; /* as presented to the OS */ + unsigned char scsi3addr[8]; /* as presented to the HW */ + unsigned char device_id[16]; /* from inquiry pg. 0x83 */ + unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ + unsigned char model[16]; /* bytes 16-31 of inquiry data */ + unsigned char revision[4]; /* bytes 32-35 of inquiry data */ +}; + +struct cciss_scsi_hba_t { + char *name; + int ndevices; +#define CCISS_MAX_SCSI_DEVS_PER_HBA 16 + struct cciss_scsi_dev_t dev[CCISS_MAX_SCSI_DEVS_PER_HBA]; +}; + +#endif /* _CCISS_SCSI_H_ */ +#endif /* CONFIG_CISS_SCSI_TAPE */ diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c new file mode 100644 index 0000000..5d39df1 --- /dev/null +++ b/drivers/block/cpqarray.c @@ -0,0 +1,1808 @@ +/* + * Disk Array driver for Compaq SMART2 Controllers + * Copyright 1998 Compaq Computer Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Questions/Comments/Bugfixes to iss_storagedev@hp.com + * + */ +#include <linux/module.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/bio.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/major.h> +#include <linux/fs.h> +#include <linux/blkpg.h> +#include <linux/timer.h> +#include <linux/proc_fs.h> +#include <linux/init.h> +#include <linux/hdreg.h> +#include <linux/spinlock.h> +#include <linux/blkdev.h> +#include <linux/genhd.h> +#include <linux/scatterlist.h> +#include <asm/uaccess.h> +#include <asm/io.h> + + +#define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin)) + +#define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)" +#define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0) + +/* Embedded module documentation macros - see modules.h */ +/* Original author Chris Frantz - Compaq Computer Corporation */ +MODULE_AUTHOR("Compaq Computer Corporation"); +MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0"); +MODULE_LICENSE("GPL"); + +#include "cpqarray.h" +#include "ida_cmd.h" +#include "smart1,2.h" +#include "ida_ioctl.h" + +#define READ_AHEAD 128 +#define NR_CMDS 128 /* This could probably go as high as ~400 */ + +#define MAX_CTLR 8 +#define CTLR_SHIFT 8 + +#define CPQARRAY_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */ + +static int nr_ctlr; +static ctlr_info_t *hba[MAX_CTLR]; + +static int eisa[8]; + +#define NR_PRODUCTS ARRAY_SIZE(products) + +/* board_id = Subsystem Device ID & Vendor ID + * product = Marketing Name for the board + * access = Address of the struct of function pointers + */ +static struct board_type products[] = { + { 0x0040110E, "IDA", &smart1_access }, + { 0x0140110E, "IDA-2", &smart1_access }, + { 0x1040110E, "IAES", &smart1_access }, + { 0x2040110E, "SMART", &smart1_access }, + { 0x3040110E, "SMART-2/E", &smart2e_access }, + { 0x40300E11, "SMART-2/P", &smart2_access }, + { 0x40310E11, "SMART-2SL", &smart2_access }, + { 0x40320E11, "Smart Array 3200", &smart2_access }, + { 0x40330E11, "Smart Array 3100ES", &smart2_access }, + { 0x40340E11, "Smart Array 221", &smart2_access }, + { 0x40400E11, "Integrated Array", &smart4_access }, + { 0x40480E11, "Compaq Raid LC2", &smart4_access }, + { 0x40500E11, "Smart Array 4200", &smart4_access }, + { 0x40510E11, "Smart Array 4250ES", &smart4_access }, + { 0x40580E11, "Smart Array 431", &smart4_access }, +}; + +/* define the PCI info for the PCI cards this driver can control */ +static const struct pci_device_id cpqarray_pci_device_id[] = +{ + { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX, + 0x0E11, 0x4058, 0, 0, 0}, /* SA431 */ + { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX, + 0x0E11, 0x4051, 0, 0, 0}, /* SA4250ES */ + { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX, + 0x0E11, 0x4050, 0, 0, 0}, /* SA4200 */ + { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510, + 0x0E11, 0x4048, 0, 0, 0}, /* LC2 */ + { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510, + 0x0E11, 0x4040, 0, 0, 0}, /* Integrated Array */ + { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P, + 0x0E11, 0x4034, 0, 0, 0}, /* SA 221 */ + { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P, + 0x0E11, 0x4033, 0, 0, 0}, /* SA 3100ES*/ + { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P, + 0x0E11, 0x4032, 0, 0, 0}, /* SA 3200*/ + { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P, + 0x0E11, 0x4031, 0, 0, 0}, /* SA 2SL*/ + { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P, + 0x0E11, 0x4030, 0, 0, 0}, /* SA 2P */ + { 0 } +}; + 
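+/*
+ * board_id packs the PCI subsystem device ID into the high 16 bits and
+ * the subsystem vendor ID into the low 16 bits (e.g. 0x40580E11 above
+ * is subdevice 0x4058, subvendor 0x0E11, the Smart Array 431).  An
+ * illustrative sketch of mapping a probed board onto products[]
+ * (hypothetical helper, not compiled into the driver):
+ */
+#if 0
+static struct board_type *example_lookup_board(__u32 board_id)
+{
+	int i;
+
+	for (i = 0; i < NR_PRODUCTS; i++)
+		if (products[i].board_id == board_id)
+			return &products[i];
+	return NULL;	/* unrecognized board */
+}
+#endif
+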
+MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id); + +static struct gendisk *ida_gendisk[MAX_CTLR][NWD]; + +/* Debug... */ +#define DBG(s) do { s } while(0) +/* Debug (general info)... */ +#define DBGINFO(s) do { } while(0) +/* Debug Paranoid... */ +#define DBGP(s) do { } while(0) +/* Debug Extra Paranoid... */ +#define DBGPX(s) do { } while(0) + +static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev); +static void __iomem *remap_pci_mem(ulong base, ulong size); +static int cpqarray_eisa_detect(void); +static int pollcomplete(int ctlr); +static void getgeometry(int ctlr); +static void start_fwbk(int ctlr); + +static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool); +static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool); + +static void free_hba(int i); +static int alloc_cpqarray_hba(void); + +static int sendcmd( + __u8 cmd, + int ctlr, + void *buff, + size_t size, + unsigned int blk, + unsigned int blkcnt, + unsigned int log_unit ); + +static int ida_open(struct block_device *bdev, fmode_t mode); +static int ida_release(struct gendisk *disk, fmode_t mode); +static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); +static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo); +static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io); + +static void do_ida_request(struct request_queue *q); +static void start_io(ctlr_info_t *h); + +static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c); +static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c); +static inline void complete_command(cmdlist_t *cmd, int timeout); + +static irqreturn_t do_ida_intr(int irq, void *dev_id); +static void ida_timer(unsigned long tdata); +static int ida_revalidate(struct gendisk *disk); +static int revalidate_allvol(ctlr_info_t *host); +static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev); + +#ifdef CONFIG_PROC_FS +static void ida_procinit(int i); +static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data); +#else +static void ida_procinit(int i) {} +#endif + +static inline drv_info_t *get_drv(struct gendisk *disk) +{ + return disk->private_data; +} + +static inline ctlr_info_t *get_host(struct gendisk *disk) +{ + return disk->queue->queuedata; +} + + +static struct block_device_operations ida_fops = { + .owner = THIS_MODULE, + .open = ida_open, + .release = ida_release, + .locked_ioctl = ida_ioctl, + .getgeo = ida_getgeo, + .revalidate_disk= ida_revalidate, +}; + + +#ifdef CONFIG_PROC_FS + +static struct proc_dir_entry *proc_array; + +/* + * Get us a file in /proc/array that says something about each controller. + * Create /proc/array if it doesn't exist yet. + */ +static void __init ida_procinit(int i) +{ + if (proc_array == NULL) { + proc_array = proc_mkdir("driver/cpqarray", NULL); + if (!proc_array) return; + } + + create_proc_read_entry(hba[i]->devname, 0, proc_array, + ida_proc_get_info, hba[i]); +} + +/* + * Report information about this controller. 
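+ *
+ * The create_proc_read_entry() callback convention used here predates
+ * seq_file: the handler sprintf()s into buffer, sets *eof once all the
+ * data has been produced, points *start at buffer+offset, and returns
+ * how many bytes past offset are valid, clamped to length.  A minimal
+ * sketch of the same convention (hypothetical handler, not part of this
+ * driver):
+ *
+ *	static int demo_read_proc(char *buf, char **start, off_t off,
+ *				  int count, int *eof, void *data)
+ *	{
+ *		int len = sprintf(buf, "demo: %s\n", (char *)data);
+ *		*eof = 1;
+ *		*start = buf + off;
+ *		len -= off;
+ *		if (len > count)
+ *			len = count;
+ *		return len < 0 ? 0 : len;
+ *	}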
+ */ +static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) +{ + off_t pos = 0; + off_t len = 0; + int size, i, ctlr; + ctlr_info_t *h = (ctlr_info_t*)data; + drv_info_t *drv; +#ifdef CPQ_PROC_PRINT_QUEUES + cmdlist_t *c; + unsigned long flags; +#endif + + ctlr = h->ctlr; + size = sprintf(buffer, "%s: Compaq %s Controller\n" + " Board ID: 0x%08lx\n" + " Firmware Revision: %c%c%c%c\n" + " Controller Sig: 0x%08lx\n" + " Memory Address: 0x%08lx\n" + " I/O Port: 0x%04x\n" + " IRQ: %d\n" + " Logical drives: %d\n" + " Physical drives: %d\n\n" + " Current Q depth: %d\n" + " Max Q depth since init: %d\n\n", + h->devname, + h->product_name, + (unsigned long)h->board_id, + h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3], + (unsigned long)h->ctlr_sig, (unsigned long)h->vaddr, + (unsigned int) h->io_mem_addr, (unsigned int)h->intr, + h->log_drives, h->phys_drives, + h->Qdepth, h->maxQsinceinit); + + pos += size; len += size; + + size = sprintf(buffer+len, "Logical Drive Info:\n"); + pos += size; len += size; + + for(i=0; i<h->log_drives; i++) { + drv = &h->drv[i]; + size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n", + ctlr, i, drv->blk_size, drv->nr_blks); + pos += size; len += size; + } + +#ifdef CPQ_PROC_PRINT_QUEUES + spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); + size = sprintf(buffer+len, "\nCurrent Queues:\n"); + pos += size; len += size; + + c = h->reqQ; + size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size; + if (c) c=c->next; + while(c && c != h->reqQ) { + size = sprintf(buffer+len, "->%p", c); + pos += size; len += size; + c=c->next; + } + + c = h->cmpQ; + size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size; + if (c) c=c->next; + while(c && c != h->cmpQ) { + size = sprintf(buffer+len, "->%p", c); + pos += size; len += size; + c=c->next; + } + + size = sprintf(buffer+len, "\n"); pos += size; len += size; + spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); +#endif + size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n", + h->nr_allocs, h->nr_frees); + pos += size; len += size; + + *eof = 1; + *start = buffer+offset; + len -= offset; + if (len>length) + len = length; + return len; +} +#endif /* CONFIG_PROC_FS */ + +module_param_array(eisa, int, NULL, 0); + +static void release_io_mem(ctlr_info_t *c) +{ + /* if IO mem was not protected do nothing */ + if( c->io_mem_addr == 0) + return; + release_region(c->io_mem_addr, c->io_mem_length); + c->io_mem_addr = 0; + c->io_mem_length = 0; +} + +static void __devexit cpqarray_remove_one(int i) +{ + int j; + char buff[4]; + + /* sendcmd will turn off interrupt, and send the flush... 
+ * to write all data in the battery backed cache out to disk. No data is
+ * returned, but we don't want to pass sendcmd a NULL buffer. */
+	if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
+	{
+		printk(KERN_WARNING "Unable to flush cache on controller %d\n",
+				i);
+	}
+	free_irq(hba[i]->intr, hba[i]);
+	iounmap(hba[i]->vaddr);
+	unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
+	del_timer(&hba[i]->timer);
+	remove_proc_entry(hba[i]->devname, proc_array);
+	pci_free_consistent(hba[i]->pci_dev,
+			NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
+			hba[i]->cmd_pool_dhandle);
+	kfree(hba[i]->cmd_pool_bits);
+	for(j = 0; j < NWD; j++) {
+		if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
+			del_gendisk(ida_gendisk[i][j]);
+		put_disk(ida_gendisk[i][j]);
+	}
+	blk_cleanup_queue(hba[i]->queue);
+	release_io_mem(hba[i]);
+	free_hba(i);
+}
+
+static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
+{
+	int i;
+	ctlr_info_t *tmp_ptr;
+
+	if (pci_get_drvdata(pdev) == NULL) {
+		printk(KERN_ERR "cpqarray: Unable to remove device\n");
+		return;
+	}
+
+	tmp_ptr = pci_get_drvdata(pdev);
+	i = tmp_ptr->ctlr;
+	if (hba[i] == NULL) {
+		printk(KERN_ERR "cpqarray: controller %d appears to have "
+			"already been removed\n", i);
+		return;
+	}
+	pci_set_drvdata(pdev, NULL);
+
+	cpqarray_remove_one(i);
+}
+
+/* removing an instance that was not removed automatically.
+ * It must be an EISA card.
+ */
+static void __devexit cpqarray_remove_one_eisa (int i)
+{
+	if (hba[i] == NULL) {
+		printk(KERN_ERR "cpqarray: controller %d appears to have "
+			"already been removed\n", i);
+		return;
+	}
+	cpqarray_remove_one(i);
+}
+
+/* pdev is NULL for eisa */
+static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
+{
+	struct request_queue *q;
+	int j;
+
+	/*
+	 * register block devices
+	 * Find disks and fill in structs
+	 * Get an interrupt, set the Q depth and get into /proc
+	 */
+
+	/* If this is successful it should ensure that we are the only */
+	/* instance of the driver */
+	if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
+		goto Enomem4;
+	}
+	hba[i]->access.set_intr_mask(hba[i], 0);
+	if (request_irq(hba[i]->intr, do_ida_intr,
+		IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
+	{
+		printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
+				hba[i]->intr, hba[i]->devname);
+		goto Enomem3;
+	}
+
+	for (j=0; j<NWD; j++) {
+		ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
+		if (!ida_gendisk[i][j])
+			goto Enomem2;
+	}
+
+	hba[i]->cmd_pool = pci_alloc_consistent(
+		hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
+		&(hba[i]->cmd_pool_dhandle));
+	hba[i]->cmd_pool_bits = kcalloc(
+		DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
+		GFP_KERNEL);
+
+	if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
+		goto Enomem1;
+
+	memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
+	printk(KERN_INFO "cpqarray: Finding drives on %s",
+		hba[i]->devname);
+
+	spin_lock_init(&hba[i]->lock);
+	q = blk_init_queue(do_ida_request, &hba[i]->lock);
+	if (!q)
+		goto Enomem1;
+
+	hba[i]->queue = q;
+	q->queuedata = hba[i];
+
+	getgeometry(i);
+	start_fwbk(i);
+
+	ida_procinit(i);
+
+	if (pdev)
+		blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
+
+	/* This is a hardware imposed limit. */
+	blk_queue_max_hw_segments(q, SG_MAX);
+
+	/* This is a driver limit and could be eliminated. 
*/ + blk_queue_max_phys_segments(q, SG_MAX); + + init_timer(&hba[i]->timer); + hba[i]->timer.expires = jiffies + IDA_TIMER; + hba[i]->timer.data = (unsigned long)hba[i]; + hba[i]->timer.function = ida_timer; + add_timer(&hba[i]->timer); + + /* Enable IRQ now that spinlock and rate limit timer are set up */ + hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY); + + for(j=0; j<NWD; j++) { + struct gendisk *disk = ida_gendisk[i][j]; + drv_info_t *drv = &hba[i]->drv[j]; + sprintf(disk->disk_name, "ida/c%dd%d", i, j); + disk->major = COMPAQ_SMART2_MAJOR + i; + disk->first_minor = j<<NWD_SHIFT; + disk->fops = &ida_fops; + if (j && !drv->nr_blks) + continue; + blk_queue_hardsect_size(hba[i]->queue, drv->blk_size); + set_capacity(disk, drv->nr_blks); + disk->queue = hba[i]->queue; + disk->private_data = drv; + add_disk(disk); + } + + /* done ! */ + return(i); + +Enomem1: + nr_ctlr = i; + kfree(hba[i]->cmd_pool_bits); + if (hba[i]->cmd_pool) + pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t), + hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); +Enomem2: + while (j--) { + put_disk(ida_gendisk[i][j]); + ida_gendisk[i][j] = NULL; + } + free_irq(hba[i]->intr, hba[i]); +Enomem3: + unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname); +Enomem4: + if (pdev) + pci_set_drvdata(pdev, NULL); + release_io_mem(hba[i]); + free_hba(i); + + printk( KERN_ERR "cpqarray: out of memory"); + + return -1; +} + +static int __init cpqarray_init_one( struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int i; + + printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at" + " bus %d dev %d func %d\n", + pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + i = alloc_cpqarray_hba(); + if( i < 0 ) + return (-1); + memset(hba[i], 0, sizeof(ctlr_info_t)); + sprintf(hba[i]->devname, "ida%d", i); + hba[i]->ctlr = i; + /* Initialize the pdev driver private data */ + pci_set_drvdata(pdev, hba[i]); + + if (cpqarray_pci_init(hba[i], pdev) != 0) { + pci_set_drvdata(pdev, NULL); + release_io_mem(hba[i]); + free_hba(i); + return -1; + } + + return (cpqarray_register_ctlr(i, pdev)); +} + +static struct pci_driver cpqarray_pci_driver = { + .name = "cpqarray", + .probe = cpqarray_init_one, + .remove = __devexit_p(cpqarray_remove_one_pci), + .id_table = cpqarray_pci_device_id, +}; + +/* + * This is it. Find all the controllers and register them. + * returns the number of block devices registered. + */ +static int __init cpqarray_init(void) +{ + int num_cntlrs_reg = 0; + int i; + int rc = 0; + + /* detect controllers */ + printk(DRIVER_NAME "\n"); + + rc = pci_register_driver(&cpqarray_pci_driver); + if (rc) + return rc; + cpqarray_eisa_detect(); + + for (i=0; i < MAX_CTLR; i++) { + if (hba[i] != NULL) + num_cntlrs_reg++; + } + + if (num_cntlrs_reg) + return 0; + else { + pci_unregister_driver(&cpqarray_pci_driver); + return -ENODEV; + } +} + +/* Function to find the first free pointer into our hba[] array */ +/* Returns -1 if no free entries are left. */ +static int alloc_cpqarray_hba(void) +{ + int i; + + for(i=0; i< MAX_CTLR; i++) { + if (hba[i] == NULL) { + hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL); + if(hba[i]==NULL) { + printk(KERN_ERR "cpqarray: out of memory.\n"); + return (-1); + } + return (i); + } + } + printk(KERN_WARNING "cpqarray: This driver supports a maximum" + " of 8 controllers.\n"); + return(-1); +} + +static void free_hba(int i) +{ + kfree(hba[i]); + hba[i]=NULL; +} + +/* + * Find the IO address of the controller, its IRQ and so forth. 
Fill + * in some basic stuff into the ctlr_info_t structure. + */ +static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev) +{ + ushort vendor_id, device_id, command; + unchar cache_line_size, latency_timer; + unchar irq, revision; + unsigned long addr[6]; + __u32 board_id; + + int i; + + c->pci_dev = pdev; + if (pci_enable_device(pdev)) { + printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n"); + return -1; + } + vendor_id = pdev->vendor; + device_id = pdev->device; + irq = pdev->irq; + + for(i=0; i<6; i++) + addr[i] = pci_resource_start(pdev, i); + + if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0) + { + printk(KERN_ERR "cpqarray: Unable to set DMA mask\n"); + return -1; + } + + pci_read_config_word(pdev, PCI_COMMAND, &command); + pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); + pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size); + pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer); + + pci_read_config_dword(pdev, 0x2c, &board_id); + + /* check to see if controller has been disabled */ + if(!(command & 0x02)) { + printk(KERN_WARNING + "cpqarray: controller appears to be disabled\n"); + return(-1); + } + +DBGINFO( + printk("vendor_id = %x\n", vendor_id); + printk("device_id = %x\n", device_id); + printk("command = %x\n", command); + for(i=0; i<6; i++) + printk("addr[%d] = %lx\n", i, addr[i]); + printk("revision = %x\n", revision); + printk("irq = %x\n", irq); + printk("cache_line_size = %x\n", cache_line_size); + printk("latency_timer = %x\n", latency_timer); + printk("board_id = %x\n", board_id); +); + + c->intr = irq; + + for(i=0; i<6; i++) { + if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) + { /* IO space */ + c->io_mem_addr = addr[i]; + c->io_mem_length = pci_resource_end(pdev, i) + - pci_resource_start(pdev, i) + 1; + if(!request_region( c->io_mem_addr, c->io_mem_length, + "cpqarray")) + { + printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length); + c->io_mem_addr = 0; + c->io_mem_length = 0; + } + break; + } + } + + c->paddr = 0; + for(i=0; i<6; i++) + if (!(pci_resource_flags(pdev, i) & + PCI_BASE_ADDRESS_SPACE_IO)) { + c->paddr = pci_resource_start (pdev, i); + break; + } + if (!c->paddr) + return -1; + c->vaddr = remap_pci_mem(c->paddr, 128); + if (!c->vaddr) + return -1; + c->board_id = board_id; + + for(i=0; i<NR_PRODUCTS; i++) { + if (board_id == products[i].board_id) { + c->product_name = products[i].product_name; + c->access = *(products[i].access); + break; + } + } + if (i == NR_PRODUCTS) { + printk(KERN_WARNING "cpqarray: Sorry, I don't know how" + " to access the SMART Array controller %08lx\n", + (unsigned long)board_id); + return -1; + } + + return 0; +} + +/* + * Map (physical) PCI mem into (virtual) kernel space + */ +static void __iomem *remap_pci_mem(ulong base, ulong size) +{ + ulong page_base = ((ulong) base) & PAGE_MASK; + ulong page_offs = ((ulong) base) - page_base; + void __iomem *page_remapped = ioremap(page_base, page_offs+size); + + return (page_remapped ? (page_remapped + page_offs) : NULL); +} + +#ifndef MODULE +/* + * Config string is a comma separated set of i/o addresses of EISA cards. + */ +static int cpqarray_setup(char *str) +{ + int i, ints[9]; + + (void)get_options(str, ARRAY_SIZE(ints), ints); + + for(i=0; i<ints[0] && i<8; i++) + eisa[i] = ints[i+1]; + return 1; +} + +__setup("smart2=", cpqarray_setup); + +#endif + +/* + * Find an EISA controller's signature. Set up an hba if we find it. 
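+ *
+ * EISA gives every slot a private I/O window at slot_number * 0x1000,
+ * with the four-byte board ID register at <base> + 0xC80 and the
+ * interrupt configuration register at <base> + 0xCC0, which is why the
+ * code below reads inl(eisa[i]+0xC80).  A sketch of what a blind scan of
+ * all slots could look like (hypothetical; this driver only probes the
+ * addresses supplied through the eisa=/smart2= parameters):
+ *
+ *	ulong base;
+ *	for (base = 0x1000; base < 0x10000; base += 0x1000)
+ *		if (inl(base + 0xC80) == wanted_board_id)
+ *			break;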
+ */ +static int __init cpqarray_eisa_detect(void) +{ + int i=0, j; + __u32 board_id; + int intr; + int ctlr; + int num_ctlr = 0; + + while(i<8 && eisa[i]) { + ctlr = alloc_cpqarray_hba(); + if(ctlr == -1) + break; + board_id = inl(eisa[i]+0xC80); + for(j=0; j < NR_PRODUCTS; j++) + if (board_id == products[j].board_id) + break; + + if (j == NR_PRODUCTS) { + printk(KERN_WARNING "cpqarray: Sorry, I don't know how" + " to access the SMART Array controller %08lx\n", (unsigned long)board_id); + continue; + } + + memset(hba[ctlr], 0, sizeof(ctlr_info_t)); + hba[ctlr]->io_mem_addr = eisa[i]; + hba[ctlr]->io_mem_length = 0x7FF; + if(!request_region(hba[ctlr]->io_mem_addr, + hba[ctlr]->io_mem_length, + "cpqarray")) + { + printk(KERN_WARNING "cpqarray: I/O range already in " + "use addr = %lx length = %ld\n", + hba[ctlr]->io_mem_addr, + hba[ctlr]->io_mem_length); + free_hba(ctlr); + continue; + } + + /* + * Read the config register to find our interrupt + */ + intr = inb(eisa[i]+0xCC0) >> 4; + if (intr & 1) intr = 11; + else if (intr & 2) intr = 10; + else if (intr & 4) intr = 14; + else if (intr & 8) intr = 15; + + hba[ctlr]->intr = intr; + sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr); + hba[ctlr]->product_name = products[j].product_name; + hba[ctlr]->access = *(products[j].access); + hba[ctlr]->ctlr = ctlr; + hba[ctlr]->board_id = board_id; + hba[ctlr]->pci_dev = NULL; /* not PCI */ + +DBGINFO( + printk("i = %d, j = %d\n", i, j); + printk("irq = %x\n", intr); + printk("product name = %s\n", products[j].product_name); + printk("board_id = %x\n", board_id); +); + + num_ctlr++; + i++; + + if (cpqarray_register_ctlr(ctlr, NULL) == -1) + printk(KERN_WARNING + "cpqarray: Can't register EISA controller %d\n", + ctlr); + + } + + return num_ctlr; +} + +/* + * Open. Make sure the device is really there. + */ +static int ida_open(struct block_device *bdev, fmode_t mode) +{ + drv_info_t *drv = get_drv(bdev->bd_disk); + ctlr_info_t *host = get_host(bdev->bd_disk); + + DBGINFO(printk("ida_open %s\n", bdev->bd_disk->disk_name)); + /* + * Root is allowed to open raw volume zero even if it's not configured + * so array config can still work. I don't think I really like this, + * but I'm already using way to many device nodes to claim another one + * for "raw controller". + */ + if (!drv->nr_blks) { + if (!capable(CAP_SYS_RAWIO)) + return -ENXIO; + if (!capable(CAP_SYS_ADMIN) && drv != host->drv) + return -ENXIO; + } + host->usage_count++; + return 0; +} + +/* + * Close. Sync first. + */ +static int ida_release(struct gendisk *disk, fmode_t mode) +{ + ctlr_info_t *host = get_host(disk); + host->usage_count--; + return 0; +} + +/* + * Enqueuing and dequeuing functions for cmdlists. + */ +static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c) +{ + if (*Qptr == NULL) { + *Qptr = c; + c->next = c->prev = c; + } else { + c->prev = (*Qptr)->prev; + c->next = (*Qptr); + (*Qptr)->prev->next = c; + (*Qptr)->prev = c; + } +} + +static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c) +{ + if (c && c->next != c) { + if (*Qptr == c) *Qptr = c->next; + c->prev->next = c->next; + c->next->prev = c->prev; + } else { + *Qptr = NULL; + } + return c; +} + +/* + * Get a request and submit it to the controller. + * This routine needs to grab all the requests it possibly can from the + * req Q and submit them. 
Interrupts are off (and need to be off) when you
+ * are in here (either via the dummy do_ida_request functions or by being
+ * called from the interrupt handler).
+ */
+static void do_ida_request(struct request_queue *q)
+{
+	ctlr_info_t *h = q->queuedata;
+	cmdlist_t *c;
+	struct request *creq;
+	struct scatterlist tmp_sg[SG_MAX];
+	int i, dir, seg;
+
+	if (blk_queue_plugged(q))
+		goto startio;
+
+queue_next:
+	creq = elv_next_request(q);
+	if (!creq)
+		goto startio;
+
+	BUG_ON(creq->nr_phys_segments > SG_MAX);
+
+	if ((c = cmd_alloc(h,1)) == NULL)
+		goto startio;
+
+	blkdev_dequeue_request(creq);
+
+	c->ctlr = h->ctlr;
+	c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
+	c->hdr.size = sizeof(rblk_t) >> 2;
+	c->size += sizeof(rblk_t);
+
+	c->req.hdr.blk = creq->sector;
+	c->rq = creq;
+DBGPX(
+	printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
+);
+	sg_init_table(tmp_sg, SG_MAX);
+	seg = blk_rq_map_sg(q, creq, tmp_sg);
+
+	/* Now do all the DMA Mappings */
+	if (rq_data_dir(creq) == READ)
+		dir = PCI_DMA_FROMDEVICE;
+	else
+		dir = PCI_DMA_TODEVICE;
+	for( i=0; i < seg; i++)
+	{
+		c->req.sg[i].size = tmp_sg[i].length;
+		c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
+						 sg_page(&tmp_sg[i]),
+						 tmp_sg[i].offset,
+						 tmp_sg[i].length, dir);
+	}
+DBGPX(	printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
+	c->req.hdr.sg_cnt = seg;
+	c->req.hdr.blk_cnt = creq->nr_sectors;
+	c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
+	c->type = CMD_RWREQ;
+
+	/* Put the request on the tail of the request queue */
+	addQ(&h->reqQ, c);
+	h->Qdepth++;
+	if (h->Qdepth > h->maxQsinceinit)
+		h->maxQsinceinit = h->Qdepth;
+
+	goto queue_next;
+
+startio:
+	start_io(h);
+}
+
+/*
+ * start_io submits everything on a controller's request queue
+ * and moves it to the completion queue. 
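+ *
+ * Taken together with do_ida_request() above and do_ida_intr() below,
+ * the life of a read/write command is (a sketch of the existing flow,
+ * not new code):
+ *
+ *	cmd_alloc(h, 1)			claim a slot in the command pool
+ *	addQ(&h->reqQ, c)		queued by do_ida_request()
+ *	h->access.submit_command()	handed to the board by start_io()
+ *	addQ(&h->cmpQ, c)		parked while the hardware works
+ *	    ...controller raises its interrupt...
+ *	h->access.command_completed()	returns the finished busaddr
+ *	removeQ(&h->cmpQ, c)		matched by busaddr in do_ida_intr()
+ *	complete_command(c, 0)		ends the struct request
+ *	cmd_free(h, c, 1)		slot returned to the pool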
+ * + * Interrupts had better be off if you're in here + */ +static void start_io(ctlr_info_t *h) +{ + cmdlist_t *c; + + while((c = h->reqQ) != NULL) { + /* Can't do anything if we're busy */ + if (h->access.fifo_full(h) == 0) + return; + + /* Get the first entry from the request Q */ + removeQ(&h->reqQ, c); + h->Qdepth--; + + /* Tell the controller to do our bidding */ + h->access.submit_command(h, c); + + /* Get onto the completion Q */ + addQ(&h->cmpQ, c); + } +} + +/* + * Mark all buffers that cmd was responsible for + */ +static inline void complete_command(cmdlist_t *cmd, int timeout) +{ + struct request *rq = cmd->rq; + int error = 0; + int i, ddir; + + if (cmd->req.hdr.rcode & RCODE_NONFATAL && + (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) { + printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n", + cmd->ctlr, cmd->hdr.unit); + hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN; + } + if (cmd->req.hdr.rcode & RCODE_FATAL) { + printk(KERN_WARNING "Fatal error on ida/c%dd%d\n", + cmd->ctlr, cmd->hdr.unit); + error = -EIO; + } + if (cmd->req.hdr.rcode & RCODE_INVREQ) { + printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n", + cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd, + cmd->req.hdr.blk, cmd->req.hdr.blk_cnt, + cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode); + error = -EIO; + } + if (timeout) + error = -EIO; + /* unmap the DMA mapping for all the scatter gather elements */ + if (cmd->req.hdr.cmd == IDA_READ) + ddir = PCI_DMA_FROMDEVICE; + else + ddir = PCI_DMA_TODEVICE; + for(i=0; i<cmd->req.hdr.sg_cnt; i++) + pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr, + cmd->req.sg[i].size, ddir); + + DBGPX(printk("Done with %p\n", rq);); + if (__blk_end_request(rq, error, blk_rq_bytes(rq))) + BUG(); +} + +/* + * The controller will interrupt us upon completion of commands. + * Find the command on the completion queue, remove it, tell the OS and + * try to queue up more IO + */ +static irqreturn_t do_ida_intr(int irq, void *dev_id) +{ + ctlr_info_t *h = dev_id; + cmdlist_t *c; + unsigned long istat; + unsigned long flags; + __u32 a,a1; + + istat = h->access.intr_pending(h); + /* Is this interrupt for us? */ + if (istat == 0) + return IRQ_NONE; + + /* + * If there are completed commands in the completion queue, + * we had better do something about it. + */ + spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); + if (istat & FIFO_NOT_EMPTY) { + while((a = h->access.command_completed(h))) { + a1 = a; a &= ~3; + if ((c = h->cmpQ) == NULL) + { + printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1); + continue; + } + while(c->busaddr != a) { + c = c->next; + if (c == h->cmpQ) + break; + } + /* + * If we've found the command, take it off the + * completion Q and free it + */ + if (c->busaddr == a) { + removeQ(&h->cmpQ, c); + /* Check for invalid command. + * Controller returns command error, + * But rcode = 0. + */ + + if((a1 & 0x03) && (c->req.hdr.rcode == 0)) + { + c->req.hdr.rcode = RCODE_INVREQ; + } + if (c->type == CMD_RWREQ) { + complete_command(c, 0); + cmd_free(h, c, 1); + } else if (c->type == CMD_IOCTL_PEND) { + c->type = CMD_IOCTL_DONE; + } + continue; + } + } + } + + /* + * See if we can queue up some more IO + */ + do_ida_request(h->queue); + spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); + return IRQ_HANDLED; +} + +/* + * This timer was for timing out requests that haven't happened after + * IDA_TIMEOUT. That wasn't such a good idea. 
This timer is used to + * reset a flags structure so we don't flood the user with + * "Non-Fatal error" messages. + */ +static void ida_timer(unsigned long tdata) +{ + ctlr_info_t *h = (ctlr_info_t*)tdata; + + h->timer.expires = jiffies + IDA_TIMER; + add_timer(&h->timer); + h->misc_tflags = 0; +} + +static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + drv_info_t *drv = get_drv(bdev->bd_disk); + + if (drv->cylinders) { + geo->heads = drv->heads; + geo->sectors = drv->sectors; + geo->cylinders = drv->cylinders; + } else { + geo->heads = 0xff; + geo->sectors = 0x3f; + geo->cylinders = drv->nr_blks / (0xff*0x3f); + } + + return 0; +} + +/* + * ida_ioctl does some miscellaneous stuff like reporting drive geometry, + * setting readahead and submitting commands from userspace to the controller. + */ +static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) +{ + drv_info_t *drv = get_drv(bdev->bd_disk); + ctlr_info_t *host = get_host(bdev->bd_disk); + int error; + ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg; + ida_ioctl_t *my_io; + + switch(cmd) { + case IDAGETDRVINFO: + if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t))) + return -EFAULT; + return 0; + case IDAPASSTHRU: + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL); + if (!my_io) + return -ENOMEM; + error = -EFAULT; + if (copy_from_user(my_io, io, sizeof(*my_io))) + goto out_passthru; + error = ida_ctlr_ioctl(host, drv - host->drv, my_io); + if (error) + goto out_passthru; + error = -EFAULT; + if (copy_to_user(io, my_io, sizeof(*my_io))) + goto out_passthru; + error = 0; +out_passthru: + kfree(my_io); + return error; + case IDAGETCTLRSIG: + if (!arg) return -EINVAL; + put_user(host->ctlr_sig, (int __user *)arg); + return 0; + case IDAREVALIDATEVOLS: + if (MINOR(bdev->bd_dev) != 0) + return -ENXIO; + return revalidate_allvol(host); + case IDADRIVERVERSION: + if (!arg) return -EINVAL; + put_user(DRIVER_VERSION, (unsigned long __user *)arg); + return 0; + case IDAGETPCIINFO: + { + + ida_pci_info_struct pciinfo; + + if (!arg) return -EINVAL; + pciinfo.bus = host->pci_dev->bus->number; + pciinfo.dev_fn = host->pci_dev->devfn; + pciinfo.board_id = host->board_id; + if(copy_to_user((void __user *) arg, &pciinfo, + sizeof( ida_pci_info_struct))) + return -EFAULT; + return(0); + } + + default: + return -EINVAL; + } + +} +/* + * ida_ctlr_ioctl is for passing commands to the controller from userspace. + * The command block (io) has already been copied to kernel space for us, + * however, any elements in the sglist need to be copied to kernel space + * or copied back to userspace. + * + * Only root may perform a controller passthru command, however I'm not doing + * any serious sanity checking on the arguments. Doing an IDA_WRITE_MEDIA and + * putting a 64M buffer in the sglist is probably a *bad* idea. + */ +static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io) +{ + int ctlr = h->ctlr; + cmdlist_t *c; + void *p = NULL; + unsigned long flags; + int error; + + if ((c = cmd_alloc(h, 0)) == NULL) + return -ENOMEM; + c->ctlr = ctlr; + c->hdr.unit = (io->unit & UNITVALID) ? 
(io->unit & ~UNITVALID) : dsk;
+	c->hdr.size = sizeof(rblk_t) >> 2;
+	c->size += sizeof(rblk_t);
+
+	c->req.hdr.cmd = io->cmd;
+	c->req.hdr.blk = io->blk;
+	c->req.hdr.blk_cnt = io->blk_cnt;
+	c->type = CMD_IOCTL_PEND;
+
+	/* Pre submit processing */
+	switch(io->cmd) {
+	case PASSTHRU_A:
+		p = kmalloc(io->sg[0].size, GFP_KERNEL);
+		if (!p)
+		{
+			error = -ENOMEM;
+			cmd_free(h, c, 0);
+			return(error);
+		}
+		if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
+			kfree(p);
+			cmd_free(h, c, 0);
+			return -EFAULT;
+		}
+		c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
+				sizeof(ida_ioctl_t),
+				PCI_DMA_BIDIRECTIONAL);
+		c->req.sg[0].size = io->sg[0].size;
+		c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
+			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
+		c->req.hdr.sg_cnt = 1;
+		break;
+	case IDA_READ:
+	case READ_FLASH_ROM:
+	case SENSE_CONTROLLER_PERFORMANCE:
+		p = kmalloc(io->sg[0].size, GFP_KERNEL);
+		if (!p)
+		{
+			error = -ENOMEM;
+			cmd_free(h, c, 0);
+			return(error);
+		}
+
+		c->req.sg[0].size = io->sg[0].size;
+		c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
+			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
+		c->req.hdr.sg_cnt = 1;
+		break;
+	case IDA_WRITE:
+	case IDA_WRITE_MEDIA:
+	case DIAG_PASS_THRU:
+	case COLLECT_BUFFER:
+	case WRITE_FLASH_ROM:
+		p = kmalloc(io->sg[0].size, GFP_KERNEL);
+		if (!p)
+		{
+			error = -ENOMEM;
+			cmd_free(h, c, 0);
+			return(error);
+		}
+		if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
+			kfree(p);
+			cmd_free(h, c, 0);
+			return -EFAULT;
+		}
+		c->req.sg[0].size = io->sg[0].size;
+		c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
+			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
+		c->req.hdr.sg_cnt = 1;
+		break;
+	default:
+		c->req.sg[0].size = sizeof(io->c);
+		c->req.sg[0].addr = pci_map_single(h->pci_dev, &io->c,
+			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
+		c->req.hdr.sg_cnt = 1;
+	}
+
+	/* Put the request on the tail of the request queue */
+	spin_lock_irqsave(IDA_LOCK(ctlr), flags);
+	addQ(&h->reqQ, c);
+	h->Qdepth++;
+	start_io(h);
+	spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
+
+	/* Wait for completion */
+	while(c->type != CMD_IOCTL_DONE)
+		schedule();
+
+	/* Unmap the DMA */
+	pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
+		PCI_DMA_BIDIRECTIONAL);
+	/* Post submit processing */
+	switch(io->cmd) {
+	case PASSTHRU_A:
+		pci_unmap_single(h->pci_dev, c->req.hdr.blk,
+				sizeof(ida_ioctl_t),
+				PCI_DMA_BIDIRECTIONAL);
+		/* fall through */
+	case IDA_READ:
+	case DIAG_PASS_THRU:
+	case SENSE_CONTROLLER_PERFORMANCE:
+	case READ_FLASH_ROM:
+		if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
+			kfree(p);
+			cmd_free(h, c, 0); /* don't leak the command on the error path */
+			return -EFAULT;
+		}
+		/* fall through and free p */
+	case IDA_WRITE:
+	case IDA_WRITE_MEDIA:
+	case COLLECT_BUFFER:
+	case WRITE_FLASH_ROM:
+		kfree(p);
+		break;
+	default:;
+		/* Nothing to do */
+	}
+
+	io->rcode = c->req.hdr.rcode;
+	cmd_free(h, c, 0);
+	return(0);
+}
+
+/*
+ * Commands are pre-allocated in a large block. Here we use a simple bitmap
+ * scheme to suballocate them to the driver. Operations that are not time
+ * critical (and can wait for kmalloc and possibly sleep) can pass 0 as the
+ * get_from_pool argument to have a command allocated fresh rather than
+ * taken from the pool. 
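+ *
+ * find_first_zero_bit() only searches, it does not claim: two callers can
+ * spot the same free slot, and the atomic test_and_set_bit() decides the
+ * winner, so the do/while retries the search whenever the claim is lost.
+ * The core of that round in isolation (same logic as cmd_alloc() below):
+ *
+ *	do {
+ *		i = find_first_zero_bit(bits, NR_CMDS);
+ *		if (i == NR_CMDS)
+ *			return NULL;
+ *	} while (test_and_set_bit(i & (BITS_PER_LONG-1),
+ *				  bits + (i / BITS_PER_LONG)));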
+ */ +static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool) +{ + cmdlist_t * c; + int i; + dma_addr_t cmd_dhandle; + + if (!get_from_pool) { + c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev, + sizeof(cmdlist_t), &cmd_dhandle); + if(c==NULL) + return NULL; + } else { + do { + i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS); + if (i == NR_CMDS) + return NULL; + } while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0); + c = h->cmd_pool + i; + cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t); + h->nr_allocs++; + } + + memset(c, 0, sizeof(cmdlist_t)); + c->busaddr = cmd_dhandle; + return c; +} + +static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool) +{ + int i; + + if (!got_from_pool) { + pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c, + c->busaddr); + } else { + i = c - h->cmd_pool; + clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)); + h->nr_frees++; + } +} + +/*********************************************************************** + name: sendcmd + Send a command to an IDA using the memory mapped FIFO interface + and wait for it to complete. + This routine should only be called at init time. +***********************************************************************/ +static int sendcmd( + __u8 cmd, + int ctlr, + void *buff, + size_t size, + unsigned int blk, + unsigned int blkcnt, + unsigned int log_unit ) +{ + cmdlist_t *c; + int complete; + unsigned long temp; + unsigned long i; + ctlr_info_t *info_p = hba[ctlr]; + + c = cmd_alloc(info_p, 1); + if(!c) + return IO_ERROR; + c->ctlr = ctlr; + c->hdr.unit = log_unit; + c->hdr.prio = 0; + c->hdr.size = sizeof(rblk_t) >> 2; + c->size += sizeof(rblk_t); + + /* The request information. */ + c->req.hdr.next = 0; + c->req.hdr.rcode = 0; + c->req.bp = 0; + c->req.hdr.sg_cnt = 1; + c->req.hdr.reserved = 0; + + if (size == 0) + c->req.sg[0].size = 512; + else + c->req.sg[0].size = size; + + c->req.hdr.blk = blk; + c->req.hdr.blk_cnt = blkcnt; + c->req.hdr.cmd = (unsigned char) cmd; + c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev, + buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); + /* + * Disable interrupt + */ + info_p->access.set_intr_mask(info_p, 0); + /* Make sure there is room in the command FIFO */ + /* Actually it should be completely empty at this time. */ + for (i = 200000; i > 0; i--) { + temp = info_p->access.fifo_full(info_p); + if (temp != 0) { + break; + } + udelay(10); +DBG( + printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full," + " waiting!\n", ctlr); +); + } + /* + * Send the cmd + */ + info_p->access.submit_command(info_p, c); + complete = pollcomplete(ctlr); + + pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr, + c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); + if (complete != 1) { + if (complete != c->busaddr) { + printk( KERN_WARNING + "cpqarray ida%d: idaSendPciCmd " + "Invalid command list address returned! 
(%08lx)\n",
+				ctlr, (unsigned long)complete);
+			cmd_free(info_p, c, 1);
+			return (IO_ERROR);
+		}
+	} else {
+		printk( KERN_WARNING
+			"cpqarray ida%d: idaSendPciCmd Timed out, "
+			"No command list address returned!\n",
+			ctlr);
+		cmd_free(info_p, c, 1);
+		return (IO_ERROR);
+	}
+
+	if (c->req.hdr.rcode & 0x00FE) {
+		if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
+			printk( KERN_WARNING
+				"cpqarray ida%d: idaSendPciCmd, error: "
+				"Controller failed at init time "
+				"cmd: 0x%x, return code = 0x%x\n",
+				ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
+
+			cmd_free(info_p, c, 1);
+			return (IO_ERROR);
+		}
+	}
+	cmd_free(info_p, c, 1);
+	return (IO_OK);
+}
+
+/*
+ * revalidate_allvol is for online array config utilities. After a
+ * utility reconfigures the drives in the array, it can use this function
+ * (through an ioctl) to make the driver zap any previous disk structs for
+ * that controller and get new ones.
+ *
+ * Right now I'm using the getgeometry() function to do this, but this
+ * function should probably be finer grained and allow you to revalidate one
+ * particular logical volume (instead of all of them on a particular
+ * controller).
+ */
+static int revalidate_allvol(ctlr_info_t *host)
+{
+	int ctlr = host->ctlr;
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(IDA_LOCK(ctlr), flags);
+	if (host->usage_count > 1) {
+		spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
+		printk(KERN_WARNING "cpqarray: Device busy for volume"
+			" revalidation (usage=%d)\n", host->usage_count);
+		return -EBUSY;
+	}
+	host->usage_count++;
+	spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
+
+	/*
+	 * Set the partition and block size structures for all volumes
+	 * on this controller to zero. We will reread all of this data
+	 */
+	set_capacity(ida_gendisk[ctlr][0], 0);
+	for (i = 1; i < NWD; i++) {
+		struct gendisk *disk = ida_gendisk[ctlr][i];
+		if (disk->flags & GENHD_FL_UP)
+			del_gendisk(disk);
+	}
+	memset(host->drv, 0, sizeof(drv_info_t)*NWD);
+
+	/*
+	 * Tell the array controller not to give us any interrupts while
+	 * we check the new geometry. Then turn interrupts back on when
+	 * we're done.
+	 */
+	host->access.set_intr_mask(host, 0);
+	getgeometry(ctlr);
+	host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
+
+	for(i=0; i<NWD; i++) {
+		struct gendisk *disk = ida_gendisk[ctlr][i];
+		drv_info_t *drv = &host->drv[i];
+		if (i && !drv->nr_blks)
+			continue;
+		blk_queue_hardsect_size(host->queue, drv->blk_size);
+		set_capacity(disk, drv->nr_blks);
+		disk->queue = host->queue;
+		disk->private_data = drv;
+		if (i)
+			add_disk(disk);
+	}
+
+	host->usage_count--;
+	return 0;
+}
+
+static int ida_revalidate(struct gendisk *disk)
+{
+	drv_info_t *drv = disk->private_data;
+	set_capacity(disk, drv->nr_blks);
+	return 0;
+}
+
+/********************************************************************
+    name: pollcomplete
+    Wait polling for a command to complete.
+    The memory mapped FIFO is polled for the completion.
+    Used only at init time, interrupts disabled.
+ ********************************************************************/
+static int pollcomplete(int ctlr)
+{
+	int done;
+	int i;
+
+	/* Wait (up to 2 seconds) for a command to complete */
+
+	for (i = 200000; i > 0; i--) {
+		done = hba[ctlr]->access.command_completed(hba[ctlr]);
+		if (done == 0) {
+			udelay(10);	/* a short fixed delay */
+		} else
+			return (done);
+	}
+	/* Invalid address to tell caller we ran out of time */
+	return 1;
+}
+/*****************************************************************
+    start_fwbk
+    Starts the controller firmware's background processing. 
+    Currently only the Integrated Raid controller needs this done.
+    If the PCI mem address registers are written to after this,
+    data corruption may occur.
+*****************************************************************/
+static void start_fwbk(int ctlr)
+{
+	id_ctlr_t *id_ctlr_buf;
+	int ret_code;
+
+	if(	(hba[ctlr]->board_id != 0x40400E11)
+		&& (hba[ctlr]->board_id != 0x40480E11) )
+
+	/* Not an Integrated Raid, so there is nothing for us to do */
+		return;
+	printk(KERN_DEBUG "cpqarray: Starting firmware's background"
+		" processing\n");
+	/* Command does not return anything, but idasend command needs a
+	   buffer */
+	id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
+	if(id_ctlr_buf==NULL)
+	{
+		printk(KERN_WARNING "cpqarray: Out of memory. "
+			"Unable to start background processing.\n");
+		return;
+	}
+	ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
+		id_ctlr_buf, 0, 0, 0, 0);
+	if(ret_code != IO_OK)
+		printk(KERN_WARNING "cpqarray: Unable to start"
+			" background processing\n");
+
+	kfree(id_ctlr_buf);
+}
+/*****************************************************************
+    getgeometry
+    Get ida logical volume geometry from the controller.
+    This is a large bit of code which once existed in two flavors.
+    It is used only at init time.
+*****************************************************************/
+static void getgeometry(int ctlr)
+{
+	id_log_drv_t *id_ldrive;
+	id_ctlr_t *id_ctlr_buf;
+	sense_log_drv_stat_t *id_lstatus_buf;
+	config_t *sense_config_buf;
+	unsigned int log_unit, log_index;
+	int ret_code, size;
+	drv_info_t *drv;
+	ctlr_info_t *info_p = hba[ctlr];
+	int i;
+
+	info_p->log_drv_map = 0;
+
+	id_ldrive = kzalloc(sizeof(id_log_drv_t), GFP_KERNEL);
+	if (!id_ldrive) {
+		printk( KERN_ERR "cpqarray: out of memory.\n");
+		goto err_0;
+	}
+
+	id_ctlr_buf = kzalloc(sizeof(id_ctlr_t), GFP_KERNEL);
+	if (!id_ctlr_buf) {
+		printk( KERN_ERR "cpqarray: out of memory.\n");
+		goto err_1;
+	}
+
+	id_lstatus_buf = kzalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
+	if (!id_lstatus_buf) {
+		printk( KERN_ERR "cpqarray: out of memory.\n");
+		goto err_2;
+	}
+
+	sense_config_buf = kzalloc(sizeof(config_t), GFP_KERNEL);
+	if (!sense_config_buf) {
+		printk( KERN_ERR "cpqarray: out of memory.\n");
+		goto err_3;
+	}
+
+	info_p->phys_drives = 0;
+	info_p->log_drv_map = 0;
+	info_p->drv_assign_map = 0;
+	info_p->drv_spare_map = 0;
+	info_p->mp_failed_drv_map = 0;	/* only initialized here */
+	/* Get controllers info for this logical drive */
+	ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
+	if (ret_code == IO_ERROR) {
+		/*
+		 * If can't get controller info, set the logical drive map to 0,
+		 * so the idastubopen will fail on all logical drives
+		 * on the controller.
+		 */
+		printk(KERN_ERR "cpqarray: error sending ID controller\n");
+		goto err_4;
+	}
+
+	info_p->log_drives = id_ctlr_buf->nr_drvs;
+	for(i=0;i<4;i++)
+		info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
+	info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
+
+	printk(" (%s)\n", info_p->product_name);
+	/*
+	 * Initialize logical drive map to zero
+	 */
+	log_index = 0;
+	/*
+	 * Get drive geometry for all logical drives
+	 */
+	if (id_ctlr_buf->nr_drvs > 16)
+		printk(KERN_WARNING "cpqarray ida%d: This driver supports "
+			"16 logical drives per controller.\n
" + " Additional drives will not be " + "detected\n", ctlr); + + for (log_unit = 0; + (log_index < id_ctlr_buf->nr_drvs) + && (log_unit < NWD); + log_unit++) { + size = sizeof(sense_log_drv_stat_t); + + /* + Send "Identify logical drive status" cmd + */ + ret_code = sendcmd(SENSE_LOG_DRV_STAT, + ctlr, id_lstatus_buf, size, 0, 0, log_unit); + if (ret_code == IO_ERROR) { + /* + If can't get logical drive status, set + the logical drive map to 0, so the + idastubopen will fail for all logical drives + on the controller. + */ + info_p->log_drv_map = 0; + printk( KERN_WARNING + "cpqarray ida%d: idaGetGeometry - Controller" + " failed to report status of logical drive %d\n" + "Access to this controller has been disabled\n", + ctlr, log_unit); + goto err_4; + } + /* + Make sure the logical drive is configured + */ + if (id_lstatus_buf->status != LOG_NOT_CONF) { + ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive, + sizeof(id_log_drv_t), 0, 0, log_unit); + /* + If error, the bit for this + logical drive won't be set and + idastubopen will return error. + */ + if (ret_code != IO_ERROR) { + drv = &info_p->drv[log_unit]; + drv->blk_size = id_ldrive->blk_size; + drv->nr_blks = id_ldrive->nr_blks; + drv->cylinders = id_ldrive->drv.cyl; + drv->heads = id_ldrive->drv.heads; + drv->sectors = id_ldrive->drv.sect_per_track; + info_p->log_drv_map |= (1 << log_unit); + + printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n", + ctlr, log_unit, drv->blk_size, drv->nr_blks); + ret_code = sendcmd(SENSE_CONFIG, + ctlr, sense_config_buf, + sizeof(config_t), 0, 0, log_unit); + if (ret_code == IO_ERROR) { + info_p->log_drv_map = 0; + printk(KERN_ERR "cpqarray: error sending sense config\n"); + goto err_4; + } + + info_p->phys_drives = + sense_config_buf->ctlr_phys_drv; + info_p->drv_assign_map + |= sense_config_buf->drv_asgn_map; + info_p->drv_assign_map + |= sense_config_buf->spare_asgn_map; + info_p->drv_spare_map + |= sense_config_buf->spare_asgn_map; + } /* end of if no error on id_ldrive */ + log_index = log_index + 1; + } /* end of if logical drive configured */ + } /* end of for log_unit */ + + /* Free all the buffers and return */ +err_4: + kfree(sense_config_buf); +err_3: + kfree(id_lstatus_buf); +err_2: + kfree(id_ctlr_buf); +err_1: + kfree(id_ldrive); +err_0: + return; +} + +static void __exit cpqarray_exit(void) +{ + int i; + + pci_unregister_driver(&cpqarray_pci_driver); + + /* Double check that all controller entries have been removed */ + for(i=0; i<MAX_CTLR; i++) { + if (hba[i] != NULL) { + printk(KERN_WARNING "cpqarray: Removing EISA " + "controller %d\n", i); + cpqarray_remove_one_eisa(i); + } + } + + remove_proc_entry("driver/cpqarray", NULL); +} + +module_init(cpqarray_init) +module_exit(cpqarray_exit) diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h new file mode 100644 index 0000000..be73e9d --- /dev/null +++ b/drivers/block/cpqarray.h @@ -0,0 +1,126 @@ +/* + * Disk Array driver for Compaq SMART2 Controllers + * Copyright 1998 Compaq Computer Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. 
See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Questions/Comments/Bugfixes to iss_storagedev@hp.com + * + * If you want to make changes, improve or add functionality to this + * driver, you'll probably need the Compaq Array Controller Interface + * Specificiation (Document number ECG086/1198) + */ +#ifndef CPQARRAY_H +#define CPQARRAY_H + +#ifdef __KERNEL__ +#include <linux/blkdev.h> +#include <linux/slab.h> +#include <linux/proc_fs.h> +#include <linux/timer.h> +#endif + +#include "ida_cmd.h" + +#define IO_OK 0 +#define IO_ERROR 1 +#define NWD 16 +#define NWD_SHIFT 4 + +#define IDA_TIMER (5*HZ) +#define IDA_TIMEOUT (10*HZ) + +#define MISC_NONFATAL_WARN 0x01 + +typedef struct { + unsigned blk_size; + unsigned nr_blks; + unsigned cylinders; + unsigned heads; + unsigned sectors; + int usage_count; +} drv_info_t; + +#ifdef __KERNEL__ + +struct ctlr_info; +typedef struct ctlr_info ctlr_info_t; + +struct access_method { + void (*submit_command)(ctlr_info_t *h, cmdlist_t *c); + void (*set_intr_mask)(ctlr_info_t *h, unsigned long val); + unsigned long (*fifo_full)(ctlr_info_t *h); + unsigned long (*intr_pending)(ctlr_info_t *h); + unsigned long (*command_completed)(ctlr_info_t *h); +}; + +struct board_type { + __u32 board_id; + char *product_name; + struct access_method *access; +}; + +struct ctlr_info { + int ctlr; + char devname[8]; + __u32 log_drv_map; + __u32 drv_assign_map; + __u32 drv_spare_map; + __u32 mp_failed_drv_map; + + char firm_rev[4]; + int ctlr_sig; + + int log_drives; + int phys_drives; + + struct pci_dev *pci_dev; /* NULL if EISA */ + __u32 board_id; + char *product_name; + + void __iomem *vaddr; + unsigned long paddr; + unsigned long io_mem_addr; + unsigned long io_mem_length; + int intr; + int usage_count; + drv_info_t drv[NWD]; + struct proc_dir_entry *proc; + + struct access_method access; + + cmdlist_t *reqQ; + cmdlist_t *cmpQ; + cmdlist_t *cmd_pool; + dma_addr_t cmd_pool_dhandle; + unsigned long *cmd_pool_bits; + struct request_queue *queue; + spinlock_t lock; + + unsigned int Qdepth; + unsigned int maxQsinceinit; + + unsigned int nr_requests; + unsigned int nr_allocs; + unsigned int nr_frees; + struct timer_list timer; + unsigned int misc_tflags; +}; + +#define IDA_LOCK(i) (&hba[i]->lock) + +#endif + +#endif /* CPQARRAY_H */ diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c new file mode 100644 index 0000000..8b6bb76 --- /dev/null +++ b/drivers/block/cryptoloop.c @@ -0,0 +1,216 @@ +/* + Linux loop encryption enabling module + + Copyright (C) 2002 Herbert Valerio Riedel <hvr@gnu.org> + Copyright (C) 2003 Fruhwirth Clemens <clemens@endorphin.org> + + This module is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This module is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this module; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/module.h> + +#include <linux/init.h> +#include <linux/string.h> +#include <linux/crypto.h> +#include <linux/blkdev.h> +#include <linux/loop.h> +#include <linux/scatterlist.h> +#include <asm/uaccess.h> + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("loop blockdevice transferfunction adaptor / CryptoAPI"); +MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>"); + +#define LOOP_IV_SECTOR_BITS 9 +#define LOOP_IV_SECTOR_SIZE (1 << LOOP_IV_SECTOR_BITS) + +static int +cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info) +{ + int err = -EINVAL; + int cipher_len; + int mode_len; + char cms[LO_NAME_SIZE]; /* cipher-mode string */ + char *cipher; + char *mode; + char *cmsp = cms; /* c-m string pointer */ + struct crypto_blkcipher *tfm; + + /* encryption breaks for non sector aligned offsets */ + + if (info->lo_offset % LOOP_IV_SECTOR_SIZE) + goto out; + + strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE); + cms[LO_NAME_SIZE - 1] = 0; + + cipher = cmsp; + cipher_len = strcspn(cmsp, "-"); + + mode = cmsp + cipher_len; + mode_len = 0; + if (*mode) { + mode++; + mode_len = strcspn(mode, "-"); + } + + if (!mode_len) { + mode = "cbc"; + mode_len = 3; + } + + if (cipher_len + mode_len + 3 > LO_NAME_SIZE) + return -EINVAL; + + memmove(cms, mode, mode_len); + cmsp = cms + mode_len; + *cmsp++ = '('; + memcpy(cmsp, info->lo_crypt_name, cipher_len); + cmsp += cipher_len; + *cmsp++ = ')'; + *cmsp = 0; + + tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key, + info->lo_encrypt_key_size); + + if (err != 0) + goto out_free_tfm; + + lo->key_data = tfm; + return 0; + + out_free_tfm: + crypto_free_blkcipher(tfm); + + out: + return err; +} + + +typedef int (*encdec_cbc_t)(struct blkcipher_desc *desc, + struct scatterlist *sg_out, + struct scatterlist *sg_in, + unsigned int nsg); + +static int +cryptoloop_transfer(struct loop_device *lo, int cmd, + struct page *raw_page, unsigned raw_off, + struct page *loop_page, unsigned loop_off, + int size, sector_t IV) +{ + struct crypto_blkcipher *tfm = lo->key_data; + struct blkcipher_desc desc = { + .tfm = tfm, + .flags = CRYPTO_TFM_REQ_MAY_SLEEP, + }; + struct scatterlist sg_out; + struct scatterlist sg_in; + + encdec_cbc_t encdecfunc; + struct page *in_page, *out_page; + unsigned in_offs, out_offs; + int err; + + sg_init_table(&sg_out, 1); + sg_init_table(&sg_in, 1); + + if (cmd == READ) { + in_page = raw_page; + in_offs = raw_off; + out_page = loop_page; + out_offs = loop_off; + encdecfunc = crypto_blkcipher_crt(tfm)->decrypt; + } else { + in_page = loop_page; + in_offs = loop_off; + out_page = raw_page; + out_offs = raw_off; + encdecfunc = crypto_blkcipher_crt(tfm)->encrypt; + } + + while (size > 0) { + const int sz = min(size, LOOP_IV_SECTOR_SIZE); + u32 iv[4] = { 0, }; + iv[0] = cpu_to_le32(IV & 0xffffffff); + + sg_set_page(&sg_in, in_page, sz, in_offs); + sg_set_page(&sg_out, out_page, sz, out_offs); + + desc.info = iv; + err = encdecfunc(&desc, &sg_out, &sg_in, sz); + if (err) + return err; + + IV++; + size -= sz; + in_offs += sz; + out_offs += sz; + } + + return 0; +} + +static int +cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg) +{ + return -EINVAL; +} + +static int +cryptoloop_release(struct loop_device 
*lo) +{ + struct crypto_blkcipher *tfm = lo->key_data; + if (tfm != NULL) { + crypto_free_blkcipher(tfm); + lo->key_data = NULL; + return 0; + } + printk(KERN_ERR "cryptoloop_release(): tfm == NULL?\n"); + return -EINVAL; +} + +static struct loop_func_table cryptoloop_funcs = { + .number = LO_CRYPT_CRYPTOAPI, + .init = cryptoloop_init, + .ioctl = cryptoloop_ioctl, + .transfer = cryptoloop_transfer, + .release = cryptoloop_release, + .owner = THIS_MODULE +}; + +static int __init +init_cryptoloop(void) +{ + int rc = loop_register_transfer(&cryptoloop_funcs); + + if (rc) + printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n"); + return rc; +} + +static void __exit +cleanup_cryptoloop(void) +{ + if (loop_unregister_transfer(LO_CRYPT_CRYPTOAPI)) + printk(KERN_ERR + "cryptoloop: loop_unregister_transfer failed\n"); +} + +module_init(init_cryptoloop); +module_exit(cleanup_cryptoloop); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c new file mode 100644 index 0000000..cf29cc4 --- /dev/null +++ b/drivers/block/floppy.c @@ -0,0 +1,4582 @@ +/* + * linux/drivers/block/floppy.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1993, 1994 Alain Knaff + * Copyright (C) 1998 Alan Cox + */ + +/* + * 02.12.91 - Changed to static variables to indicate need for reset + * and recalibrate. This makes some things easier (output_byte reset + * checking etc), and means less interrupt jumping in case of errors, + * so the code is hopefully easier to understand. + */ + +/* + * This file is certainly a mess. I've tried my best to get it working, + * but I don't like programming floppies, and I have only one anyway. + * Urgel. I should check for more errors, and do more graceful error + * recovery. Seems there are problems with several drives. I've tried to + * correct them. No promises. + */ + +/* + * As with hd.c, all routines within this file can (and will) be called + * by interrupts, so extreme caution is needed. A hardware interrupt + * handler may not sleep, or a kernel panic will happen. Thus I cannot + * call "floppy-on" directly, but have to set a special timer interrupt + * etc. + */ + +/* + * 28.02.92 - made track-buffering routines, based on the routines written + * by entropy@wintermute.wpi.edu (Lawrence Foard). Linus. + */ + +/* + * Automatic floppy-detection and formatting written by Werner Almesberger + * (almesber@nessie.cs.id.ethz.ch), who also corrected some problems with + * the floppy-change signal detection. + */ + +/* + * 1992/7/22 -- Hennus Bergman: Added better error reporting, fixed + * FDC data overrun bug, added some preliminary stuff for vertical + * recording support. + * + * 1992/9/17: Added DMA allocation & DMA functions. -- hhb. + * + * TODO: Errors are still not counted properly. + */ + +/* 1992/9/20 + * Modifications for ``Sector Shifting'' by Rob Hooft (hooft@chem.ruu.nl) + * modeled after the freeware MS-DOS program fdformat/88 V1.8 by + * Christoph H. Hochst\"atter. + * I have fixed the shift values to the ones I always use. Maybe a new + * ioctl() should be created to be able to modify them. + * There is a bug in the driver that makes it impossible to format a + * floppy as the first thing after bootup. + */ + +/* + * 1993/4/29 -- Linus -- cleaned up the timer handling in the kernel, and + * this helped the floppy driver as well. Much cleaner, and still seems to + * work. + */ + +/* 1994/6/24 --bbroad-- added the floppy table entries and made + * minor modifications to allow 2.88 floppies to be run. 
+ */ + +/* 1994/7/13 -- Paul Vojta -- modified the probing code to allow three or more + * disk types. + */ + +/* + * 1994/8/8 -- Alain Knaff -- Switched to fdpatch driver: Support for bigger + * format bug fixes, but unfortunately some new bugs too... + */ + +/* 1994/9/17 -- Koen Holtman -- added logging of physical floppy write + * errors to allow safe writing by specialized programs. + */ + +/* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks + * by defining bit 1 of the "stretch" parameter to mean put sectors on the + * opposite side of the disk, leaving the sector IDs alone (i.e. Commodore's + * drives are "upside-down"). + */ + +/* + * 1995/8/26 -- Andreas Busse -- added Mips support. + */ + +/* + * 1995/10/18 -- Ralf Baechle -- Portability cleanup; move machine dependent + * features to asm/floppy.h. + */ + +/* + * 1998/1/21 -- Richard Gooch <rgooch@atnf.csiro.au> -- devfs support + */ + +/* + * 1998/05/07 -- Russell King -- More portability cleanups; moved definition of + * interrupt and dma channel to asm/floppy.h. Cleaned up some formatting & + * use of '0' for NULL. + */ + +/* + * 1998/06/07 -- Alan Cox -- Merged the 2.0.34 fixes for resource allocation + * failures. + */ + +/* + * 1998/09/20 -- David Weinehall -- Added slow-down code for buggy PS/2-drives. + */ + +/* + * 1999/08/13 -- Paul Slootman -- floppy stopped working on Alpha after 24 + * days, 6 hours, 32 minutes and 32 seconds (i.e. MAXINT jiffies; ints were + * being used to store jiffies, which are unsigned longs). + */ + +/* + * 2000/08/28 -- Arnaldo Carvalho de Melo <acme@conectiva.com.br> + * - get rid of check_region + * - s/suser/capable/ + */ + +/* + * 2001/08/26 -- Paul Gortmaker - fix insmod oops on machines with no + * floppy controller (lingering task on list after module is gone... boom.) + */ + +/* + * 2002/02/07 -- Anton Altaparmakov - Fix io ports reservation to correct range + * (0x3f2-0x3f5, 0x3f7). This fix is a bit of a hack but the proper fix + * requires many non-obvious changes in arch dependent code. + */ + +/* 2003/07/28 -- Daniele Bellucci <bellucda@tiscali.it>. + * Better audit of register_blkdev. + */ + +#define FLOPPY_SANITY_CHECK +#undef FLOPPY_SILENT_DCL_CLEAR + +#define REALLY_SLOW_IO + +#define DEBUGT 2 +#define DCL_DEBUG /* debug disk change line */ + +/* do print messages for unexpected interrupts */ +static int print_unex = 1; +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/timer.h> +#include <linux/workqueue.h> +#define FDPATCHES +#include <linux/fdreg.h> +#include <linux/fd.h> +#include <linux/hdreg.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/mm.h> +#include <linux/bio.h> +#include <linux/string.h> +#include <linux/jiffies.h> +#include <linux/fcntl.h> +#include <linux/delay.h> +#include <linux/mc146818rtc.h> /* CMOS defines */ +#include <linux/ioport.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/buffer_head.h> /* for invalidate_buffers() */ +#include <linux/mutex.h> + +/* + * PS/2 floppies have much slower step rates than regular floppies. + * It's been recommended that take about 1/4 of the default speed + * in some more extreme cases. 
+ */
+static int slow_floppy;
+
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+static int FLOPPY_IRQ = 6;
+static int FLOPPY_DMA = 2;
+static int can_use_virtual_dma = 2;
+/* =======
+ * can use virtual DMA:
+ * 0 = use of virtual DMA disallowed by config
+ * 1 = use of virtual DMA prescribed by config
+ * 2 = no virtual DMA preference configured. By default try hard DMA,
+ * but fall back on virtual DMA when not enough memory available
+ */
+
+static int use_virtual_dma;
+/* =======
+ * use virtual DMA
+ * 0 using hard DMA
+ * 1 using virtual DMA
+ * This variable is set to virtual when a DMA mem problem arises, and
+ * reset back in floppy_grab_irq_and_dma.
+ * It is not safe to reset it in other circumstances, because the floppy
+ * driver may have several buffers in use at once, and we currently do
+ * not record each buffer's capabilities
+ */
+
+static DEFINE_SPINLOCK(floppy_lock);
+
+static unsigned short virtual_dma_port = 0x3f0;
+irqreturn_t floppy_interrupt(int irq, void *dev_id);
+static int set_dor(int fdc, char mask, char data);
+
+#define K_64	0x10000		/* 64KB */
+
+/* the following is the mask of allowed drives. By default units 2 and
+ * 3 of both floppy controllers are disabled, because switching on the
+ * motor of these drives causes system hangs on some PCI computers. drive
+ * 0 is the low bit (0x1), and drive 7 is the high bit (0x80). Bits are on if
+ * a drive is allowed.
+ *
+ * NOTE: This must come before we include the arch floppy header because
+ * some ports reference this variable from there. -DaveM
+ */
+
+static int allowed_drive_mask = 0x33;
+
+#include <asm/floppy.h>
+
+static int irqdma_allocated;
+
+#define DEVICE_NAME "floppy"
+
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/cdrom.h>	/* for the compatibility eject ioctl */
+#include <linux/completion.h>
+
+static struct request *current_req;
+static struct request_queue *floppy_queue;
+static void do_fd_request(struct request_queue *q);
+
+#ifndef fd_get_dma_residue
+#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
+#endif
+
+/* Dma Memory related stuff */
+
+#ifndef fd_dma_mem_free
+#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
+#endif
+
+#ifndef fd_dma_mem_alloc
+#define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,get_order(size))
+#endif
+
+static inline void fallback_on_nodma_alloc(char **addr, size_t l)
+{
+#ifdef FLOPPY_CAN_FALLBACK_ON_NODMA
+	if (*addr)
+		return;		/* we have the memory */
+	if (can_use_virtual_dma != 2)
+		return;		/* no fallback allowed */
+	printk("DMA memory shortage.
Temporarily falling back on virtual DMA\n"); + *addr = (char *)nodma_mem_alloc(l); +#else + return; +#endif +} + +/* End dma memory related stuff */ + +static unsigned long fake_change; +static int initialising = 1; + +#define ITYPE(x) (((x)>>2) & 0x1f) +#define TOMINOR(x) ((x & 3) | ((x & 4) << 5)) +#define UNIT(x) ((x) & 0x03) /* drive on fdc */ +#define FDC(x) (((x) & 0x04) >> 2) /* fdc of drive */ + /* reverse mapping from unit and fdc to drive */ +#define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2)) +#define DP (&drive_params[current_drive]) +#define DRS (&drive_state[current_drive]) +#define DRWE (&write_errors[current_drive]) +#define FDCS (&fdc_state[fdc]) +#define CLEARF(x) clear_bit(x##_BIT, &DRS->flags) +#define SETF(x) set_bit(x##_BIT, &DRS->flags) +#define TESTF(x) test_bit(x##_BIT, &DRS->flags) + +#define UDP (&drive_params[drive]) +#define UDRS (&drive_state[drive]) +#define UDRWE (&write_errors[drive]) +#define UFDCS (&fdc_state[FDC(drive)]) +#define UCLEARF(x) clear_bit(x##_BIT, &UDRS->flags) +#define USETF(x) set_bit(x##_BIT, &UDRS->flags) +#define UTESTF(x) test_bit(x##_BIT, &UDRS->flags) + +#define DPRINT(format, args...) printk(DEVICE_NAME "%d: " format, current_drive , ## args) + +#define PH_HEAD(floppy,head) (((((floppy)->stretch & 2) >>1) ^ head) << 2) +#define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH) + +#define CLEARSTRUCT(x) memset((x), 0, sizeof(*(x))) + +/* read/write */ +#define COMMAND raw_cmd->cmd[0] +#define DR_SELECT raw_cmd->cmd[1] +#define TRACK raw_cmd->cmd[2] +#define HEAD raw_cmd->cmd[3] +#define SECTOR raw_cmd->cmd[4] +#define SIZECODE raw_cmd->cmd[5] +#define SECT_PER_TRACK raw_cmd->cmd[6] +#define GAP raw_cmd->cmd[7] +#define SIZECODE2 raw_cmd->cmd[8] +#define NR_RW 9 + +/* format */ +#define F_SIZECODE raw_cmd->cmd[2] +#define F_SECT_PER_TRACK raw_cmd->cmd[3] +#define F_GAP raw_cmd->cmd[4] +#define F_FILL raw_cmd->cmd[5] +#define NR_F 6 + +/* + * Maximum disk size (in kilobytes). This default is used whenever the + * current disk size is unknown. + * [Now it is rather a minimum] + */ +#define MAX_DISK_SIZE 4 /* 3984 */ + +/* + * globals used by 'result()' + */ +#define MAX_REPLIES 16 +static unsigned char reply_buffer[MAX_REPLIES]; +static int inr; /* size of reply buffer, when called from interrupt */ +#define ST0 (reply_buffer[0]) +#define ST1 (reply_buffer[1]) +#define ST2 (reply_buffer[2]) +#define ST3 (reply_buffer[0]) /* result of GETSTATUS */ +#define R_TRACK (reply_buffer[3]) +#define R_HEAD (reply_buffer[4]) +#define R_SECTOR (reply_buffer[5]) +#define R_SIZECODE (reply_buffer[6]) +#define SEL_DLY (2*HZ/100) + +/* + * this struct defines the different floppy drive types. + */ +static struct { + struct floppy_drive_params params; + const char *name; /* name printed while booting */ +} default_drive_params[] = { +/* NOTE: the time values in jiffies should be in msec! + CMOS drive type + | Maximum data rate supported by drive type + | | Head load time, msec + | | | Head unload time, msec (not used) + | | | | Step rate interval, usec + | | | | | Time needed for spinup time (jiffies) + | | | | | | Timeout for spinning down (jiffies) + | | | | | | | Spindown offset (where disk stops) + | | | | | | | | Select delay + | | | | | | | | | RPS + | | | | | | | | | | Max number of tracks + | | | | | | | | | | | Interrupt timeout + | | | | | | | | | | | | Max nonintlv. 
sectors + | | | | | | | | | | | | | -Max Errors- flags */ +{{0, 500, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 80, 3*HZ, 20, {3,1,2,0,2}, 0, + 0, { 7, 4, 8, 2, 1, 5, 3,10}, 3*HZ/2, 0 }, "unknown" }, + +{{1, 300, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 40, 3*HZ, 17, {3,1,2,0,2}, 0, + 0, { 1, 0, 0, 0, 0, 0, 0, 0}, 3*HZ/2, 1 }, "360K PC" }, /*5 1/4 360 KB PC*/ + +{{2, 500, 16, 16, 6000, 4*HZ/10, 3*HZ, 14, SEL_DLY, 6, 83, 3*HZ, 17, {3,1,2,0,2}, 0, + 0, { 2, 5, 6,23,10,20,12, 0}, 3*HZ/2, 2 }, "1.2M" }, /*5 1/4 HD AT*/ + +{{3, 250, 16, 16, 3000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0, + 0, { 4,22,21,30, 3, 0, 0, 0}, 3*HZ/2, 4 }, "720k" }, /*3 1/2 DD*/ + +{{4, 500, 16, 16, 4000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0, + 0, { 7, 4,25,22,31,21,29,11}, 3*HZ/2, 7 }, "1.44M" }, /*3 1/2 HD*/ + +{{5, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0, + 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M AMI BIOS" }, /*3 1/2 ED*/ + +{{6, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0, + 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M" } /*3 1/2 ED*/ +/* | --autodetected formats--- | | | + * read_track | | Name printed when booting + * | Native format + * Frequency of disk change checks */ +}; + +static struct floppy_drive_params drive_params[N_DRIVE]; +static struct floppy_drive_struct drive_state[N_DRIVE]; +static struct floppy_write_errors write_errors[N_DRIVE]; +static struct timer_list motor_off_timer[N_DRIVE]; +static struct gendisk *disks[N_DRIVE]; +static struct block_device *opened_bdev[N_DRIVE]; +static DEFINE_MUTEX(open_lock); +static struct floppy_raw_cmd *raw_cmd, default_raw_cmd; + +/* + * This struct defines the different floppy types. + * + * Bit 0 of 'stretch' tells if the tracks need to be doubled for some + * types (e.g. 360kB diskette in 1.2MB drive, etc.). Bit 1 of 'stretch' + * tells if the disk is in Commodore 1581 format, which means side 0 sectors + * are located on side 1 of the disk but with a side 0 ID, and vice-versa. + * This is the same as the Sharp MZ-80 5.25" CP/M disk format, except that the + * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical + * side 0 is on physical side 0 (but with the misnamed sector IDs). + * 'stretch' should probably be renamed to something more general, like + * 'options'. + * + * Bits 2 through 9 of 'stretch' tell the number of the first sector. + * The LSB (bit 2) is flipped. For most disks, the first sector + * is 1 (represented by 0x00<<2). For some CP/M and music sampler + * disks (such as Ensoniq EPS 16plus) it is 0 (represented as 0x01<<2). + * For Amstrad CPC disks it is 0xC1 (represented as 0xC0<<2). + * + * Other parameters should be self-explanatory (see also setfdprm(8)). 
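+ *
+ * Worked example of the first-sector encoding above: a first sector of
+ * 0xC1 (Amstrad CPC) is stored as (0xC1 ^ 1) << 2 == 0xC0 << 2, and a
+ * sketch of the decoding is:
+ *	first_sector = ((stretch >> 2) & 0xff) ^ 1;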
+ */ +/* + Size + | Sectors per track + | | Head + | | | Tracks + | | | | Stretch + | | | | | Gap 1 size + | | | | | | Data rate, | 0x40 for perp + | | | | | | | Spec1 (stepping rate, head unload + | | | | | | | | /fmt gap (gap2) */ +static struct floppy_struct floppy_type[32] = { + { 0, 0,0, 0,0,0x00,0x00,0x00,0x00,NULL }, /* 0 no testing */ + { 720, 9,2,40,0,0x2A,0x02,0xDF,0x50,"d360" }, /* 1 360KB PC */ + { 2400,15,2,80,0,0x1B,0x00,0xDF,0x54,"h1200" }, /* 2 1.2MB AT */ + { 720, 9,1,80,0,0x2A,0x02,0xDF,0x50,"D360" }, /* 3 360KB SS 3.5" */ + { 1440, 9,2,80,0,0x2A,0x02,0xDF,0x50,"D720" }, /* 4 720KB 3.5" */ + { 720, 9,2,40,1,0x23,0x01,0xDF,0x50,"h360" }, /* 5 360KB AT */ + { 1440, 9,2,80,0,0x23,0x01,0xDF,0x50,"h720" }, /* 6 720KB AT */ + { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,"H1440" }, /* 7 1.44MB 3.5" */ + { 5760,36,2,80,0,0x1B,0x43,0xAF,0x54,"E2880" }, /* 8 2.88MB 3.5" */ + { 6240,39,2,80,0,0x1B,0x43,0xAF,0x28,"E3120" }, /* 9 3.12MB 3.5" */ + + { 2880,18,2,80,0,0x25,0x00,0xDF,0x02,"h1440" }, /* 10 1.44MB 5.25" */ + { 3360,21,2,80,0,0x1C,0x00,0xCF,0x0C,"H1680" }, /* 11 1.68MB 3.5" */ + { 820,10,2,41,1,0x25,0x01,0xDF,0x2E,"h410" }, /* 12 410KB 5.25" */ + { 1640,10,2,82,0,0x25,0x02,0xDF,0x2E,"H820" }, /* 13 820KB 3.5" */ + { 2952,18,2,82,0,0x25,0x00,0xDF,0x02,"h1476" }, /* 14 1.48MB 5.25" */ + { 3444,21,2,82,0,0x25,0x00,0xDF,0x0C,"H1722" }, /* 15 1.72MB 3.5" */ + { 840,10,2,42,1,0x25,0x01,0xDF,0x2E,"h420" }, /* 16 420KB 5.25" */ + { 1660,10,2,83,0,0x25,0x02,0xDF,0x2E,"H830" }, /* 17 830KB 3.5" */ + { 2988,18,2,83,0,0x25,0x00,0xDF,0x02,"h1494" }, /* 18 1.49MB 5.25" */ + { 3486,21,2,83,0,0x25,0x00,0xDF,0x0C,"H1743" }, /* 19 1.74 MB 3.5" */ + + { 1760,11,2,80,0,0x1C,0x09,0xCF,0x00,"h880" }, /* 20 880KB 5.25" */ + { 2080,13,2,80,0,0x1C,0x01,0xCF,0x00,"D1040" }, /* 21 1.04MB 3.5" */ + { 2240,14,2,80,0,0x1C,0x19,0xCF,0x00,"D1120" }, /* 22 1.12MB 3.5" */ + { 3200,20,2,80,0,0x1C,0x20,0xCF,0x2C,"h1600" }, /* 23 1.6MB 5.25" */ + { 3520,22,2,80,0,0x1C,0x08,0xCF,0x2e,"H1760" }, /* 24 1.76MB 3.5" */ + { 3840,24,2,80,0,0x1C,0x20,0xCF,0x00,"H1920" }, /* 25 1.92MB 3.5" */ + { 6400,40,2,80,0,0x25,0x5B,0xCF,0x00,"E3200" }, /* 26 3.20MB 3.5" */ + { 7040,44,2,80,0,0x25,0x5B,0xCF,0x00,"E3520" }, /* 27 3.52MB 3.5" */ + { 7680,48,2,80,0,0x25,0x63,0xCF,0x00,"E3840" }, /* 28 3.84MB 3.5" */ + { 3680,23,2,80,0,0x1C,0x10,0xCF,0x00,"H1840" }, /* 29 1.84MB 3.5" */ + + { 1600,10,2,80,0,0x25,0x02,0xDF,0x2E,"D800" }, /* 30 800KB 3.5" */ + { 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */ +}; + +#define SECTSIZE (_FD_SECTSIZE(*floppy)) + +/* Auto-detection: Disk type used until the next media change occurs. */ +static struct floppy_struct *current_type[N_DRIVE]; + +/* + * User-provided type information. current_type points to + * the respective entry of this array. + */ +static struct floppy_struct user_params[N_DRIVE]; + +static sector_t floppy_sizes[256]; + +static char floppy_device_name[] = "floppy"; + +/* + * The driver is trying to determine the correct media format + * while probing is set. rw_interrupt() clears it after a + * successful access. + */ +static int probing; + +/* Synchronization of FDC access. 
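+ * A caller typically grabs the lock, installs a continuation and waits
+ * for it to complete; a sketch lifted from do_format() below:
+ *	LOCK_FDC(drive, 1);
+ *	cont = &format_cont;
+ *	errors = &format_errors;
+ *	IWAIT(redo_format);
+ *	process_fd_request();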
*/ +#define FD_COMMAND_NONE -1 +#define FD_COMMAND_ERROR 2 +#define FD_COMMAND_OKAY 3 + +static volatile int command_status = FD_COMMAND_NONE; +static unsigned long fdc_busy; +static DECLARE_WAIT_QUEUE_HEAD(fdc_wait); +static DECLARE_WAIT_QUEUE_HEAD(command_done); + +#define NO_SIGNAL (!interruptible || !signal_pending(current)) +#define CALL(x) if ((x) == -EINTR) return -EINTR +#define ECALL(x) if ((ret = (x))) return ret; +#define _WAIT(x,i) CALL(ret=wait_til_done((x),i)) +#define WAIT(x) _WAIT((x),interruptible) +#define IWAIT(x) _WAIT((x),1) + +/* Errors during formatting are counted here. */ +static int format_errors; + +/* Format request descriptor. */ +static struct format_descr format_req; + +/* + * Rate is 0 for 500kb/s, 1 for 300kbps, 2 for 250kbps + * Spec1 is 0xSH, where S is stepping rate (F=1ms, E=2ms, D=3ms etc), + * H is head unload time (1=16ms, 2=32ms, etc) + */ + +/* + * Track buffer + * Because these are written to by the DMA controller, they must + * not contain a 64k byte boundary crossing, or data will be + * corrupted/lost. + */ +static char *floppy_track_buffer; +static int max_buffer_sectors; + +static int *errors; +typedef void (*done_f)(int); +static struct cont_t { + void (*interrupt)(void); /* this is called after the interrupt of the + * main command */ + void (*redo)(void); /* this is called to retry the operation */ + void (*error)(void); /* this is called to tally an error */ + done_f done; /* this is called to say if the operation has + * succeeded/failed */ +} *cont; + +static void floppy_ready(void); +static void floppy_start(void); +static void process_fd_request(void); +static void recalibrate_floppy(void); +static void floppy_shutdown(unsigned long); + +static int floppy_grab_irq_and_dma(void); +static void floppy_release_irq_and_dma(void); + +/* + * The "reset" variable should be tested whenever an interrupt is scheduled, + * after the commands have been sent. This is to ensure that the driver doesn't + * get wedged when the interrupt doesn't come because of a failed command. + * reset doesn't need to be tested before sending commands, because + * output_byte is automatically disabled when reset is set. + */ +#define CHECK_RESET { if (FDCS->reset){ reset_fdc(); return; } } +static void reset_fdc(void); + +/* + * These are global variables, as that's the easiest way to give + * information to interrupts. They are the data used for the current + * request. 
+ */ +#define NO_TRACK -1 +#define NEED_1_RECAL -2 +#define NEED_2_RECAL -3 + +static int usage_count; + +/* buffer related variables */ +static int buffer_track = -1; +static int buffer_drive = -1; +static int buffer_min = -1; +static int buffer_max = -1; + +/* fdc related variables, should end up in a struct */ +static struct floppy_fdc_state fdc_state[N_FDC]; +static int fdc; /* current fdc */ + +static struct floppy_struct *_floppy = floppy_type; +static unsigned char current_drive; +static long current_count_sectors; +static unsigned char fsector_t; /* sector in track */ +static unsigned char in_sector_offset; /* offset within physical sector, + * expressed in units of 512 bytes */ + +#ifndef fd_eject +static inline int fd_eject(int drive) +{ + return -EINVAL; +} +#endif + +/* + * Debugging + * ========= + */ +#ifdef DEBUGT +static long unsigned debugtimer; + +static inline void set_debugt(void) +{ + debugtimer = jiffies; +} + +static inline void debugt(const char *message) +{ + if (DP->flags & DEBUGT) + printk("%s dtime=%lu\n", message, jiffies - debugtimer); +} +#else +static inline void set_debugt(void) { } +static inline void debugt(const char *message) { } +#endif /* DEBUGT */ + +typedef void (*timeout_fn) (unsigned long); +static DEFINE_TIMER(fd_timeout, floppy_shutdown, 0, 0); + +static const char *timeout_message; + +#ifdef FLOPPY_SANITY_CHECK +static void is_alive(const char *message) +{ + /* this routine checks whether the floppy driver is "alive" */ + if (test_bit(0, &fdc_busy) && command_status < 2 + && !timer_pending(&fd_timeout)) { + DPRINT("timeout handler died: %s\n", message); + } +} +#endif + +static void (*do_floppy) (void) = NULL; + +#ifdef FLOPPY_SANITY_CHECK + +#define OLOGSIZE 20 + +static void (*lasthandler) (void); +static unsigned long interruptjiffies; +static unsigned long resultjiffies; +static int resultsize; +static unsigned long lastredo; + +static struct output_log { + unsigned char data; + unsigned char status; + unsigned long jiffies; +} output_log[OLOGSIZE]; + +static int output_log_pos; +#endif + +#define current_reqD -1 +#define MAXTIMEOUT -2 + +static void __reschedule_timeout(int drive, const char *message, int marg) +{ + if (drive == current_reqD) + drive = current_drive; + del_timer(&fd_timeout); + if (drive < 0 || drive >= N_DRIVE) { + fd_timeout.expires = jiffies + 20UL * HZ; + drive = 0; + } else + fd_timeout.expires = jiffies + UDP->timeout; + add_timer(&fd_timeout); + if (UDP->flags & FD_DEBUG) { + DPRINT("reschedule timeout "); + printk(message, marg); + printk("\n"); + } + timeout_message = message; +} + +static void reschedule_timeout(int drive, const char *message, int marg) +{ + unsigned long flags; + + spin_lock_irqsave(&floppy_lock, flags); + __reschedule_timeout(drive, message, marg); + spin_unlock_irqrestore(&floppy_lock, flags); +} + +#define INFBOUND(a,b) (a)=max_t(int, a, b) +#define SUPBOUND(a,b) (a)=min_t(int, a, b) + +/* + * Bottom half floppy driver. + * ========================== + * + * This part of the file contains the code talking directly to the hardware, + * and also the main service loop (seek-configure-spinup-command) + */ + +/* + * disk change. + * This routine is responsible for maintaining the FD_DISK_CHANGE flag, + * and the last_checked date. + * + * last_checked is the date of the last check which showed 'no disk change' + * FD_DISK_CHANGE is set under two conditions: + * 1. The floppy has been changed after some i/o to that floppy already + * took place. + * 2. No floppy disk is in the drive. 
This is done in order to ensure that + * requests are quickly flushed in case there is no disk in the drive. It + * follows that FD_DISK_CHANGE can only be cleared if there is a disk in + * the drive. + * + * For 1., maxblock is observed. Maxblock is 0 if no i/o has taken place yet. + * For 2., FD_DISK_NEWCHANGE is watched. FD_DISK_NEWCHANGE is cleared on + * each seek. If a disk is present, the disk change line should also be + * cleared on each seek. Thus, if FD_DISK_NEWCHANGE is clear, but the disk + * change line is set, this means either that no disk is in the drive, or + * that it has been removed since the last seek. + * + * This means that we really have a third possibility too: + * The floppy has been changed after the last seek. + */ + +static int disk_change(int drive) +{ + int fdc = FDC(drive); + +#ifdef FLOPPY_SANITY_CHECK + if (time_before(jiffies, UDRS->select_date + UDP->select_delay)) + DPRINT("WARNING disk change called early\n"); + if (!(FDCS->dor & (0x10 << UNIT(drive))) || + (FDCS->dor & 3) != UNIT(drive) || fdc != FDC(drive)) { + DPRINT("probing disk change on unselected drive\n"); + DPRINT("drive=%d fdc=%d dor=%x\n", drive, FDC(drive), + (unsigned int)FDCS->dor); + } +#endif + +#ifdef DCL_DEBUG + if (UDP->flags & FD_DEBUG) { + DPRINT("checking disk change line for drive %d\n", drive); + DPRINT("jiffies=%lu\n", jiffies); + DPRINT("disk change line=%x\n", fd_inb(FD_DIR) & 0x80); + DPRINT("flags=%lx\n", UDRS->flags); + } +#endif + if (UDP->flags & FD_BROKEN_DCL) + return UTESTF(FD_DISK_CHANGED); + if ((fd_inb(FD_DIR) ^ UDP->flags) & 0x80) { + USETF(FD_VERIFY); /* verify write protection */ + if (UDRS->maxblock) { + /* mark it changed */ + USETF(FD_DISK_CHANGED); + } + + /* invalidate its geometry */ + if (UDRS->keep_data >= 0) { + if ((UDP->flags & FTD_MSG) && + current_type[drive] != NULL) + DPRINT("Disk type is undefined after " + "disk change\n"); + current_type[drive] = NULL; + floppy_sizes[TOMINOR(drive)] = MAX_DISK_SIZE << 1; + } + + return 1; + } else { + UDRS->last_checked = jiffies; + UCLEARF(FD_DISK_NEWCHANGE); + } + return 0; +} + +static inline int is_selected(int dor, int unit) +{ + return ((dor & (0x10 << unit)) && (dor & 3) == unit); +} + +static int set_dor(int fdc, char mask, char data) +{ + unsigned char unit; + unsigned char drive; + unsigned char newdor; + unsigned char olddor; + + if (FDCS->address == -1) + return -1; + + olddor = FDCS->dor; + newdor = (olddor & mask) | data; + if (newdor != olddor) { + unit = olddor & 0x3; + if (is_selected(olddor, unit) && !is_selected(newdor, unit)) { + drive = REVDRIVE(fdc, unit); +#ifdef DCL_DEBUG + if (UDP->flags & FD_DEBUG) { + DPRINT("calling disk change from set_dor\n"); + } +#endif + disk_change(drive); + } + FDCS->dor = newdor; + fd_outb(newdor, FD_DOR); + + unit = newdor & 0x3; + if (!is_selected(olddor, unit) && is_selected(newdor, unit)) { + drive = REVDRIVE(fdc, unit); + UDRS->select_date = jiffies; + } + } + return olddor; +} + +static void twaddle(void) +{ + if (DP->select_delay) + return; + fd_outb(FDCS->dor & ~(0x10 << UNIT(current_drive)), FD_DOR); + fd_outb(FDCS->dor, FD_DOR); + DRS->select_date = jiffies; +} + +/* reset all driver information about the current fdc. This is needed after + * a reset, and after a raw command. 
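+ * (Sketch: tracks on this fdc are marked NEED_2_RECAL here, and
+ * recal_interrupt() further down walks NEED_2_RECAL -> NEED_1_RECAL ->
+ * error when track 0 is never reached.)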
*/ +static void reset_fdc_info(int mode) +{ + int drive; + + FDCS->spec1 = FDCS->spec2 = -1; + FDCS->need_configure = 1; + FDCS->perp_mode = 1; + FDCS->rawcmd = 0; + for (drive = 0; drive < N_DRIVE; drive++) + if (FDC(drive) == fdc && (mode || UDRS->track != NEED_1_RECAL)) + UDRS->track = NEED_2_RECAL; +} + +/* selects the fdc and drive, and enables the fdc's input/dma. */ +static void set_fdc(int drive) +{ + if (drive >= 0 && drive < N_DRIVE) { + fdc = FDC(drive); + current_drive = drive; + } + if (fdc != 1 && fdc != 0) { + printk("bad fdc value\n"); + return; + } + set_dor(fdc, ~0, 8); +#if N_FDC > 1 + set_dor(1 - fdc, ~8, 0); +#endif + if (FDCS->rawcmd == 2) + reset_fdc_info(1); + if (fd_inb(FD_STATUS) != STATUS_READY) + FDCS->reset = 1; +} + +/* locks the driver */ +static int _lock_fdc(int drive, int interruptible, int line) +{ + if (!usage_count) { + printk(KERN_ERR + "Trying to lock fdc while usage count=0 at line %d\n", + line); + return -1; + } + + if (test_and_set_bit(0, &fdc_busy)) { + DECLARE_WAITQUEUE(wait, current); + add_wait_queue(&fdc_wait, &wait); + + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + + if (!test_and_set_bit(0, &fdc_busy)) + break; + + schedule(); + + if (!NO_SIGNAL) { + remove_wait_queue(&fdc_wait, &wait); + return -EINTR; + } + } + + set_current_state(TASK_RUNNING); + remove_wait_queue(&fdc_wait, &wait); + flush_scheduled_work(); + } + command_status = FD_COMMAND_NONE; + + __reschedule_timeout(drive, "lock fdc", 0); + set_fdc(drive); + return 0; +} + +#define lock_fdc(drive,interruptible) _lock_fdc(drive,interruptible, __LINE__) + +#define LOCK_FDC(drive,interruptible) \ +if (lock_fdc(drive,interruptible)) return -EINTR; + +/* unlocks the driver */ +static inline void unlock_fdc(void) +{ + unsigned long flags; + + raw_cmd = NULL; + if (!test_bit(0, &fdc_busy)) + DPRINT("FDC access conflict!\n"); + + if (do_floppy) + DPRINT("device interrupt still active at FDC release: %p!\n", + do_floppy); + command_status = FD_COMMAND_NONE; + spin_lock_irqsave(&floppy_lock, flags); + del_timer(&fd_timeout); + cont = NULL; + clear_bit(0, &fdc_busy); + if (elv_next_request(floppy_queue)) + do_fd_request(floppy_queue); + spin_unlock_irqrestore(&floppy_lock, flags); + wake_up(&fdc_wait); +} + +/* switches the motor off after a given timeout */ +static void motor_off_callback(unsigned long nr) +{ + unsigned char mask = ~(0x10 << UNIT(nr)); + + set_dor(FDC(nr), mask, 0); +} + +/* schedules motor off */ +static void floppy_off(unsigned int drive) +{ + unsigned long volatile delta; + int fdc = FDC(drive); + + if (!(FDCS->dor & (0x10 << UNIT(drive)))) + return; + + del_timer(motor_off_timer + drive); + + /* make spindle stop in a position which minimizes spinup time + * next time */ + if (UDP->rps) { + delta = jiffies - UDRS->first_read_date + HZ - + UDP->spindown_offset; + delta = ((delta * UDP->rps) % HZ) / UDP->rps; + motor_off_timer[drive].expires = + jiffies + UDP->spindown - delta; + } + add_timer(motor_off_timer + drive); +} + +/* + * cycle through all N_DRIVE floppy drives, for disk change testing. + * stopping at current drive. This is done before any long operation, to + * be sure to have up to date disk change information. 
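+ *
+ * (Sketch of the scan order: the loop starts at the drive after
+ * current_drive and wraps around, so current_drive is checked last and
+ * is re-selected via set_fdc(saved_drive) on the way out.)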
+ */ +static void scandrives(void) +{ + int i; + int drive; + int saved_drive; + + if (DP->select_delay) + return; + + saved_drive = current_drive; + for (i = 0; i < N_DRIVE; i++) { + drive = (saved_drive + i + 1) % N_DRIVE; + if (UDRS->fd_ref == 0 || UDP->select_delay != 0) + continue; /* skip closed drives */ + set_fdc(drive); + if (!(set_dor(fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) & + (0x10 << UNIT(drive)))) + /* switch the motor off again, if it was off to + * begin with */ + set_dor(fdc, ~(0x10 << UNIT(drive)), 0); + } + set_fdc(saved_drive); +} + +static void empty(void) +{ +} + +static DECLARE_WORK(floppy_work, NULL); + +static void schedule_bh(void (*handler) (void)) +{ + PREPARE_WORK(&floppy_work, (work_func_t)handler); + schedule_work(&floppy_work); +} + +static DEFINE_TIMER(fd_timer, NULL, 0, 0); + +static void cancel_activity(void) +{ + unsigned long flags; + + spin_lock_irqsave(&floppy_lock, flags); + do_floppy = NULL; + PREPARE_WORK(&floppy_work, (work_func_t)empty); + del_timer(&fd_timer); + spin_unlock_irqrestore(&floppy_lock, flags); +} + +/* this function makes sure that the disk stays in the drive during the + * transfer */ +static void fd_watchdog(void) +{ +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG) { + DPRINT("calling disk change from watchdog\n"); + } +#endif + + if (disk_change(current_drive)) { + DPRINT("disk removed during i/o\n"); + cancel_activity(); + cont->done(0); + reset_fdc(); + } else { + del_timer(&fd_timer); + fd_timer.function = (timeout_fn) fd_watchdog; + fd_timer.expires = jiffies + HZ / 10; + add_timer(&fd_timer); + } +} + +static void main_command_interrupt(void) +{ + del_timer(&fd_timer); + cont->interrupt(); +} + +/* waits for a delay (spinup or select) to pass */ +static int fd_wait_for_completion(unsigned long delay, timeout_fn function) +{ + if (FDCS->reset) { + reset_fdc(); /* do the reset during sleep to win time + * if we don't need to sleep, it's a good + * occasion anyways */ + return 1; + } + + if (time_before(jiffies, delay)) { + del_timer(&fd_timer); + fd_timer.function = function; + fd_timer.expires = delay; + add_timer(&fd_timer); + return 1; + } + return 0; +} + +static DEFINE_SPINLOCK(floppy_hlt_lock); +static int hlt_disabled; +static void floppy_disable_hlt(void) +{ + unsigned long flags; + + spin_lock_irqsave(&floppy_hlt_lock, flags); + if (!hlt_disabled) { + hlt_disabled = 1; +#ifdef HAVE_DISABLE_HLT + disable_hlt(); +#endif + } + spin_unlock_irqrestore(&floppy_hlt_lock, flags); +} + +static void floppy_enable_hlt(void) +{ + unsigned long flags; + + spin_lock_irqsave(&floppy_hlt_lock, flags); + if (hlt_disabled) { + hlt_disabled = 0; +#ifdef HAVE_DISABLE_HLT + enable_hlt(); +#endif + } + spin_unlock_irqrestore(&floppy_hlt_lock, flags); +} + +static void setup_DMA(void) +{ + unsigned long f; + +#ifdef FLOPPY_SANITY_CHECK + if (raw_cmd->length == 0) { + int i; + + printk("zero dma transfer size:"); + for (i = 0; i < raw_cmd->cmd_count; i++) + printk("%x,", raw_cmd->cmd[i]); + printk("\n"); + cont->done(0); + FDCS->reset = 1; + return; + } + if (((unsigned long)raw_cmd->kernel_data) % 512) { + printk("non aligned address: %p\n", raw_cmd->kernel_data); + cont->done(0); + FDCS->reset = 1; + return; + } +#endif + f = claim_dma_lock(); + fd_disable_dma(); +#ifdef fd_dma_setup + if (fd_dma_setup(raw_cmd->kernel_data, raw_cmd->length, + (raw_cmd->flags & FD_RAW_READ) ? 
+			DMA_MODE_READ : DMA_MODE_WRITE, FDCS->address) < 0) {
+		release_dma_lock(f);
+		cont->done(0);
+		FDCS->reset = 1;
+		return;
+	}
+	release_dma_lock(f);
+#else
+	fd_clear_dma_ff();
+	fd_cacheflush(raw_cmd->kernel_data, raw_cmd->length);
+	fd_set_dma_mode((raw_cmd->flags & FD_RAW_READ) ?
+			DMA_MODE_READ : DMA_MODE_WRITE);
+	fd_set_dma_addr(raw_cmd->kernel_data);
+	fd_set_dma_count(raw_cmd->length);
+	virtual_dma_port = FDCS->address;
+	fd_enable_dma();
+	release_dma_lock(f);
+#endif
+	floppy_disable_hlt();
+}
+
+static void show_floppy(void);
+
+/* waits until the fdc becomes ready */
+static int wait_til_ready(void)
+{
+	int status;
+	int counter;
+
+	if (FDCS->reset)
+		return -1;
+	for (counter = 0; counter < 10000; counter++) {
+		status = fd_inb(FD_STATUS);
+		if (status & STATUS_READY)
+			return status;
+	}
+	if (!initialising) {
+		DPRINT("Getstatus times out (%x) on fdc %d\n", status, fdc);
+		show_floppy();
+	}
+	FDCS->reset = 1;
+	return -1;
+}
+
+/* sends a command byte to the fdc */
+static int output_byte(char byte)
+{
+	int status;
+
+	if ((status = wait_til_ready()) < 0)
+		return -1;
+	if ((status & (STATUS_READY | STATUS_DIR | STATUS_DMA)) == STATUS_READY) {
+		fd_outb(byte, FD_DATA);
+#ifdef FLOPPY_SANITY_CHECK
+		output_log[output_log_pos].data = byte;
+		output_log[output_log_pos].status = status;
+		output_log[output_log_pos].jiffies = jiffies;
+		output_log_pos = (output_log_pos + 1) % OLOGSIZE;
+#endif
+		return 0;
+	}
+	FDCS->reset = 1;
+	if (!initialising) {
+		DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n",
+		       byte, fdc, status);
+		show_floppy();
+	}
+	return -1;
+}
+
+#define LAST_OUT(x) if (output_byte(x)<0){ reset_fdc();return;}
+
+/* gets the response from the fdc */
+static int result(void)
+{
+	int i;
+	int status = 0;
+
+	for (i = 0; i < MAX_REPLIES; i++) {
+		if ((status = wait_til_ready()) < 0)
+			break;
+		status &= STATUS_DIR | STATUS_READY | STATUS_BUSY | STATUS_DMA;
+		if ((status & ~STATUS_BUSY) == STATUS_READY) {
+#ifdef FLOPPY_SANITY_CHECK
+			resultjiffies = jiffies;
+			resultsize = i;
+#endif
+			return i;
+		}
+		if (status == (STATUS_DIR | STATUS_READY | STATUS_BUSY))
+			reply_buffer[i] = fd_inb(FD_DATA);
+		else
+			break;
+	}
+	if (!initialising) {
+		DPRINT
+		    ("get result error. Fdc=%d Last status=%x Read bytes=%d\n",
+		     fdc, status, i);
+		show_floppy();
+	}
+	FDCS->reset = 1;
+	return -1;
+}
+
+#define MORE_OUTPUT -2
+/* does the fdc need more output? */
+static int need_more_output(void)
+{
+	int status;
+
+	if ((status = wait_til_ready()) < 0)
+		return -1;
+	if ((status & (STATUS_READY | STATUS_DIR | STATUS_DMA)) == STATUS_READY)
+		return MORE_OUTPUT;
+	return result();
+}
+
+/* Set perpendicular mode as required, based on data rate, if supported.
+ * 82077 Now tested. 1Mbps data rate only possible with 82077-1.
+ */
+static inline void perpendicular_mode(void)
+{
+	unsigned char perp_mode;
+
+	if (raw_cmd->rate & 0x40) {
+		switch (raw_cmd->rate & 3) {
+		case 0:
+			perp_mode = 2;
+			break;
+		case 3:
+			perp_mode = 3;
+			break;
+		default:
+			DPRINT("Invalid data rate for perpendicular mode!\n");
+			cont->done(0);
+			FDCS->reset = 1;	/* convenient way to return to
+						 * redo without too much hassle
+						 * (deep stack et al.)
*/ + return; + } + } else + perp_mode = 0; + + if (FDCS->perp_mode == perp_mode) + return; + if (FDCS->version >= FDC_82077_ORIG) { + output_byte(FD_PERPENDICULAR); + output_byte(perp_mode); + FDCS->perp_mode = perp_mode; + } else if (perp_mode) { + DPRINT("perpendicular mode not supported by this FDC.\n"); + } +} /* perpendicular_mode */ + +static int fifo_depth = 0xa; +static int no_fifo; + +static int fdc_configure(void) +{ + /* Turn on FIFO */ + output_byte(FD_CONFIGURE); + if (need_more_output() != MORE_OUTPUT) + return 0; + output_byte(0); + output_byte(0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf)); + output_byte(0); /* pre-compensation from track + 0 upwards */ + return 1; +} + +#define NOMINAL_DTR 500 + +/* Issue a "SPECIFY" command to set the step rate time, head unload time, + * head load time, and DMA disable flag to values needed by floppy. + * + * The value "dtr" is the data transfer rate in Kbps. It is needed + * to account for the data rate-based scaling done by the 82072 and 82077 + * FDC types. This parameter is ignored for other types of FDCs (i.e. + * 8272a). + * + * Note that changing the data transfer rate has a (probably deleterious) + * effect on the parameters subject to scaling for 82072/82077 FDCs, so + * fdc_specify is called again after each data transfer rate + * change. + * + * srt: 1000 to 16000 in microseconds + * hut: 16 to 240 milliseconds + * hlt: 2 to 254 milliseconds + * + * These values are rounded up to the next highest available delay time. + */ +static void fdc_specify(void) +{ + unsigned char spec1; + unsigned char spec2; + unsigned long srt; + unsigned long hlt; + unsigned long hut; + unsigned long dtr = NOMINAL_DTR; + unsigned long scale_dtr = NOMINAL_DTR; + int hlt_max_code = 0x7f; + int hut_max_code = 0xf; + + if (FDCS->need_configure && FDCS->version >= FDC_82072A) { + fdc_configure(); + FDCS->need_configure = 0; + } + + switch (raw_cmd->rate & 0x03) { + case 3: + dtr = 1000; + break; + case 1: + dtr = 300; + if (FDCS->version >= FDC_82078) { + /* chose the default rate table, not the one + * where 1 = 2 Mbps */ + output_byte(FD_DRIVESPEC); + if (need_more_output() == MORE_OUTPUT) { + output_byte(UNIT(current_drive)); + output_byte(0xc0); + } + } + break; + case 2: + dtr = 250; + break; + } + + if (FDCS->version >= FDC_82072) { + scale_dtr = dtr; + hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */ + hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */ + } + + /* Convert step rate from microseconds to milliseconds and 4 bits */ + srt = 16 - DIV_ROUND_UP(DP->srt * scale_dtr / 1000, NOMINAL_DTR); + if (slow_floppy) { + srt = srt / 4; + } + SUPBOUND(srt, 0xf); + INFBOUND(srt, 0); + + hlt = DIV_ROUND_UP(DP->hlt * scale_dtr / 2, NOMINAL_DTR); + if (hlt < 0x01) + hlt = 0x01; + else if (hlt > 0x7f) + hlt = hlt_max_code; + + hut = DIV_ROUND_UP(DP->hut * scale_dtr / 16, NOMINAL_DTR); + if (hut < 0x1) + hut = 0x1; + else if (hut > 0xf) + hut = hut_max_code; + + spec1 = (srt << 4) | hut; + spec2 = (hlt << 1) | (use_virtual_dma & 1); + + /* If these parameters did not change, just return with success */ + if (FDCS->spec1 != spec1 || FDCS->spec2 != spec2) { + /* Go ahead and set spec1 and spec2 */ + output_byte(FD_SPECIFY); + output_byte(FDCS->spec1 = spec1); + output_byte(FDCS->spec2 = spec2); + } +} /* fdc_specify */ + +/* Set the FDC's data transfer rate on behalf of the specified drive. + * NOTE: with 82072/82077 FDCs, changing the data rate requires a reissue + * of the specify command (i.e. using the fdc_specify function). 
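+ *
+ * Worked example (a sketch, assuming DP->srt = 8000 usec from the drive
+ * parameter table and an FDC recent enough that scale_dtr = dtr):
+ *	500 kbps: srt = 16 - DIV_ROUND_UP(8000 * 500 / 1000, 500) = 8
+ *	250 kbps: srt = 16 - DIV_ROUND_UP(8000 * 250 / 1000, 500) = 12
+ * which is why fdc_specify() is reissued after every data rate change.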
+ */ +static int fdc_dtr(void) +{ + /* If data rate not already set to desired value, set it. */ + if ((raw_cmd->rate & 3) == FDCS->dtr) + return 0; + + /* Set dtr */ + fd_outb(raw_cmd->rate & 3, FD_DCR); + + /* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB) + * need a stabilization period of several milliseconds to be + * enforced after data rate changes before R/W operations. + * Pause 5 msec to avoid trouble. (Needs to be 2 jiffies) + */ + FDCS->dtr = raw_cmd->rate & 3; + return (fd_wait_for_completion(jiffies + 2UL * HZ / 100, + (timeout_fn) floppy_ready)); +} /* fdc_dtr */ + +static void tell_sector(void) +{ + printk(": track %d, head %d, sector %d, size %d", + R_TRACK, R_HEAD, R_SECTOR, R_SIZECODE); +} /* tell_sector */ + +/* + * OK, this error interpreting routine is called after a + * DMA read/write has succeeded + * or failed, so we check the results, and copy any buffers. + * hhb: Added better error reporting. + * ak: Made this into a separate routine. + */ +static int interpret_errors(void) +{ + char bad; + + if (inr != 7) { + DPRINT("-- FDC reply error"); + FDCS->reset = 1; + return 1; + } + + /* check IC to find cause of interrupt */ + switch (ST0 & ST0_INTR) { + case 0x40: /* error occurred during command execution */ + if (ST1 & ST1_EOC) + return 0; /* occurs with pseudo-DMA */ + bad = 1; + if (ST1 & ST1_WP) { + DPRINT("Drive is write protected\n"); + CLEARF(FD_DISK_WRITABLE); + cont->done(0); + bad = 2; + } else if (ST1 & ST1_ND) { + SETF(FD_NEED_TWADDLE); + } else if (ST1 & ST1_OR) { + if (DP->flags & FTD_MSG) + DPRINT("Over/Underrun - retrying\n"); + bad = 0; + } else if (*errors >= DP->max_errors.reporting) { + DPRINT(""); + if (ST0 & ST0_ECE) { + printk("Recalibrate failed!"); + } else if (ST2 & ST2_CRC) { + printk("data CRC error"); + tell_sector(); + } else if (ST1 & ST1_CRC) { + printk("CRC error"); + tell_sector(); + } else if ((ST1 & (ST1_MAM | ST1_ND)) + || (ST2 & ST2_MAM)) { + if (!probing) { + printk("sector not found"); + tell_sector(); + } else + printk("probe failed..."); + } else if (ST2 & ST2_WC) { /* seek error */ + printk("wrong cylinder"); + } else if (ST2 & ST2_BC) { /* cylinder marked as bad */ + printk("bad cylinder"); + } else { + printk + ("unknown error. ST[0..2] are: 0x%x 0x%x 0x%x", + ST0, ST1, ST2); + tell_sector(); + } + printk("\n"); + } + if (ST2 & ST2_WC || ST2 & ST2_BC) + /* wrong cylinder => recal */ + DRS->track = NEED_2_RECAL; + return bad; + case 0x80: /* invalid command given */ + DPRINT("Invalid FDC command given!\n"); + cont->done(0); + return 2; + case 0xc0: + DPRINT("Abnormal termination caused by polling\n"); + cont->error(); + return 2; + default: /* (0) Normal command termination */ + return 0; + } +} + +/* + * This routine is called when everything should be correctly set up + * for the transfer (i.e. floppy motor is on, the correct floppy is + * selected, and the head is sitting on the right track). + */ +static void setup_rw_floppy(void) +{ + int i; + int r; + int flags; + int dflags; + unsigned long ready_date; + timeout_fn function; + + flags = raw_cmd->flags; + if (flags & (FD_RAW_READ | FD_RAW_WRITE)) + flags |= FD_RAW_INTR; + + if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)) { + ready_date = DRS->spinup_date + DP->spinup; + /* If spinup will take a long time, rerun scandrives + * again just before spinup completion. Beware that + * after scandrives, we must again wait for selection. 
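+ *
+ * (Sketch: when more than DP->select_delay of spinup time remains we
+ * re-enter through floppy_start(), which rescans and reselects;
+ * otherwise the timer calls setup_rw_floppy() itself at spinup
+ * completion.)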
+ */ + if (time_after(ready_date, jiffies + DP->select_delay)) { + ready_date -= DP->select_delay; + function = (timeout_fn) floppy_start; + } else + function = (timeout_fn) setup_rw_floppy; + + /* wait until the floppy is spinning fast enough */ + if (fd_wait_for_completion(ready_date, function)) + return; + } + dflags = DRS->flags; + + if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE)) + setup_DMA(); + + if (flags & FD_RAW_INTR) + do_floppy = main_command_interrupt; + + r = 0; + for (i = 0; i < raw_cmd->cmd_count; i++) + r |= output_byte(raw_cmd->cmd[i]); + + debugt("rw_command: "); + + if (r) { + cont->error(); + reset_fdc(); + return; + } + + if (!(flags & FD_RAW_INTR)) { + inr = result(); + cont->interrupt(); + } else if (flags & FD_RAW_NEED_DISK) + fd_watchdog(); +} + +static int blind_seek; + +/* + * This is the routine called after every seek (or recalibrate) interrupt + * from the floppy controller. + */ +static void seek_interrupt(void) +{ + debugt("seek interrupt:"); + if (inr != 2 || (ST0 & 0xF8) != 0x20) { + DPRINT("seek failed\n"); + DRS->track = NEED_2_RECAL; + cont->error(); + cont->redo(); + return; + } + if (DRS->track >= 0 && DRS->track != ST1 && !blind_seek) { +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG) { + DPRINT + ("clearing NEWCHANGE flag because of effective seek\n"); + DPRINT("jiffies=%lu\n", jiffies); + } +#endif + CLEARF(FD_DISK_NEWCHANGE); /* effective seek */ + DRS->select_date = jiffies; + } + DRS->track = ST1; + floppy_ready(); +} + +static void check_wp(void) +{ + if (TESTF(FD_VERIFY)) { + /* check write protection */ + output_byte(FD_GETSTATUS); + output_byte(UNIT(current_drive)); + if (result() != 1) { + FDCS->reset = 1; + return; + } + CLEARF(FD_VERIFY); + CLEARF(FD_NEED_TWADDLE); +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG) { + DPRINT("checking whether disk is write protected\n"); + DPRINT("wp=%x\n", ST3 & 0x40); + } +#endif + if (!(ST3 & 0x40)) + SETF(FD_DISK_WRITABLE); + else + CLEARF(FD_DISK_WRITABLE); + } +} + +static void seek_floppy(void) +{ + int track; + + blind_seek = 0; + +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG) { + DPRINT("calling disk change from seek\n"); + } +#endif + + if (!TESTF(FD_DISK_NEWCHANGE) && + disk_change(current_drive) && (raw_cmd->flags & FD_RAW_NEED_DISK)) { + /* the media changed flag should be cleared after the seek. + * If it isn't, this means that there is really no disk in + * the drive. + */ + SETF(FD_DISK_CHANGED); + cont->done(0); + cont->redo(); + return; + } + if (DRS->track <= NEED_1_RECAL) { + recalibrate_floppy(); + return; + } else if (TESTF(FD_DISK_NEWCHANGE) && + (raw_cmd->flags & FD_RAW_NEED_DISK) && + (DRS->track <= NO_TRACK || DRS->track == raw_cmd->track)) { + /* we seek to clear the media-changed condition. Does anybody + * know a more elegant way, which works on all drives? 
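+ * (The approach below: seek to a track other than the current one so
+ * the head really moves, raw_cmd->track - 1 when possible and track 1
+ * otherwise, optionally with the drive deselected (blind_seek) when
+ * FD_SILENT_DCL_CLEAR is set.)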
*/ + if (raw_cmd->track) + track = raw_cmd->track - 1; + else { + if (DP->flags & FD_SILENT_DCL_CLEAR) { + set_dor(fdc, ~(0x10 << UNIT(current_drive)), 0); + blind_seek = 1; + raw_cmd->flags |= FD_RAW_NEED_SEEK; + } + track = 1; + } + } else { + check_wp(); + if (raw_cmd->track != DRS->track && + (raw_cmd->flags & FD_RAW_NEED_SEEK)) + track = raw_cmd->track; + else { + setup_rw_floppy(); + return; + } + } + + do_floppy = seek_interrupt; + output_byte(FD_SEEK); + output_byte(UNIT(current_drive)); + LAST_OUT(track); + debugt("seek command:"); +} + +static void recal_interrupt(void) +{ + debugt("recal interrupt:"); + if (inr != 2) + FDCS->reset = 1; + else if (ST0 & ST0_ECE) { + switch (DRS->track) { + case NEED_1_RECAL: + debugt("recal interrupt need 1 recal:"); + /* after a second recalibrate, we still haven't + * reached track 0. Probably no drive. Raise an + * error, as failing immediately might upset + * computers possessed by the Devil :-) */ + cont->error(); + cont->redo(); + return; + case NEED_2_RECAL: + debugt("recal interrupt need 2 recal:"); + /* If we already did a recalibrate, + * and we are not at track 0, this + * means we have moved. (The only way + * not to move at recalibration is to + * be already at track 0.) Clear the + * new change flag */ +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG) { + DPRINT + ("clearing NEWCHANGE flag because of second recalibrate\n"); + } +#endif + + CLEARF(FD_DISK_NEWCHANGE); + DRS->select_date = jiffies; + /* fall through */ + default: + debugt("recal interrupt default:"); + /* Recalibrate moves the head by at + * most 80 steps. If after one + * recalibrate we don't have reached + * track 0, this might mean that we + * started beyond track 80. Try + * again. */ + DRS->track = NEED_1_RECAL; + break; + } + } else + DRS->track = ST1; + floppy_ready(); +} + +static void print_result(char *message, int inr) +{ + int i; + + DPRINT("%s ", message); + if (inr >= 0) + for (i = 0; i < inr; i++) + printk("repl[%d]=%x ", i, reply_buffer[i]); + printk("\n"); +} + +/* interrupt handler. Note that this can be called externally on the Sparc */ +irqreturn_t floppy_interrupt(int irq, void *dev_id) +{ + int do_print; + unsigned long f; + void (*handler)(void) = do_floppy; + + lasthandler = handler; + interruptjiffies = jiffies; + + f = claim_dma_lock(); + fd_disable_dma(); + release_dma_lock(f); + + floppy_enable_hlt(); + do_floppy = NULL; + if (fdc >= N_FDC || FDCS->address == -1) { + /* we don't even know which FDC is the culprit */ + printk("DOR0=%x\n", fdc_state[0].dor); + printk("floppy interrupt on bizarre fdc %d\n", fdc); + printk("handler=%p\n", handler); + is_alive("bizarre fdc"); + return IRQ_NONE; + } + + FDCS->reset = 0; + /* We have to clear the reset flag here, because apparently on boxes + * with level triggered interrupts (PS/2, Sparc, ...), it is needed to + * emit SENSEI's to clear the interrupt line. And FDCS->reset blocks the + * emission of the SENSEI's. + * It is OK to emit floppy commands because we are in an interrupt + * handler here, and thus we have to fear no interference of other + * activity. 
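+ *
+ * (Sketch: a level triggered line is cleared by issuing FD_SENSEI until
+ * ST0 reports the currently selected unit; the loop below caps that at
+ * four attempts, matching the four FD_SENSEIs noted at
+ * reset_interrupt() below.)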
+ */ + + do_print = !handler && print_unex && !initialising; + + inr = result(); + if (do_print) + print_result("unexpected interrupt", inr); + if (inr == 0) { + int max_sensei = 4; + do { + output_byte(FD_SENSEI); + inr = result(); + if (do_print) + print_result("sensei", inr); + max_sensei--; + } while ((ST0 & 0x83) != UNIT(current_drive) && inr == 2 + && max_sensei); + } + if (!handler) { + FDCS->reset = 1; + return IRQ_NONE; + } + schedule_bh(handler); + is_alive("normal interrupt end"); + + /* FIXME! Was it really for us? */ + return IRQ_HANDLED; +} + +static void recalibrate_floppy(void) +{ + debugt("recalibrate floppy:"); + do_floppy = recal_interrupt; + output_byte(FD_RECALIBRATE); + LAST_OUT(UNIT(current_drive)); +} + +/* + * Must do 4 FD_SENSEIs after reset because of ``drive polling''. + */ +static void reset_interrupt(void) +{ + debugt("reset interrupt:"); + result(); /* get the status ready for set_fdc */ + if (FDCS->reset) { + printk("reset set in interrupt, calling %p\n", cont->error); + cont->error(); /* a reset just after a reset. BAD! */ + } + cont->redo(); +} + +/* + * reset is done by pulling bit 2 of DOR low for a while (old FDCs), + * or by setting the self clearing bit 7 of STATUS (newer FDCs) + */ +static void reset_fdc(void) +{ + unsigned long flags; + + do_floppy = reset_interrupt; + FDCS->reset = 0; + reset_fdc_info(0); + + /* Pseudo-DMA may intercept 'reset finished' interrupt. */ + /* Irrelevant for systems with true DMA (i386). */ + + flags = claim_dma_lock(); + fd_disable_dma(); + release_dma_lock(flags); + + if (FDCS->version >= FDC_82072A) + fd_outb(0x80 | (FDCS->dtr & 3), FD_STATUS); + else { + fd_outb(FDCS->dor & ~0x04, FD_DOR); + udelay(FD_RESET_DELAY); + fd_outb(FDCS->dor, FD_DOR); + } +} + +static void show_floppy(void) +{ + int i; + + printk("\n"); + printk("floppy driver state\n"); + printk("-------------------\n"); + printk("now=%lu last interrupt=%lu diff=%lu last called handler=%p\n", + jiffies, interruptjiffies, jiffies - interruptjiffies, + lasthandler); + +#ifdef FLOPPY_SANITY_CHECK + printk("timeout_message=%s\n", timeout_message); + printk("last output bytes:\n"); + for (i = 0; i < OLOGSIZE; i++) + printk("%2x %2x %lu\n", + output_log[(i + output_log_pos) % OLOGSIZE].data, + output_log[(i + output_log_pos) % OLOGSIZE].status, + output_log[(i + output_log_pos) % OLOGSIZE].jiffies); + printk("last result at %lu\n", resultjiffies); + printk("last redo_fd_request at %lu\n", lastredo); + for (i = 0; i < resultsize; i++) { + printk("%2x ", reply_buffer[i]); + } + printk("\n"); +#endif + + printk("status=%x\n", fd_inb(FD_STATUS)); + printk("fdc_busy=%lu\n", fdc_busy); + if (do_floppy) + printk("do_floppy=%p\n", do_floppy); + if (work_pending(&floppy_work)) + printk("floppy_work.func=%p\n", floppy_work.func); + if (timer_pending(&fd_timer)) + printk("fd_timer.function=%p\n", fd_timer.function); + if (timer_pending(&fd_timeout)) { + printk("timer_function=%p\n", fd_timeout.function); + printk("expires=%lu\n", fd_timeout.expires - jiffies); + printk("now=%lu\n", jiffies); + } + printk("cont=%p\n", cont); + printk("current_req=%p\n", current_req); + printk("command_status=%d\n", command_status); + printk("\n"); +} + +static void floppy_shutdown(unsigned long data) +{ + unsigned long flags; + + if (!initialising) + show_floppy(); + cancel_activity(); + + floppy_enable_hlt(); + + flags = claim_dma_lock(); + fd_disable_dma(); + release_dma_lock(flags); + + /* avoid dma going to a random drive after shutdown */ + + if (!initialising) + DPRINT("floppy 
timeout called\n"); + FDCS->reset = 1; + if (cont) { + cont->done(0); + cont->redo(); /* this will recall reset when needed */ + } else { + printk("no cont in shutdown!\n"); + process_fd_request(); + } + is_alive("floppy shutdown"); +} + +/* start motor, check media-changed condition and write protection */ +static int start_motor(void (*function)(void)) +{ + int mask; + int data; + + mask = 0xfc; + data = UNIT(current_drive); + if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)) { + if (!(FDCS->dor & (0x10 << UNIT(current_drive)))) { + set_debugt(); + /* no read since this drive is running */ + DRS->first_read_date = 0; + /* note motor start time if motor is not yet running */ + DRS->spinup_date = jiffies; + data |= (0x10 << UNIT(current_drive)); + } + } else if (FDCS->dor & (0x10 << UNIT(current_drive))) + mask &= ~(0x10 << UNIT(current_drive)); + + /* starts motor and selects floppy */ + del_timer(motor_off_timer + current_drive); + set_dor(fdc, mask, data); + + /* wait_for_completion also schedules reset if needed. */ + return (fd_wait_for_completion(DRS->select_date + DP->select_delay, + (timeout_fn) function)); +} + +static void floppy_ready(void) +{ + CHECK_RESET; + if (start_motor(floppy_ready)) + return; + if (fdc_dtr()) + return; + +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG) { + DPRINT("calling disk change from floppy_ready\n"); + } +#endif + if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) && + disk_change(current_drive) && !DP->select_delay) + twaddle(); /* this clears the dcl on certain drive/controller + * combinations */ + +#ifdef fd_chose_dma_mode + if ((raw_cmd->flags & FD_RAW_READ) || (raw_cmd->flags & FD_RAW_WRITE)) { + unsigned long flags = claim_dma_lock(); + fd_chose_dma_mode(raw_cmd->kernel_data, raw_cmd->length); + release_dma_lock(flags); + } +#endif + + if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)) { + perpendicular_mode(); + fdc_specify(); /* must be done here because of hut, hlt ... */ + seek_floppy(); + } else { + if ((raw_cmd->flags & FD_RAW_READ) || + (raw_cmd->flags & FD_RAW_WRITE)) + fdc_specify(); + setup_rw_floppy(); + } +} + +static void floppy_start(void) +{ + reschedule_timeout(current_reqD, "floppy start", 0); + + scandrives(); +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG) { + DPRINT("setting NEWCHANGE in floppy_start\n"); + } +#endif + SETF(FD_DISK_NEWCHANGE); + floppy_ready(); +} + +/* + * ======================================================================== + * here ends the bottom half. Exported routines are: + * floppy_start, floppy_off, floppy_ready, lock_fdc, unlock_fdc, set_fdc, + * start_motor, reset_fdc, reset_fdc_info, interpret_errors. + * Initialization also uses output_byte, result, set_dor, floppy_interrupt + * and set_dor. + * ======================================================================== + */ +/* + * General purpose continuations. 
+ * ============================== + */ + +static void do_wakeup(void) +{ + reschedule_timeout(MAXTIMEOUT, "do wakeup", 0); + cont = NULL; + command_status += 2; + wake_up(&command_done); +} + +static struct cont_t wakeup_cont = { + .interrupt = empty, + .redo = do_wakeup, + .error = empty, + .done = (done_f)empty +}; + +static struct cont_t intr_cont = { + .interrupt = empty, + .redo = process_fd_request, + .error = empty, + .done = (done_f)empty +}; + +static int wait_til_done(void (*handler)(void), int interruptible) +{ + int ret; + + schedule_bh(handler); + + if (command_status < 2 && NO_SIGNAL) { + DECLARE_WAITQUEUE(wait, current); + + add_wait_queue(&command_done, &wait); + for (;;) { + set_current_state(interruptible ? + TASK_INTERRUPTIBLE : + TASK_UNINTERRUPTIBLE); + + if (command_status >= 2 || !NO_SIGNAL) + break; + + is_alive("wait_til_done"); + schedule(); + } + + set_current_state(TASK_RUNNING); + remove_wait_queue(&command_done, &wait); + } + + if (command_status < 2) { + cancel_activity(); + cont = &intr_cont; + reset_fdc(); + return -EINTR; + } + + if (FDCS->reset) + command_status = FD_COMMAND_ERROR; + if (command_status == FD_COMMAND_OKAY) + ret = 0; + else + ret = -EIO; + command_status = FD_COMMAND_NONE; + return ret; +} + +static void generic_done(int result) +{ + command_status = result; + cont = &wakeup_cont; +} + +static void generic_success(void) +{ + cont->done(1); +} + +static void generic_failure(void) +{ + cont->done(0); +} + +static void success_and_wakeup(void) +{ + generic_success(); + cont->redo(); +} + +/* + * formatting and rw support. + * ========================== + */ + +static int next_valid_format(void) +{ + int probed_format; + + probed_format = DRS->probed_format; + while (1) { + if (probed_format >= 8 || !DP->autodetect[probed_format]) { + DRS->probed_format = 0; + return 1; + } + if (floppy_type[DP->autodetect[probed_format]].sect) { + DRS->probed_format = probed_format; + return 0; + } + probed_format++; + } +} + +static void bad_flp_intr(void) +{ + int err_count; + + if (probing) { + DRS->probed_format++; + if (!next_valid_format()) + return; + } + err_count = ++(*errors); + INFBOUND(DRWE->badness, err_count); + if (err_count > DP->max_errors.abort) + cont->done(0); + if (err_count > DP->max_errors.reset) + FDCS->reset = 1; + else if (err_count > DP->max_errors.recal) + DRS->track = NEED_2_RECAL; +} + +static void set_floppy(int drive) +{ + int type = ITYPE(UDRS->fd_device); + + if (type) + _floppy = floppy_type + type; + else + _floppy = current_type[drive]; +} + +/* + * formatting support. 
+ * =================== + */ +static void format_interrupt(void) +{ + switch (interpret_errors()) { + case 1: + cont->error(); + case 2: + break; + case 0: + cont->done(1); + } + cont->redo(); +} + +#define CODE2SIZE (ssize = ((1 << SIZECODE) + 3) >> 2) +#define FM_MODE(x,y) ((y) & ~(((x)->rate & 0x80) >>1)) +#define CT(x) ((x) | 0xc0) +static void setup_format_params(int track) +{ + int n; + int il; + int count; + int head_shift; + int track_shift; + struct fparm { + unsigned char track, head, sect, size; + } *here = (struct fparm *)floppy_track_buffer; + + raw_cmd = &default_raw_cmd; + raw_cmd->track = track; + + raw_cmd->flags = FD_RAW_WRITE | FD_RAW_INTR | FD_RAW_SPIN | + FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK; + raw_cmd->rate = _floppy->rate & 0x43; + raw_cmd->cmd_count = NR_F; + COMMAND = FM_MODE(_floppy, FD_FORMAT); + DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, format_req.head); + F_SIZECODE = FD_SIZECODE(_floppy); + F_SECT_PER_TRACK = _floppy->sect << 2 >> F_SIZECODE; + F_GAP = _floppy->fmt_gap; + F_FILL = FD_FILL_BYTE; + + raw_cmd->kernel_data = floppy_track_buffer; + raw_cmd->length = 4 * F_SECT_PER_TRACK; + + /* allow for about 30ms for data transport per track */ + head_shift = (F_SECT_PER_TRACK + 5) / 6; + + /* a ``cylinder'' is two tracks plus a little stepping time */ + track_shift = 2 * head_shift + 3; + + /* position of logical sector 1 on this track */ + n = (track_shift * format_req.track + head_shift * format_req.head) + % F_SECT_PER_TRACK; + + /* determine interleave */ + il = 1; + if (_floppy->fmt_gap < 0x22) + il++; + + /* initialize field */ + for (count = 0; count < F_SECT_PER_TRACK; ++count) { + here[count].track = format_req.track; + here[count].head = format_req.head; + here[count].sect = 0; + here[count].size = F_SIZECODE; + } + /* place logical sectors */ + for (count = 1; count <= F_SECT_PER_TRACK; ++count) { + here[n].sect = count; + n = (n + il) % F_SECT_PER_TRACK; + if (here[n].sect) { /* sector busy, find next free sector */ + ++n; + if (n >= F_SECT_PER_TRACK) { + n -= F_SECT_PER_TRACK; + while (here[n].sect) + ++n; + } + } + } + if (_floppy->stretch & FD_SECTBASEMASK) { + for (count = 0; count < F_SECT_PER_TRACK; count++) + here[count].sect += FD_SECTBASE(_floppy) - 1; + } +} + +static void redo_format(void) +{ + buffer_track = -1; + setup_format_params(format_req.track << STRETCH(_floppy)); + floppy_start(); + debugt("queue format request"); +} + +static struct cont_t format_cont = { + .interrupt = format_interrupt, + .redo = redo_format, + .error = bad_flp_intr, + .done = generic_done +}; + +static int do_format(int drive, struct format_descr *tmp_format_req) +{ + int ret; + + LOCK_FDC(drive, 1); + set_floppy(drive); + if (!_floppy || + _floppy->track > DP->tracks || + tmp_format_req->track >= _floppy->track || + tmp_format_req->head >= _floppy->head || + (_floppy->sect << 2) % (1 << FD_SIZECODE(_floppy)) || + !_floppy->fmt_gap) { + process_fd_request(); + return -EINVAL; + } + format_req = *tmp_format_req; + format_errors = 0; + cont = &format_cont; + errors = &format_errors; + IWAIT(redo_format); + process_fd_request(); + return ret; +} + +/* + * Buffer read/write and support + * ============================= + */ + +static void floppy_end_request(struct request *req, int error) +{ + unsigned int nr_sectors = current_count_sectors; + unsigned int drive = (unsigned long)req->rq_disk->private_data; + + /* current_count_sectors can be zero if transfer failed */ + if (error) + nr_sectors = req->current_nr_sectors; + if (__blk_end_request(req, error, 
+				  nr_sectors << 9))
+		return;
+
+	/* We're done with the request */
+	floppy_off(drive);
+	current_req = NULL;
+}
+
+/* new request_done. Can handle physical sectors which are smaller than a
+ * logical buffer */
+static void request_done(int uptodate)
+{
+	struct request_queue *q = floppy_queue;
+	struct request *req = current_req;
+	unsigned long flags;
+	int block;
+
+	probing = 0;
+	reschedule_timeout(MAXTIMEOUT, "request done %d", uptodate);
+
+	if (!req) {
+		printk("floppy.c: no request in request_done\n");
+		return;
+	}
+
+	if (uptodate) {
+		/* maintain values for invalidation on geometry
+		 * change */
+		block = current_count_sectors + req->sector;
+		INFBOUND(DRS->maxblock, block);
+		if (block > _floppy->sect)
+			DRS->maxtrack = 1;
+
+		/* unlock chained buffers */
+		spin_lock_irqsave(q->queue_lock, flags);
+		floppy_end_request(req, 0);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	} else {
+		if (rq_data_dir(req) == WRITE) {
+			/* record write error information */
+			DRWE->write_errors++;
+			if (DRWE->write_errors == 1) {
+				DRWE->first_error_sector = req->sector;
+				DRWE->first_error_generation = DRS->generation;
+			}
+			DRWE->last_error_sector = req->sector;
+			DRWE->last_error_generation = DRS->generation;
+		}
+		spin_lock_irqsave(q->queue_lock, flags);
+		floppy_end_request(req, -EIO);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
+}
+
+/* Interrupt handler evaluating the result of the r/w operation */
+static void rw_interrupt(void)
+{
+	int eoc;
+	int ssize;
+	int heads;
+	int nr_sectors;
+
+	if (R_HEAD >= 2) {
+		/* some Toshiba floppy controllers occasionally seem to
+		 * return bogus interrupts after read/write operations, which
+		 * can be recognized by a bad head number (>= 2) */
+		return;
+	}
+
+	if (!DRS->first_read_date)
+		DRS->first_read_date = jiffies;
+
+	nr_sectors = 0;
+	CODE2SIZE;
+
+	if (ST1 & ST1_EOC)
+		eoc = 1;
+	else
+		eoc = 0;
+
+	if (COMMAND & 0x80)
+		heads = 2;
+	else
+		heads = 1;
+
+	nr_sectors = (((R_TRACK - TRACK) * heads +
+		       R_HEAD - HEAD) * SECT_PER_TRACK +
+		      R_SECTOR - SECTOR + eoc) << SIZECODE >> 2;
+
+#ifdef FLOPPY_SANITY_CHECK
+	if (nr_sectors / ssize >
+	    DIV_ROUND_UP(in_sector_offset + current_count_sectors, ssize)) {
+		DPRINT("long rw: %x instead of %lx\n",
+		       nr_sectors, current_count_sectors);
+		printk("rs=%d s=%d\n", R_SECTOR, SECTOR);
+		printk("rh=%d h=%d\n", R_HEAD, HEAD);
+		printk("rt=%d t=%d\n", R_TRACK, TRACK);
+		printk("heads=%d eoc=%d\n", heads, eoc);
+		printk("spt=%d st=%d ss=%d\n", SECT_PER_TRACK,
+		       fsector_t, ssize);
+		printk("in_sector_offset=%d\n", in_sector_offset);
+	}
+#endif
+
+	nr_sectors -= in_sector_offset;
+	INFBOUND(nr_sectors, 0);
+	SUPBOUND(current_count_sectors, nr_sectors);
+
+	switch (interpret_errors()) {
+	case 2:
+		cont->redo();
+		return;
+	case 1:
+		if (!current_count_sectors) {
+			cont->error();
+			cont->redo();
+			return;
+		}
+		break;
+	case 0:
+		if (!current_count_sectors) {
+			cont->redo();
+			return;
+		}
+		current_type[current_drive] = _floppy;
+		floppy_sizes[TOMINOR(current_drive)] = _floppy->size;
+		break;
+	}
+
+	if (probing) {
+		if (DP->flags & FTD_MSG)
+			DPRINT("Auto-detected floppy type %s in fd%d\n",
+			       _floppy->name, current_drive);
+		current_type[current_drive] = _floppy;
+		floppy_sizes[TOMINOR(current_drive)] = _floppy->size;
+		probing = 0;
+	}
+
+	if (CT(COMMAND) != FD_READ ||
+	    raw_cmd->kernel_data == current_req->buffer) {
+		/* transfer directly from buffer */
+		cont->done(1);
+	} else if (CT(COMMAND) == FD_READ) {
+		buffer_track = raw_cmd->track;
+		buffer_drive = current_drive;
+		INFBOUND(buffer_max,
nr_sectors + fsector_t); + } + cont->redo(); +} + +/* Compute maximal contiguous buffer size. */ +static int buffer_chain_size(void) +{ + struct bio_vec *bv; + int size; + struct req_iterator iter; + char *base; + + base = bio_data(current_req->bio); + size = 0; + + rq_for_each_segment(bv, current_req, iter) { + if (page_address(bv->bv_page) + bv->bv_offset != base + size) + break; + + size += bv->bv_len; + } + + return size >> 9; +} + +/* Compute the maximal transfer size */ +static int transfer_size(int ssize, int max_sector, int max_size) +{ + SUPBOUND(max_sector, fsector_t + max_size); + + /* alignment */ + max_sector -= (max_sector % _floppy->sect) % ssize; + + /* transfer size, beginning not aligned */ + current_count_sectors = max_sector - fsector_t; + + return max_sector; +} + +/* + * Move data from/to the track buffer to/from the buffer cache. + */ +static void copy_buffer(int ssize, int max_sector, int max_sector_2) +{ + int remaining; /* number of transferred 512-byte sectors */ + struct bio_vec *bv; + char *buffer; + char *dma_buffer; + int size; + struct req_iterator iter; + + max_sector = transfer_size(ssize, + min(max_sector, max_sector_2), + current_req->nr_sectors); + + if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE && + buffer_max > fsector_t + current_req->nr_sectors) + current_count_sectors = min_t(int, buffer_max - fsector_t, + current_req->nr_sectors); + + remaining = current_count_sectors << 9; +#ifdef FLOPPY_SANITY_CHECK + if ((remaining >> 9) > current_req->nr_sectors && + CT(COMMAND) == FD_WRITE) { + DPRINT("in copy buffer\n"); + printk("current_count_sectors=%ld\n", current_count_sectors); + printk("remaining=%d\n", remaining >> 9); + printk("current_req->nr_sectors=%ld\n", + current_req->nr_sectors); + printk("current_req->current_nr_sectors=%u\n", + current_req->current_nr_sectors); + printk("max_sector=%d\n", max_sector); + printk("ssize=%d\n", ssize); + } +#endif + + buffer_max = max(max_sector, buffer_max); + + dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9); + + size = current_req->current_nr_sectors << 9; + + rq_for_each_segment(bv, current_req, iter) { + if (!remaining) + break; + + size = bv->bv_len; + SUPBOUND(size, remaining); + + buffer = page_address(bv->bv_page) + bv->bv_offset; +#ifdef FLOPPY_SANITY_CHECK + if (dma_buffer + size > + floppy_track_buffer + (max_buffer_sectors << 10) || + dma_buffer < floppy_track_buffer) { + DPRINT("buffer overrun in copy buffer %d\n", + (int)((floppy_track_buffer - + dma_buffer) >> 9)); + printk("fsector_t=%d buffer_min=%d\n", + fsector_t, buffer_min); + printk("current_count_sectors=%ld\n", + current_count_sectors); + if (CT(COMMAND) == FD_READ) + printk("read\n"); + if (CT(COMMAND) == FD_WRITE) + printk("write\n"); + break; + } + if (((unsigned long)buffer) % 512) + DPRINT("%p buffer not aligned\n", buffer); +#endif + if (CT(COMMAND) == FD_READ) + memcpy(buffer, dma_buffer, size); + else + memcpy(dma_buffer, buffer, size); + + remaining -= size; + dma_buffer += size; + } +#ifdef FLOPPY_SANITY_CHECK + if (remaining) { + if (remaining > 0) + max_sector -= remaining >> 9; + DPRINT("weirdness: remaining %d\n", remaining >> 9); + } +#endif +} + +/* work around a bug in pseudo DMA + * (on some FDCs) pseudo DMA does not stop when the CPU stops + * sending data. Hence we need a different way to signal the + * transfer length: We use SECT_PER_TRACK. 
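+ * An illustrative example (numbers assumed, not from the original
+ * comment): for a write of three 512-byte sectors (SIZECODE 2)
+ * starting at SECTOR 5, raw_cmd->length is 1536, hard_sectors is
+ * 1536 >> 9 == 3, and virtualdmabug_workaround() below sets
+ * SECT_PER_TRACK to 5 + 3 - 1 == 7, so the controller stops after
+ * the last sector of the transfer.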
Unfortunately, this
+ * does not work with MT, hence we can only transfer one head at
+ * a time
+ */
+static void virtualdmabug_workaround(void)
+{
+ int hard_sectors;
+ int end_sector;
+
+ if (CT(COMMAND) == FD_WRITE) {
+ COMMAND &= ~0x80; /* switch off multiple track mode */
+
+ hard_sectors = raw_cmd->length >> (7 + SIZECODE);
+ end_sector = SECTOR + hard_sectors - 1;
+#ifdef FLOPPY_SANITY_CHECK
+ if (end_sector > SECT_PER_TRACK) {
+ printk("too many sectors %d > %d\n",
+ end_sector, SECT_PER_TRACK);
+ return;
+ }
+#endif
+ SECT_PER_TRACK = end_sector; /* make sure SECT_PER_TRACK points
+ * to end of transfer */
+ }
+}
+
+/*
+ * Formulate a read/write request.
+ * this routine decides where to load the data (directly to buffer, or to
+ * tmp floppy area), how much data to load (the size of the buffer, the whole
+ * track, or a single sector)
+ * All floppy_track_buffer handling goes in here. If we ever add track buffer
+ * allocation on the fly, it should be done here. No other part should need
+ * modification.
+ */
+
+static int make_raw_rw_request(void)
+{
+ int aligned_sector_t;
+ int max_sector;
+ int max_size;
+ int tracksize;
+ int ssize;
+
+ if (max_buffer_sectors == 0) {
+ printk("VFS: Block I/O scheduled on unopened device\n");
+ return 0;
+ }
+
+ set_fdc((long)current_req->rq_disk->private_data);
+
+ raw_cmd = &default_raw_cmd;
+ raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK |
+ FD_RAW_NEED_SEEK;
+ raw_cmd->cmd_count = NR_RW;
+ if (rq_data_dir(current_req) == READ) {
+ raw_cmd->flags |= FD_RAW_READ;
+ COMMAND = FM_MODE(_floppy, FD_READ);
+ } else if (rq_data_dir(current_req) == WRITE) {
+ raw_cmd->flags |= FD_RAW_WRITE;
+ COMMAND = FM_MODE(_floppy, FD_WRITE);
+ } else {
+ DPRINT("make_raw_rw_request: unknown command\n");
+ return 0;
+ }
+
+ max_sector = _floppy->sect * _floppy->head;
+
+ TRACK = (int)current_req->sector / max_sector;
+ fsector_t = (int)current_req->sector % max_sector;
+ if (_floppy->track && TRACK >= _floppy->track) {
+ if (current_req->current_nr_sectors & 1) {
+ current_count_sectors = 1;
+ return 1;
+ } else
+ return 0;
+ }
+ HEAD = fsector_t / _floppy->sect;
+
+ if (((_floppy->stretch & (FD_SWAPSIDES | FD_SECTBASEMASK)) ||
+ TESTF(FD_NEED_TWADDLE)) && fsector_t < _floppy->sect)
+ max_sector = _floppy->sect;
+
+ /* 2M disks have phantom sectors on the first track */
+ if ((_floppy->rate & FD_2M) && (!TRACK) && (!HEAD)) {
+ max_sector = 2 * _floppy->sect / 3;
+ if (fsector_t >= max_sector) {
+ current_count_sectors =
+ min_t(int, _floppy->sect - fsector_t,
+ current_req->nr_sectors);
+ return 1;
+ }
+ SIZECODE = 2;
+ } else
+ SIZECODE = FD_SIZECODE(_floppy);
+ raw_cmd->rate = _floppy->rate & 0x43;
+ if ((_floppy->rate & FD_2M) && (TRACK || HEAD) && raw_cmd->rate == 2)
+ raw_cmd->rate = 1;
+
+ if (SIZECODE)
+ SIZECODE2 = 0xff;
+ else
+ SIZECODE2 = 0x80;
+ raw_cmd->track = TRACK << STRETCH(_floppy);
+ DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, HEAD);
+ GAP = _floppy->gap;
+ CODE2SIZE;
+ SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE;
+ SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) +
+ FD_SECTBASE(_floppy);
+
+ /* tracksize describes the size which can be filled up with sectors
+ * of size ssize.
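+ * For example (illustrative figures only): with _floppy->sect == 23
+ * 512-byte units per track and ssize == 4 (2KB sectors), tracksize
+ * is 23 - 23 % 4 == 20, and the loop below can fill the remaining
+ * three units with a 1KB sector plus a 512-byte sector.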
+ */ + tracksize = _floppy->sect - _floppy->sect % ssize; + if (tracksize < _floppy->sect) { + SECT_PER_TRACK++; + if (tracksize <= fsector_t % _floppy->sect) + SECTOR--; + + /* if we are beyond tracksize, fill up using smaller sectors */ + while (tracksize <= fsector_t % _floppy->sect) { + while (tracksize + ssize > _floppy->sect) { + SIZECODE--; + ssize >>= 1; + } + SECTOR++; + SECT_PER_TRACK++; + tracksize += ssize; + } + max_sector = HEAD * _floppy->sect + tracksize; + } else if (!TRACK && !HEAD && !(_floppy->rate & FD_2M) && probing) { + max_sector = _floppy->sect; + } else if (!HEAD && CT(COMMAND) == FD_WRITE) { + /* for virtual DMA bug workaround */ + max_sector = _floppy->sect; + } + + in_sector_offset = (fsector_t % _floppy->sect) % ssize; + aligned_sector_t = fsector_t - in_sector_offset; + max_size = current_req->nr_sectors; + if ((raw_cmd->track == buffer_track) && + (current_drive == buffer_drive) && + (fsector_t >= buffer_min) && (fsector_t < buffer_max)) { + /* data already in track buffer */ + if (CT(COMMAND) == FD_READ) { + copy_buffer(1, max_sector, buffer_max); + return 1; + } + } else if (in_sector_offset || current_req->nr_sectors < ssize) { + if (CT(COMMAND) == FD_WRITE) { + if (fsector_t + current_req->nr_sectors > ssize && + fsector_t + current_req->nr_sectors < ssize + ssize) + max_size = ssize + ssize; + else + max_size = ssize; + } + raw_cmd->flags &= ~FD_RAW_WRITE; + raw_cmd->flags |= FD_RAW_READ; + COMMAND = FM_MODE(_floppy, FD_READ); + } else if ((unsigned long)current_req->buffer < MAX_DMA_ADDRESS) { + unsigned long dma_limit; + int direct, indirect; + + indirect = + transfer_size(ssize, max_sector, + max_buffer_sectors * 2) - fsector_t; + + /* + * Do NOT use minimum() here---MAX_DMA_ADDRESS is 64 bits wide + * on a 64 bit machine! + */ + max_size = buffer_chain_size(); + dma_limit = + (MAX_DMA_ADDRESS - + ((unsigned long)current_req->buffer)) >> 9; + if ((unsigned long)max_size > dma_limit) { + max_size = dma_limit; + } + /* 64 kb boundaries */ + if (CROSS_64KB(current_req->buffer, max_size << 9)) + max_size = (K_64 - + ((unsigned long)current_req->buffer) % + K_64) >> 9; + direct = transfer_size(ssize, max_sector, max_size) - fsector_t; + /* + * We try to read tracks, but if we get too many errors, we + * go back to reading just one sector at a time. + * + * This means we should be able to read a sector even if there + * are other bad sectors on this track. 
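+ * (In the test below, "indirect" is the amount reachable through
+ * the track buffer and "direct" the amount we could DMA straight
+ * into the request buffer; the track buffer is preferred while
+ * indirect * 2 > direct * 3 and the error count is still below
+ * DP->max_errors.read_track.)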
+ */ + if (!direct || + (indirect * 2 > direct * 3 && + *errors < DP->max_errors.read_track && ((!probing + || (DP->read_track & (1 << DRS->probed_format)))))) { + max_size = current_req->nr_sectors; + } else { + raw_cmd->kernel_data = current_req->buffer; + raw_cmd->length = current_count_sectors << 9; + if (raw_cmd->length == 0) { + DPRINT + ("zero dma transfer attempted from make_raw_request\n"); + DPRINT("indirect=%d direct=%d fsector_t=%d", + indirect, direct, fsector_t); + return 0; + } + virtualdmabug_workaround(); + return 2; + } + } + + if (CT(COMMAND) == FD_READ) + max_size = max_sector; /* unbounded */ + + /* claim buffer track if needed */ + if (buffer_track != raw_cmd->track || /* bad track */ + buffer_drive != current_drive || /* bad drive */ + fsector_t > buffer_max || + fsector_t < buffer_min || + ((CT(COMMAND) == FD_READ || + (!in_sector_offset && current_req->nr_sectors >= ssize)) && + max_sector > 2 * max_buffer_sectors + buffer_min && + max_size + fsector_t > 2 * max_buffer_sectors + buffer_min) + /* not enough space */ + ) { + buffer_track = -1; + buffer_drive = current_drive; + buffer_max = buffer_min = aligned_sector_t; + } + raw_cmd->kernel_data = floppy_track_buffer + + ((aligned_sector_t - buffer_min) << 9); + + if (CT(COMMAND) == FD_WRITE) { + /* copy write buffer to track buffer. + * if we get here, we know that the write + * is either aligned or the data already in the buffer + * (buffer will be overwritten) */ +#ifdef FLOPPY_SANITY_CHECK + if (in_sector_offset && buffer_track == -1) + DPRINT("internal error offset !=0 on write\n"); +#endif + buffer_track = raw_cmd->track; + buffer_drive = current_drive; + copy_buffer(ssize, max_sector, + 2 * max_buffer_sectors + buffer_min); + } else + transfer_size(ssize, max_sector, + 2 * max_buffer_sectors + buffer_min - + aligned_sector_t); + + /* round up current_count_sectors to get dma xfer size */ + raw_cmd->length = in_sector_offset + current_count_sectors; + raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1; + raw_cmd->length <<= 9; +#ifdef FLOPPY_SANITY_CHECK + if ((raw_cmd->length < current_count_sectors << 9) || + (raw_cmd->kernel_data != current_req->buffer && + CT(COMMAND) == FD_WRITE && + (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max || + aligned_sector_t < buffer_min)) || + raw_cmd->length % (128 << SIZECODE) || + raw_cmd->length <= 0 || current_count_sectors <= 0) { + DPRINT("fractionary current count b=%lx s=%lx\n", + raw_cmd->length, current_count_sectors); + if (raw_cmd->kernel_data != current_req->buffer) + printk("addr=%d, length=%ld\n", + (int)((raw_cmd->kernel_data - + floppy_track_buffer) >> 9), + current_count_sectors); + printk("st=%d ast=%d mse=%d msi=%d\n", + fsector_t, aligned_sector_t, max_sector, max_size); + printk("ssize=%x SIZECODE=%d\n", ssize, SIZECODE); + printk("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n", + COMMAND, SECTOR, HEAD, TRACK); + printk("buffer drive=%d\n", buffer_drive); + printk("buffer track=%d\n", buffer_track); + printk("buffer_min=%d\n", buffer_min); + printk("buffer_max=%d\n", buffer_max); + return 0; + } + + if (raw_cmd->kernel_data != current_req->buffer) { + if (raw_cmd->kernel_data < floppy_track_buffer || + current_count_sectors < 0 || + raw_cmd->length < 0 || + raw_cmd->kernel_data + raw_cmd->length > + floppy_track_buffer + (max_buffer_sectors << 10)) { + DPRINT("buffer overrun in schedule dma\n"); + printk("fsector_t=%d buffer_min=%d current_count=%ld\n", + fsector_t, buffer_min, raw_cmd->length >> 9); + printk("current_count_sectors=%ld\n", + 
current_count_sectors); + if (CT(COMMAND) == FD_READ) + printk("read\n"); + if (CT(COMMAND) == FD_WRITE) + printk("write\n"); + return 0; + } + } else if (raw_cmd->length > current_req->nr_sectors << 9 || + current_count_sectors > current_req->nr_sectors) { + DPRINT("buffer overrun in direct transfer\n"); + return 0; + } else if (raw_cmd->length < current_count_sectors << 9) { + DPRINT("more sectors than bytes\n"); + printk("bytes=%ld\n", raw_cmd->length >> 9); + printk("sectors=%ld\n", current_count_sectors); + } + if (raw_cmd->length == 0) { + DPRINT("zero dma transfer attempted from make_raw_request\n"); + return 0; + } +#endif + + virtualdmabug_workaround(); + return 2; +} + +static void redo_fd_request(void) +{ +#define REPEAT {request_done(0); continue; } + int drive; + int tmp; + + lastredo = jiffies; + if (current_drive < N_DRIVE) + floppy_off(current_drive); + + for (;;) { + if (!current_req) { + struct request *req; + + spin_lock_irq(floppy_queue->queue_lock); + req = elv_next_request(floppy_queue); + spin_unlock_irq(floppy_queue->queue_lock); + if (!req) { + do_floppy = NULL; + unlock_fdc(); + return; + } + current_req = req; + } + drive = (long)current_req->rq_disk->private_data; + set_fdc(drive); + reschedule_timeout(current_reqD, "redo fd request", 0); + + set_floppy(drive); + raw_cmd = &default_raw_cmd; + raw_cmd->flags = 0; + if (start_motor(redo_fd_request)) + return; + disk_change(current_drive); + if (test_bit(current_drive, &fake_change) || + TESTF(FD_DISK_CHANGED)) { + DPRINT("disk absent or changed during operation\n"); + REPEAT; + } + if (!_floppy) { /* Autodetection */ + if (!probing) { + DRS->probed_format = 0; + if (next_valid_format()) { + DPRINT("no autodetectable formats\n"); + _floppy = NULL; + REPEAT; + } + } + probing = 1; + _floppy = + floppy_type + DP->autodetect[DRS->probed_format]; + } else + probing = 0; + errors = &(current_req->errors); + tmp = make_raw_rw_request(); + if (tmp < 2) { + request_done(tmp); + continue; + } + + if (TESTF(FD_NEED_TWADDLE)) + twaddle(); + schedule_bh(floppy_start); + debugt("queue fd request"); + return; + } +#undef REPEAT +} + +static struct cont_t rw_cont = { + .interrupt = rw_interrupt, + .redo = redo_fd_request, + .error = bad_flp_intr, + .done = request_done +}; + +static void process_fd_request(void) +{ + cont = &rw_cont; + schedule_bh(redo_fd_request); +} + +static void do_fd_request(struct request_queue * q) +{ + if (max_buffer_sectors == 0) { + printk("VFS: do_fd_request called on non-open device\n"); + return; + } + + if (usage_count == 0) { + printk("warning: usage count=0, current_req=%p exiting\n", + current_req); + printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector, + current_req->cmd_type, current_req->cmd_flags); + return; + } + if (test_bit(0, &fdc_busy)) { + /* fdc busy, this new request will be treated when the + current one is done */ + is_alive("do fd request, old request running"); + return; + } + lock_fdc(MAXTIMEOUT, 0); + process_fd_request(); + is_alive("do fd request"); +} + +static struct cont_t poll_cont = { + .interrupt = success_and_wakeup, + .redo = floppy_ready, + .error = generic_failure, + .done = generic_done +}; + +static int poll_drive(int interruptible, int flag) +{ + int ret; + + /* no auto-sense, just clear dcl */ + raw_cmd = &default_raw_cmd; + raw_cmd->flags = flag; + raw_cmd->track = 0; + raw_cmd->cmd_count = 0; + cont = &poll_cont; +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG) { + DPRINT("setting NEWCHANGE in poll_drive\n"); + } +#endif + SETF(FD_DISK_NEWCHANGE); + 
WAIT(floppy_ready); + return ret; +} + +/* + * User triggered reset + * ==================== + */ + +static void reset_intr(void) +{ + printk("weird, reset interrupt called\n"); +} + +static struct cont_t reset_cont = { + .interrupt = reset_intr, + .redo = success_and_wakeup, + .error = generic_failure, + .done = generic_done +}; + +static int user_reset_fdc(int drive, int arg, int interruptible) +{ + int ret; + + ret = 0; + LOCK_FDC(drive, interruptible); + if (arg == FD_RESET_ALWAYS) + FDCS->reset = 1; + if (FDCS->reset) { + cont = &reset_cont; + WAIT(reset_fdc); + } + process_fd_request(); + return ret; +} + +/* + * Misc Ioctl's and support + * ======================== + */ +static inline int fd_copyout(void __user *param, const void *address, + unsigned long size) +{ + return copy_to_user(param, address, size) ? -EFAULT : 0; +} + +static inline int fd_copyin(void __user *param, void *address, unsigned long size) +{ + return copy_from_user(address, param, size) ? -EFAULT : 0; +} + +#define _COPYOUT(x) (copy_to_user((void __user *)param, &(x), sizeof(x)) ? -EFAULT : 0) +#define _COPYIN(x) (copy_from_user(&(x), (void __user *)param, sizeof(x)) ? -EFAULT : 0) + +#define COPYOUT(x) ECALL(_COPYOUT(x)) +#define COPYIN(x) ECALL(_COPYIN(x)) + +static inline const char *drive_name(int type, int drive) +{ + struct floppy_struct *floppy; + + if (type) + floppy = floppy_type + type; + else { + if (UDP->native_format) + floppy = floppy_type + UDP->native_format; + else + return "(null)"; + } + if (floppy->name) + return floppy->name; + else + return "(null)"; +} + +/* raw commands */ +static void raw_cmd_done(int flag) +{ + int i; + + if (!flag) { + raw_cmd->flags |= FD_RAW_FAILURE; + raw_cmd->flags |= FD_RAW_HARDFAILURE; + } else { + raw_cmd->reply_count = inr; + if (raw_cmd->reply_count > MAX_REPLIES) + raw_cmd->reply_count = 0; + for (i = 0; i < raw_cmd->reply_count; i++) + raw_cmd->reply[i] = reply_buffer[i]; + + if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE)) { + unsigned long flags; + flags = claim_dma_lock(); + raw_cmd->length = fd_get_dma_residue(); + release_dma_lock(flags); + } + + if ((raw_cmd->flags & FD_RAW_SOFTFAILURE) && + (!raw_cmd->reply_count || (raw_cmd->reply[0] & 0xc0))) + raw_cmd->flags |= FD_RAW_FAILURE; + + if (disk_change(current_drive)) + raw_cmd->flags |= FD_RAW_DISK_CHANGE; + else + raw_cmd->flags &= ~FD_RAW_DISK_CHANGE; + if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER) + motor_off_callback(current_drive); + + if (raw_cmd->next && + (!(raw_cmd->flags & FD_RAW_FAILURE) || + !(raw_cmd->flags & FD_RAW_STOP_IF_FAILURE)) && + ((raw_cmd->flags & FD_RAW_FAILURE) || + !(raw_cmd->flags & FD_RAW_STOP_IF_SUCCESS))) { + raw_cmd = raw_cmd->next; + return; + } + } + generic_done(flag); +} + +static struct cont_t raw_cmd_cont = { + .interrupt = success_and_wakeup, + .redo = floppy_start, + .error = generic_failure, + .done = raw_cmd_done +}; + +static inline int raw_cmd_copyout(int cmd, char __user *param, + struct floppy_raw_cmd *ptr) +{ + int ret; + + while (ptr) { + COPYOUT(*ptr); + param += sizeof(struct floppy_raw_cmd); + if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length) { + if (ptr->length >= 0 + && ptr->length <= ptr->buffer_length) + ECALL(fd_copyout + (ptr->data, ptr->kernel_data, + ptr->buffer_length - ptr->length)); + } + ptr = ptr->next; + } + return 0; +} + +static void raw_cmd_free(struct floppy_raw_cmd **ptr) +{ + struct floppy_raw_cmd *next; + struct floppy_raw_cmd *this; + + this = *ptr; + *ptr = NULL; + while (this) { + if (this->buffer_length) { + 
fd_dma_mem_free((unsigned long)this->kernel_data, + this->buffer_length); + this->buffer_length = 0; + } + next = this->next; + kfree(this); + this = next; + } +} + +static inline int raw_cmd_copyin(int cmd, char __user *param, + struct floppy_raw_cmd **rcmd) +{ + struct floppy_raw_cmd *ptr; + int ret; + int i; + + *rcmd = NULL; + while (1) { + ptr = (struct floppy_raw_cmd *) + kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER); + if (!ptr) + return -ENOMEM; + *rcmd = ptr; + COPYIN(*ptr); + ptr->next = NULL; + ptr->buffer_length = 0; + param += sizeof(struct floppy_raw_cmd); + if (ptr->cmd_count > 33) + /* the command may now also take up the space + * initially intended for the reply & the + * reply count. Needed for long 82078 commands + * such as RESTORE, which takes ... 17 command + * bytes. Murphy's law #137: When you reserve + * 16 bytes for a structure, you'll one day + * discover that you really need 17... + */ + return -EINVAL; + + for (i = 0; i < 16; i++) + ptr->reply[i] = 0; + ptr->resultcode = 0; + ptr->kernel_data = NULL; + + if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) { + if (ptr->length <= 0) + return -EINVAL; + ptr->kernel_data = + (char *)fd_dma_mem_alloc(ptr->length); + fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length); + if (!ptr->kernel_data) + return -ENOMEM; + ptr->buffer_length = ptr->length; + } + if (ptr->flags & FD_RAW_WRITE) + ECALL(fd_copyin(ptr->data, ptr->kernel_data, + ptr->length)); + rcmd = &(ptr->next); + if (!(ptr->flags & FD_RAW_MORE)) + return 0; + ptr->rate &= 0x43; + } +} + +static int raw_cmd_ioctl(int cmd, void __user *param) +{ + struct floppy_raw_cmd *my_raw_cmd; + int drive; + int ret2; + int ret; + + if (FDCS->rawcmd <= 1) + FDCS->rawcmd = 1; + for (drive = 0; drive < N_DRIVE; drive++) { + if (FDC(drive) != fdc) + continue; + if (drive == current_drive) { + if (UDRS->fd_ref > 1) { + FDCS->rawcmd = 2; + break; + } + } else if (UDRS->fd_ref) { + FDCS->rawcmd = 2; + break; + } + } + + if (FDCS->reset) + return -EIO; + + ret = raw_cmd_copyin(cmd, param, &my_raw_cmd); + if (ret) { + raw_cmd_free(&my_raw_cmd); + return ret; + } + + raw_cmd = my_raw_cmd; + cont = &raw_cmd_cont; + ret = wait_til_done(floppy_start, 1); +#ifdef DCL_DEBUG + if (DP->flags & FD_DEBUG) { + DPRINT("calling disk change from raw_cmd ioctl\n"); + } +#endif + + if (ret != -EINTR && FDCS->reset) + ret = -EIO; + + DRS->track = NO_TRACK; + + ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd); + if (!ret) + ret = ret2; + raw_cmd_free(&my_raw_cmd); + return ret; +} + +static int invalidate_drive(struct block_device *bdev) +{ + /* invalidate the buffer track to force a reread */ + set_bit((long)bdev->bd_disk->private_data, &fake_change); + process_fd_request(); + check_disk_change(bdev); + return 0; +} + +static inline int set_geometry(unsigned int cmd, struct floppy_struct *g, + int drive, int type, struct block_device *bdev) +{ + int cnt; + + /* sanity checking for parameters. 
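+ * sect, head and track must all be positive, the requested track
+ * count must fit on the drive for the given stretch, and none of
+ * the reserved stretch bits may be set.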
*/ + if (g->sect <= 0 || + g->head <= 0 || + g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) || + /* check if reserved bits are set */ + (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0) + return -EINVAL; + if (type) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + mutex_lock(&open_lock); + LOCK_FDC(drive, 1); + floppy_type[type] = *g; + floppy_type[type].name = "user format"; + for (cnt = type << 2; cnt < (type << 2) + 4; cnt++) + floppy_sizes[cnt] = floppy_sizes[cnt + 0x80] = + floppy_type[type].size + 1; + process_fd_request(); + for (cnt = 0; cnt < N_DRIVE; cnt++) { + struct block_device *bdev = opened_bdev[cnt]; + if (!bdev || ITYPE(drive_state[cnt].fd_device) != type) + continue; + __invalidate_device(bdev); + } + mutex_unlock(&open_lock); + } else { + int oldStretch; + LOCK_FDC(drive, 1); + if (cmd != FDDEFPRM) + /* notice a disk change immediately, else + * we lose our settings immediately*/ + CALL(poll_drive(1, FD_RAW_NEED_DISK)); + oldStretch = g->stretch; + user_params[drive] = *g; + if (buffer_drive == drive) + SUPBOUND(buffer_max, user_params[drive].sect); + current_type[drive] = &user_params[drive]; + floppy_sizes[drive] = user_params[drive].size; + if (cmd == FDDEFPRM) + DRS->keep_data = -1; + else + DRS->keep_data = 1; + /* invalidation. Invalidate only when needed, i.e. + * when there are already sectors in the buffer cache + * whose number will change. This is useful, because + * mtools often changes the geometry of the disk after + * looking at the boot block */ + if (DRS->maxblock > user_params[drive].sect || + DRS->maxtrack || + ((user_params[drive].sect ^ oldStretch) & + (FD_SWAPSIDES | FD_SECTBASEMASK))) + invalidate_drive(bdev); + else + process_fd_request(); + } + return 0; +} + +/* handle obsolete ioctl's */ +static int ioctl_table[] = { + FDCLRPRM, + FDSETPRM, + FDDEFPRM, + FDGETPRM, + FDMSGON, + FDMSGOFF, + FDFMTBEG, + FDFMTTRK, + FDFMTEND, + FDSETEMSGTRESH, + FDFLUSH, + FDSETMAXERRS, + FDGETMAXERRS, + FDGETDRVTYP, + FDSETDRVPRM, + FDGETDRVPRM, + FDGETDRVSTAT, + FDPOLLDRVSTAT, + FDRESET, + FDGETFDCSTAT, + FDWERRORCLR, + FDWERRORGET, + FDRAWCMD, + FDEJECT, + FDTWADDLE +}; + +static inline int normalize_ioctl(int *cmd, int *size) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ioctl_table); i++) { + if ((*cmd & 0xffff) == (ioctl_table[i] & 0xffff)) { + *size = _IOC_SIZE(*cmd); + *cmd = ioctl_table[i]; + if (*size > _IOC_SIZE(*cmd)) { + printk("ioctl not yet supported\n"); + return -EFAULT; + } + return 0; + } + } + return -EINVAL; +} + +static int get_floppy_geometry(int drive, int type, struct floppy_struct **g) +{ + if (type) + *g = &floppy_type[type]; + else { + LOCK_FDC(drive, 0); + CALL(poll_drive(0, 0)); + process_fd_request(); + *g = current_type[drive]; + } + if (!*g) + return -ENODEV; + return 0; +} + +static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + int drive = (long)bdev->bd_disk->private_data; + int type = ITYPE(drive_state[drive].fd_device); + struct floppy_struct *g; + int ret; + + ret = get_floppy_geometry(drive, type, &g); + if (ret) + return ret; + + geo->heads = g->head; + geo->sectors = g->sect; + geo->cylinders = g->track; + return 0; +} + +static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, + unsigned long param) +{ +#define FD_IOCTL_ALLOWED (mode & (FMODE_WRITE|FMODE_WRITE_IOCTL)) +#define OUT(c,x) case c: outparam = (const char *) (x); break +#define IN(c,x,tag) case c: *(x) = inparam. 
tag ; return 0 + + int drive = (long)bdev->bd_disk->private_data; + int type = ITYPE(UDRS->fd_device); + int i; + int ret; + int size; + union inparam { + struct floppy_struct g; /* geometry */ + struct format_descr f; + struct floppy_max_errors max_errors; + struct floppy_drive_params dp; + } inparam; /* parameters coming from user space */ + const char *outparam; /* parameters passed back to user space */ + + /* convert compatibility eject ioctls into floppy eject ioctl. + * We do this in order to provide a means to eject floppy disks before + * installing the new fdutils package */ + if (cmd == CDROMEJECT || /* CD-ROM eject */ + cmd == 0x6470 /* SunOS floppy eject */ ) { + DPRINT("obsolete eject ioctl\n"); + DPRINT("please use floppycontrol --eject\n"); + cmd = FDEJECT; + } + + /* convert the old style command into a new style command */ + if ((cmd & 0xff00) == 0x0200) { + ECALL(normalize_ioctl(&cmd, &size)); + } else + return -EINVAL; + + /* permission checks */ + if (((cmd & 0x40) && !FD_IOCTL_ALLOWED) || + ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))) + return -EPERM; + + /* copyin */ + CLEARSTRUCT(&inparam); + if (_IOC_DIR(cmd) & _IOC_WRITE) + ECALL(fd_copyin((void __user *)param, &inparam, size)) + + switch (cmd) { + case FDEJECT: + if (UDRS->fd_ref != 1) + /* somebody else has this drive open */ + return -EBUSY; + LOCK_FDC(drive, 1); + + /* do the actual eject. Fails on + * non-Sparc architectures */ + ret = fd_eject(UNIT(drive)); + + USETF(FD_DISK_CHANGED); + USETF(FD_VERIFY); + process_fd_request(); + return ret; + case FDCLRPRM: + LOCK_FDC(drive, 1); + current_type[drive] = NULL; + floppy_sizes[drive] = MAX_DISK_SIZE << 1; + UDRS->keep_data = 0; + return invalidate_drive(bdev); + case FDSETPRM: + case FDDEFPRM: + return set_geometry(cmd, &inparam.g, + drive, type, bdev); + case FDGETPRM: + ECALL(get_floppy_geometry(drive, type, + (struct floppy_struct **) + &outparam)); + break; + + case FDMSGON: + UDP->flags |= FTD_MSG; + return 0; + case FDMSGOFF: + UDP->flags &= ~FTD_MSG; + return 0; + + case FDFMTBEG: + LOCK_FDC(drive, 1); + CALL(poll_drive(1, FD_RAW_NEED_DISK)); + ret = UDRS->flags; + process_fd_request(); + if (ret & FD_VERIFY) + return -ENODEV; + if (!(ret & FD_DISK_WRITABLE)) + return -EROFS; + return 0; + case FDFMTTRK: + if (UDRS->fd_ref != 1) + return -EBUSY; + return do_format(drive, &inparam.f); + case FDFMTEND: + case FDFLUSH: + LOCK_FDC(drive, 1); + return invalidate_drive(bdev); + + case FDSETEMSGTRESH: + UDP->max_errors.reporting = + (unsigned short)(param & 0x0f); + return 0; + OUT(FDGETMAXERRS, &UDP->max_errors); + IN(FDSETMAXERRS, &UDP->max_errors, max_errors); + + case FDGETDRVTYP: + outparam = drive_name(type, drive); + SUPBOUND(size, strlen(outparam) + 1); + break; + + IN(FDSETDRVPRM, UDP, dp); + OUT(FDGETDRVPRM, UDP); + + case FDPOLLDRVSTAT: + LOCK_FDC(drive, 1); + CALL(poll_drive(1, FD_RAW_NEED_DISK)); + process_fd_request(); + /* fall through */ + OUT(FDGETDRVSTAT, UDRS); + + case FDRESET: + return user_reset_fdc(drive, (int)param, 1); + + OUT(FDGETFDCSTAT, UFDCS); + + case FDWERRORCLR: + CLEARSTRUCT(UDRWE); + return 0; + OUT(FDWERRORGET, UDRWE); + + case FDRAWCMD: + if (type) + return -EINVAL; + LOCK_FDC(drive, 1); + set_floppy(drive); + CALL(i = raw_cmd_ioctl(cmd, (void __user *)param)); + process_fd_request(); + return i; + + case FDTWADDLE: + LOCK_FDC(drive, 1); + twaddle(); + process_fd_request(); + return 0; + + default: + return -EINVAL; + } + + if (_IOC_DIR(cmd) & _IOC_READ) + return fd_copyout((void __user *)param, outparam, size); + else + 
return 0; +#undef OUT +#undef IN +} + +static void __init config_types(void) +{ + int first = 1; + int drive; + + /* read drive info out of physical CMOS */ + drive = 0; + if (!UDP->cmos) + UDP->cmos = FLOPPY0_TYPE; + drive = 1; + if (!UDP->cmos && FLOPPY1_TYPE) + UDP->cmos = FLOPPY1_TYPE; + + /* FIXME: additional physical CMOS drive detection should go here */ + + for (drive = 0; drive < N_DRIVE; drive++) { + unsigned int type = UDP->cmos; + struct floppy_drive_params *params; + const char *name = NULL; + static char temparea[32]; + + if (type < ARRAY_SIZE(default_drive_params)) { + params = &default_drive_params[type].params; + if (type) { + name = default_drive_params[type].name; + allowed_drive_mask |= 1 << drive; + } else + allowed_drive_mask &= ~(1 << drive); + } else { + params = &default_drive_params[0].params; + sprintf(temparea, "unknown type %d (usb?)", type); + name = temparea; + } + if (name) { + const char *prepend = ","; + if (first) { + prepend = KERN_INFO "Floppy drive(s):"; + first = 0; + } + printk("%s fd%d is %s", prepend, drive, name); + } + *UDP = *params; + } + if (!first) + printk("\n"); +} + +static int floppy_release(struct gendisk *disk, fmode_t mode) +{ + int drive = (long)disk->private_data; + + mutex_lock(&open_lock); + if (UDRS->fd_ref < 0) + UDRS->fd_ref = 0; + else if (!UDRS->fd_ref--) { + DPRINT("floppy_release with fd_ref == 0"); + UDRS->fd_ref = 0; + } + if (!UDRS->fd_ref) + opened_bdev[drive] = NULL; + mutex_unlock(&open_lock); + + return 0; +} + +/* + * floppy_open check for aliasing (/dev/fd0 can be the same as + * /dev/PS0 etc), and disallows simultaneous access to the same + * drive with different device numbers. + */ +static int floppy_open(struct block_device *bdev, fmode_t mode) +{ + int drive = (long)bdev->bd_disk->private_data; + int old_dev, new_dev; + int try; + int res = -EBUSY; + char *tmp; + + mutex_lock(&open_lock); + old_dev = UDRS->fd_device; + if (opened_bdev[drive] && opened_bdev[drive] != bdev) + goto out2; + + if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)) { + USETF(FD_DISK_CHANGED); + USETF(FD_VERIFY); + } + + if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (mode & FMODE_EXCL))) + goto out2; + + if (mode & FMODE_EXCL) + UDRS->fd_ref = -1; + else + UDRS->fd_ref++; + + opened_bdev[drive] = bdev; + + res = -ENXIO; + + if (!floppy_track_buffer) { + /* if opening an ED drive, reserve a big buffer, + * else reserve a small one */ + if ((UDP->cmos == 6) || (UDP->cmos == 5)) + try = 64; /* Only 48 actually useful */ + else + try = 32; /* Only 24 actually useful */ + + tmp = (char *)fd_dma_mem_alloc(1024 * try); + if (!tmp && !floppy_track_buffer) { + try >>= 1; /* buffer only one side */ + INFBOUND(try, 16); + tmp = (char *)fd_dma_mem_alloc(1024 * try); + } + if (!tmp && !floppy_track_buffer) { + fallback_on_nodma_alloc(&tmp, 2048 * try); + } + if (!tmp && !floppy_track_buffer) { + DPRINT("Unable to allocate DMA memory\n"); + goto out; + } + if (floppy_track_buffer) { + if (tmp) + fd_dma_mem_free((unsigned long)tmp, try * 1024); + } else { + buffer_min = buffer_max = -1; + floppy_track_buffer = tmp; + max_buffer_sectors = try; + } + } + + new_dev = MINOR(bdev->bd_dev); + UDRS->fd_device = new_dev; + set_capacity(disks[drive], floppy_sizes[new_dev]); + if (old_dev != -1 && old_dev != new_dev) { + if (buffer_drive == drive) + buffer_track = -1; + } + + if (UFDCS->rawcmd == 1) + UFDCS->rawcmd = 2; + + if (!(mode & FMODE_NDELAY)) { + if (mode & (FMODE_READ|FMODE_WRITE)) { + UDRS->last_checked = 0; + check_disk_change(bdev); + if 
(UTESTF(FD_DISK_CHANGED)) + goto out; + } + res = -EROFS; + if ((mode & FMODE_WRITE) && !(UTESTF(FD_DISK_WRITABLE))) + goto out; + } + mutex_unlock(&open_lock); + return 0; +out: + if (UDRS->fd_ref < 0) + UDRS->fd_ref = 0; + else + UDRS->fd_ref--; + if (!UDRS->fd_ref) + opened_bdev[drive] = NULL; +out2: + mutex_unlock(&open_lock); + return res; +} + +/* + * Check if the disk has been changed or if a change has been faked. + */ +static int check_floppy_change(struct gendisk *disk) +{ + int drive = (long)disk->private_data; + + if (UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY)) + return 1; + + if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { + lock_fdc(drive, 0); + poll_drive(0, 0); + process_fd_request(); + } + + if (UTESTF(FD_DISK_CHANGED) || + UTESTF(FD_VERIFY) || + test_bit(drive, &fake_change) || + (!ITYPE(UDRS->fd_device) && !current_type[drive])) + return 1; + return 0; +} + +/* + * This implements "read block 0" for floppy_revalidate(). + * Needed for format autodetection, checking whether there is + * a disk in the drive, and whether that disk is writable. + */ + +static void floppy_rb0_complete(struct bio *bio, + int err) +{ + complete((struct completion *)bio->bi_private); +} + +static int __floppy_read_block_0(struct block_device *bdev) +{ + struct bio bio; + struct bio_vec bio_vec; + struct completion complete; + struct page *page; + size_t size; + + page = alloc_page(GFP_NOIO); + if (!page) { + process_fd_request(); + return -ENOMEM; + } + + size = bdev->bd_block_size; + if (!size) + size = 1024; + + bio_init(&bio); + bio.bi_io_vec = &bio_vec; + bio_vec.bv_page = page; + bio_vec.bv_len = size; + bio_vec.bv_offset = 0; + bio.bi_vcnt = 1; + bio.bi_idx = 0; + bio.bi_size = size; + bio.bi_bdev = bdev; + bio.bi_sector = 0; + init_completion(&complete); + bio.bi_private = &complete; + bio.bi_end_io = floppy_rb0_complete; + + submit_bio(READ, &bio); + generic_unplug_device(bdev_get_queue(bdev)); + process_fd_request(); + wait_for_completion(&complete); + + __free_page(page); + + return 0; +} + +/* revalidate the floppy disk, i.e. trigger format autodetection by reading + * the bootblock (block 0). "Autodetection" is also needed to check whether + * there is a disk in the drive at all... 
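+ * (with no medium present the read of block 0 simply fails).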
Thus we also do it for fixed + * geometry formats */ +static int floppy_revalidate(struct gendisk *disk) +{ + int drive = (long)disk->private_data; +#define NO_GEOM (!current_type[drive] && !ITYPE(UDRS->fd_device)) + int cf; + int res = 0; + + if (UTESTF(FD_DISK_CHANGED) || + UTESTF(FD_VERIFY) || test_bit(drive, &fake_change) || NO_GEOM) { + if (usage_count == 0) { + printk("VFS: revalidate called on non-open device.\n"); + return -EFAULT; + } + lock_fdc(drive, 0); + cf = UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY); + if (!(cf || test_bit(drive, &fake_change) || NO_GEOM)) { + process_fd_request(); /*already done by another thread */ + return 0; + } + UDRS->maxblock = 0; + UDRS->maxtrack = 0; + if (buffer_drive == drive) + buffer_track = -1; + clear_bit(drive, &fake_change); + UCLEARF(FD_DISK_CHANGED); + if (cf) + UDRS->generation++; + if (NO_GEOM) { + /* auto-sensing */ + res = __floppy_read_block_0(opened_bdev[drive]); + } else { + if (cf) + poll_drive(0, FD_RAW_NEED_DISK); + process_fd_request(); + } + } + set_capacity(disk, floppy_sizes[UDRS->fd_device]); + return res; +} + +static struct block_device_operations floppy_fops = { + .owner = THIS_MODULE, + .open = floppy_open, + .release = floppy_release, + .locked_ioctl = fd_ioctl, + .getgeo = fd_getgeo, + .media_changed = check_floppy_change, + .revalidate_disk = floppy_revalidate, +}; + +/* + * Floppy Driver initialization + * ============================= + */ + +/* Determine the floppy disk controller type */ +/* This routine was written by David C. Niemi */ +static char __init get_fdc_version(void) +{ + int r; + + output_byte(FD_DUMPREGS); /* 82072 and better know DUMPREGS */ + if (FDCS->reset) + return FDC_NONE; + if ((r = result()) <= 0x00) + return FDC_NONE; /* No FDC present ??? */ + if ((r == 1) && (reply_buffer[0] == 0x80)) { + printk(KERN_INFO "FDC %d is an 8272A\n", fdc); + return FDC_8272A; /* 8272a/765 don't know DUMPREGS */ + } + if (r != 10) { + printk + ("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n", + fdc, r); + return FDC_UNKNOWN; + } + + if (!fdc_configure()) { + printk(KERN_INFO "FDC %d is an 82072\n", fdc); + return FDC_82072; /* 82072 doesn't know CONFIGURE */ + } + + output_byte(FD_PERPENDICULAR); + if (need_more_output() == MORE_OUTPUT) { + output_byte(0); + } else { + printk(KERN_INFO "FDC %d is an 82072A\n", fdc); + return FDC_82072A; /* 82072A as found on Sparcs. 
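+ * It responds to CONFIGURE but does not ask for
+ * the PERPENDICULAR parameter byte.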
*/
+ }
+
+ output_byte(FD_UNLOCK);
+ r = result();
+ if ((r == 1) && (reply_buffer[0] == 0x80)) {
+ printk(KERN_INFO "FDC %d is a pre-1991 82077\n", fdc);
+ return FDC_82077_ORIG; /* Pre-1991 82077, doesn't know
+ * LOCK/UNLOCK */
+ }
+ if ((r != 1) || (reply_buffer[0] != 0x00)) {
+ printk("FDC %d init: UNLOCK: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+ output_byte(FD_PARTID);
+ r = result();
+ if (r != 1) {
+ printk("FDC %d init: PARTID: unexpected return of %d bytes.\n",
+ fdc, r);
+ return FDC_UNKNOWN;
+ }
+ if (reply_buffer[0] == 0x80) {
+ printk(KERN_INFO "FDC %d is a post-1991 82077\n", fdc);
+ return FDC_82077; /* Revised 82077AA passes all the tests */
+ }
+ switch (reply_buffer[0] >> 5) {
+ case 0x0:
+ /* Either a 82078-1 or a 82078SL running at 5Volt */
+ printk(KERN_INFO "FDC %d is an 82078.\n", fdc);
+ return FDC_82078;
+ case 0x1:
+ printk(KERN_INFO "FDC %d is a 44pin 82078\n", fdc);
+ return FDC_82078;
+ case 0x2:
+ printk(KERN_INFO "FDC %d is a S82078B\n", fdc);
+ return FDC_S82078B;
+ case 0x3:
+ printk(KERN_INFO "FDC %d is a National Semiconductor PC87306\n",
+ fdc);
+ return FDC_87306;
+ default:
+ printk(KERN_INFO
+ "FDC %d init: 82078 variant with unknown PARTID=%d.\n",
+ fdc, reply_buffer[0] >> 5);
+ return FDC_82078_UNKN;
+ }
+} /* get_fdc_version */
+
+/* lilo configuration */
+
+static void __init floppy_set_flags(int *ints, int param, int param2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) {
+ if (param)
+ default_drive_params[i].params.flags |= param2;
+ else
+ default_drive_params[i].params.flags &= ~param2;
+ }
+ DPRINT("%s flag 0x%x\n", param ? "Setting" : "Clearing", param2);
+}
+
+static void __init daring(int *ints, int param, int param2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) {
+ if (param) {
+ default_drive_params[i].params.select_delay = 0;
+ default_drive_params[i].params.flags |=
+ FD_SILENT_DCL_CLEAR;
+ } else {
+ default_drive_params[i].params.select_delay =
+ 2 * HZ / 100;
+ default_drive_params[i].params.flags &=
+ ~FD_SILENT_DCL_CLEAR;
+ }
+ }
+ DPRINT("Assuming %s floppy hardware\n", param ?
"standard" : "broken"); +} + +static void __init set_cmos(int *ints, int dummy, int dummy2) +{ + int current_drive = 0; + + if (ints[0] != 2) { + DPRINT("wrong number of parameters for CMOS\n"); + return; + } + current_drive = ints[1]; + if (current_drive < 0 || current_drive >= 8) { + DPRINT("bad drive for set_cmos\n"); + return; + } +#if N_FDC > 1 + if (current_drive >= 4 && !FDC2) + FDC2 = 0x370; +#endif + DP->cmos = ints[2]; + DPRINT("setting CMOS code to %d\n", ints[2]); +} + +static struct param_table { + const char *name; + void (*fn) (int *ints, int param, int param2); + int *var; + int def_param; + int param2; +} config_params[] __initdata = { + {"allowed_drive_mask", NULL, &allowed_drive_mask, 0xff, 0}, /* obsolete */ + {"all_drives", NULL, &allowed_drive_mask, 0xff, 0}, /* obsolete */ + {"asus_pci", NULL, &allowed_drive_mask, 0x33, 0}, + {"irq", NULL, &FLOPPY_IRQ, 6, 0}, + {"dma", NULL, &FLOPPY_DMA, 2, 0}, + {"daring", daring, NULL, 1, 0}, +#if N_FDC > 1 + {"two_fdc", NULL, &FDC2, 0x370, 0}, + {"one_fdc", NULL, &FDC2, 0, 0}, +#endif + {"thinkpad", floppy_set_flags, NULL, 1, FD_INVERTED_DCL}, + {"broken_dcl", floppy_set_flags, NULL, 1, FD_BROKEN_DCL}, + {"messages", floppy_set_flags, NULL, 1, FTD_MSG}, + {"silent_dcl_clear", floppy_set_flags, NULL, 1, FD_SILENT_DCL_CLEAR}, + {"debug", floppy_set_flags, NULL, 1, FD_DEBUG}, + {"nodma", NULL, &can_use_virtual_dma, 1, 0}, + {"omnibook", NULL, &can_use_virtual_dma, 1, 0}, + {"yesdma", NULL, &can_use_virtual_dma, 0, 0}, + {"fifo_depth", NULL, &fifo_depth, 0xa, 0}, + {"nofifo", NULL, &no_fifo, 0x20, 0}, + {"usefifo", NULL, &no_fifo, 0, 0}, + {"cmos", set_cmos, NULL, 0, 0}, + {"slow", NULL, &slow_floppy, 1, 0}, + {"unexpected_interrupts", NULL, &print_unex, 1, 0}, + {"no_unexpected_interrupts", NULL, &print_unex, 0, 0}, + {"L40SX", NULL, &print_unex, 0, 0} + + EXTRA_FLOPPY_PARAMS +}; + +static int __init floppy_setup(char *str) +{ + int i; + int param; + int ints[11]; + + str = get_options(str, ARRAY_SIZE(ints), ints); + if (str) { + for (i = 0; i < ARRAY_SIZE(config_params); i++) { + if (strcmp(str, config_params[i].name) == 0) { + if (ints[0]) + param = ints[1]; + else + param = config_params[i].def_param; + if (config_params[i].fn) + config_params[i]. 
+ fn(ints, param, + config_params[i].param2); + if (config_params[i].var) { + DPRINT("%s=%d\n", str, param); + *config_params[i].var = param; + } + return 1; + } + } + } + if (str) { + DPRINT("unknown floppy option [%s]\n", str); + + DPRINT("allowed options are:"); + for (i = 0; i < ARRAY_SIZE(config_params); i++) + printk(" %s", config_params[i].name); + printk("\n"); + } else + DPRINT("botched floppy option\n"); + DPRINT("Read Documentation/blockdev/floppy.txt\n"); + return 0; +} + +static int have_no_fdc = -ENODEV; + +static ssize_t floppy_cmos_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *p; + int drive; + + p = container_of(dev, struct platform_device,dev); + drive = p->id; + return sprintf(buf, "%X\n", UDP->cmos); +} +DEVICE_ATTR(cmos,S_IRUGO,floppy_cmos_show,NULL); + +static void floppy_device_release(struct device *dev) +{ +} + +static struct platform_device floppy_device[N_DRIVE]; + +static struct kobject *floppy_find(dev_t dev, int *part, void *data) +{ + int drive = (*part & 3) | ((*part & 0x80) >> 5); + if (drive >= N_DRIVE || + !(allowed_drive_mask & (1 << drive)) || + fdc_state[FDC(drive)].version == FDC_NONE) + return NULL; + if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type)) + return NULL; + *part = 0; + return get_disk(disks[drive]); +} + +static int __init floppy_init(void) +{ + int i, unit, drive; + int err, dr; + +#if defined(CONFIG_PPC) + if (check_legacy_ioport(FDC1)) + return -ENODEV; +#endif + + raw_cmd = NULL; + + for (dr = 0; dr < N_DRIVE; dr++) { + disks[dr] = alloc_disk(1); + if (!disks[dr]) { + err = -ENOMEM; + goto out_put_disk; + } + + disks[dr]->major = FLOPPY_MAJOR; + disks[dr]->first_minor = TOMINOR(dr); + disks[dr]->fops = &floppy_fops; + sprintf(disks[dr]->disk_name, "fd%d", dr); + + init_timer(&motor_off_timer[dr]); + motor_off_timer[dr].data = dr; + motor_off_timer[dr].function = motor_off_callback; + } + + err = register_blkdev(FLOPPY_MAJOR, "fd"); + if (err) + goto out_put_disk; + + floppy_queue = blk_init_queue(do_fd_request, &floppy_lock); + if (!floppy_queue) { + err = -ENOMEM; + goto out_unreg_blkdev; + } + blk_queue_max_sectors(floppy_queue, 64); + + blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE, + floppy_find, NULL, NULL); + + for (i = 0; i < 256; i++) + if (ITYPE(i)) + floppy_sizes[i] = floppy_type[ITYPE(i)].size; + else + floppy_sizes[i] = MAX_DISK_SIZE << 1; + + reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT); + config_types(); + + for (i = 0; i < N_FDC; i++) { + fdc = i; + CLEARSTRUCT(FDCS); + FDCS->dtr = -1; + FDCS->dor = 0x4; +#if defined(__sparc__) || defined(__mc68000__) + /*sparcs/sun3x don't have a DOR reset which we can fall back on to */ +#ifdef __mc68000__ + if (MACH_IS_SUN3X) +#endif + FDCS->version = FDC_82072A; +#endif + } + + use_virtual_dma = can_use_virtual_dma & 1; + fdc_state[0].address = FDC1; + if (fdc_state[0].address == -1) { + del_timer(&fd_timeout); + err = -ENODEV; + goto out_unreg_region; + } +#if N_FDC > 1 + fdc_state[1].address = FDC2; +#endif + + fdc = 0; /* reset fdc in case of unexpected interrupt */ + err = floppy_grab_irq_and_dma(); + if (err) { + del_timer(&fd_timeout); + err = -EBUSY; + goto out_unreg_region; + } + + /* initialise drive state */ + for (drive = 0; drive < N_DRIVE; drive++) { + CLEARSTRUCT(UDRS); + CLEARSTRUCT(UDRWE); + USETF(FD_DISK_NEWCHANGE); + USETF(FD_DISK_CHANGED); + USETF(FD_VERIFY); + UDRS->fd_device = -1; + floppy_track_buffer = NULL; + max_buffer_sectors = 0; + } + /* + * Small 10 msec delay to let 
through any interrupt that + * initialization might have triggered, to not + * confuse detection: + */ + msleep(10); + + for (i = 0; i < N_FDC; i++) { + fdc = i; + FDCS->driver_version = FD_DRIVER_VERSION; + for (unit = 0; unit < 4; unit++) + FDCS->track[unit] = 0; + if (FDCS->address == -1) + continue; + FDCS->rawcmd = 2; + if (user_reset_fdc(-1, FD_RESET_ALWAYS, 0)) { + /* free ioports reserved by floppy_grab_irq_and_dma() */ + release_region(FDCS->address + 2, 4); + release_region(FDCS->address + 7, 1); + FDCS->address = -1; + FDCS->version = FDC_NONE; + continue; + } + /* Try to determine the floppy controller type */ + FDCS->version = get_fdc_version(); + if (FDCS->version == FDC_NONE) { + /* free ioports reserved by floppy_grab_irq_and_dma() */ + release_region(FDCS->address + 2, 4); + release_region(FDCS->address + 7, 1); + FDCS->address = -1; + continue; + } + if (can_use_virtual_dma == 2 && FDCS->version < FDC_82072A) + can_use_virtual_dma = 0; + + have_no_fdc = 0; + /* Not all FDCs seem to be able to handle the version command + * properly, so force a reset for the standard FDC clones, + * to avoid interrupt garbage. + */ + user_reset_fdc(-1, FD_RESET_ALWAYS, 0); + } + fdc = 0; + del_timer(&fd_timeout); + current_drive = 0; + initialising = 0; + if (have_no_fdc) { + DPRINT("no floppy controllers found\n"); + err = have_no_fdc; + goto out_flush_work; + } + + for (drive = 0; drive < N_DRIVE; drive++) { + if (!(allowed_drive_mask & (1 << drive))) + continue; + if (fdc_state[FDC(drive)].version == FDC_NONE) + continue; + + floppy_device[drive].name = floppy_device_name; + floppy_device[drive].id = drive; + floppy_device[drive].dev.release = floppy_device_release; + + err = platform_device_register(&floppy_device[drive]); + if (err) + goto out_flush_work; + + err = device_create_file(&floppy_device[drive].dev,&dev_attr_cmos); + if (err) + goto out_unreg_platform_dev; + + /* to be cleaned up... 
*/ + disks[drive]->private_data = (void *)(long)drive; + disks[drive]->queue = floppy_queue; + disks[drive]->flags |= GENHD_FL_REMOVABLE; + disks[drive]->driverfs_dev = &floppy_device[drive].dev; + add_disk(disks[drive]); + } + + return 0; + +out_unreg_platform_dev: + platform_device_unregister(&floppy_device[drive]); +out_flush_work: + flush_scheduled_work(); + if (usage_count) + floppy_release_irq_and_dma(); +out_unreg_region: + blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); + blk_cleanup_queue(floppy_queue); +out_unreg_blkdev: + unregister_blkdev(FLOPPY_MAJOR, "fd"); +out_put_disk: + while (dr--) { + del_timer(&motor_off_timer[dr]); + put_disk(disks[dr]); + } + return err; +} + +static DEFINE_SPINLOCK(floppy_usage_lock); + +static int floppy_grab_irq_and_dma(void) +{ + unsigned long flags; + + spin_lock_irqsave(&floppy_usage_lock, flags); + if (usage_count++) { + spin_unlock_irqrestore(&floppy_usage_lock, flags); + return 0; + } + spin_unlock_irqrestore(&floppy_usage_lock, flags); + + /* + * We might have scheduled a free_irq(), wait it to + * drain first: + */ + flush_scheduled_work(); + + if (fd_request_irq()) { + DPRINT("Unable to grab IRQ%d for the floppy driver\n", + FLOPPY_IRQ); + spin_lock_irqsave(&floppy_usage_lock, flags); + usage_count--; + spin_unlock_irqrestore(&floppy_usage_lock, flags); + return -1; + } + if (fd_request_dma()) { + DPRINT("Unable to grab DMA%d for the floppy driver\n", + FLOPPY_DMA); + if (can_use_virtual_dma & 2) + use_virtual_dma = can_use_virtual_dma = 1; + if (!(can_use_virtual_dma & 1)) { + fd_free_irq(); + spin_lock_irqsave(&floppy_usage_lock, flags); + usage_count--; + spin_unlock_irqrestore(&floppy_usage_lock, flags); + return -1; + } + } + + for (fdc = 0; fdc < N_FDC; fdc++) { + if (FDCS->address != -1) { + if (!request_region(FDCS->address + 2, 4, "floppy")) { + DPRINT("Floppy io-port 0x%04lx in use\n", + FDCS->address + 2); + goto cleanup1; + } + if (!request_region(FDCS->address + 7, 1, "floppy DIR")) { + DPRINT("Floppy io-port 0x%04lx in use\n", + FDCS->address + 7); + goto cleanup2; + } + /* address + 6 is reserved, and may be taken by IDE. + * Unfortunately, Adaptec doesn't know this :-(, */ + } + } + for (fdc = 0; fdc < N_FDC; fdc++) { + if (FDCS->address != -1) { + reset_fdc_info(1); + fd_outb(FDCS->dor, FD_DOR); + } + } + fdc = 0; + set_dor(0, ~0, 8); /* avoid immediate interrupt */ + + for (fdc = 0; fdc < N_FDC; fdc++) + if (FDCS->address != -1) + fd_outb(FDCS->dor, FD_DOR); + /* + * The driver will try and free resources and relies on us + * to know if they were allocated or not. 
+ */ + fdc = 0; + irqdma_allocated = 1; + return 0; +cleanup2: + release_region(FDCS->address + 2, 4); +cleanup1: + fd_free_irq(); + fd_free_dma(); + while (--fdc >= 0) { + release_region(FDCS->address + 2, 4); + release_region(FDCS->address + 7, 1); + } + spin_lock_irqsave(&floppy_usage_lock, flags); + usage_count--; + spin_unlock_irqrestore(&floppy_usage_lock, flags); + return -1; +} + +static void floppy_release_irq_and_dma(void) +{ + int old_fdc; +#ifdef FLOPPY_SANITY_CHECK +#ifndef __sparc__ + int drive; +#endif +#endif + long tmpsize; + unsigned long tmpaddr; + unsigned long flags; + + spin_lock_irqsave(&floppy_usage_lock, flags); + if (--usage_count) { + spin_unlock_irqrestore(&floppy_usage_lock, flags); + return; + } + spin_unlock_irqrestore(&floppy_usage_lock, flags); + if (irqdma_allocated) { + fd_disable_dma(); + fd_free_dma(); + fd_free_irq(); + irqdma_allocated = 0; + } + set_dor(0, ~0, 8); +#if N_FDC > 1 + set_dor(1, ~8, 0); +#endif + floppy_enable_hlt(); + + if (floppy_track_buffer && max_buffer_sectors) { + tmpsize = max_buffer_sectors * 1024; + tmpaddr = (unsigned long)floppy_track_buffer; + floppy_track_buffer = NULL; + max_buffer_sectors = 0; + buffer_min = buffer_max = -1; + fd_dma_mem_free(tmpaddr, tmpsize); + } +#ifdef FLOPPY_SANITY_CHECK +#ifndef __sparc__ + for (drive = 0; drive < N_FDC * 4; drive++) + if (timer_pending(motor_off_timer + drive)) + printk("motor off timer %d still active\n", drive); +#endif + + if (timer_pending(&fd_timeout)) + printk("floppy timer still active:%s\n", timeout_message); + if (timer_pending(&fd_timer)) + printk("auxiliary floppy timer still active\n"); + if (work_pending(&floppy_work)) + printk("work still pending\n"); +#endif + old_fdc = fdc; + for (fdc = 0; fdc < N_FDC; fdc++) + if (FDCS->address != -1) { + release_region(FDCS->address + 2, 4); + release_region(FDCS->address + 7, 1); + } + fdc = old_fdc; +} + +#ifdef MODULE + +static char *floppy; + +static void __init parse_floppy_cfg_string(char *cfg) +{ + char *ptr; + + while (*cfg) { + for (ptr = cfg; *cfg && *cfg != ' ' && *cfg != '\t'; cfg++) ; + if (*cfg) { + *cfg = '\0'; + cfg++; + } + if (*ptr) + floppy_setup(ptr); + } +} + +static int __init floppy_module_init(void) +{ + if (floppy) + parse_floppy_cfg_string(floppy); + return floppy_init(); +} +module_init(floppy_module_init); + +static void __exit floppy_module_exit(void) +{ + int drive; + + blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); + unregister_blkdev(FLOPPY_MAJOR, "fd"); + + for (drive = 0; drive < N_DRIVE; drive++) { + del_timer_sync(&motor_off_timer[drive]); + + if ((allowed_drive_mask & (1 << drive)) && + fdc_state[FDC(drive)].version != FDC_NONE) { + del_gendisk(disks[drive]); + device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos); + platform_device_unregister(&floppy_device[drive]); + } + put_disk(disks[drive]); + } + + del_timer_sync(&fd_timeout); + del_timer_sync(&fd_timer); + blk_cleanup_queue(floppy_queue); + + if (usage_count) + floppy_release_irq_and_dma(); + + /* eject disk, if any */ + fd_eject(0); +} +module_exit(floppy_module_exit); + +module_param(floppy, charp, 0); +module_param(FLOPPY_IRQ, int, 0); +module_param(FLOPPY_DMA, int, 0); +MODULE_AUTHOR("Alain L. 
Knaff"); +MODULE_SUPPORTED_DEVICE("fd"); +MODULE_LICENSE("GPL"); + +#else + +__setup("floppy=", floppy_setup); +module_init(floppy_init) +#endif + +MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR); diff --git a/drivers/block/hd.c b/drivers/block/hd.c new file mode 100644 index 0000000..482c0c4 --- /dev/null +++ b/drivers/block/hd.c @@ -0,0 +1,815 @@ +/* + * Copyright (C) 1991, 1992 Linus Torvalds + * + * This is the low-level hd interrupt support. It traverses the + * request-list, using interrupts to jump between functions. As + * all the functions are called within interrupts, we may not + * sleep. Special care is recommended. + * + * modified by Drew Eckhardt to check nr of hd's from the CMOS. + * + * Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug + * in the early extended-partition checks and added DM partitions + * + * IRQ-unmask, drive-id, multiple-mode, support for ">16 heads", + * and general streamlining by Mark Lord. + * + * Removed 99% of above. Use Mark's ide driver for those options. + * This is now a lightweight ST-506 driver. (Paul Gortmaker) + * + * Modified 1995 Russell King for ARM processor. + * + * Bugfix: max_sectors must be <= 255 or the wheels tend to come + * off in a hurry once you queue things up - Paul G. 02/2001 + */ + +/* Uncomment the following if you want verbose error reports. */ +/* #define VERBOSE_ERRORS */ + +#include <linux/blkdev.h> +#include <linux/errno.h> +#include <linux/signal.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/genhd.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/ioport.h> +#include <linux/init.h> +#include <linux/blkpg.h> +#include <linux/ata.h> +#include <linux/hdreg.h> + +#define REALLY_SLOW_IO +#include <asm/system.h> +#include <asm/io.h> +#include <asm/uaccess.h> + +#ifdef __arm__ +#undef HD_IRQ +#endif +#include <asm/irq.h> +#ifdef __arm__ +#define HD_IRQ IRQ_HARDDISK +#endif + +/* Hd controller regster ports */ + +#define HD_DATA 0x1f0 /* _CTL when writing */ +#define HD_ERROR 0x1f1 /* see err-bits */ +#define HD_NSECTOR 0x1f2 /* nr of sectors to read/write */ +#define HD_SECTOR 0x1f3 /* starting sector */ +#define HD_LCYL 0x1f4 /* starting cylinder */ +#define HD_HCYL 0x1f5 /* high byte of starting cyl */ +#define HD_CURRENT 0x1f6 /* 101dhhhh , d=drive, hhhh=head */ +#define HD_STATUS 0x1f7 /* see status-bits */ +#define HD_FEATURE HD_ERROR /* same io address, read=error, write=feature */ +#define HD_PRECOMP HD_FEATURE /* obsolete use of this port - predates IDE */ +#define HD_COMMAND HD_STATUS /* same io address, read=status, write=cmd */ + +#define HD_CMD 0x3f6 /* used for resets */ +#define HD_ALTSTATUS 0x3f6 /* same as HD_STATUS but doesn't clear irq */ + +/* Bits of HD_STATUS */ +#define ERR_STAT 0x01 +#define INDEX_STAT 0x02 +#define ECC_STAT 0x04 /* Corrected error */ +#define DRQ_STAT 0x08 +#define SEEK_STAT 0x10 +#define SERVICE_STAT SEEK_STAT +#define WRERR_STAT 0x20 +#define READY_STAT 0x40 +#define BUSY_STAT 0x80 + +/* Bits for HD_ERROR */ +#define MARK_ERR 0x01 /* Bad address mark */ +#define TRK0_ERR 0x02 /* couldn't find track 0 */ +#define ABRT_ERR 0x04 /* Command aborted */ +#define MCR_ERR 0x08 /* media change request */ +#define ID_ERR 0x10 /* ID field not found */ +#define MC_ERR 0x20 /* media changed */ +#define ECC_ERR 0x40 /* Uncorrectable ECC error */ +#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */ +#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */ + +static 
+static struct request_queue *hd_queue;
+
+#define MAJOR_NR	HD_MAJOR
+#define QUEUE		(hd_queue)
+#define CURRENT		elv_next_request(hd_queue)
+
+#define TIMEOUT_VALUE	(6*HZ)
+#define HD_DELAY	0
+
+#define MAX_ERRORS	16	/* Max read/write errors/sector */
+#define RESET_FREQ	8	/* Reset controller every 8th retry */
+#define RECAL_FREQ	4	/* Recalibrate every 4th retry */
+#define MAX_HD		2
+
+#define STAT_OK		(READY_STAT|SEEK_STAT)
+#define OK_STATUS(s)	(((s)&(STAT_OK|(BUSY_STAT|WRERR_STAT|ERR_STAT)))==STAT_OK)
+
+static void recal_intr(void);
+static void bad_rw_intr(void);
+
+static int reset;
+static int hd_error;
+
+/*
+ * This struct defines the HDs and their types.
+ */
+struct hd_i_struct {
+	unsigned int head, sect, cyl, wpcom, lzone, ctl;
+	int unit;
+	int recalibrate;
+	int special_op;
+};
+
+#ifdef HD_TYPE
+static struct hd_i_struct hd_info[] = { HD_TYPE };
+static int NR_HD = ARRAY_SIZE(hd_info);
+#else
+static struct hd_i_struct hd_info[MAX_HD];
+static int NR_HD;
+#endif
+
+static struct gendisk *hd_gendisk[MAX_HD];
+
+static struct timer_list device_timer;
+
+#define SET_TIMER \
+	do { \
+		mod_timer(&device_timer, jiffies + TIMEOUT_VALUE); \
+	} while (0)
+
+static void (*do_hd)(void) = NULL;
+#define SET_HANDLER(x) \
+if ((do_hd = (x)) != NULL) \
+	SET_TIMER; \
+else \
+	del_timer(&device_timer);
+
+
+#if (HD_DELAY > 0)
+
+#include <asm/i8253.h>
+
+unsigned long last_req;
+
+unsigned long read_timer(void)
+{
+	unsigned long t, flags;
+	int i;
+
+	spin_lock_irqsave(&i8253_lock, flags);
+	t = jiffies * 11932;
+	outb_p(0, 0x43);
+	i = inb_p(0x40);
+	i |= inb(0x40) << 8;
+	spin_unlock_irqrestore(&i8253_lock, flags);
+	return (t - i);
+}
+#endif
+
+static void __init hd_setup(char *str, int *ints)
+{
+	int hdind = 0;
+
+	if (ints[0] != 3)
+		return;
+	if (hd_info[0].head != 0)
+		hdind = 1;
+	hd_info[hdind].head = ints[2];
+	hd_info[hdind].sect = ints[3];
+	hd_info[hdind].cyl = ints[1];
+	hd_info[hdind].wpcom = 0;
+	hd_info[hdind].lzone = ints[1];
+	hd_info[hdind].ctl = (ints[2] > 8 ?
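+/*
+ * Illustrative sketch (not part of the original driver): booting with
+ * "hd=1050,32,64" reaches hd_setup() via parse_hd_setup() below, where
+ * get_options() fills
+ *
+ *	int ints[6] = { 3, 1050, 32, 64 };	// count, cyl, head, sect
+ *
+ * so hd_info[0] ends up as { .head = 32, .sect = 64, .cyl = 1050,
+ * .wpcom = 0, .lzone = 1050, .ctl = 8 } (ctl is 8 because head > 8)
+ * and NR_HD becomes 1.
+ */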
8 : 0); + NR_HD = hdind+1; +} + +static void dump_status(const char *msg, unsigned int stat) +{ + char *name = "hd?"; + if (CURRENT) + name = CURRENT->rq_disk->disk_name; + +#ifdef VERBOSE_ERRORS + printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff); + if (stat & BUSY_STAT) printk("Busy "); + if (stat & READY_STAT) printk("DriveReady "); + if (stat & WRERR_STAT) printk("WriteFault "); + if (stat & SEEK_STAT) printk("SeekComplete "); + if (stat & DRQ_STAT) printk("DataRequest "); + if (stat & ECC_STAT) printk("CorrectedError "); + if (stat & INDEX_STAT) printk("Index "); + if (stat & ERR_STAT) printk("Error "); + printk("}\n"); + if ((stat & ERR_STAT) == 0) { + hd_error = 0; + } else { + hd_error = inb(HD_ERROR); + printk("%s: %s: error=0x%02x { ", name, msg, hd_error & 0xff); + if (hd_error & BBD_ERR) printk("BadSector "); + if (hd_error & ECC_ERR) printk("UncorrectableError "); + if (hd_error & ID_ERR) printk("SectorIdNotFound "); + if (hd_error & ABRT_ERR) printk("DriveStatusError "); + if (hd_error & TRK0_ERR) printk("TrackZeroNotFound "); + if (hd_error & MARK_ERR) printk("AddrMarkNotFound "); + printk("}"); + if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) { + printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL), + inb(HD_CURRENT) & 0xf, inb(HD_SECTOR)); + if (CURRENT) + printk(", sector=%ld", CURRENT->sector); + } + printk("\n"); + } +#else + printk("%s: %s: status=0x%02x.\n", name, msg, stat & 0xff); + if ((stat & ERR_STAT) == 0) { + hd_error = 0; + } else { + hd_error = inb(HD_ERROR); + printk("%s: %s: error=0x%02x.\n", name, msg, hd_error & 0xff); + } +#endif +} + +static void check_status(void) +{ + int i = inb_p(HD_STATUS); + + if (!OK_STATUS(i)) { + dump_status("check_status", i); + bad_rw_intr(); + } +} + +static int controller_busy(void) +{ + int retries = 100000; + unsigned char status; + + do { + status = inb_p(HD_STATUS); + } while ((status & BUSY_STAT) && --retries); + return status; +} + +static int status_ok(void) +{ + unsigned char status = inb_p(HD_STATUS); + + if (status & BUSY_STAT) + return 1; /* Ancient, but does it make sense??? 
*/ + if (status & WRERR_STAT) + return 0; + if (!(status & READY_STAT)) + return 0; + if (!(status & SEEK_STAT)) + return 0; + return 1; +} + +static int controller_ready(unsigned int drive, unsigned int head) +{ + int retry = 100; + + do { + if (controller_busy() & BUSY_STAT) + return 0; + outb_p(0xA0 | (drive<<4) | head, HD_CURRENT); + if (status_ok()) + return 1; + } while (--retry); + return 0; +} + +static void hd_out(struct hd_i_struct *disk, + unsigned int nsect, + unsigned int sect, + unsigned int head, + unsigned int cyl, + unsigned int cmd, + void (*intr_addr)(void)) +{ + unsigned short port; + +#if (HD_DELAY > 0) + while (read_timer() - last_req < HD_DELAY) + /* nothing */; +#endif + if (reset) + return; + if (!controller_ready(disk->unit, head)) { + reset = 1; + return; + } + SET_HANDLER(intr_addr); + outb_p(disk->ctl, HD_CMD); + port = HD_DATA; + outb_p(disk->wpcom >> 2, ++port); + outb_p(nsect, ++port); + outb_p(sect, ++port); + outb_p(cyl, ++port); + outb_p(cyl >> 8, ++port); + outb_p(0xA0 | (disk->unit << 4) | head, ++port); + outb_p(cmd, ++port); +} + +static void hd_request (void); + +static int drive_busy(void) +{ + unsigned int i; + unsigned char c; + + for (i = 0; i < 500000 ; i++) { + c = inb_p(HD_STATUS); + if ((c & (BUSY_STAT | READY_STAT | SEEK_STAT)) == STAT_OK) + return 0; + } + dump_status("reset timed out", c); + return 1; +} + +static void reset_controller(void) +{ + int i; + + outb_p(4, HD_CMD); + for (i = 0; i < 1000; i++) barrier(); + outb_p(hd_info[0].ctl & 0x0f, HD_CMD); + for (i = 0; i < 1000; i++) barrier(); + if (drive_busy()) + printk("hd: controller still busy\n"); + else if ((hd_error = inb(HD_ERROR)) != 1) + printk("hd: controller reset failed: %02x\n", hd_error); +} + +static void reset_hd(void) +{ + static int i; + +repeat: + if (reset) { + reset = 0; + i = -1; + reset_controller(); + } else { + check_status(); + if (reset) + goto repeat; + } + if (++i < NR_HD) { + struct hd_i_struct *disk = &hd_info[i]; + disk->special_op = disk->recalibrate = 1; + hd_out(disk, disk->sect, disk->sect, disk->head-1, + disk->cyl, ATA_CMD_INIT_DEV_PARAMS, &reset_hd); + if (reset) + goto repeat; + } else + hd_request(); +} + +/* + * Ok, don't know what to do with the unexpected interrupts: on some machines + * doing a reset and a retry seems to result in an eternal loop. Right now I + * ignore it, and just set the timeout. + * + * On laptops (and "green" PCs), an unexpected interrupt occurs whenever the + * drive enters "idle", "standby", or "sleep" mode, so if the status looks + * "good", we just ignore the interrupt completely. + */ +static void unexpected_hd_interrupt(void) +{ + unsigned int stat = inb_p(HD_STATUS); + + if (stat & (BUSY_STAT|DRQ_STAT|ECC_STAT|ERR_STAT)) { + dump_status("unexpected interrupt", stat); + SET_TIMER; + } +} + +/* + * bad_rw_intr() now tries to be a bit smarter and does things + * according to the error returned by the controller. 
+ * -Mika Liljeberg (liljeber@cs.Helsinki.FI) + */ +static void bad_rw_intr(void) +{ + struct request *req = CURRENT; + if (req != NULL) { + struct hd_i_struct *disk = req->rq_disk->private_data; + if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) { + end_request(req, 0); + disk->special_op = disk->recalibrate = 1; + } else if (req->errors % RESET_FREQ == 0) + reset = 1; + else if ((hd_error & TRK0_ERR) || req->errors % RECAL_FREQ == 0) + disk->special_op = disk->recalibrate = 1; + /* Otherwise just retry */ + } +} + +static inline int wait_DRQ(void) +{ + int retries; + int stat; + + for (retries = 0; retries < 100000; retries++) { + stat = inb_p(HD_STATUS); + if (stat & DRQ_STAT) + return 0; + } + dump_status("wait_DRQ", stat); + return -1; +} + +static void read_intr(void) +{ + struct request *req; + int i, retries = 100000; + + do { + i = (unsigned) inb_p(HD_STATUS); + if (i & BUSY_STAT) + continue; + if (!OK_STATUS(i)) + break; + if (i & DRQ_STAT) + goto ok_to_read; + } while (--retries > 0); + dump_status("read_intr", i); + bad_rw_intr(); + hd_request(); + return; +ok_to_read: + req = CURRENT; + insw(HD_DATA, req->buffer, 256); + req->sector++; + req->buffer += 512; + req->errors = 0; + i = --req->nr_sectors; + --req->current_nr_sectors; +#ifdef DEBUG + printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n", + req->rq_disk->disk_name, req->sector, req->nr_sectors, + req->buffer+512); +#endif + if (req->current_nr_sectors <= 0) + end_request(req, 1); + if (i > 0) { + SET_HANDLER(&read_intr); + return; + } + (void) inb_p(HD_STATUS); +#if (HD_DELAY > 0) + last_req = read_timer(); +#endif + if (elv_next_request(QUEUE)) + hd_request(); + return; +} + +static void write_intr(void) +{ + struct request *req = CURRENT; + int i; + int retries = 100000; + + do { + i = (unsigned) inb_p(HD_STATUS); + if (i & BUSY_STAT) + continue; + if (!OK_STATUS(i)) + break; + if ((req->nr_sectors <= 1) || (i & DRQ_STAT)) + goto ok_to_write; + } while (--retries > 0); + dump_status("write_intr", i); + bad_rw_intr(); + hd_request(); + return; +ok_to_write: + req->sector++; + i = --req->nr_sectors; + --req->current_nr_sectors; + req->buffer += 512; + if (!i || (req->bio && req->current_nr_sectors <= 0)) + end_request(req, 1); + if (i > 0) { + SET_HANDLER(&write_intr); + outsw(HD_DATA, req->buffer, 256); + local_irq_enable(); + } else { +#if (HD_DELAY > 0) + last_req = read_timer(); +#endif + hd_request(); + } + return; +} + +static void recal_intr(void) +{ + check_status(); +#if (HD_DELAY > 0) + last_req = read_timer(); +#endif + hd_request(); +} + +/* + * This is another of the error-routines I don't know what to do with. The + * best idea seems to just set reset, and start all over again. 
+ */
+static void hd_times_out(unsigned long dummy)
+{
+	char *name;
+
+	do_hd = NULL;
+
+	if (!CURRENT)
+		return;
+
+	disable_irq(HD_IRQ);
+	local_irq_enable();
+	reset = 1;
+	name = CURRENT->rq_disk->disk_name;
+	printk("%s: timeout\n", name);
+	if (++CURRENT->errors >= MAX_ERRORS) {
+#ifdef DEBUG
+		printk("%s: too many errors\n", name);
+#endif
+		end_request(CURRENT, 0);
+	}
+	local_irq_disable();
+	hd_request();
+	enable_irq(HD_IRQ);
+}
+
+static int do_special_op(struct hd_i_struct *disk, struct request *req)
+{
+	if (disk->recalibrate) {
+		disk->recalibrate = 0;
+		hd_out(disk, disk->sect, 0, 0, 0, ATA_CMD_RESTORE, &recal_intr);
+		return reset;
+	}
+	if (disk->head > 16) {
+		printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
+		end_request(req, 0);
+	}
+	disk->special_op = 0;
+	return 1;
+}
+
+/*
+ * The driver enables interrupts as much as possible. In order to do this,
+ * (a) the device-interrupt is disabled before entering hd_request(),
+ * and (b) the timeout-interrupt is disabled before the sti().
+ *
+ * Interrupts are still masked (by default) whenever we are exchanging
+ * data/cmds with a drive, because some drives seem to have very poor
+ * tolerance for latency during I/O. The IDE driver has support to unmask
+ * interrupts for non-broken hardware, so use that driver if required.
+ */
+static void hd_request(void)
+{
+	unsigned int block, nsect, sec, track, head, cyl;
+	struct hd_i_struct *disk;
+	struct request *req;
+
+	if (do_hd)
+		return;
+repeat:
+	del_timer(&device_timer);
+	local_irq_enable();
+
+	req = CURRENT;
+	if (!req) {
+		do_hd = NULL;
+		return;
+	}
+
+	if (reset) {
+		local_irq_disable();
+		reset_hd();
+		return;
+	}
+	disk = req->rq_disk->private_data;
+	block = req->sector;
+	nsect = req->nr_sectors;
+	if (block >= get_capacity(req->rq_disk) ||
+	    ((block+nsect) > get_capacity(req->rq_disk))) {
+		printk("%s: bad access: block=%d, count=%d\n",
+			req->rq_disk->disk_name, block, nsect);
+		end_request(req, 0);
+		goto repeat;
+	}
+
+	if (disk->special_op) {
+		if (do_special_op(disk, req))
+			goto repeat;
+		return;
+	}
+	sec = block % disk->sect + 1;
+	track = block / disk->sect;
+	head = track % disk->head;
+	cyl = track / disk->head;
+#ifdef DEBUG
+	printk("%s: %sing: CHS=%d/%d/%d, sectors=%d, buffer=%p\n",
+		req->rq_disk->disk_name,
+		rq_data_dir(req) == READ ? "read" : "writ",
+		cyl, head, sec, nsect, req->buffer);
+#endif
+	if (blk_fs_request(req)) {
+		switch (rq_data_dir(req)) {
+		case READ:
+			hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
+				&read_intr);
+			if (reset)
+				goto repeat;
+			break;
+		case WRITE:
+			hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE,
+				&write_intr);
+			if (reset)
+				goto repeat;
+			if (wait_DRQ()) {
+				bad_rw_intr();
+				goto repeat;
+			}
+			outsw(HD_DATA, req->buffer, 256);
+			break;
+		default:
+			printk("unknown hd-command\n");
+			end_request(req, 0);
+			break;
+		}
+	}
+}
+
+static void do_hd_request(struct request_queue *q)
+{
+	disable_irq(HD_IRQ);
+	hd_request();
+	enable_irq(HD_IRQ);
+}
+
+static int hd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+	struct hd_i_struct *disk = bdev->bd_disk->private_data;
+
+	geo->heads = disk->head;
+	geo->sectors = disk->sect;
+	geo->cylinders = disk->cyl;
+	return 0;
+}
+
+/*
+ * Releasing a block device means we sync() it, so that it can safely
+ * be forgotten about...
+ */
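+/*
+ * Illustrative sketch (not part of the original driver): the CHS split
+ * performed in hd_request() above is plain integer division. For a
+ * hypothetical drive with sect=63 and head=16, linear block 123456
+ * maps like this:
+ *
+ *	unsigned int block = 123456, sect = 63, heads = 16;
+ *	unsigned int s = block % sect + 1;	// 40
+ *	unsigned int track = block / sect;	// 1959
+ *	unsigned int h = track % heads;		// 7
+ *	unsigned int c = track / heads;		// 122
+ *
+ * so block 123456 is addressed as CHS 122/7/40 by hd_out().
+ */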
+
+static irqreturn_t hd_interrupt(int irq, void *dev_id)
+{
+	void (*handler)(void) = do_hd;
+
+	do_hd = NULL;
+	del_timer(&device_timer);
+	if (!handler)
+		handler = unexpected_hd_interrupt;
+	handler();
+	local_irq_enable();
+	return IRQ_HANDLED;
+}
+
+static struct block_device_operations hd_fops = {
+	.getgeo = hd_getgeo,
+};
+
+/*
+ * This is the hard disk IRQ description. The IRQF_DISABLED in sa_flags
+ * means we run the IRQ-handler with interrupts disabled: this is bad for
+ * interrupt latency, but anything else has led to problems on some
+ * machines.
+ *
+ * We enable interrupts in some of the routines after making sure it's
+ * safe.
+ */
+
+static int __init hd_init(void)
+{
+	int drive;
+
+	if (register_blkdev(MAJOR_NR, "hd"))
+		return -1;
+
+	hd_queue = blk_init_queue(do_hd_request, &hd_lock);
+	if (!hd_queue) {
+		unregister_blkdev(MAJOR_NR, "hd");
+		return -ENOMEM;
+	}
+
+	blk_queue_max_sectors(hd_queue, 255);
+	init_timer(&device_timer);
+	device_timer.function = hd_times_out;
+	blk_queue_hardsect_size(hd_queue, 512);
+
+	if (!NR_HD) {
+		/*
+		 * We don't know anything about the drive. This means
+		 * that you *MUST* specify the drive parameters to the
+		 * kernel yourself.
+		 *
+		 * If we were on an i386, we used to read this info from
+		 * the BIOS or CMOS. This doesn't work all that well,
+		 * since this assumes that this is a primary or secondary
+		 * drive, and if we're using this legacy driver, it's
+		 * probably an auxiliary controller added to recover
+		 * legacy data off an ST-506 drive. Either way, it's
+		 * definitely safest to have the user explicitly specify
+		 * the information.
+		 */
+		printk("hd: no drives specified - use hd=cyl,head,sectors"
+			" on kernel command line\n");
+		goto out;
+	}
+
+	for (drive = 0; drive < NR_HD; drive++) {
+		struct gendisk *disk = alloc_disk(64);
+		struct hd_i_struct *p = &hd_info[drive];
+		if (!disk)
+			goto Enomem;
+		disk->major = MAJOR_NR;
+		disk->first_minor = drive << 6;
+		disk->fops = &hd_fops;
+		sprintf(disk->disk_name, "hd%c", 'a'+drive);
+		disk->private_data = p;
+		set_capacity(disk, p->head * p->sect * p->cyl);
+		disk->queue = hd_queue;
+		p->unit = drive;
+		hd_gendisk[drive] = disk;
+		printk("%s: %luMB, CHS=%d/%d/%d\n",
+			disk->disk_name, (unsigned long)get_capacity(disk)/2048,
+			p->cyl, p->head, p->sect);
+	}
+
+	if (request_irq(HD_IRQ, hd_interrupt, IRQF_DISABLED, "hd", NULL)) {
+		printk("hd: unable to get IRQ%d for the hard disk driver\n",
+			HD_IRQ);
+		goto out1;
+	}
+	if (!request_region(HD_DATA, 8, "hd")) {
+		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_DATA);
+		goto out2;
+	}
+	if (!request_region(HD_CMD, 1, "hd(cmd)")) {
+		printk(KERN_WARNING "hd: port 0x%x busy\n", HD_CMD);
+		goto out3;
+	}
+
+	/* Let them fly */
+	for (drive = 0; drive < NR_HD; drive++)
+		add_disk(hd_gendisk[drive]);
+
+	return 0;
+
+out3:
+	release_region(HD_DATA, 8);
+out2:
+	free_irq(HD_IRQ, NULL);
+out1:
+	for (drive = 0; drive < NR_HD; drive++)
+		put_disk(hd_gendisk[drive]);
+	NR_HD = 0;
+out:
+	del_timer(&device_timer);
+	unregister_blkdev(MAJOR_NR, "hd");
+	blk_cleanup_queue(hd_queue);
+	return -1;
+Enomem:
+	while (drive--)
+		put_disk(hd_gendisk[drive]);
+	goto out;
+}
+
+static int __init parse_hd_setup(char *line)
+{
+	int ints[6];
+
+	(void) get_options(line, ARRAY_SIZE(ints), ints);
+	hd_setup(NULL, ints);
+
+	return 1;
+}
+__setup("hd=", parse_hd_setup);
+
+late_initcall(hd_init);
diff --git a/drivers/block/ida_cmd.h b/drivers/block/ida_cmd.h
new file mode 100644
index 0000000..98b5746
--- /dev/null
+++ b/drivers/block/ida_cmd.h
@@
-0,0 +1,349 @@ +/* + * Disk Array driver for Compaq SMART2 Controllers + * Copyright 1998 Compaq Computer Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Questions/Comments/Bugfixes to iss_storagedev@hp.com + * + */ +#ifndef ARRAYCMD_H +#define ARRAYCMD_H + +#include <asm/types.h> +#if 0 +#include <linux/blkdev.h> +#endif + +/* for the Smart Array 42XX cards */ +#define S42XX_REQUEST_PORT_OFFSET 0x40 +#define S42XX_REPLY_INTR_MASK_OFFSET 0x34 +#define S42XX_REPLY_PORT_OFFSET 0x44 +#define S42XX_INTR_STATUS 0x30 + +#define S42XX_INTR_OFF 0x08 +#define S42XX_INTR_PENDING 0x08 + +#define COMMAND_FIFO 0x04 +#define COMMAND_COMPLETE_FIFO 0x08 +#define INTR_MASK 0x0C +#define INTR_STATUS 0x10 +#define INTR_PENDING 0x14 + +#define FIFO_NOT_EMPTY 0x01 +#define FIFO_NOT_FULL 0x02 + +#define BIG_PROBLEM 0x40 +#define LOG_NOT_CONF 2 + +#pragma pack(1) +typedef struct { + __u32 size; + __u32 addr; +} sg_t; + +#define RCODE_NONFATAL 0x02 +#define RCODE_FATAL 0x04 +#define RCODE_INVREQ 0x10 +typedef struct { + __u16 next; + __u8 cmd; + __u8 rcode; + __u32 blk; + __u16 blk_cnt; + __u8 sg_cnt; + __u8 reserved; +} rhdr_t; + +#define SG_MAX 32 +typedef struct { + rhdr_t hdr; + sg_t sg[SG_MAX]; + __u32 bp; +} rblk_t; + +typedef struct { + __u8 unit; + __u8 prio; + __u16 size; +} chdr_t; + +#define CMD_RWREQ 0x00 +#define CMD_IOCTL_PEND 0x01 +#define CMD_IOCTL_DONE 0x02 + +typedef struct cmdlist { + chdr_t hdr; + rblk_t req; + __u32 size; + int retry_cnt; + __u32 busaddr; + int ctlr; + struct cmdlist *prev; + struct cmdlist *next; + struct request *rq; + int type; +} cmdlist_t; + +#define ID_CTLR 0x11 +typedef struct { + __u8 nr_drvs; + __u32 cfg_sig; + __u8 firm_rev[4]; + __u8 rom_rev[4]; + __u8 hw_rev; + __u32 bb_rev; + __u32 drv_present_map; + __u32 ext_drv_map; + __u32 board_id; + __u8 cfg_error; + __u32 non_disk_bits; + __u8 bad_ram_addr; + __u8 cpu_rev; + __u8 pdpi_rev; + __u8 epic_rev; + __u8 wcxc_rev; + __u8 marketing_rev; + __u8 ctlr_flags; + __u8 host_flags; + __u8 expand_dis; + __u8 scsi_chips; + __u32 max_req_blocks; + __u32 ctlr_clock; + __u8 drvs_per_bus; + __u16 big_drv_present_map[8]; + __u16 big_ext_drv_map[8]; + __u16 big_non_disk_map[8]; + __u16 task_flags; + __u8 icl_bus; + __u8 red_modes; + __u8 cur_red_mode; + __u8 red_ctlr_stat; + __u8 red_fail_reason; + __u8 reserved[403]; +} id_ctlr_t; + +typedef struct { + __u16 cyl; + __u8 heads; + __u8 xsig; + __u8 psectors; + __u16 wpre; + __u8 maxecc; + __u8 drv_ctrl; + __u16 pcyls; + __u8 pheads; + __u16 landz; + __u8 sect_per_track; + __u8 cksum; +} drv_param_t; + +#define ID_LOG_DRV 0x10 +typedef struct { + __u16 blk_size; + __u32 nr_blks; + drv_param_t drv; + __u8 fault_tol; + __u8 reserved; + __u8 bios_disable; +} id_log_drv_t; + +#define ID_LOG_DRV_EXT 0x18 +typedef struct { + __u32 log_drv_id; + __u8 log_drv_label[64]; + __u8 
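+/*
+ * Illustrative aside (not part of the original header): everything in
+ * this file describes controller wire formats, which is why it sits
+ * under #pragma pack(1) above - the compiler must not insert padding.
+ * With packing in effect this structure is 4 + 64 + 418 = 486 bytes,
+ * and a hypothetical build-time check in the classic negative-array
+ * style would be:
+ *
+ *	static char id_log_drv_ext_size_check
+ *		[sizeof(id_log_drv_ext_t) == 486 ? 1 : -1];
+ */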
reserved[418]; +} id_log_drv_ext_t; + +#define SENSE_LOG_DRV_STAT 0x12 +typedef struct { + __u8 status; + __u32 fail_map; + __u16 read_err[32]; + __u16 write_err[32]; + __u8 drv_err_data[256]; + __u8 drq_timeout[32]; + __u32 blks_to_recover; + __u8 drv_recovering; + __u16 remap_cnt[32]; + __u32 replace_drv_map; + __u32 act_spare_map; + __u8 spare_stat; + __u8 spare_repl_map[32]; + __u32 repl_ok_map; + __u8 media_exch; + __u8 cache_fail; + __u8 expn_fail; + __u8 unit_flags; + __u16 big_fail_map[8]; + __u16 big_remap_map[128]; + __u16 big_repl_map[8]; + __u16 big_act_spare_map[8]; + __u8 big_spar_repl_map[128]; + __u16 big_repl_ok_map[8]; + __u8 big_drv_rebuild; + __u8 reserved[36]; +} sense_log_drv_stat_t; + +#define START_RECOVER 0x13 + +#define ID_PHYS_DRV 0x15 +typedef struct { + __u8 scsi_bus; + __u8 scsi_id; + __u16 blk_size; + __u32 nr_blks; + __u32 rsvd_blks; + __u8 drv_model[40]; + __u8 drv_sn[40]; + __u8 drv_fw[8]; + __u8 scsi_iq_bits; + __u8 compaq_drv_stmp; + __u8 last_fail; + __u8 phys_drv_flags; + __u8 phys_drv_flags1; + __u8 scsi_lun; + __u8 phys_drv_flags2; + __u8 reserved; + __u32 spi_speed_rules; + __u8 phys_connector[2]; + __u8 phys_box_on_bus; + __u8 phys_bay_in_box; +} id_phys_drv_t; + +#define BLINK_DRV_LEDS 0x16 +typedef struct { + __u32 blink_duration; + __u32 reserved; + __u8 blink[256]; + __u8 reserved1[248]; +} blink_drv_leds_t; + +#define SENSE_BLINK_LEDS 0x17 +typedef struct { + __u32 blink_duration; + __u32 btime_elap; + __u8 blink[256]; + __u8 reserved1[248]; +} sense_blink_leds_t; + +#define IDA_READ 0x20 +#define IDA_WRITE 0x30 +#define IDA_WRITE_MEDIA 0x31 +#define RESET_TO_DIAG 0x40 +#define DIAG_PASS_THRU 0x41 + +#define SENSE_CONFIG 0x50 +#define SET_CONFIG 0x51 +typedef struct { + __u32 cfg_sig; + __u16 compat_port; + __u8 data_dist_mode; + __u8 surf_an_ctrl; + __u16 ctlr_phys_drv; + __u16 log_unit_phys_drv; + __u16 fault_tol_mode; + __u8 phys_drv_param[16]; + drv_param_t drv; + __u32 drv_asgn_map; + __u16 dist_factor; + __u32 spare_asgn_map; + __u8 reserved[6]; + __u16 os; + __u8 ctlr_order; + __u8 extra_info; + __u32 data_offs; + __u8 parity_backedout_write_drvs; + __u8 parity_dist_mode; + __u8 parity_shift_fact; + __u8 bios_disable_flag; + __u32 blks_on_vol; + __u32 blks_per_drv; + __u8 scratch[16]; + __u16 big_drv_map[8]; + __u16 big_spare_map[8]; + __u8 ss_source_vol; + __u8 mix_drv_cap_range; + struct { + __u16 big_drv_map[8]; + __u32 blks_per_drv; + __u16 fault_tol_mode; + __u16 dist_factor; + } MDC_range[4]; + __u8 reserved1[248]; +} config_t; + +#define BYPASS_VOL_STATE 0x52 +#define SS_CREATE_VOL 0x53 +#define CHANGE_CONFIG 0x54 +#define SENSE_ORIG_CONF 0x55 +#define REORDER_LOG_DRV 0x56 +typedef struct { + __u8 old_units[32]; +} reorder_log_drv_t; + +#define LABEL_LOG_DRV 0x57 +typedef struct { + __u8 log_drv_label[64]; +} label_log_drv_t; + +#define SS_TO_VOL 0x58 + +#define SET_SURF_DELAY 0x60 +typedef struct { + __u16 delay; + __u8 reserved[510]; +} surf_delay_t; + +#define SET_OVERHEAT_DELAY 0x61 +typedef struct { + __u16 delay; +} overhead_delay_t; + +#define SET_MP_DELAY +typedef struct { + __u16 delay; + __u8 reserved[510]; +} mp_delay_t; + +#define PASSTHRU_A 0x91 +typedef struct { + __u8 target; + __u8 bus; + __u8 lun; + __u32 timeout; + __u32 flags; + __u8 status; + __u8 error; + __u8 cdb_len; + __u8 sense_error; + __u8 sense_key; + __u32 sense_info; + __u8 sense_code; + __u8 sense_qual; + __u32 residual; + __u8 reserved[4]; + __u8 cdb[12]; +} scsi_param_t; + +#define RESUME_BACKGROUND_ACTIVITY 0x99 +#define 
SENSE_CONTROLLER_PERFORMANCE 0xa8 +#define FLUSH_CACHE 0xc2 +#define COLLECT_BUFFER 0xd2 +#define READ_FLASH_ROM 0xf6 +#define WRITE_FLASH_ROM 0xf7 +#pragma pack() + +#endif /* ARRAYCMD_H */ diff --git a/drivers/block/ida_ioctl.h b/drivers/block/ida_ioctl.h new file mode 100644 index 0000000..888fff9 --- /dev/null +++ b/drivers/block/ida_ioctl.h @@ -0,0 +1,87 @@ +/* + * Disk Array driver for Compaq SMART2 Controllers + * Copyright 1998 Compaq Computer Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Questions/Comments/Bugfixes to iss_storagedev@hp.com + * + */ +#ifndef IDA_IOCTL_H +#define IDA_IOCTL_H + +#include "ida_cmd.h" +#include "cpqarray.h" + +#define IDAGETDRVINFO 0x27272828 +#define IDAPASSTHRU 0x28282929 +#define IDAGETCTLRSIG 0x29293030 +#define IDAREVALIDATEVOLS 0x30303131 +#define IDADRIVERVERSION 0x31313232 +#define IDAGETPCIINFO 0x32323333 + +typedef struct _ida_pci_info_struct +{ + unsigned char bus; + unsigned char dev_fn; + __u32 board_id; +} ida_pci_info_struct; +/* + * Normally, the ioctl determines the logical unit for this command by + * the major,minor number of the fd passed to ioctl. If you need to send + * a command to a different/nonexistant unit (such as during config), you + * can override the normal behavior by setting the unit valid bit. (Normally, + * it should be zero) The controller the command is sent to is still + * determined by the major number of the open device. + */ + +#define UNITVALID 0x80 +typedef struct { + __u8 cmd; + __u8 rcode; + __u8 unit; + __u32 blk; + __u16 blk_cnt; + +/* currently, sg_cnt is assumed to be 1: only the 0th element of sg is used */ + struct { + void __user *addr; + size_t size; + } sg[SG_MAX]; + int sg_cnt; + + union ctlr_cmds { + drv_info_t drv; + unsigned char buf[1024]; + + id_ctlr_t id_ctlr; + drv_param_t drv_param; + id_log_drv_t id_log_drv; + id_log_drv_ext_t id_log_drv_ext; + sense_log_drv_stat_t sense_log_drv_stat; + id_phys_drv_t id_phys_drv; + blink_drv_leds_t blink_drv_leds; + sense_blink_leds_t sense_blink_leds; + config_t config; + reorder_log_drv_t reorder_log_drv; + label_log_drv_t label_log_drv; + surf_delay_t surf_delay; + overhead_delay_t overhead_delay; + mp_delay_t mp_delay; + scsi_param_t scsi_param; + } c; +} ida_ioctl_t; + +#endif /* IDA_IOCTL_H */ diff --git a/drivers/block/loop.c b/drivers/block/loop.c new file mode 100644 index 0000000..5c4ee70 --- /dev/null +++ b/drivers/block/loop.c @@ -0,0 +1,1588 @@ +/* + * linux/drivers/block/loop.c + * + * Written by Theodore Ts'o, 3/29/93 + * + * Copyright 1993 by Theodore Ts'o. Redistribution of this file is + * permitted under the GNU General Public License. + * + * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993 + * more DES encryption plus IDEA encryption by Nicholas J. 
Leon, June 20, 1996 + * + * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994 + * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996 + * + * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997 + * + * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998 + * + * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998 + * + * Loadable modules and other fixes by AK, 1998 + * + * Make real block number available to downstream transfer functions, enables + * CBC (and relatives) mode encryption requiring unique IVs per data block. + * Reed H. Petty, rhp@draper.net + * + * Maximum number of loop devices now dynamic via max_loop module parameter. + * Russell Kroll <rkroll@exploits.org> 19990701 + * + * Maximum number of loop devices when compiled-in now selectable by passing + * max_loop=<1-255> to the kernel on boot. + * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999 + * + * Completely rewrite request handling to be make_request_fn style and + * non blocking, pushing work to a helper thread. Lots of fixes from + * Al Viro too. + * Jens Axboe <axboe@suse.de>, Nov 2000 + * + * Support up to 256 loop devices + * Heinz Mauelshagen <mge@sistina.com>, Feb 2002 + * + * Support for falling back on the write file operation when the address space + * operations write_begin is not available on the backing filesystem. + * Anton Altaparmakov, 16 Feb 2005 + * + * Still To Fix: + * - Advisory locking is ignored here. + * - Should use an own CAP_* category instead of CAP_SYS_ADMIN + * + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/file.h> +#include <linux/stat.h> +#include <linux/errno.h> +#include <linux/major.h> +#include <linux/wait.h> +#include <linux/blkdev.h> +#include <linux/blkpg.h> +#include <linux/init.h> +#include <linux/smp_lock.h> +#include <linux/swap.h> +#include <linux/slab.h> +#include <linux/loop.h> +#include <linux/compat.h> +#include <linux/suspend.h> +#include <linux/freezer.h> +#include <linux/writeback.h> +#include <linux/buffer_head.h> /* for invalidate_bdev() */ +#include <linux/completion.h> +#include <linux/highmem.h> +#include <linux/gfp.h> +#include <linux/kthread.h> +#include <linux/splice.h> + +#include <asm/uaccess.h> + +static LIST_HEAD(loop_devices); +static DEFINE_MUTEX(loop_devices_mutex); + +static int max_part; +static int part_shift; + +/* + * Transfer functions + */ +static int transfer_none(struct loop_device *lo, int cmd, + struct page *raw_page, unsigned raw_off, + struct page *loop_page, unsigned loop_off, + int size, sector_t real_block) +{ + char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off; + char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off; + + if (cmd == READ) + memcpy(loop_buf, raw_buf, size); + else + memcpy(raw_buf, loop_buf, size); + + kunmap_atomic(raw_buf, KM_USER0); + kunmap_atomic(loop_buf, KM_USER1); + cond_resched(); + return 0; +} + +static int transfer_xor(struct loop_device *lo, int cmd, + struct page *raw_page, unsigned raw_off, + struct page *loop_page, unsigned loop_off, + int size, sector_t real_block) +{ + char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off; + char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off; + char *in, *out, *key; + int i, keysize; + + if (cmd == READ) { + in = raw_buf; + out = loop_buf; + } else { + in = loop_buf; + out = raw_buf; + } + + key = lo->lo_encrypt_key; + keysize = lo->lo_encrypt_key_size; + for (i = 0; i < size; 
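+/*
+ * Illustrative sketch (not part of the original driver): XOR is its own
+ * inverse, so applying this transfer twice with the same key restores
+ * the data - one routine therefore serves both READ and WRITE. A
+ * self-contained user-space model of the key indexing used below:
+ *
+ *	#include <assert.h>
+ *	#include <string.h>
+ *	static void xfer(char *buf, int n, const char *key, int klen)
+ *	{
+ *		int i;
+ *		for (i = 0; i < n; i++)
+ *			buf[i] ^= key[(i & 511) % klen];
+ *	}
+ *	int main(void)
+ *	{
+ *		char b[8] = "secret!", orig[8];
+ *		memcpy(orig, b, sizeof(b));
+ *		xfer(b, sizeof(b), "k3y", 3);	// scramble
+ *		xfer(b, sizeof(b), "k3y", 3);	// restore
+ *		assert(memcmp(b, orig, sizeof(b)) == 0);
+ *		return 0;
+ *	}
+ */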
i++) + *out++ = *in++ ^ key[(i & 511) % keysize]; + + kunmap_atomic(raw_buf, KM_USER0); + kunmap_atomic(loop_buf, KM_USER1); + cond_resched(); + return 0; +} + +static int xor_init(struct loop_device *lo, const struct loop_info64 *info) +{ + if (unlikely(info->lo_encrypt_key_size <= 0)) + return -EINVAL; + return 0; +} + +static struct loop_func_table none_funcs = { + .number = LO_CRYPT_NONE, + .transfer = transfer_none, +}; + +static struct loop_func_table xor_funcs = { + .number = LO_CRYPT_XOR, + .transfer = transfer_xor, + .init = xor_init +}; + +/* xfer_funcs[0] is special - its release function is never called */ +static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = { + &none_funcs, + &xor_funcs +}; + +static loff_t get_loop_size(struct loop_device *lo, struct file *file) +{ + loff_t size, offset, loopsize; + + /* Compute loopsize in bytes */ + size = i_size_read(file->f_mapping->host); + offset = lo->lo_offset; + loopsize = size - offset; + if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize) + loopsize = lo->lo_sizelimit; + + /* + * Unfortunately, if we want to do I/O on the device, + * the number of 512-byte sectors has to fit into a sector_t. + */ + return loopsize >> 9; +} + +static int +figure_loop_size(struct loop_device *lo) +{ + loff_t size = get_loop_size(lo, lo->lo_backing_file); + sector_t x = (sector_t)size; + + if (unlikely((loff_t)x != size)) + return -EFBIG; + + set_capacity(lo->lo_disk, x); + return 0; +} + +static inline int +lo_do_transfer(struct loop_device *lo, int cmd, + struct page *rpage, unsigned roffs, + struct page *lpage, unsigned loffs, + int size, sector_t rblock) +{ + if (unlikely(!lo->transfer)) + return 0; + + return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock); +} + +/** + * do_lo_send_aops - helper for writing data to a loop device + * + * This is the fast version for backing filesystems which implement the address + * space operations write_begin and write_end. + */ +static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec, + loff_t pos, struct page *unused) +{ + struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */ + struct address_space *mapping = file->f_mapping; + pgoff_t index; + unsigned offset, bv_offs; + int len, ret; + + mutex_lock(&mapping->host->i_mutex); + index = pos >> PAGE_CACHE_SHIFT; + offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1); + bv_offs = bvec->bv_offset; + len = bvec->bv_len; + while (len > 0) { + sector_t IV; + unsigned size, copied; + int transfer_result; + struct page *page; + void *fsdata; + + IV = ((sector_t)index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9); + size = PAGE_CACHE_SIZE - offset; + if (size > len) + size = len; + + ret = pagecache_write_begin(file, mapping, pos, size, 0, + &page, &fsdata); + if (ret) + goto fail; + + transfer_result = lo_do_transfer(lo, WRITE, page, offset, + bvec->bv_page, bv_offs, size, IV); + copied = size; + if (unlikely(transfer_result)) + copied = 0; + + ret = pagecache_write_end(file, mapping, pos, size, copied, + page, fsdata); + if (ret < 0 || ret != copied) + goto fail; + + if (unlikely(transfer_result)) + goto fail; + + bv_offs += copied; + len -= copied; + offset = 0; + index++; + pos += copied; + } + ret = 0; +out: + mutex_unlock(&mapping->host->i_mutex); + return ret; +fail: + ret = -1; + goto out; +} + +/** + * __do_lo_send_write - helper for writing data to a loop device + * + * This helper just factors out common code between do_lo_send_direct_write() + * and do_lo_send_write(). 
+ */ +static int __do_lo_send_write(struct file *file, + u8 *buf, const int len, loff_t pos) +{ + ssize_t bw; + mm_segment_t old_fs = get_fs(); + + set_fs(get_ds()); + bw = file->f_op->write(file, buf, len, &pos); + set_fs(old_fs); + if (likely(bw == len)) + return 0; + printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n", + (unsigned long long)pos, len); + if (bw >= 0) + bw = -EIO; + return bw; +} + +/** + * do_lo_send_direct_write - helper for writing data to a loop device + * + * This is the fast, non-transforming version for backing filesystems which do + * not implement the address space operations write_begin and write_end. + * It uses the write file operation which should be present on all writeable + * filesystems. + */ +static int do_lo_send_direct_write(struct loop_device *lo, + struct bio_vec *bvec, loff_t pos, struct page *page) +{ + ssize_t bw = __do_lo_send_write(lo->lo_backing_file, + kmap(bvec->bv_page) + bvec->bv_offset, + bvec->bv_len, pos); + kunmap(bvec->bv_page); + cond_resched(); + return bw; +} + +/** + * do_lo_send_write - helper for writing data to a loop device + * + * This is the slow, transforming version for filesystems which do not + * implement the address space operations write_begin and write_end. It + * uses the write file operation which should be present on all writeable + * filesystems. + * + * Using fops->write is slower than using aops->{prepare,commit}_write in the + * transforming case because we need to double buffer the data as we cannot do + * the transformations in place as we do not have direct access to the + * destination pages of the backing file. + */ +static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec, + loff_t pos, struct page *page) +{ + int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page, + bvec->bv_offset, bvec->bv_len, pos >> 9); + if (likely(!ret)) + return __do_lo_send_write(lo->lo_backing_file, + page_address(page), bvec->bv_len, + pos); + printk(KERN_ERR "loop: Transfer error at byte offset %llu, " + "length %i.\n", (unsigned long long)pos, bvec->bv_len); + if (ret > 0) + ret = -EIO; + return ret; +} + +static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos) +{ + int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t, + struct page *page); + struct bio_vec *bvec; + struct page *page = NULL; + int i, ret = 0; + + do_lo_send = do_lo_send_aops; + if (!(lo->lo_flags & LO_FLAGS_USE_AOPS)) { + do_lo_send = do_lo_send_direct_write; + if (lo->transfer != transfer_none) { + page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); + if (unlikely(!page)) + goto fail; + kmap(page); + do_lo_send = do_lo_send_write; + } + } + bio_for_each_segment(bvec, bio, i) { + ret = do_lo_send(lo, bvec, pos, page); + if (ret < 0) + break; + pos += bvec->bv_len; + } + if (page) { + kunmap(page); + __free_page(page); + } +out: + return ret; +fail: + printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n"); + ret = -ENOMEM; + goto out; +} + +struct lo_read_data { + struct loop_device *lo; + struct page *page; + unsigned offset; + int bsize; +}; + +static int +lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, + struct splice_desc *sd) +{ + struct lo_read_data *p = sd->u.data; + struct loop_device *lo = p->lo; + struct page *page = buf->page; + sector_t IV; + size_t size; + int ret; + + ret = buf->ops->confirm(pipe, buf); + if (unlikely(ret)) + return ret; + + IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9)) + + (buf->offset >> 9); + size = sd->len; + if (size 
> p->bsize) + size = p->bsize; + + if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) { + printk(KERN_ERR "loop: transfer error block %ld\n", + page->index); + size = -EINVAL; + } + + flush_dcache_page(p->page); + + if (size > 0) + p->offset += size; + + return size; +} + +static int +lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd) +{ + return __splice_from_pipe(pipe, sd, lo_splice_actor); +} + +static int +do_lo_receive(struct loop_device *lo, + struct bio_vec *bvec, int bsize, loff_t pos) +{ + struct lo_read_data cookie; + struct splice_desc sd; + struct file *file; + long retval; + + cookie.lo = lo; + cookie.page = bvec->bv_page; + cookie.offset = bvec->bv_offset; + cookie.bsize = bsize; + + sd.len = 0; + sd.total_len = bvec->bv_len; + sd.flags = 0; + sd.pos = pos; + sd.u.data = &cookie; + + file = lo->lo_backing_file; + retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor); + + if (retval < 0) + return retval; + + return 0; +} + +static int +lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos) +{ + struct bio_vec *bvec; + int i, ret = 0; + + bio_for_each_segment(bvec, bio, i) { + ret = do_lo_receive(lo, bvec, bsize, pos); + if (ret < 0) + break; + pos += bvec->bv_len; + } + return ret; +} + +static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) +{ + loff_t pos; + int ret; + + pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; + if (bio_rw(bio) == WRITE) + ret = lo_send(lo, bio, pos); + else + ret = lo_receive(lo, bio, lo->lo_blocksize, pos); + return ret; +} + +/* + * Add bio to back of pending list + */ +static void loop_add_bio(struct loop_device *lo, struct bio *bio) +{ + if (lo->lo_biotail) { + lo->lo_biotail->bi_next = bio; + lo->lo_biotail = bio; + } else + lo->lo_bio = lo->lo_biotail = bio; +} + +/* + * Grab first pending buffer + */ +static struct bio *loop_get_bio(struct loop_device *lo) +{ + struct bio *bio; + + if ((bio = lo->lo_bio)) { + if (bio == lo->lo_biotail) + lo->lo_biotail = NULL; + lo->lo_bio = bio->bi_next; + bio->bi_next = NULL; + } + + return bio; +} + +static int loop_make_request(struct request_queue *q, struct bio *old_bio) +{ + struct loop_device *lo = q->queuedata; + int rw = bio_rw(old_bio); + + if (rw == READA) + rw = READ; + + BUG_ON(!lo || (rw != READ && rw != WRITE)); + + spin_lock_irq(&lo->lo_lock); + if (lo->lo_state != Lo_bound) + goto out; + if (unlikely(rw == WRITE && (lo->lo_flags & LO_FLAGS_READ_ONLY))) + goto out; + loop_add_bio(lo, old_bio); + wake_up(&lo->lo_event); + spin_unlock_irq(&lo->lo_lock); + return 0; + +out: + spin_unlock_irq(&lo->lo_lock); + bio_io_error(old_bio); + return 0; +} + +/* + * kick off io on the underlying address space + */ +static void loop_unplug(struct request_queue *q) +{ + struct loop_device *lo = q->queuedata; + + queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q); + blk_run_address_space(lo->lo_backing_file->f_mapping); +} + +struct switch_request { + struct file *file; + struct completion wait; +}; + +static void do_loop_switch(struct loop_device *, struct switch_request *); + +static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio) +{ + if (unlikely(!bio->bi_bdev)) { + do_loop_switch(lo, bio->bi_private); + bio_put(bio); + } else { + int ret = do_bio_filebacked(lo, bio); + bio_endio(bio, ret); + } +} + +/* + * worker thread that handles reads/writes to file backed loop devices, + * to avoid blocking in our make_request_fn. 
it also does loop decrypting + * on reads for block backed loop, as that is too heavy to do from + * b_end_io context where irqs may be disabled. + * + * Loop explanation: loop_clr_fd() sets lo_state to Lo_rundown before + * calling kthread_stop(). Therefore once kthread_should_stop() is + * true, make_request will not place any more requests. Therefore + * once kthread_should_stop() is true and lo_bio is NULL, we are + * done with the loop. + */ +static int loop_thread(void *data) +{ + struct loop_device *lo = data; + struct bio *bio; + + set_user_nice(current, -20); + + while (!kthread_should_stop() || lo->lo_bio) { + + wait_event_interruptible(lo->lo_event, + lo->lo_bio || kthread_should_stop()); + + if (!lo->lo_bio) + continue; + spin_lock_irq(&lo->lo_lock); + bio = loop_get_bio(lo); + spin_unlock_irq(&lo->lo_lock); + + BUG_ON(!bio); + loop_handle_bio(lo, bio); + } + + return 0; +} + +/* + * loop_switch performs the hard work of switching a backing store. + * First it needs to flush existing IO, it does this by sending a magic + * BIO down the pipe. The completion of this BIO does the actual switch. + */ +static int loop_switch(struct loop_device *lo, struct file *file) +{ + struct switch_request w; + struct bio *bio = bio_alloc(GFP_KERNEL, 0); + if (!bio) + return -ENOMEM; + init_completion(&w.wait); + w.file = file; + bio->bi_private = &w; + bio->bi_bdev = NULL; + loop_make_request(lo->lo_queue, bio); + wait_for_completion(&w.wait); + return 0; +} + +/* + * Do the actual switch; called from the BIO completion routine + */ +static void do_loop_switch(struct loop_device *lo, struct switch_request *p) +{ + struct file *file = p->file; + struct file *old_file = lo->lo_backing_file; + struct address_space *mapping = file->f_mapping; + + mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask); + lo->lo_backing_file = file; + lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ? + mapping->host->i_bdev->bd_block_size : PAGE_SIZE; + lo->old_gfp_mask = mapping_gfp_mask(mapping); + mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); + complete(&p->wait); +} + + +/* + * loop_change_fd switched the backing store of a loopback device to + * a new file. This is useful for operating system installers to free up + * the original file and in High Availability environments to switch to + * an alternative location for the content in case of server meltdown. + * This can only work if the loop device is used read-only, and if the + * new backing store is the same size and type as the old backing store. + */ +static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, + unsigned int arg) +{ + struct file *file, *old_file; + struct inode *inode; + int error; + + error = -ENXIO; + if (lo->lo_state != Lo_bound) + goto out; + + /* the loop device has to be read-only */ + error = -EINVAL; + if (!(lo->lo_flags & LO_FLAGS_READ_ONLY)) + goto out; + + error = -EBADF; + file = fget(arg); + if (!file) + goto out; + + inode = file->f_mapping->host; + old_file = lo->lo_backing_file; + + error = -EINVAL; + + if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) + goto out_putf; + + /* new backing store needs to support loop (eg splice_read) */ + if (!inode->i_fop->splice_read) + goto out_putf; + + /* size of the new backing store needs to be the same */ + if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) + goto out_putf; + + /* and ... 
switch */ + error = loop_switch(lo, file); + if (error) + goto out_putf; + + fput(old_file); + if (max_part > 0) + ioctl_by_bdev(bdev, BLKRRPART, 0); + return 0; + + out_putf: + fput(file); + out: + return error; +} + +static inline int is_loop_device(struct file *file) +{ + struct inode *i = file->f_mapping->host; + + return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; +} + +static int loop_set_fd(struct loop_device *lo, fmode_t mode, + struct block_device *bdev, unsigned int arg) +{ + struct file *file, *f; + struct inode *inode; + struct address_space *mapping; + unsigned lo_blocksize; + int lo_flags = 0; + int error; + loff_t size; + + /* This is safe, since we have a reference from open(). */ + __module_get(THIS_MODULE); + + error = -EBADF; + file = fget(arg); + if (!file) + goto out; + + error = -EBUSY; + if (lo->lo_state != Lo_unbound) + goto out_putf; + + /* Avoid recursion */ + f = file; + while (is_loop_device(f)) { + struct loop_device *l; + + if (f->f_mapping->host->i_bdev == bdev) + goto out_putf; + + l = f->f_mapping->host->i_bdev->bd_disk->private_data; + if (l->lo_state == Lo_unbound) { + error = -EINVAL; + goto out_putf; + } + f = l->lo_backing_file; + } + + mapping = file->f_mapping; + inode = mapping->host; + + if (!(file->f_mode & FMODE_WRITE)) + lo_flags |= LO_FLAGS_READ_ONLY; + + error = -EINVAL; + if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) { + const struct address_space_operations *aops = mapping->a_ops; + /* + * If we can't read - sorry. If we only can't write - well, + * it's going to be read-only. + */ + if (!file->f_op->splice_read) + goto out_putf; + if (aops->write_begin) + lo_flags |= LO_FLAGS_USE_AOPS; + if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write) + lo_flags |= LO_FLAGS_READ_ONLY; + + lo_blocksize = S_ISBLK(inode->i_mode) ? + inode->i_bdev->bd_block_size : PAGE_SIZE; + + error = 0; + } else { + goto out_putf; + } + + size = get_loop_size(lo, file); + + if ((loff_t)(sector_t)size != size) { + error = -EFBIG; + goto out_putf; + } + + if (!(mode & FMODE_WRITE)) + lo_flags |= LO_FLAGS_READ_ONLY; + + set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); + + lo->lo_blocksize = lo_blocksize; + lo->lo_device = bdev; + lo->lo_flags = lo_flags; + lo->lo_backing_file = file; + lo->transfer = transfer_none; + lo->ioctl = NULL; + lo->lo_sizelimit = 0; + lo->old_gfp_mask = mapping_gfp_mask(mapping); + mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); + + lo->lo_bio = lo->lo_biotail = NULL; + + /* + * set queue make_request_fn, and add limits based on lower level + * device + */ + blk_queue_make_request(lo->lo_queue, loop_make_request); + lo->lo_queue->queuedata = lo; + lo->lo_queue->unplug_fn = loop_unplug; + + set_capacity(lo->lo_disk, size); + bd_set_size(bdev, size << 9); + + set_blocksize(bdev, lo_blocksize); + + lo->lo_thread = kthread_create(loop_thread, lo, "loop%d", + lo->lo_number); + if (IS_ERR(lo->lo_thread)) { + error = PTR_ERR(lo->lo_thread); + goto out_clr; + } + lo->lo_state = Lo_bound; + wake_up_process(lo->lo_thread); + if (max_part > 0) + ioctl_by_bdev(bdev, BLKRRPART, 0); + return 0; + +out_clr: + lo->lo_thread = NULL; + lo->lo_device = NULL; + lo->lo_backing_file = NULL; + lo->lo_flags = 0; + set_capacity(lo->lo_disk, 0); + invalidate_bdev(bdev); + bd_set_size(bdev, 0); + mapping_set_gfp_mask(mapping, lo->old_gfp_mask); + lo->lo_state = Lo_unbound; + out_putf: + fput(file); + out: + /* This is safe: open() is still holding a reference. 
*/ + module_put(THIS_MODULE); + return error; +} + +static int +loop_release_xfer(struct loop_device *lo) +{ + int err = 0; + struct loop_func_table *xfer = lo->lo_encryption; + + if (xfer) { + if (xfer->release) + err = xfer->release(lo); + lo->transfer = NULL; + lo->lo_encryption = NULL; + module_put(xfer->owner); + } + return err; +} + +static int +loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, + const struct loop_info64 *i) +{ + int err = 0; + + if (xfer) { + struct module *owner = xfer->owner; + + if (!try_module_get(owner)) + return -EINVAL; + if (xfer->init) + err = xfer->init(lo, i); + if (err) + module_put(owner); + else + lo->lo_encryption = xfer; + } + return err; +} + +static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) +{ + struct file *filp = lo->lo_backing_file; + gfp_t gfp = lo->old_gfp_mask; + + if (lo->lo_state != Lo_bound) + return -ENXIO; + + if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */ + return -EBUSY; + + if (filp == NULL) + return -EINVAL; + + spin_lock_irq(&lo->lo_lock); + lo->lo_state = Lo_rundown; + spin_unlock_irq(&lo->lo_lock); + + kthread_stop(lo->lo_thread); + + lo->lo_backing_file = NULL; + + loop_release_xfer(lo); + lo->transfer = NULL; + lo->ioctl = NULL; + lo->lo_device = NULL; + lo->lo_encryption = NULL; + lo->lo_offset = 0; + lo->lo_sizelimit = 0; + lo->lo_encrypt_key_size = 0; + lo->lo_flags = 0; + lo->lo_thread = NULL; + memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); + memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); + memset(lo->lo_file_name, 0, LO_NAME_SIZE); + if (bdev) + invalidate_bdev(bdev); + set_capacity(lo->lo_disk, 0); + if (bdev) + bd_set_size(bdev, 0); + mapping_set_gfp_mask(filp->f_mapping, gfp); + lo->lo_state = Lo_unbound; + fput(filp); + /* This is safe: open() is still holding a reference. 
*/ + module_put(THIS_MODULE); + if (max_part > 0) + ioctl_by_bdev(bdev, BLKRRPART, 0); + return 0; +} + +static int +loop_set_status(struct loop_device *lo, const struct loop_info64 *info) +{ + int err; + struct loop_func_table *xfer; + + if (lo->lo_encrypt_key_size && lo->lo_key_owner != current->uid && + !capable(CAP_SYS_ADMIN)) + return -EPERM; + if (lo->lo_state != Lo_bound) + return -ENXIO; + if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) + return -EINVAL; + + err = loop_release_xfer(lo); + if (err) + return err; + + if (info->lo_encrypt_type) { + unsigned int type = info->lo_encrypt_type; + + if (type >= MAX_LO_CRYPT) + return -EINVAL; + xfer = xfer_funcs[type]; + if (xfer == NULL) + return -EINVAL; + } else + xfer = NULL; + + err = loop_init_xfer(lo, xfer, info); + if (err) + return err; + + if (lo->lo_offset != info->lo_offset || + lo->lo_sizelimit != info->lo_sizelimit) { + lo->lo_offset = info->lo_offset; + lo->lo_sizelimit = info->lo_sizelimit; + if (figure_loop_size(lo)) + return -EFBIG; + } + + memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); + memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE); + lo->lo_file_name[LO_NAME_SIZE-1] = 0; + lo->lo_crypt_name[LO_NAME_SIZE-1] = 0; + + if (!xfer) + xfer = &none_funcs; + lo->transfer = xfer->transfer; + lo->ioctl = xfer->ioctl; + + if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) != + (info->lo_flags & LO_FLAGS_AUTOCLEAR)) + lo->lo_flags ^= LO_FLAGS_AUTOCLEAR; + + lo->lo_encrypt_key_size = info->lo_encrypt_key_size; + lo->lo_init[0] = info->lo_init[0]; + lo->lo_init[1] = info->lo_init[1]; + if (info->lo_encrypt_key_size) { + memcpy(lo->lo_encrypt_key, info->lo_encrypt_key, + info->lo_encrypt_key_size); + lo->lo_key_owner = current->uid; + } + + return 0; +} + +static int +loop_get_status(struct loop_device *lo, struct loop_info64 *info) +{ + struct file *file = lo->lo_backing_file; + struct kstat stat; + int error; + + if (lo->lo_state != Lo_bound) + return -ENXIO; + error = vfs_getattr(file->f_path.mnt, file->f_path.dentry, &stat); + if (error) + return error; + memset(info, 0, sizeof(*info)); + info->lo_number = lo->lo_number; + info->lo_device = huge_encode_dev(stat.dev); + info->lo_inode = stat.ino; + info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev); + info->lo_offset = lo->lo_offset; + info->lo_sizelimit = lo->lo_sizelimit; + info->lo_flags = lo->lo_flags; + memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE); + memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE); + info->lo_encrypt_type = + lo->lo_encryption ? 
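+/*
+ * Illustrative aside (not part of the original driver): loop_set_status()
+ * above is what backs "losetup -o" - setting lo_offset shifts all loop
+ * I/O into the middle of the backing file, e.g. to skip a hypothetical
+ * 1 MiB header in an already-bound image:
+ *
+ *	struct loop_info64 info;
+ *	memset(&info, 0, sizeof(info));
+ *	info.lo_offset = 1 << 20;	// byte offset into backing file
+ *	ioctl(loopfd, LOOP_SET_STATUS64, &info);
+ */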
lo->lo_encryption->number : 0; + if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) { + info->lo_encrypt_key_size = lo->lo_encrypt_key_size; + memcpy(info->lo_encrypt_key, lo->lo_encrypt_key, + lo->lo_encrypt_key_size); + } + return 0; +} + +static void +loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64) +{ + memset(info64, 0, sizeof(*info64)); + info64->lo_number = info->lo_number; + info64->lo_device = info->lo_device; + info64->lo_inode = info->lo_inode; + info64->lo_rdevice = info->lo_rdevice; + info64->lo_offset = info->lo_offset; + info64->lo_sizelimit = 0; + info64->lo_encrypt_type = info->lo_encrypt_type; + info64->lo_encrypt_key_size = info->lo_encrypt_key_size; + info64->lo_flags = info->lo_flags; + info64->lo_init[0] = info->lo_init[0]; + info64->lo_init[1] = info->lo_init[1]; + if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI) + memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE); + else + memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE); + memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE); +} + +static int +loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info) +{ + memset(info, 0, sizeof(*info)); + info->lo_number = info64->lo_number; + info->lo_device = info64->lo_device; + info->lo_inode = info64->lo_inode; + info->lo_rdevice = info64->lo_rdevice; + info->lo_offset = info64->lo_offset; + info->lo_encrypt_type = info64->lo_encrypt_type; + info->lo_encrypt_key_size = info64->lo_encrypt_key_size; + info->lo_flags = info64->lo_flags; + info->lo_init[0] = info64->lo_init[0]; + info->lo_init[1] = info64->lo_init[1]; + if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI) + memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE); + else + memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE); + memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE); + + /* error in case values were truncated */ + if (info->lo_device != info64->lo_device || + info->lo_rdevice != info64->lo_rdevice || + info->lo_inode != info64->lo_inode || + info->lo_offset != info64->lo_offset) + return -EOVERFLOW; + + return 0; +} + +static int +loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg) +{ + struct loop_info info; + struct loop_info64 info64; + + if (copy_from_user(&info, arg, sizeof (struct loop_info))) + return -EFAULT; + loop_info64_from_old(&info, &info64); + return loop_set_status(lo, &info64); +} + +static int +loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg) +{ + struct loop_info64 info64; + + if (copy_from_user(&info64, arg, sizeof (struct loop_info64))) + return -EFAULT; + return loop_set_status(lo, &info64); +} + +static int +loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) { + struct loop_info info; + struct loop_info64 info64; + int err = 0; + + if (!arg) + err = -EINVAL; + if (!err) + err = loop_get_status(lo, &info64); + if (!err) + err = loop_info64_to_old(&info64, &info); + if (!err && copy_to_user(arg, &info, sizeof(info))) + err = -EFAULT; + + return err; +} + +static int +loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { + struct loop_info64 info64; + int err = 0; + + if (!arg) + err = -EINVAL; + if (!err) + err = loop_get_status(lo, &info64); + if (!err && copy_to_user(arg, &info64, sizeof(info64))) + err = -EFAULT; + + return err; +} + +static int lo_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct loop_device *lo = 
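+/*
+ * Illustrative sketch (not part of the original driver): the usual
+ * losetup-style sequence that exercises the LOOP_SET_FD and LOOP_CLR_FD
+ * cases dispatched below. Paths are examples only and error handling is
+ * omitted:
+ *
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/loop.h>
+ *	int main(void)
+ *	{
+ *		int loopfd = open("/dev/loop0", O_RDWR);
+ *		int filefd = open("/tmp/backing.img", O_RDWR);
+ *		ioctl(loopfd, LOOP_SET_FD, filefd);	// bind backing file
+ *		close(filefd);		// loop_set_fd() took its own reference
+ *		// ... /dev/loop0 now acts as a block device ...
+ *		ioctl(loopfd, LOOP_CLR_FD, 0);		// tear down
+ *		close(loopfd);
+ *		return 0;
+ *	}
+ */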
bdev->bd_disk->private_data; + int err; + + mutex_lock(&lo->lo_ctl_mutex); + switch (cmd) { + case LOOP_SET_FD: + err = loop_set_fd(lo, mode, bdev, arg); + break; + case LOOP_CHANGE_FD: + err = loop_change_fd(lo, bdev, arg); + break; + case LOOP_CLR_FD: + err = loop_clr_fd(lo, bdev); + break; + case LOOP_SET_STATUS: + err = loop_set_status_old(lo, (struct loop_info __user *) arg); + break; + case LOOP_GET_STATUS: + err = loop_get_status_old(lo, (struct loop_info __user *) arg); + break; + case LOOP_SET_STATUS64: + err = loop_set_status64(lo, (struct loop_info64 __user *) arg); + break; + case LOOP_GET_STATUS64: + err = loop_get_status64(lo, (struct loop_info64 __user *) arg); + break; + default: + err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL; + } + mutex_unlock(&lo->lo_ctl_mutex); + return err; +} + +#ifdef CONFIG_COMPAT +struct compat_loop_info { + compat_int_t lo_number; /* ioctl r/o */ + compat_dev_t lo_device; /* ioctl r/o */ + compat_ulong_t lo_inode; /* ioctl r/o */ + compat_dev_t lo_rdevice; /* ioctl r/o */ + compat_int_t lo_offset; + compat_int_t lo_encrypt_type; + compat_int_t lo_encrypt_key_size; /* ioctl w/o */ + compat_int_t lo_flags; /* ioctl r/o */ + char lo_name[LO_NAME_SIZE]; + unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ + compat_ulong_t lo_init[2]; + char reserved[4]; +}; + +/* + * Transfer 32-bit compatibility structure in userspace to 64-bit loop info + * - noinlined to reduce stack space usage in main part of driver + */ +static noinline int +loop_info64_from_compat(const struct compat_loop_info __user *arg, + struct loop_info64 *info64) +{ + struct compat_loop_info info; + + if (copy_from_user(&info, arg, sizeof(info))) + return -EFAULT; + + memset(info64, 0, sizeof(*info64)); + info64->lo_number = info.lo_number; + info64->lo_device = info.lo_device; + info64->lo_inode = info.lo_inode; + info64->lo_rdevice = info.lo_rdevice; + info64->lo_offset = info.lo_offset; + info64->lo_sizelimit = 0; + info64->lo_encrypt_type = info.lo_encrypt_type; + info64->lo_encrypt_key_size = info.lo_encrypt_key_size; + info64->lo_flags = info.lo_flags; + info64->lo_init[0] = info.lo_init[0]; + info64->lo_init[1] = info.lo_init[1]; + if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI) + memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE); + else + memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE); + memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE); + return 0; +} + +/* + * Transfer 64-bit loop info to 32-bit compatibility structure in userspace + * - noinlined to reduce stack space usage in main part of driver + */ +static noinline int +loop_info64_to_compat(const struct loop_info64 *info64, + struct compat_loop_info __user *arg) +{ + struct compat_loop_info info; + + memset(&info, 0, sizeof(info)); + info.lo_number = info64->lo_number; + info.lo_device = info64->lo_device; + info.lo_inode = info64->lo_inode; + info.lo_rdevice = info64->lo_rdevice; + info.lo_offset = info64->lo_offset; + info.lo_encrypt_type = info64->lo_encrypt_type; + info.lo_encrypt_key_size = info64->lo_encrypt_key_size; + info.lo_flags = info64->lo_flags; + info.lo_init[0] = info64->lo_init[0]; + info.lo_init[1] = info64->lo_init[1]; + if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI) + memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE); + else + memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE); + memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE); + + /* error in case values were truncated */ + if (info.lo_device != info64->lo_device || + 
info.lo_rdevice != info64->lo_rdevice || + info.lo_inode != info64->lo_inode || + info.lo_offset != info64->lo_offset || + info.lo_init[0] != info64->lo_init[0] || + info.lo_init[1] != info64->lo_init[1]) + return -EOVERFLOW; + + if (copy_to_user(arg, &info, sizeof(info))) + return -EFAULT; + return 0; +} + +static int +loop_set_status_compat(struct loop_device *lo, + const struct compat_loop_info __user *arg) +{ + struct loop_info64 info64; + int ret; + + ret = loop_info64_from_compat(arg, &info64); + if (ret < 0) + return ret; + return loop_set_status(lo, &info64); +} + +static int +loop_get_status_compat(struct loop_device *lo, + struct compat_loop_info __user *arg) +{ + struct loop_info64 info64; + int err = 0; + + if (!arg) + err = -EINVAL; + if (!err) + err = loop_get_status(lo, &info64); + if (!err) + err = loop_info64_to_compat(&info64, arg); + return err; +} + +static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct loop_device *lo = bdev->bd_disk->private_data; + int err; + + switch(cmd) { + case LOOP_SET_STATUS: + mutex_lock(&lo->lo_ctl_mutex); + err = loop_set_status_compat( + lo, (const struct compat_loop_info __user *) arg); + mutex_unlock(&lo->lo_ctl_mutex); + break; + case LOOP_GET_STATUS: + mutex_lock(&lo->lo_ctl_mutex); + err = loop_get_status_compat( + lo, (struct compat_loop_info __user *) arg); + mutex_unlock(&lo->lo_ctl_mutex); + break; + case LOOP_CLR_FD: + case LOOP_GET_STATUS64: + case LOOP_SET_STATUS64: + arg = (unsigned long) compat_ptr(arg); + case LOOP_SET_FD: + case LOOP_CHANGE_FD: + err = lo_ioctl(bdev, mode, cmd, arg); + break; + default: + err = -ENOIOCTLCMD; + break; + } + return err; +} +#endif + +static int lo_open(struct block_device *bdev, fmode_t mode) +{ + struct loop_device *lo = bdev->bd_disk->private_data; + + mutex_lock(&lo->lo_ctl_mutex); + lo->lo_refcnt++; + mutex_unlock(&lo->lo_ctl_mutex); + + return 0; +} + +static int lo_release(struct gendisk *disk, fmode_t mode) +{ + struct loop_device *lo = disk->private_data; + + mutex_lock(&lo->lo_ctl_mutex); + --lo->lo_refcnt; + + if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) && !lo->lo_refcnt) + loop_clr_fd(lo, NULL); + + mutex_unlock(&lo->lo_ctl_mutex); + + return 0; +} + +static struct block_device_operations lo_fops = { + .owner = THIS_MODULE, + .open = lo_open, + .release = lo_release, + .ioctl = lo_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = lo_compat_ioctl, +#endif +}; + +/* + * And now the modules code and kernel interface. 
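+ * + * A usage sketch (hypothetical invocation; the values are + * illustrative, but the parameter names are the real module + * parameters declared just below): + * + *	modprobe loop max_loop=8 max_part=15 + * + * preallocates 8 loop devices (and makes 8 a hard limit) while + * reserving 1 << fls(15) = 16 minors per device, i.e. room for 15 + * partitions plus the whole-disk node.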
+ */ +static int max_loop; +module_param(max_loop, int, 0); +MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); +module_param(max_part, int, 0); +MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); + +int loop_register_transfer(struct loop_func_table *funcs) +{ + unsigned int n = funcs->number; + + if (n >= MAX_LO_CRYPT || xfer_funcs[n]) + return -EINVAL; + xfer_funcs[n] = funcs; + return 0; +} + +int loop_unregister_transfer(int number) +{ + unsigned int n = number; + struct loop_device *lo; + struct loop_func_table *xfer; + + if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) + return -EINVAL; + + xfer_funcs[n] = NULL; + + list_for_each_entry(lo, &loop_devices, lo_list) { + mutex_lock(&lo->lo_ctl_mutex); + + if (lo->lo_encryption == xfer) + loop_release_xfer(lo); + + mutex_unlock(&lo->lo_ctl_mutex); + } + + return 0; +} + +EXPORT_SYMBOL(loop_register_transfer); +EXPORT_SYMBOL(loop_unregister_transfer); + +static struct loop_device *loop_alloc(int i) +{ + struct loop_device *lo; + struct gendisk *disk; + + lo = kzalloc(sizeof(*lo), GFP_KERNEL); + if (!lo) + goto out; + + lo->lo_queue = blk_alloc_queue(GFP_KERNEL); + if (!lo->lo_queue) + goto out_free_dev; + + disk = lo->lo_disk = alloc_disk(1 << part_shift); + if (!disk) + goto out_free_queue; + + mutex_init(&lo->lo_ctl_mutex); + lo->lo_number = i; + lo->lo_thread = NULL; + init_waitqueue_head(&lo->lo_event); + spin_lock_init(&lo->lo_lock); + disk->major = LOOP_MAJOR; + disk->first_minor = i << part_shift; + disk->fops = &lo_fops; + disk->private_data = lo; + disk->queue = lo->lo_queue; + sprintf(disk->disk_name, "loop%d", i); + return lo; + +out_free_queue: + blk_cleanup_queue(lo->lo_queue); +out_free_dev: + kfree(lo); +out: + return NULL; +} + +static void loop_free(struct loop_device *lo) +{ + blk_cleanup_queue(lo->lo_queue); + put_disk(lo->lo_disk); + list_del(&lo->lo_list); + kfree(lo); +} + +static struct loop_device *loop_init_one(int i) +{ + struct loop_device *lo; + + list_for_each_entry(lo, &loop_devices, lo_list) { + if (lo->lo_number == i) + return lo; + } + + lo = loop_alloc(i); + if (lo) { + add_disk(lo->lo_disk); + list_add_tail(&lo->lo_list, &loop_devices); + } + return lo; +} + +static void loop_del_one(struct loop_device *lo) +{ + del_gendisk(lo->lo_disk); + loop_free(lo); +} + +static struct kobject *loop_probe(dev_t dev, int *part, void *data) +{ + struct loop_device *lo; + struct kobject *kobj; + + mutex_lock(&loop_devices_mutex); + lo = loop_init_one(dev & MINORMASK); + kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM); + mutex_unlock(&loop_devices_mutex); + + *part = 0; + return kobj; +} + +static int __init loop_init(void) +{ + int i, nr; + unsigned long range; + struct loop_device *lo, *next; + + /* + * loop module now has a feature to instantiate underlying device + * structure on-demand, provided that there is access to a dev node. + * However, this will not work well with user space tools that don't + * know about this feature. In order not to break any existing + * tools, we do the following: + * + * (1) if max_loop is specified, create that many devices upfront, + * and this also becomes a hard limit. + * (2) if max_loop is not specified, create 8 loop devices on module + * load; users can further extend the set of loop devices by + * creating dev nodes themselves and having the kernel + * automatically instantiate the actual device on demand.
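+ * + * Worked example (illustrative numbers, not part of the original + * comment): with max_part = 15, part_shift = fls(15) = 4 below, so + * each device occupies 1 << 4 = 16 minors; with max_loop unset, the + * probe range becomes 1UL << (MINORBITS - 4) = 65536 minors.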
+ */ + + part_shift = 0; + if (max_part > 0) + part_shift = fls(max_part); + + if (max_loop > 1UL << (MINORBITS - part_shift)) + return -EINVAL; + + if (max_loop) { + nr = max_loop; + range = max_loop; + } else { + nr = 8; + range = 1UL << (MINORBITS - part_shift); + } + + if (register_blkdev(LOOP_MAJOR, "loop")) + return -EIO; + + for (i = 0; i < nr; i++) { + lo = loop_alloc(i); + if (!lo) + goto Enomem; + list_add_tail(&lo->lo_list, &loop_devices); + } + + /* point of no return */ + + list_for_each_entry(lo, &loop_devices, lo_list) + add_disk(lo->lo_disk); + + blk_register_region(MKDEV(LOOP_MAJOR, 0), range, + THIS_MODULE, loop_probe, NULL, NULL); + + printk(KERN_INFO "loop: module loaded\n"); + return 0; + +Enomem: + printk(KERN_INFO "loop: out of memory\n"); + + list_for_each_entry_safe(lo, next, &loop_devices, lo_list) + loop_free(lo); + + unregister_blkdev(LOOP_MAJOR, "loop"); + return -ENOMEM; +} + +static void __exit loop_exit(void) +{ + unsigned long range; + struct loop_device *lo, *next; + + range = max_loop ? max_loop : 1UL << (MINORBITS - part_shift); + + list_for_each_entry_safe(lo, next, &loop_devices, lo_list) + loop_del_one(lo); + + blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); + unregister_blkdev(LOOP_MAJOR, "loop"); +} + +module_init(loop_init); +module_exit(loop_exit); + +#ifndef MODULE +static int __init max_loop_setup(char *str) +{ + max_loop = simple_strtol(str, NULL, 0); + return 1; +} + +__setup("max_loop=", max_loop_setup); +#endif diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c new file mode 100644 index 0000000..92d6c9f --- /dev/null +++ b/drivers/block/nbd.c @@ -0,0 +1,829 @@ +/* + * Network block device - make block devices work over TCP + * + * Note that you can not swap over this thing, yet. Seems to work but + * deadlocks sometimes - you can not swap over TCP in general. + * + * Copyright 1997-2000 Pavel Machek <pavel@ucw.cz> + * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com> + * + * This file is released under GPLv2 or later. + * + * (part of code stolen from loop.c) + */ + +#include <linux/major.h> + +#include <linux/blkdev.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <linux/bio.h> +#include <linux/stat.h> +#include <linux/errno.h> +#include <linux/file.h> +#include <linux/ioctl.h> +#include <linux/compiler.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <net/sock.h> +#include <linux/net.h> +#include <linux/kthread.h> + +#include <asm/uaccess.h> +#include <asm/system.h> +#include <asm/types.h> + +#include <linux/nbd.h> + +#define LO_MAGIC 0x68797548 + +#ifdef NDEBUG +#define dprintk(flags, fmt...) +#else /* NDEBUG */ +#define dprintk(flags, fmt...) do { \ + if (debugflags & (flags)) printk(KERN_DEBUG fmt); \ +} while (0) +#define DBG_IOCTL 0x0004 +#define DBG_INIT 0x0010 +#define DBG_EXIT 0x0020 +#define DBG_BLKDEV 0x0100 +#define DBG_RX 0x0200 +#define DBG_TX 0x0400 +static unsigned int debugflags; +#endif /* NDEBUG */ + +static unsigned int nbds_max = 16; +static struct nbd_device *nbd_dev; +static int max_part; + +/* + * Use just one lock (or at most 1 per NIC). Two arguments for this: + * 1. Each NIC is essentially a synchronization point for all servers + * accessed through that NIC so there's no need to have more locks + * than NICs anyway. + * 2. More locks lead to more "Dirty cache line bouncing" which will slow + * down each lock to the point where they're actually slower than just + * a single lock. 
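+ * In this driver that single lock is nbd_lock below: nbd_init() + * passes it to blk_init_queue() for every device, so all nbd request + * queues share one queue_lock.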
+ * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this! + */ +static DEFINE_SPINLOCK(nbd_lock); + +#ifndef NDEBUG +static const char *ioctl_cmd_to_ascii(int cmd) +{ + switch (cmd) { + case NBD_SET_SOCK: return "set-sock"; + case NBD_SET_BLKSIZE: return "set-blksize"; + case NBD_SET_SIZE: return "set-size"; + case NBD_DO_IT: return "do-it"; + case NBD_CLEAR_SOCK: return "clear-sock"; + case NBD_CLEAR_QUE: return "clear-que"; + case NBD_PRINT_DEBUG: return "print-debug"; + case NBD_SET_SIZE_BLOCKS: return "set-size-blocks"; + case NBD_DISCONNECT: return "disconnect"; + case BLKROSET: return "set-read-only"; + case BLKFLSBUF: return "flush-buffer-cache"; + } + return "unknown"; +} + +static const char *nbdcmd_to_ascii(int cmd) +{ + switch (cmd) { + case NBD_CMD_READ: return "read"; + case NBD_CMD_WRITE: return "write"; + case NBD_CMD_DISC: return "disconnect"; + } + return "invalid"; +} +#endif /* NDEBUG */ + +static void nbd_end_request(struct request *req) +{ + int error = req->errors ? -EIO : 0; + struct request_queue *q = req->q; + unsigned long flags; + + dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name, + req, error ? "failed" : "done"); + + spin_lock_irqsave(q->queue_lock, flags); + __blk_end_request(req, error, req->nr_sectors << 9); + spin_unlock_irqrestore(q->queue_lock, flags); +} + +static void sock_shutdown(struct nbd_device *lo, int lock) +{ + /* Forcibly shutdown the socket causing all listeners + * to error + * + * FIXME: This code is duplicated from sys_shutdown, but + * there should be a more generic interface rather than + * calling socket ops directly here */ + if (lock) + mutex_lock(&lo->tx_lock); + if (lo->sock) { + printk(KERN_WARNING "%s: shutting down socket\n", + lo->disk->disk_name); + kernel_sock_shutdown(lo->sock, SHUT_RDWR); + lo->sock = NULL; + } + if (lock) + mutex_unlock(&lo->tx_lock); +} + +static void nbd_xmit_timeout(unsigned long arg) +{ + struct task_struct *task = (struct task_struct *)arg; + + printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n", + task->comm, task->pid); + force_sig(SIGKILL, task); +} + +/* + * Send or receive packet. + */ +static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size, + int msg_flags) +{ + struct socket *sock = lo->sock; + int result; + struct msghdr msg; + struct kvec iov; + sigset_t blocked, oldset; + + if (unlikely(!sock)) { + printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n", + lo->disk->disk_name, (send ? 
"send" : "recv")); + return -EINVAL; + } + + /* Allow interception of SIGKILL only + * Don't allow other signals to interrupt the transmission */ + siginitsetinv(&blocked, sigmask(SIGKILL)); + sigprocmask(SIG_SETMASK, &blocked, &oldset); + + do { + sock->sk->sk_allocation = GFP_NOIO; + iov.iov_base = buf; + iov.iov_len = size; + msg.msg_name = NULL; + msg.msg_namelen = 0; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_flags = msg_flags | MSG_NOSIGNAL; + + if (send) { + struct timer_list ti; + + if (lo->xmit_timeout) { + init_timer(&ti); + ti.function = nbd_xmit_timeout; + ti.data = (unsigned long)current; + ti.expires = jiffies + lo->xmit_timeout; + add_timer(&ti); + } + result = kernel_sendmsg(sock, &msg, &iov, 1, size); + if (lo->xmit_timeout) + del_timer_sync(&ti); + } else + result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0); + + if (signal_pending(current)) { + siginfo_t info; + printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n", + task_pid_nr(current), current->comm, + dequeue_signal_lock(current, ¤t->blocked, &info)); + result = -EINTR; + sock_shutdown(lo, !send); + break; + } + + if (result <= 0) { + if (result == 0) + result = -EPIPE; /* short read */ + break; + } + size -= result; + buf += result; + } while (size > 0); + + sigprocmask(SIG_SETMASK, &oldset, NULL); + + return result; +} + +static inline int sock_send_bvec(struct nbd_device *lo, struct bio_vec *bvec, + int flags) +{ + int result; + void *kaddr = kmap(bvec->bv_page); + result = sock_xmit(lo, 1, kaddr + bvec->bv_offset, bvec->bv_len, flags); + kunmap(bvec->bv_page); + return result; +} + +/* always call with the tx_lock held */ +static int nbd_send_req(struct nbd_device *lo, struct request *req) +{ + int result, flags; + struct nbd_request request; + unsigned long size = req->nr_sectors << 9; + + request.magic = htonl(NBD_REQUEST_MAGIC); + request.type = htonl(nbd_cmd(req)); + request.from = cpu_to_be64((u64) req->sector << 9); + request.len = htonl(size); + memcpy(request.handle, &req, sizeof(req)); + + dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n", + lo->disk->disk_name, req, + nbdcmd_to_ascii(nbd_cmd(req)), + (unsigned long long)req->sector << 9, + req->nr_sectors << 9); + result = sock_xmit(lo, 1, &request, sizeof(request), + (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0); + if (result <= 0) { + printk(KERN_ERR "%s: Send control failed (result %d)\n", + lo->disk->disk_name, result); + goto error_out; + } + + if (nbd_cmd(req) == NBD_CMD_WRITE) { + struct req_iterator iter; + struct bio_vec *bvec; + /* + * we are really probing at internals to determine + * whether to set MSG_MORE or not... 
+ */ + rq_for_each_segment(bvec, req, iter) { + flags = 0; + if (!rq_iter_last(req, iter)) + flags = MSG_MORE; + dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n", + lo->disk->disk_name, req, bvec->bv_len); + result = sock_send_bvec(lo, bvec, flags); + if (result <= 0) { + printk(KERN_ERR "%s: Send data failed (result %d)\n", + lo->disk->disk_name, result); + goto error_out; + } + } + } + return 0; + +error_out: + return 1; +} + +static struct request *nbd_find_request(struct nbd_device *lo, + struct request *xreq) +{ + struct request *req, *tmp; + int err; + + err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq); + if (unlikely(err)) + goto out; + + spin_lock(&lo->queue_lock); + list_for_each_entry_safe(req, tmp, &lo->queue_head, queuelist) { + if (req != xreq) + continue; + list_del_init(&req->queuelist); + spin_unlock(&lo->queue_lock); + return req; + } + spin_unlock(&lo->queue_lock); + + err = -ENOENT; + +out: + return ERR_PTR(err); +} + +static inline int sock_recv_bvec(struct nbd_device *lo, struct bio_vec *bvec) +{ + int result; + void *kaddr = kmap(bvec->bv_page); + result = sock_xmit(lo, 0, kaddr + bvec->bv_offset, bvec->bv_len, + MSG_WAITALL); + kunmap(bvec->bv_page); + return result; +} + +/* NULL returned = something went wrong, inform userspace */ +static struct request *nbd_read_stat(struct nbd_device *lo) +{ + int result; + struct nbd_reply reply; + struct request *req; + + reply.magic = 0; + result = sock_xmit(lo, 0, &reply, sizeof(reply), MSG_WAITALL); + if (result <= 0) { + printk(KERN_ERR "%s: Receive control failed (result %d)\n", + lo->disk->disk_name, result); + goto harderror; + } + + if (ntohl(reply.magic) != NBD_REPLY_MAGIC) { + printk(KERN_ERR "%s: Wrong magic (0x%lx)\n", + lo->disk->disk_name, + (unsigned long)ntohl(reply.magic)); + result = -EPROTO; + goto harderror; + } + + req = nbd_find_request(lo, *(struct request **)reply.handle); + if (IS_ERR(req)) { + result = PTR_ERR(req); + if (result != -ENOENT) + goto harderror; + + printk(KERN_ERR "%s: Unexpected reply (%p)\n", + lo->disk->disk_name, reply.handle); + result = -EBADR; + goto harderror; + } + + if (ntohl(reply.error)) { + printk(KERN_ERR "%s: Other side returned error (%d)\n", + lo->disk->disk_name, ntohl(reply.error)); + req->errors++; + return req; + } + + dprintk(DBG_RX, "%s: request %p: got reply\n", + lo->disk->disk_name, req); + if (nbd_cmd(req) == NBD_CMD_READ) { + struct req_iterator iter; + struct bio_vec *bvec; + + rq_for_each_segment(bvec, req, iter) { + result = sock_recv_bvec(lo, bvec); + if (result <= 0) { + printk(KERN_ERR "%s: Receive data failed (result %d)\n", + lo->disk->disk_name, result); + req->errors++; + return req; + } + dprintk(DBG_RX, "%s: request %p: got %d bytes data\n", + lo->disk->disk_name, req, bvec->bv_len); + } + } + return req; +harderror: + lo->harderror = result; + return NULL; +} + +static ssize_t pid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%ld\n", + (long) ((struct nbd_device *)disk->private_data)->pid); +} + +static struct device_attribute pid_attr = { + .attr = { .name = "pid", .mode = S_IRUGO}, + .show = pid_show, +}; + +static int nbd_do_it(struct nbd_device *lo) +{ + struct request *req; + int ret; + + BUG_ON(lo->magic != LO_MAGIC); + + lo->pid = current->pid; + ret = sysfs_create_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr); + if (ret) { + printk(KERN_ERR "nbd: sysfs_create_file failed!"); + lo->pid = 0; + return ret; + } + + 
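+	/* + * Receive loop (descriptive note): this runs in the context of + * the NBD_DO_IT ioctl caller and blocks in nbd_read_stat() until + * the server replies or the socket is shut down; a NULL return + * drops us through to the cleanup below. + */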
while ((req = nbd_read_stat(lo)) != NULL) + nbd_end_request(req); + + sysfs_remove_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr); + lo->pid = 0; + return 0; +} + +static void nbd_clear_que(struct nbd_device *lo) +{ + struct request *req; + + BUG_ON(lo->magic != LO_MAGIC); + + /* + * Because we have set lo->sock to NULL under the tx_lock, all + * modifications to the list must have completed by now. For + * the same reason, the active_req must be NULL. + * + * As a consequence, we don't need to take the spin lock while + * purging the list here. + */ + BUG_ON(lo->sock); + BUG_ON(lo->active_req); + + while (!list_empty(&lo->queue_head)) { + req = list_entry(lo->queue_head.next, struct request, + queuelist); + list_del_init(&req->queuelist); + req->errors++; + nbd_end_request(req); + } +} + + +static void nbd_handle_req(struct nbd_device *lo, struct request *req) +{ + if (!blk_fs_request(req)) + goto error_out; + + nbd_cmd(req) = NBD_CMD_READ; + if (rq_data_dir(req) == WRITE) { + nbd_cmd(req) = NBD_CMD_WRITE; + if (lo->flags & NBD_READ_ONLY) { + printk(KERN_ERR "%s: Write on read-only\n", + lo->disk->disk_name); + goto error_out; + } + } + + req->errors = 0; + + mutex_lock(&lo->tx_lock); + if (unlikely(!lo->sock)) { + mutex_unlock(&lo->tx_lock); + printk(KERN_ERR "%s: Attempted send on closed socket\n", + lo->disk->disk_name); + req->errors++; + nbd_end_request(req); + return; + } + + lo->active_req = req; + + if (nbd_send_req(lo, req) != 0) { + printk(KERN_ERR "%s: Request send failed\n", + lo->disk->disk_name); + req->errors++; + nbd_end_request(req); + } else { + spin_lock(&lo->queue_lock); + list_add(&req->queuelist, &lo->queue_head); + spin_unlock(&lo->queue_lock); + } + + lo->active_req = NULL; + mutex_unlock(&lo->tx_lock); + wake_up_all(&lo->active_wq); + + return; + +error_out: + req->errors++; + nbd_end_request(req); +} + +static int nbd_thread(void *data) +{ + struct nbd_device *lo = data; + struct request *req; + + set_user_nice(current, -20); + while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) { + /* wait for something to do */ + wait_event_interruptible(lo->waiting_wq, + kthread_should_stop() || + !list_empty(&lo->waiting_queue)); + + /* extract request */ + if (list_empty(&lo->waiting_queue)) + continue; + + spin_lock_irq(&lo->queue_lock); + req = list_entry(lo->waiting_queue.next, struct request, + queuelist); + list_del_init(&req->queuelist); + spin_unlock_irq(&lo->queue_lock); + + /* handle request */ + nbd_handle_req(lo, req); + } + return 0; +} + +/* + * We always wait for result of write, for now. 
It would be nice to make it optional + * in future + * if ((rq_data_dir(req) == WRITE) && (lo->flags & NBD_WRITE_NOCHK)) + * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); } + */ + +static void do_nbd_request(struct request_queue * q) +{ + struct request *req; + + while ((req = elv_next_request(q)) != NULL) { + struct nbd_device *lo; + + blkdev_dequeue_request(req); + + spin_unlock_irq(q->queue_lock); + + dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", + req->rq_disk->disk_name, req, req->cmd_type); + + lo = req->rq_disk->private_data; + + BUG_ON(lo->magic != LO_MAGIC); + + if (unlikely(!lo->sock)) { + printk(KERN_ERR "%s: Attempted send on closed socket\n", + lo->disk->disk_name); + req->errors++; + nbd_end_request(req); + spin_lock_irq(q->queue_lock); + continue; + } + + spin_lock_irq(&lo->queue_lock); + list_add_tail(&req->queuelist, &lo->waiting_queue); + spin_unlock_irq(&lo->queue_lock); + + wake_up(&lo->waiting_wq); + + spin_lock_irq(q->queue_lock); + } +} + +static int nbd_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct nbd_device *lo = bdev->bd_disk->private_data; + struct file *file; + int error; + struct request sreq ; + struct task_struct *thread; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + BUG_ON(lo->magic != LO_MAGIC); + + /* Anyone capable of this syscall can do *real bad* things */ + dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", + lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); + + switch (cmd) { + case NBD_DISCONNECT: + printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name); + blk_rq_init(NULL, &sreq); + sreq.cmd_type = REQ_TYPE_SPECIAL; + nbd_cmd(&sreq) = NBD_CMD_DISC; + /* + * Set these to sane values in case server implementation + * fails to check the request type first and also to keep + * debugging output cleaner. 
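+ * Note that sreq lives on this stack frame and is sent directly + * under tx_lock below; it is never added to queue_head, and no + * reply is expected for NBD_CMD_DISC.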
+ */ + sreq.sector = 0; + sreq.nr_sectors = 0; + if (!lo->sock) + return -EINVAL; + mutex_lock(&lo->tx_lock); + nbd_send_req(lo, &sreq); + mutex_unlock(&lo->tx_lock); + return 0; + + case NBD_CLEAR_SOCK: + error = 0; + mutex_lock(&lo->tx_lock); + lo->sock = NULL; + mutex_unlock(&lo->tx_lock); + file = lo->file; + lo->file = NULL; + nbd_clear_que(lo); + BUG_ON(!list_empty(&lo->queue_head)); + if (file) + fput(file); + return error; + case NBD_SET_SOCK: + if (lo->file) + return -EBUSY; + error = -EINVAL; + file = fget(arg); + if (file) { + struct inode *inode = file->f_path.dentry->d_inode; + if (S_ISSOCK(inode->i_mode)) { + lo->file = file; + lo->sock = SOCKET_I(inode); + if (max_part > 0) + bdev->bd_invalidated = 1; + error = 0; + } else { + fput(file); + } + } + return error; + case NBD_SET_BLKSIZE: + lo->blksize = arg; + lo->bytesize &= ~(lo->blksize-1); + bdev->bd_inode->i_size = lo->bytesize; + set_blocksize(bdev, lo->blksize); + set_capacity(lo->disk, lo->bytesize >> 9); + return 0; + case NBD_SET_SIZE: + lo->bytesize = arg & ~(lo->blksize-1); + bdev->bd_inode->i_size = lo->bytesize; + set_blocksize(bdev, lo->blksize); + set_capacity(lo->disk, lo->bytesize >> 9); + return 0; + case NBD_SET_TIMEOUT: + lo->xmit_timeout = arg * HZ; + return 0; + case NBD_SET_SIZE_BLOCKS: + lo->bytesize = ((u64) arg) * lo->blksize; + bdev->bd_inode->i_size = lo->bytesize; + set_blocksize(bdev, lo->blksize); + set_capacity(lo->disk, lo->bytesize >> 9); + return 0; + case NBD_DO_IT: + if (lo->pid) + return -EBUSY; + if (!lo->file) + return -EINVAL; + thread = kthread_create(nbd_thread, lo, lo->disk->disk_name); + if (IS_ERR(thread)) + return PTR_ERR(thread); + wake_up_process(thread); + error = nbd_do_it(lo); + kthread_stop(thread); + if (error) + return error; + sock_shutdown(lo, 1); + file = lo->file; + lo->file = NULL; + nbd_clear_que(lo); + printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name); + if (file) + fput(file); + lo->bytesize = 0; + bdev->bd_inode->i_size = 0; + set_capacity(lo->disk, 0); + if (max_part > 0) + ioctl_by_bdev(bdev, BLKRRPART, 0); + return lo->harderror; + case NBD_CLEAR_QUE: + /* + * This is for compatibility only. The queue is always cleared + * by NBD_DO_IT or NBD_CLEAR_SOCK. + */ + BUG_ON(!lo->sock && !list_empty(&lo->queue_head)); + return 0; + case NBD_PRINT_DEBUG: + printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n", + bdev->bd_disk->disk_name, + lo->queue_head.next, lo->queue_head.prev, + &lo->queue_head); + return 0; + } + return -EINVAL; +} + +static struct block_device_operations nbd_fops = +{ + .owner = THIS_MODULE, + .locked_ioctl = nbd_ioctl, +}; + +/* + * And here should be modules and kernel interface + * (Just smiley confuses emacs :-) + */ + +static int __init nbd_init(void) +{ + int err = -ENOMEM; + int i; + int part_shift; + + BUILD_BUG_ON(sizeof(struct nbd_request) != 28); + + if (max_part < 0) { + printk(KERN_CRIT "nbd: max_part must be >= 0\n"); + return -EINVAL; + } + + nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); + if (!nbd_dev) + return -ENOMEM; + + part_shift = 0; + if (max_part > 0) + part_shift = fls(max_part); + + for (i = 0; i < nbds_max; i++) { + struct gendisk *disk = alloc_disk(1 << part_shift); + elevator_t *old_e; + if (!disk) + goto out; + nbd_dev[i].disk = disk; + /* + * The new linux 2.5 block layer implementation requires + * every gendisk to have its very own request_queue struct. + * These structs are big so we dynamically allocate them. 
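+ * All of these queues share nbd_lock (see the comment at its + * definition above); below we also try to switch each queue to + * the "deadline" or "noop" elevator, since a seek-optimizing + * default elevator buys little for a network-backed device.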
+ */ + disk->queue = blk_init_queue(do_nbd_request, &nbd_lock); + if (!disk->queue) { + put_disk(disk); + goto out; + } + old_e = disk->queue->elevator; + if (elevator_init(disk->queue, "deadline") == 0 || + elevator_init(disk->queue, "noop") == 0) { + elevator_exit(old_e); + } + } + + if (register_blkdev(NBD_MAJOR, "nbd")) { + err = -EIO; + goto out; + } + + printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR); + dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags); + + for (i = 0; i < nbds_max; i++) { + struct gendisk *disk = nbd_dev[i].disk; + nbd_dev[i].file = NULL; + nbd_dev[i].magic = LO_MAGIC; + nbd_dev[i].flags = 0; + INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); + spin_lock_init(&nbd_dev[i].queue_lock); + INIT_LIST_HEAD(&nbd_dev[i].queue_head); + mutex_init(&nbd_dev[i].tx_lock); + init_waitqueue_head(&nbd_dev[i].active_wq); + init_waitqueue_head(&nbd_dev[i].waiting_wq); + nbd_dev[i].blksize = 1024; + nbd_dev[i].bytesize = 0; + disk->major = NBD_MAJOR; + disk->first_minor = i << part_shift; + disk->fops = &nbd_fops; + disk->private_data = &nbd_dev[i]; + sprintf(disk->disk_name, "nbd%d", i); + set_capacity(disk, 0); + add_disk(disk); + } + + return 0; +out: + while (i--) { + blk_cleanup_queue(nbd_dev[i].disk->queue); + put_disk(nbd_dev[i].disk); + } + kfree(nbd_dev); + return err; +} + +static void __exit nbd_cleanup(void) +{ + int i; + for (i = 0; i < nbds_max; i++) { + struct gendisk *disk = nbd_dev[i].disk; + nbd_dev[i].magic = 0; + if (disk) { + del_gendisk(disk); + blk_cleanup_queue(disk->queue); + put_disk(disk); + } + } + unregister_blkdev(NBD_MAJOR, "nbd"); + kfree(nbd_dev); + printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR); +} + +module_init(nbd_init); +module_exit(nbd_cleanup); + +MODULE_DESCRIPTION("Network Block Device"); +MODULE_LICENSE("GPL"); + +module_param(nbds_max, int, 0444); +MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)"); +module_param(max_part, int, 0444); +MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)"); +#ifndef NDEBUG +module_param(debugflags, int, 0644); +MODULE_PARM_DESC(debugflags, "flags for controlling debug output"); +#endif diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig new file mode 100644 index 0000000..28cf308 --- /dev/null +++ b/drivers/block/paride/Kconfig @@ -0,0 +1,300 @@ +# +# PARIDE configuration +# +# PARIDE doesn't need PARPORT, but if PARPORT is configured as a module, +# PARIDE must also be a module. +# PARIDE only supports PC style parports. Tough for USB or other parports... + +comment "Parallel IDE high-level drivers" + depends on PARIDE + +config PARIDE_PD + tristate "Parallel port IDE disks" + depends on PARIDE + help + This option enables the high-level driver for IDE-type disk devices + connected through a parallel port. If you chose to build PARIDE + support into your kernel, you may answer Y here to build in the + parallel port IDE driver, otherwise you should answer M to build + it as a loadable module. The module will be called pd. You + must also have at least one parallel port protocol driver in your + system. Among the devices supported by this driver are the SyQuest + EZ-135, EZ-230 and SparQ drives, the Avatar Shark and the backpack + hard drives from MicroSolutions. + +config PARIDE_PCD + tristate "Parallel port ATAPI CD-ROMs" + depends on PARIDE + ---help--- + This option enables the high-level driver for ATAPI CD-ROM devices + connected through a parallel port. 
If you chose to build PARIDE + support into your kernel, you may answer Y here to build in the + parallel port ATAPI CD-ROM driver, otherwise you should answer M to + build it as a loadable module. The module will be called pcd. You + must also have at least one parallel port protocol driver in your + system. Among the devices supported by this driver are the + MicroSolutions backpack CD-ROM drives and the Freecom Power CD. If + you have such a CD-ROM drive, you should also say Y or M to "ISO + 9660 CD-ROM file system support" below, because that's the file + system used on CD-ROMs. + +config PARIDE_PF + tristate "Parallel port ATAPI disks" + depends on PARIDE + help + This option enables the high-level driver for ATAPI disk devices + connected through a parallel port. If you chose to build PARIDE + support into your kernel, you may answer Y here to build in the + parallel port ATAPI disk driver, otherwise you should answer M + to build it as a loadable module. The module will be called pf. + You must also have at least one parallel port protocol driver in + your system. Among the devices supported by this driver are the + MicroSolutions backpack PD/CD drive and the Imation Superdisk + LS-120 drive. + +config PARIDE_PT + tristate "Parallel port ATAPI tapes" + depends on PARIDE + help + This option enables the high-level driver for ATAPI tape devices + connected through a parallel port. If you chose to build PARIDE + support into your kernel, you may answer Y here to build in the + parallel port ATAPI disk driver, otherwise you should answer M + to build it as a loadable module. The module will be called pt. + You must also have at least one parallel port protocol driver in + your system. Among the devices supported by this driver is the + parallel port version of the HP 5GB drive. + +config PARIDE_PG + tristate "Parallel port generic ATAPI devices" + depends on PARIDE + ---help--- + This option enables a special high-level driver for generic ATAPI + devices connected through a parallel port. The driver allows user + programs, such as cdrtools, to send ATAPI commands directly to a + device. + + If you chose to build PARIDE support into your kernel, you may + answer Y here to build in the parallel port generic ATAPI driver, + otherwise you should answer M to build it as a loadable module. The + module will be called pg. + + You must also have at least one parallel port protocol driver in + your system. + + This driver implements an API loosely related to the generic SCSI + driver. See <file:include/linux/pg.h> for details. + + You can obtain the most recent version of cdrtools from + <ftp://ftp.berlios.de/pub/cdrecord/>. Versions 1.6.1a3 and + later fully support this driver. + +comment "Parallel IDE protocol modules" + depends on PARIDE + +config PARIDE_ATEN + tristate "ATEN EH-100 protocol" + depends on PARIDE + help + This option enables support for the ATEN EH-100 parallel port IDE + protocol. This protocol is used in some inexpensive low performance + parallel port kits made in Hong Kong. If you chose to build PARIDE + support into your kernel, you may answer Y here to build in the + protocol driver, otherwise you should answer M to build it as a + loadable module. The module will be called aten. You must also + have a high-level driver for the type of device that you want to + support.
+ +config PARIDE_BPCK + tristate "MicroSolutions backpack (Series 5) protocol" + depends on PARIDE + ---help--- + This option enables support for the Micro Solutions BACKPACK + parallel port Series 5 IDE protocol. (Most BACKPACK drives made + before 1999 were Series 5) Series 5 drives will NOT always have the + Series noted on the bottom of the drive. Series 6 drives will. + + In other words, if your BACKPACK drive doesn't say "Series 6" on the + bottom, enable this option. + + If you chose to build PARIDE support into your kernel, you may + answer Y here to build in the protocol driver, otherwise you should + answer M to build it as a loadable module. The module will be + called bpck. You must also have a high-level driver for the type + of device that you want to support. + +config PARIDE_BPCK6 + tristate "MicroSolutions backpack (Series 6) protocol" + depends on PARIDE && !64BIT + ---help--- + This option enables support for the Micro Solutions BACKPACK + parallel port Series 6 IDE protocol. (Most BACKPACK drives made + after 1999 were Series 6) Series 6 drives will have the Series noted + on the bottom of the drive. Series 5 drives don't always have it + noted. + + In other words, if your BACKPACK drive says "Series 6" on the + bottom, enable this option. + + If you chose to build PARIDE support into your kernel, you may + answer Y here to build in the protocol driver, otherwise you should + answer M to build it as a loadable module. The module will be + called bpck6. You must also have a high-level driver for the type + of device that you want to support. + +config PARIDE_COMM + tristate "DataStor Commuter protocol" + depends on PARIDE + help + This option enables support for the Commuter parallel port IDE + protocol from DataStor. If you chose to build PARIDE support + into your kernel, you may answer Y here to build in the protocol + driver, otherwise you should answer M to build it as a loadable + module. The module will be called comm. You must also have + a high-level driver for the type of device that you want to support. + +config PARIDE_DSTR + tristate "DataStor EP-2000 protocol" + depends on PARIDE + help + This option enables support for the EP-2000 parallel port IDE + protocol from DataStor. If you chose to build PARIDE support + into your kernel, you may answer Y here to build in the protocol + driver, otherwise you should answer M to build it as a loadable + module. The module will be called dstr. You must also have + a high-level driver for the type of device that you want to support. + +config PARIDE_FIT2 + tristate "FIT TD-2000 protocol" + depends on PARIDE + help + This option enables support for the TD-2000 parallel port IDE + protocol from Fidelity International Technology. This is a simple + (low speed) adapter that is used in some portable hard drives. If + you chose to build PARIDE support into your kernel, you may answer Y + here to build in the protocol driver, otherwise you should answer M + to build it as a loadable module. The module will be called fit2. + You must also have a high-level driver for the type of device that + you want to support. + +config PARIDE_FIT3 + tristate "FIT TD-3000 protocol" + depends on PARIDE + help + This option enables support for the TD-3000 parallel port IDE + protocol from Fidelity International Technology. This protocol is + used in newer models of their portable disk, CD-ROM and PD/CD + devices.
If you chose to build PARIDE support into your kernel, you + may answer Y here to build in the protocol driver, otherwise you + should answer M to build it as a loadable module. The module will be + called fit3. You must also have a high-level driver for the type + of device that you want to support. + +config PARIDE_EPAT + tristate "Shuttle EPAT/EPEZ protocol" + depends on PARIDE + help + This option enables support for the EPAT parallel port IDE protocol. + EPAT is a parallel port IDE adapter manufactured by Shuttle + Technology and widely used in devices from major vendors such as + Hewlett-Packard, SyQuest, Imation and Avatar. If you chose to build + PARIDE support into your kernel, you may answer Y here to build in + the protocol driver, otherwise you should answer M to build it as a + loadable module. The module will be called epat. You must also + have a high-level driver for the type of device that you want to + support. + +config PARIDE_EPATC8 + bool "Support c7/c8 chips (EXPERIMENTAL)" + depends on PARIDE_EPAT && EXPERIMENTAL + help + This option enables support for the newer Shuttle EP1284 (aka c7 and + c8) chip. You need this if you are using any recent Imation SuperDisk + (LS-120) drive. + +config PARIDE_EPIA + tristate "Shuttle EPIA protocol" + depends on PARIDE + help + This option enables support for the (obsolete) EPIA parallel port + IDE protocol from Shuttle Technology. This adapter can still be + found in some no-name kits. If you chose to build PARIDE support + into your kernel, you may answer Y here to build in the protocol + driver, otherwise you should answer M to build it as a loadable + module. The module will be called epia. You must also have a + high-level driver for the type of device that you want to support. + +config PARIDE_FRIQ + tristate "Freecom IQ ASIC-2 protocol" + depends on PARIDE + help + This option enables support for version 2 of the Freecom IQ parallel + port IDE adapter. This adapter is used by the Maxell Superdisk + drive. If you chose to build PARIDE support into your kernel, you + may answer Y here to build in the protocol driver, otherwise you + should answer M to build it as a loadable module. The module will be + called friq. You must also have a high-level driver for the type + of device that you want to support. + +config PARIDE_FRPW + tristate "FreeCom power protocol" + depends on PARIDE + help + This option enables support for the Freecom power parallel port IDE + protocol. If you chose to build PARIDE support into your kernel, you + may answer Y here to build in the protocol driver, otherwise you + should answer M to build it as a loadable module. The module will be + called frpw. You must also have a high-level driver for the type + of device that you want to support. + +config PARIDE_KBIC + tristate "KingByte KBIC-951A/971A protocols" + depends on PARIDE + help + This option enables support for the KBIC-951A and KBIC-971A parallel + port IDE protocols from KingByte Information Corp. KingByte's + adapters appear in many no-name portable disk and CD-ROM products, + especially in Europe. If you chose to build PARIDE support into your + kernel, you may answer Y here to build in the protocol driver, + otherwise you should answer M to build it as a loadable module. The + module will be called kbic. You must also have a high-level driver + for the type of device that you want to support. 
+ +config PARIDE_KTTI + tristate "KT PHd protocol" + depends on PARIDE + help + This option enables support for the "PHd" parallel port IDE protocol + from KT Technology. This is a simple (low speed) adapter that is + used in some 2.5" portable hard drives. If you chose to build PARIDE + support into your kernel, you may answer Y here to build in the + protocol driver, otherwise you should answer M to build it as a + loadable module. The module will be called ktti. You must also + have a high-level driver for the type of device that you want to + support. + +config PARIDE_ON20 + tristate "OnSpec 90c20 protocol" + depends on PARIDE + help + This option enables support for the (obsolete) 90c20 parallel port + IDE protocol from OnSpec (often marketed under the ValuStore brand + name). If you chose to build PARIDE support into your kernel, you + may answer Y here to build in the protocol driver, otherwise you + should answer M to build it as a loadable module. The module will + be called on20. You must also have a high-level driver for the + type of device that you want to support. + +config PARIDE_ON26 + tristate "OnSpec 90c26 protocol" + depends on PARIDE + help + This option enables support for the 90c26 parallel port IDE protocol + from OnSpec Electronics (often marketed under the ValuStore brand + name). If you chose to build PARIDE support into your kernel, you + may answer Y here to build in the protocol driver, otherwise you + should answer M to build it as a loadable module. The module will be + called on26. You must also have a high-level driver for the type + of device that you want to support. + +# diff --git a/drivers/block/paride/Makefile b/drivers/block/paride/Makefile new file mode 100644 index 0000000..a539e00 --- /dev/null +++ b/drivers/block/paride/Makefile @@ -0,0 +1,28 @@ +# +# Makefile for Parallel port IDE device drivers. +# +# 7 October 2000, Bartlomiej Zolnierkiewicz <bkz@linux-ide.org> +# Rewritten to use lists instead of if-statements. +# + +obj-$(CONFIG_PARIDE) += paride.o +obj-$(CONFIG_PARIDE_ATEN) += aten.o +obj-$(CONFIG_PARIDE_BPCK) += bpck.o +obj-$(CONFIG_PARIDE_COMM) += comm.o +obj-$(CONFIG_PARIDE_DSTR) += dstr.o +obj-$(CONFIG_PARIDE_KBIC) += kbic.o +obj-$(CONFIG_PARIDE_EPAT) += epat.o +obj-$(CONFIG_PARIDE_EPIA) += epia.o +obj-$(CONFIG_PARIDE_FRPW) += frpw.o +obj-$(CONFIG_PARIDE_FRIQ) += friq.o +obj-$(CONFIG_PARIDE_FIT2) += fit2.o +obj-$(CONFIG_PARIDE_FIT3) += fit3.o +obj-$(CONFIG_PARIDE_ON20) += on20.o +obj-$(CONFIG_PARIDE_ON26) += on26.o +obj-$(CONFIG_PARIDE_KTTI) += ktti.o +obj-$(CONFIG_PARIDE_BPCK6) += bpck6.o +obj-$(CONFIG_PARIDE_PD) += pd.o +obj-$(CONFIG_PARIDE_PCD) += pcd.o +obj-$(CONFIG_PARIDE_PF) += pf.o +obj-$(CONFIG_PARIDE_PT) += pt.o +obj-$(CONFIG_PARIDE_PG) += pg.o diff --git a/drivers/block/paride/Transition-notes b/drivers/block/paride/Transition-notes new file mode 100644 index 0000000..7037490 --- /dev/null +++ b/drivers/block/paride/Transition-notes @@ -0,0 +1,128 @@ +Lemma 1: + If ps_tq is scheduled, ps_tq_active is 1. ps_tq_int() can be called + only when ps_tq_active is 1. +Proof: All assignments to ps_tq_active and all scheduling of ps_tq happen + under ps_spinlock. There are three places where that can happen: + one in ps_set_intr() (A) and two in ps_tq_int() (B and C). + Consider the sequence of these events. A can not be preceded by + anything except B, since it is under if (!ps_tq_active) under + ps_spinlock. C is always preceded by B, since we can't reach it + other than through B and we don't drop ps_spinlock between them.
IOW, the sequence is A?(BA|BC|B)*. OTOH, number of B can not exceed + the sum of numbers of A and C, since each call of ps_tq_int() is + the result of ps_tq execution. Therefore, the sequence starts with + A and each B is preceded by either A or C. Moments when we enter + ps_tq_int() are sandwiched between {A,C} and B in that sequence, + since at any time number of B can not exceed the number of these + moments which, in turn, can not exceed the number of A and C. + In other words, the sequence of events is (A or C set ps_tq_active to + 1 and schedule ps_tq, ps_tq is executed, ps_tq_int() is entered, + B resets ps_tq_active)*. + + +Consider the following area: + * in do_pd_request1(): to calls of pi_do_claimed() and return in + case when pd_req is NULL. + * in next_request(): to call of do_pd_request1() + * in do_pd_read(): to call of ps_set_intr() + * in do_pd_read_start(): to calls of pi_do_claimed(), next_request() +and ps_set_intr() + * in do_pd_read_drq(): to calls of pi_do_claimed() and next_request() + * in do_pd_write(): to call of ps_set_intr() + * in do_pd_write_start(): to calls of pi_do_claimed(), next_request() +and ps_set_intr() + * in do_pd_write_done(): to calls of pi_do_claimed() and next_request() + * in ps_set_intr(): to check for ps_tq_active and to scheduling + ps_tq if ps_tq_active was 0. + * in ps_tq_int(): from the moment when we get ps_spinlock() to the + return, call of con() or scheduling ps_tq. + * in pi_schedule_claimed() when called from pi_do_claimed() called from + pd.c, everything until returning 1 or setting ->claim_cont + on the path that returns 0 + * in pi_do_claimed() when called from pd.c, everything until the call + of pi_do_claimed() plus everything until the call of cont() if + pi_do_claimed() has returned 1. + * in pi_wake_up() called for PIA that belongs to pd.c, everything from + the moment when pi_spinlock has been acquired. + +Lemma 2: + 1) at any time at most one thread of execution can be in that area or + be preempted there. + 2) When there is such a thread, pd_busy is set or pd_lock is held by + that thread. + 3) When there is such a thread, ps_tq_active is 0 or ps_spinlock is + held by that thread. + 4) When there is such a thread, all PIA belonging to pd.c have NULL + ->claim_cont or pi_spinlock is held by thread in question. + +Proof: consider the first moment when the above is not true. + +(1) can become not true if some thread enters that area while another is there. + a) do_pd_request1() can be called from next_request() or do_pd_request() + In the first case the thread was already in the area. In the second, + the thread was holding pd_lock and found pd_busy not set, which would + mean that (2) was already not true. + b) ps_set_intr() and pi_schedule_claimed() can be called only from the + area. + c) pi_do_claimed() is called by pd.c only from the area. + d) ps_tq_int() can enter the area only when the thread is holding + ps_spinlock and ps_tq_active is 1 (due to Lemma 1). It means that + (3) was already not true. + e) do_pd_{read,write}* could be called only from the area. The only + case that needs consideration is call from pi_wake_up() and there + we would have to be called for the PIA that got ->claimed_cont + from pd.c. That could happen only if pi_do_claimed() had been + called from pd.c for that PIA, which happens only for PIA belonging + to pd.c. + f) pi_wake_up() can enter the area only when the thread is holding + pi_spinlock and ->claimed_cont is non-NULL for PIA belonging to + pd.c.
It means that (4) was already not true. + +(2) can become not true only when pd_lock is released by the thread in question. + Indeed, pd_busy is reset only in the area and the thread that resets + it is holding pd_lock. The only place within the area where we + release pd_lock is in pd_next_buf() (called from within the area). + But that code does not reset pd_busy, so pd_busy would have to be + 0 when pd_next_buf() had acquired pd_lock. If it became 0 while + we were acquiring the lock, (1) would be already false, since + the thread that had reset it would be in the area simultaneously. + If it was 0 before we tried to acquire pd_lock, (2) would be + already false. + +For similar reasons, (3) can become not true only when ps_spinlock is released +by the thread in question. However, all such places within the area are right +after resetting ps_tq_active to 0. + +(4) is done the same way - all places where we release pi_spinlock within +the area are either after resetting ->claimed_cont to NULL while holding +pi_spinlock, or after not touching ->claimed_cont since acquiring pi_spinlock +also in the area. The only place where ->claimed_cont is made non-NULL is +in the area, under pi_spinlock and we do not release it until after leaving +the area. + +QED. + + +Corollary 1: ps_tq_active can be killed. Indeed, the only place where we +check its value is in ps_set_intr() and if it had been non-zero at that +point, we would have violated either (2.1) (if it was set while ps_set_intr() +was acquiring ps_spinlock) or (2.3) (if it was set when we started to +acquire ps_spinlock). + +Corollary 2: ps_spinlock can be killed. Indeed, Lemma 1 and Lemma 2 show +that the only possible contention is between scheduling ps_tq followed by +immediate release of spinlock and beginning of execution of ps_tq on +another CPU. + +Corollary 3: assignment to pd_busy in do_pd_read_start() and do_pd_write_start() +can be killed. Indeed, we are not holding pd_lock and thus pd_busy is already +1 here. + +Corollary 4: in ps_tq_int() uses of con can be replaced with uses of +ps_continuation, since the latter is changed only from the area. +We don't need to reset it to NULL, since we are guaranteed that there +will be a call of ps_set_intr() before we look at ps_continuation again. +We can remove the check for ps_continuation being NULL for the same +reason - the value is guaranteed to be set by the last ps_set_intr() and +we never pass it NULL. Assignments at the beginning of ps_set_intr() +can be moved to the callers as long as they remain within the area. diff --git a/drivers/block/paride/aten.c b/drivers/block/paride/aten.c new file mode 100644 index 0000000..2695465 --- /dev/null +++ b/drivers/block/paride/aten.c @@ -0,0 +1,162 @@ +/* + aten.c (c) 1997-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + aten.c is a low-level protocol driver for the ATEN EH-100 + parallel port adapter. The EH-100 supports 4-bit and 8-bit + modes only. There is also an EH-132 which supports EPP mode + transfers. The EH-132 is not yet supported.
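+ + A note on 4-bit mode (added explanation; the constants come from + the j44() macro below): each data byte is read back as two nibbles + on the status lines and recombined as (((a>>4)&0x0f)|(b&0xf0)); the + final ^0x88 flips the two bits derived from the hardware-inverted + BUSY line (bits 3 and 7 of the assembled byte).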
+ +*/ + +/* Changes: + + 1.01 GRG 1998.05.05 init_proto, release_proto + +*/ + +#define ATEN_VERSION "1.01" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/wait.h> +#include <linux/types.h> +#include <asm/io.h> + +#include "paride.h" + +#define j44(a,b) ((((a>>4)&0x0f)|(b&0xf0))^0x88) + +/* cont = 0 - access the IDE register file + cont = 1 - access the IDE command set +*/ + +static int cont_map[2] = { 0x08, 0x20 }; + +static void aten_write_regr( PIA *pi, int cont, int regr, int val) + +{ int r; + + r = regr + cont_map[cont] + 0x80; + + w0(r); w2(0xe); w2(6); w0(val); w2(7); w2(6); w2(0xc); +} + +static int aten_read_regr( PIA *pi, int cont, int regr ) + +{ int a, b, r; + + r = regr + cont_map[cont] + 0x40; + + switch (pi->mode) { + + case 0: w0(r); w2(0xe); w2(6); + w2(7); w2(6); w2(0); + a = r1(); w0(0x10); b = r1(); w2(0xc); + return j44(a,b); + + case 1: r |= 0x10; + w0(r); w2(0xe); w2(6); w0(0xff); + w2(0x27); w2(0x26); w2(0x20); + a = r0(); + w2(0x26); w2(0xc); + return a; + } + return -1; +} + +static void aten_read_block( PIA *pi, char * buf, int count ) + +{ int k, a, b, c, d; + + switch (pi->mode) { + + case 0: w0(0x48); w2(0xe); w2(6); + for (k=0;k<count/2;k++) { + w2(7); w2(6); w2(2); + a = r1(); w0(0x58); b = r1(); + w2(0); d = r1(); w0(0x48); c = r1(); + buf[2*k] = j44(c,d); + buf[2*k+1] = j44(a,b); + } + w2(0xc); + break; + + case 1: w0(0x58); w2(0xe); w2(6); + for (k=0;k<count/2;k++) { + w2(0x27); w2(0x26); w2(0x22); + a = r0(); w2(0x20); b = r0(); + buf[2*k] = b; buf[2*k+1] = a; + } + w2(0x26); w2(0xc); + break; + } +} + +static void aten_write_block( PIA *pi, char * buf, int count ) + +{ int k; + + w0(0x88); w2(0xe); w2(6); + for (k=0;k<count/2;k++) { + w0(buf[2*k+1]); w2(0xe); w2(6); + w0(buf[2*k]); w2(7); w2(6); + } + w2(0xc); +} + +static void aten_connect ( PIA *pi ) + +{ pi->saved_r0 = r0(); + pi->saved_r2 = r2(); + w2(0xc); +} + +static void aten_disconnect ( PIA *pi ) + +{ w0(pi->saved_r0); + w2(pi->saved_r2); +} + +static void aten_log_adapter( PIA *pi, char * scratch, int verbose ) + +{ char *mode_string[2] = {"4-bit","8-bit"}; + + printk("%s: aten %s, ATEN EH-100 at 0x%x, ", + pi->device,ATEN_VERSION,pi->port); + printk("mode %d (%s), delay %d\n",pi->mode, + mode_string[pi->mode],pi->delay); + +} + +static struct pi_protocol aten = { + .owner = THIS_MODULE, + .name = "aten", + .max_mode = 2, + .epp_first = 2, + .default_delay = 1, + .max_units = 1, + .write_regr = aten_write_regr, + .read_regr = aten_read_regr, + .write_block = aten_write_block, + .read_block = aten_read_block, + .connect = aten_connect, + .disconnect = aten_disconnect, + .log_adapter = aten_log_adapter, +}; + +static int __init aten_init(void) +{ + return paride_register(&aten); +} + +static void __exit aten_exit(void) +{ + paride_unregister( &aten ); +} + +MODULE_LICENSE("GPL"); +module_init(aten_init) +module_exit(aten_exit) diff --git a/drivers/block/paride/bpck.c b/drivers/block/paride/bpck.c new file mode 100644 index 0000000..4f27e73 --- /dev/null +++ b/drivers/block/paride/bpck.c @@ -0,0 +1,477 @@ +/* + bpck.c (c) 1996-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + bpck.c is a low-level protocol driver for the MicroSolutions + "backpack" parallel port IDE adapter. 
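+ + The adapter supports five transfer modes (see bpck_log_adapter() + at the bottom of this file): 4-bit and 8-bit nibble/byte modes + plus EPP transfers 8, 16 and 32 bits wide.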
+ +*/ + +/* Changes: + + 1.01 GRG 1998.05.05 init_proto, release_proto, pi->delay + 1.02 GRG 1998.08.15 default pi->delay returned to 4 + +*/ + +#define BPCK_VERSION "1.02" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <asm/io.h> + +#include "paride.h" + +#undef r2 +#undef w2 + +#define PC pi->private +#define r2() (PC=(in_p(2) & 0xff)) +#define w2(byte) {out_p(2,byte); PC = byte;} +#define t2(pat) {PC ^= pat; out_p(2,PC);} +#define e2() {PC &= 0xfe; out_p(2,PC);} +#define o2() {PC |= 1; out_p(2,PC);} + +#define j44(l,h) (((l>>3)&0x7)|((l>>4)&0x8)|((h<<1)&0x70)|(h&0x80)) + +/* cont = 0 - access the IDE register file + cont = 1 - access the IDE command set + cont = 2 - use internal bpck register addressing +*/ + +static int cont_map[3] = { 0x40, 0x48, 0 }; + +static int bpck_read_regr( PIA *pi, int cont, int regr ) + +{ int r, l, h; + + r = regr + cont_map[cont]; + + switch (pi->mode) { + + case 0: w0(r & 0xf); w0(r); t2(2); t2(4); + l = r1(); + t2(4); + h = r1(); + return j44(l,h); + + case 1: w0(r & 0xf); w0(r); t2(2); + e2(); t2(0x20); + t2(4); h = r0(); + t2(1); t2(0x20); + return h; + + case 2: + case 3: + case 4: w0(r); w2(9); w2(0); w2(0x20); + h = r4(); + w2(0); + return h; + + } + return -1; +} + +static void bpck_write_regr( PIA *pi, int cont, int regr, int val ) + +{ int r; + + r = regr + cont_map[cont]; + + switch (pi->mode) { + + case 0: + case 1: w0(r); + t2(2); + w0(val); + o2(); t2(4); t2(1); + break; + + case 2: + case 3: + case 4: w0(r); w2(9); w2(0); + w0(val); w2(1); w2(3); w2(0); + break; + + } +} + +/* These macros access the bpck registers in native addressing */ + +#define WR(r,v) bpck_write_regr(pi,2,r,v) +#define RR(r) (bpck_read_regr(pi,2,r)) + +static void bpck_write_block( PIA *pi, char * buf, int count ) + +{ int i; + + switch (pi->mode) { + + case 0: WR(4,0x40); + w0(0x40); t2(2); t2(1); + for (i=0;i<count;i++) { w0(buf[i]); t2(4); } + WR(4,0); + break; + + case 1: WR(4,0x50); + w0(0x40); t2(2); t2(1); + for (i=0;i<count;i++) { w0(buf[i]); t2(4); } + WR(4,0x10); + break; + + case 2: WR(4,0x48); + w0(0x40); w2(9); w2(0); w2(1); + for (i=0;i<count;i++) w4(buf[i]); + w2(0); + WR(4,8); + break; + + case 3: WR(4,0x48); + w0(0x40); w2(9); w2(0); w2(1); + for (i=0;i<count/2;i++) w4w(((u16 *)buf)[i]); + w2(0); + WR(4,8); + break; + + case 4: WR(4,0x48); + w0(0x40); w2(9); w2(0); w2(1); + for (i=0;i<count/4;i++) w4l(((u32 *)buf)[i]); + w2(0); + WR(4,8); + break; + } +} + +static void bpck_read_block( PIA *pi, char * buf, int count ) + +{ int i, l, h; + + switch (pi->mode) { + + case 0: WR(4,0x40); + w0(0x40); t2(2); + for (i=0;i<count;i++) { + t2(4); l = r1(); + t2(4); h = r1(); + buf[i] = j44(l,h); + } + WR(4,0); + break; + + case 1: WR(4,0x50); + w0(0x40); t2(2); t2(0x20); + for(i=0;i<count;i++) { t2(4); buf[i] = r0(); } + t2(1); t2(0x20); + WR(4,0x10); + break; + + case 2: WR(4,0x48); + w0(0x40); w2(9); w2(0); w2(0x20); + for (i=0;i<count;i++) buf[i] = r4(); + w2(0); + WR(4,8); + break; + + case 3: WR(4,0x48); + w0(0x40); w2(9); w2(0); w2(0x20); + for (i=0;i<count/2;i++) ((u16 *)buf)[i] = r4w(); + w2(0); + WR(4,8); + break; + + case 4: WR(4,0x48); + w0(0x40); w2(9); w2(0); w2(0x20); + for (i=0;i<count/4;i++) ((u32 *)buf)[i] = r4l(); + w2(0); + WR(4,8); + break; + + } +} + +static int bpck_probe_unit ( PIA *pi ) + +{ int o1, o0, f7, id; + int t, s; + + id = pi->unit; + s = 0; + w2(4); w2(0xe); r2(); t2(2); + o1 = r1()&0xf8; + o0 = r0(); + 
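+	/* The select handshake below mirrors bpck_connect(): write the + complement of the unit number, then the unit number itself, and + toggle the control strobes (descriptive note). */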
w0(255-id); w2(4); w0(id); + t2(8); t2(8); t2(8); + t2(2); t = r1()&0xf8; + f7 = ((id % 8) == 7); + if ((f7) || (t != o1)) { t2(2); s = r1()&0xf8; } + if ((t == o1) && ((!f7) || (s == o1))) { + w2(0x4c); w0(o0); + return 0; + } + t2(8); w0(0); t2(2); w2(0x4c); w0(o0); + return 1; +} + +static void bpck_connect ( PIA *pi ) + +{ pi->saved_r0 = r0(); + w0(0xff-pi->unit); w2(4); w0(pi->unit); + t2(8); t2(8); t2(8); + t2(2); t2(2); + + switch (pi->mode) { + + case 0: t2(8); WR(4,0); + break; + + case 1: t2(8); WR(4,0x10); + break; + + case 2: + case 3: + case 4: w2(0); WR(4,8); + break; + + } + + WR(5,8); + + if (pi->devtype == PI_PCD) { + WR(0x46,0x10); /* fiddle with ESS logic ??? */ + WR(0x4c,0x38); + WR(0x4d,0x88); + WR(0x46,0xa0); + WR(0x41,0); + WR(0x4e,8); + } +} + +static void bpck_disconnect ( PIA *pi ) + +{ w0(0); + if (pi->mode >= 2) { w2(9); w2(0); } else t2(2); + w2(0x4c); w0(pi->saved_r0); +} + +static void bpck_force_spp ( PIA *pi ) + +/* This fakes the EPP protocol to turn off EPP ... */ + +{ pi->saved_r0 = r0(); + w0(0xff-pi->unit); w2(4); w0(pi->unit); + t2(8); t2(8); t2(8); + t2(2); t2(2); + + w2(0); + w0(4); w2(9); w2(0); + w0(0); w2(1); w2(3); w2(0); + w0(0); w2(9); w2(0); + w2(0x4c); w0(pi->saved_r0); +} + +#define TEST_LEN 16 + +static int bpck_test_proto( PIA *pi, char * scratch, int verbose ) + +{ int i, e, l, h, om; + char buf[TEST_LEN]; + + bpck_force_spp(pi); + + switch (pi->mode) { + + case 0: bpck_connect(pi); + WR(0x13,0x7f); + w0(0x13); t2(2); + for(i=0;i<TEST_LEN;i++) { + t2(4); l = r1(); + t2(4); h = r1(); + buf[i] = j44(l,h); + } + bpck_disconnect(pi); + break; + + case 1: bpck_connect(pi); + WR(0x13,0x7f); + w0(0x13); t2(2); t2(0x20); + for(i=0;i<TEST_LEN;i++) { t2(4); buf[i] = r0(); } + t2(1); t2(0x20); + bpck_disconnect(pi); + break; + + case 2: + case 3: + case 4: om = pi->mode; + pi->mode = 0; + bpck_connect(pi); + WR(7,3); + WR(4,8); + bpck_disconnect(pi); + + pi->mode = om; + bpck_connect(pi); + w0(0x13); w2(9); w2(1); w0(0); w2(3); w2(0); w2(0xe0); + + switch (pi->mode) { + case 2: for (i=0;i<TEST_LEN;i++) buf[i] = r4(); + break; + case 3: for (i=0;i<TEST_LEN/2;i++) ((u16 *)buf)[i] = r4w(); + break; + case 4: for (i=0;i<TEST_LEN/4;i++) ((u32 *)buf)[i] = r4l(); + break; + } + + w2(0); + WR(7,0); + bpck_disconnect(pi); + + break; + + } + + if (verbose) { + printk("%s: bpck: 0x%x unit %d mode %d: ", + pi->device,pi->port,pi->unit,pi->mode); + for (i=0;i<TEST_LEN;i++) printk("%3d",buf[i]); + printk("\n"); + } + + e = 0; + for (i=0;i<TEST_LEN;i++) if (buf[i] != (i+1)) e++; + return e; +} + +static void bpck_read_eeprom ( PIA *pi, char * buf ) + +{ int i,j,k,n,p,v,f, om, od; + + bpck_force_spp(pi); + + om = pi->mode; od = pi->delay; + pi->mode = 0; pi->delay = 6; + + bpck_connect(pi); + + n = 0; + WR(4,0); + for (i=0;i<64;i++) { + WR(6,8); + WR(6,0xc); + p = 0x100; + for (k=0;k<9;k++) { + f = (((i + 0x180) & p) != 0) * 2; + WR(6,f+0xc); + WR(6,f+0xd); + WR(6,f+0xc); + p = (p >> 1); + } + for (j=0;j<2;j++) { + v = 0; + for (k=0;k<8;k++) { + WR(6,0xc); + WR(6,0xd); + WR(6,0xc); + f = RR(0); + v = 2*v + (f == 0x84); + } + buf[2*i+1-j] = v; + } + } + WR(6,8); + WR(6,0); + WR(5,8); + + bpck_disconnect(pi); + + if (om >= 2) { + bpck_connect(pi); + WR(7,3); + WR(4,8); + bpck_disconnect(pi); + } + + pi->mode = om; pi->delay = od; +} + +static int bpck_test_port ( PIA *pi ) /* check for 8-bit port */ + +{ int i, r, m; + + w2(0x2c); i = r0(); w0(255-i); r = r0(); w0(i); + m = -1; + if (r == i) m = 2; + if (r == (255-i)) m = 0; + + w2(0xc); i = r0(); w0(255-i); r = 
r0(); w0(i); + if (r != (255-i)) m = -1; + + if (m == 0) { w2(6); w2(0xc); r = r0(); w0(0xaa); w0(r); w0(0xaa); } + if (m == 2) { w2(0x26); w2(0xc); } + + if (m == -1) return 0; + return 5; +} + +static void bpck_log_adapter( PIA *pi, char * scratch, int verbose ) + +{ char *mode_string[5] = { "4-bit","8-bit","EPP-8", + "EPP-16","EPP-32" }; + +#ifdef DUMP_EEPROM + int i; +#endif + + bpck_read_eeprom(pi,scratch); + +#ifdef DUMP_EEPROM + if (verbose) { + for(i=0;i<128;i++) + if ((scratch[i] < ' ') || (scratch[i] > '~')) + scratch[i] = '.'; + printk("%s: bpck EEPROM: %64.64s\n",pi->device,scratch); + printk("%s: %64.64s\n",pi->device,&scratch[64]); + } +#endif + + printk("%s: bpck %s, backpack %8.8s unit %d", + pi->device,BPCK_VERSION,&scratch[110],pi->unit); + printk(" at 0x%x, mode %d (%s), delay %d\n",pi->port, + pi->mode,mode_string[pi->mode],pi->delay); +} + +static struct pi_protocol bpck = { + .owner = THIS_MODULE, + .name = "bpck", + .max_mode = 5, + .epp_first = 2, + .default_delay = 4, + .max_units = 255, + .write_regr = bpck_write_regr, + .read_regr = bpck_read_regr, + .write_block = bpck_write_block, + .read_block = bpck_read_block, + .connect = bpck_connect, + .disconnect = bpck_disconnect, + .test_port = bpck_test_port, + .probe_unit = bpck_probe_unit, + .test_proto = bpck_test_proto, + .log_adapter = bpck_log_adapter, +}; + +static int __init bpck_init(void) +{ + return paride_register(&bpck); +} + +static void __exit bpck_exit(void) +{ + paride_unregister(&bpck); +} + +MODULE_LICENSE("GPL"); +module_init(bpck_init) +module_exit(bpck_exit) diff --git a/drivers/block/paride/bpck6.c b/drivers/block/paride/bpck6.c new file mode 100644 index 0000000..ad12452 --- /dev/null +++ b/drivers/block/paride/bpck6.c @@ -0,0 +1,268 @@ +/* + backpack.c (c) 2001 Micro Solutions Inc. 
+ Released under the terms of the GNU General Public license + + backpack.c is a low-level protocol driver for the Micro Solutions + "BACKPACK" parallel port IDE adapter + (Works on Series 6 drives) + + Written by: Ken Hahn (linux-dev@micro-solutions.com) + Clive Turvey (linux-dev@micro-solutions.com) + +*/ + +/* + This is Ken's linux wrapper for the PPC library + Version 1.0.0 is the backpack driver for which source is not available + Version 2.0.0 is the first to have source released + Version 2.0.1 is the "Cox-ified" source code + Version 2.0.2 - fixed version string usage, and made ppc functions static +*/ + + +/* PARAMETERS */ +static int verbose; /* set this to 1 to see debugging messages and whatnot */ + +#define BACKPACK_VERSION "2.0.2" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <asm/io.h> +#include <linux/parport.h> + +#include "ppc6lnx.c" +#include "paride.h" + + + +#define PPCSTRUCT(pi) ((Interface *)(pi->private)) + +/****************************************************************/ +/* + ATAPI CDROM DRIVE REGISTERS +*/ +#define ATAPI_DATA 0 /* data port */ +#define ATAPI_ERROR 1 /* error register (read) */ +#define ATAPI_FEATURES 1 /* feature register (write) */ +#define ATAPI_INT_REASON 2 /* interrupt reason register */ +#define ATAPI_COUNT_LOW 4 /* byte count register (low) */ +#define ATAPI_COUNT_HIGH 5 /* byte count register (high) */ +#define ATAPI_DRIVE_SEL 6 /* drive select register */ +#define ATAPI_STATUS 7 /* status port (read) */ +#define ATAPI_COMMAND 7 /* command port (write) */ +#define ATAPI_ALT_STATUS 0x0e /* alternate status reg (read) */ +#define ATAPI_DEVICE_CONTROL 0x0e /* device control (write) */ +/****************************************************************/ + +static int bpck6_read_regr(PIA *pi, int cont, int reg) +{ + unsigned int out; + + /* check for bad settings */ + if (reg<0 || reg>7 || cont<0 || cont>2) + { + return(-1); + } + out=ppc6_rd_port(PPCSTRUCT(pi),cont?reg|8:reg); + return(out); +} + +static void bpck6_write_regr(PIA *pi, int cont, int reg, int val) +{ + /* check for bad settings */ + if (reg>=0 && reg<=7 && cont>=0 && cont<=1) + { + ppc6_wr_port(PPCSTRUCT(pi),cont?reg|8:reg,(u8)val); + } +} + +static void bpck6_write_block( PIA *pi, char * buf, int len ) +{ + ppc6_wr_port16_blk(PPCSTRUCT(pi),ATAPI_DATA,buf,(u32)len>>1); +} + +static void bpck6_read_block( PIA *pi, char * buf, int len ) +{ + ppc6_rd_port16_blk(PPCSTRUCT(pi),ATAPI_DATA,buf,(u32)len>>1); +} + +static void bpck6_connect ( PIA *pi ) +{ + if(verbose) + { + printk(KERN_DEBUG "connect\n"); + } + + if(pi->mode >=2) + { + PPCSTRUCT(pi)->mode=4+pi->mode-2; + } + else if(pi->mode==1) + { + PPCSTRUCT(pi)->mode=3; + } + else + { + PPCSTRUCT(pi)->mode=1; + } + + ppc6_open(PPCSTRUCT(pi)); + ppc6_wr_extout(PPCSTRUCT(pi),0x3); +} + +static void bpck6_disconnect ( PIA *pi ) +{ + if(verbose) + { + printk("disconnect\n"); + } + ppc6_wr_extout(PPCSTRUCT(pi),0x0); + ppc6_close(PPCSTRUCT(pi)); +} + +static int bpck6_test_port ( PIA *pi ) /* check for 8-bit port */ +{ + if(verbose) + { + printk(KERN_DEBUG "PARPORT indicates modes=%x for lp=0x%lx\n", + ((struct pardevice*)(pi->pardev))->port->modes, + ((struct pardevice *)(pi->pardev))->port->base); + } + + /*copy over duplicate stuff.. 
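+	   (the ppc6 library keeps its own copy of the unit id and port
+	   base, so pi->unit and pi->port are mirrored into the Interface
+	   struct here)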
initialize state info*/ + PPCSTRUCT(pi)->ppc_id=pi->unit; + PPCSTRUCT(pi)->lpt_addr=pi->port; + + /* look at the parport device to see if what modes we can use */ + if(((struct pardevice *)(pi->pardev))->port->modes & + (PARPORT_MODE_EPP) + ) + { + return 5; /* Can do EPP*/ + } + else if(((struct pardevice *)(pi->pardev))->port->modes & + (PARPORT_MODE_TRISTATE) + ) + { + return 2; + } + else /*Just flat SPP*/ + { + return 1; + } +} + +static int bpck6_probe_unit ( PIA *pi ) +{ + int out; + + if(verbose) + { + printk(KERN_DEBUG "PROBE UNIT %x on port:%x\n",pi->unit,pi->port); + } + + /*SET PPC UNIT NUMBER*/ + PPCSTRUCT(pi)->ppc_id=pi->unit; + + /*LOWER DOWN TO UNIDIRECTIONAL*/ + PPCSTRUCT(pi)->mode=1; + + out=ppc6_open(PPCSTRUCT(pi)); + + if(verbose) + { + printk(KERN_DEBUG "ppc_open returned %2x\n",out); + } + + if(out) + { + ppc6_close(PPCSTRUCT(pi)); + if(verbose) + { + printk(KERN_DEBUG "leaving probe\n"); + } + return(1); + } + else + { + if(verbose) + { + printk(KERN_DEBUG "Failed open\n"); + } + return(0); + } +} + +static void bpck6_log_adapter( PIA *pi, char * scratch, int verbose ) +{ + char *mode_string[5]= + {"4-bit","8-bit","EPP-8","EPP-16","EPP-32"}; + + printk("%s: BACKPACK Protocol Driver V"BACKPACK_VERSION"\n",pi->device); + printk("%s: Copyright 2001 by Micro Solutions, Inc., DeKalb IL.\n",pi->device); + printk("%s: BACKPACK %s, Micro Solutions BACKPACK Drive at 0x%x\n", + pi->device,BACKPACK_VERSION,pi->port); + printk("%s: Unit: %d Mode:%d (%s) Delay %d\n",pi->device, + pi->unit,pi->mode,mode_string[pi->mode],pi->delay); +} + +static int bpck6_init_proto(PIA *pi) +{ + Interface *p = kzalloc(sizeof(Interface), GFP_KERNEL); + + if (p) { + pi->private = (unsigned long)p; + return 0; + } + + printk(KERN_ERR "%s: ERROR COULDN'T ALLOCATE MEMORY\n", pi->device); + return -1; +} + +static void bpck6_release_proto(PIA *pi) +{ + kfree((void *)(pi->private)); +} + +static struct pi_protocol bpck6 = { + .owner = THIS_MODULE, + .name = "bpck6", + .max_mode = 5, + .epp_first = 2, /* 2-5 use epp (need 8 ports) */ + .max_units = 255, + .write_regr = bpck6_write_regr, + .read_regr = bpck6_read_regr, + .write_block = bpck6_write_block, + .read_block = bpck6_read_block, + .connect = bpck6_connect, + .disconnect = bpck6_disconnect, + .test_port = bpck6_test_port, + .probe_unit = bpck6_probe_unit, + .log_adapter = bpck6_log_adapter, + .init_proto = bpck6_init_proto, + .release_proto = bpck6_release_proto, +}; + +static int __init bpck6_init(void) +{ + printk(KERN_INFO "bpck6: BACKPACK Protocol Driver V"BACKPACK_VERSION"\n"); + printk(KERN_INFO "bpck6: Copyright 2001 by Micro Solutions, Inc., DeKalb IL. USA\n"); + if(verbose) + printk(KERN_DEBUG "bpck6: verbose debug enabled.\n"); + return paride_register(&bpck6); +} + +static void __exit bpck6_exit(void) +{ + paride_unregister(&bpck6); +} + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Micro Solutions Inc."); +MODULE_DESCRIPTION("BACKPACK Protocol module, compatible with PARIDE"); +module_param(verbose, bool, 0644); +module_init(bpck6_init) +module_exit(bpck6_exit) diff --git a/drivers/block/paride/comm.c b/drivers/block/paride/comm.c new file mode 100644 index 0000000..9bcd354 --- /dev/null +++ b/drivers/block/paride/comm.c @@ -0,0 +1,218 @@ +/* + comm.c (c) 1997-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + comm.c is a low-level protocol driver for some older models + of the DataStor "Commuter" parallel to IDE adapter. 
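+
+   (Like the other modules in this directory, comm.c rebuilds each data
+   byte from two 4-bit status-port reads using its own j44() variant,
+   roughly:
+
+	l = r1();		-- low nybble
+	h = r1();		-- high nybble
+	byte = j44(l,h);	-- (((l>>3)&0x0f)|((h<<1)&0xf0))
+
+   The bit positions differ from adapter to adapter, which is why every
+   protocol module defines its own macro.)
+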
Some of + the parallel port devices marketed by Arista currently + use this adapter. +*/ + +/* Changes: + + 1.01 GRG 1998.05.05 init_proto, release_proto + +*/ + +#define COMM_VERSION "1.01" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <asm/io.h> + +#include "paride.h" + +/* mode codes: 0 nybble reads, 8-bit writes + 1 8-bit reads and writes + 2 8-bit EPP mode +*/ + +#define j44(a,b) (((a>>3)&0x0f)|((b<<1)&0xf0)) + +#define P1 w2(5);w2(0xd);w2(0xd);w2(5);w2(4); +#define P2 w2(5);w2(7);w2(7);w2(5);w2(4); + +/* cont = 0 - access the IDE register file + cont = 1 - access the IDE command set +*/ + +static int cont_map[2] = { 0x08, 0x10 }; + +static int comm_read_regr( PIA *pi, int cont, int regr ) + +{ int l, h, r; + + r = regr + cont_map[cont]; + + switch (pi->mode) { + + case 0: w0(r); P1; w0(0); + w2(6); l = r1(); w0(0x80); h = r1(); w2(4); + return j44(l,h); + + case 1: w0(r+0x20); P1; + w0(0); w2(0x26); h = r0(); w2(4); + return h; + + case 2: + case 3: + case 4: w3(r+0x20); (void)r1(); + w2(0x24); h = r4(); w2(4); + return h; + + } + return -1; +} + +static void comm_write_regr( PIA *pi, int cont, int regr, int val ) + +{ int r; + + r = regr + cont_map[cont]; + + switch (pi->mode) { + + case 0: + case 1: w0(r); P1; w0(val); P2; + break; + + case 2: + case 3: + case 4: w3(r); (void)r1(); w4(val); + break; + } +} + +static void comm_connect ( PIA *pi ) + +{ pi->saved_r0 = r0(); + pi->saved_r2 = r2(); + w2(4); w0(0xff); w2(6); + w2(4); w0(0xaa); w2(6); + w2(4); w0(0x00); w2(6); + w2(4); w0(0x87); w2(6); + w2(4); w0(0xe0); w2(0xc); w2(0xc); w2(4); +} + +static void comm_disconnect ( PIA *pi ) + +{ w2(0); w2(0); w2(0); w2(4); + w0(pi->saved_r0); + w2(pi->saved_r2); +} + +static void comm_read_block( PIA *pi, char * buf, int count ) + +{ int i, l, h; + + switch (pi->mode) { + + case 0: w0(0x48); P1; + for(i=0;i<count;i++) { + w0(0); w2(6); l = r1(); + w0(0x80); h = r1(); w2(4); + buf[i] = j44(l,h); + } + break; + + case 1: w0(0x68); P1; w0(0); + for(i=0;i<count;i++) { + w2(0x26); buf[i] = r0(); w2(0x24); + } + w2(4); + break; + + case 2: w3(0x68); (void)r1(); w2(0x24); + for (i=0;i<count;i++) buf[i] = r4(); + w2(4); + break; + + case 3: w3(0x68); (void)r1(); w2(0x24); + for (i=0;i<count/2;i++) ((u16 *)buf)[i] = r4w(); + w2(4); + break; + + case 4: w3(0x68); (void)r1(); w2(0x24); + for (i=0;i<count/4;i++) ((u32 *)buf)[i] = r4l(); + w2(4); + break; + + } +} + +/* NB: Watch out for the byte swapped writes ! 
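+
+   In modes 0-2 the loops below send buf[k^1], i.e. the odd byte of each
+   pair ahead of the even one, and the EPP modes use pi_swab16() and
+   pi_swab32() so the bytes evidently reach the adapter in that same
+   swapped order.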
*/ + +static void comm_write_block( PIA *pi, char * buf, int count ) + +{ int k; + + switch (pi->mode) { + + case 0: + case 1: w0(0x68); P1; + for (k=0;k<count;k++) { + w2(5); w0(buf[k^1]); w2(7); + } + w2(5); w2(4); + break; + + case 2: w3(0x48); (void)r1(); + for (k=0;k<count;k++) w4(buf[k^1]); + break; + + case 3: w3(0x48); (void)r1(); + for (k=0;k<count/2;k++) w4w(pi_swab16(buf,k)); + break; + + case 4: w3(0x48); (void)r1(); + for (k=0;k<count/4;k++) w4l(pi_swab32(buf,k)); + break; + + + } +} + +static void comm_log_adapter( PIA *pi, char * scratch, int verbose ) + +{ char *mode_string[5] = {"4-bit","8-bit","EPP-8","EPP-16","EPP-32"}; + + printk("%s: comm %s, DataStor Commuter at 0x%x, ", + pi->device,COMM_VERSION,pi->port); + printk("mode %d (%s), delay %d\n",pi->mode, + mode_string[pi->mode],pi->delay); + +} + +static struct pi_protocol comm = { + .owner = THIS_MODULE, + .name = "comm", + .max_mode = 5, + .epp_first = 2, + .default_delay = 1, + .max_units = 1, + .write_regr = comm_write_regr, + .read_regr = comm_read_regr, + .write_block = comm_write_block, + .read_block = comm_read_block, + .connect = comm_connect, + .disconnect = comm_disconnect, + .log_adapter = comm_log_adapter, +}; + +static int __init comm_init(void) +{ + return paride_register(&comm); +} + +static void __exit comm_exit(void) +{ + paride_unregister(&comm); +} + +MODULE_LICENSE("GPL"); +module_init(comm_init) +module_exit(comm_exit) diff --git a/drivers/block/paride/dstr.c b/drivers/block/paride/dstr.c new file mode 100644 index 0000000..accc5c7 --- /dev/null +++ b/drivers/block/paride/dstr.c @@ -0,0 +1,233 @@ +/* + dstr.c (c) 1997-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + dstr.c is a low-level protocol driver for the + DataStor EP2000 parallel to IDE adapter chip. 
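+
+   (The EP2000 is woken up with the same aa/55/00/ff/87/78 signature
+   sequence used by several other adapters supported here -- see the
+   CCP() macro below.)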
+ +*/ + +/* Changes: + + 1.01 GRG 1998.05.06 init_proto, release_proto + +*/ + +#define DSTR_VERSION "1.01" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <asm/io.h> + +#include "paride.h" + +/* mode codes: 0 nybble reads, 8-bit writes + 1 8-bit reads and writes + 2 8-bit EPP mode + 3 EPP-16 + 4 EPP-32 +*/ + +#define j44(a,b) (((a>>3)&0x07)|((~a>>4)&0x08)|((b<<1)&0x70)|((~b)&0x80)) + +#define P1 w2(5);w2(0xd);w2(5);w2(4); +#define P2 w2(5);w2(7);w2(5);w2(4); +#define P3 w2(6);w2(4);w2(6);w2(4); + +/* cont = 0 - access the IDE register file + cont = 1 - access the IDE command set +*/ + +static int cont_map[2] = { 0x20, 0x40 }; + +static int dstr_read_regr( PIA *pi, int cont, int regr ) + +{ int a, b, r; + + r = regr + cont_map[cont]; + + w0(0x81); P1; + if (pi->mode) { w0(0x11); } else { w0(1); } + P2; w0(r); P1; + + switch (pi->mode) { + + case 0: w2(6); a = r1(); w2(4); w2(6); b = r1(); w2(4); + return j44(a,b); + + case 1: w0(0); w2(0x26); a = r0(); w2(4); + return a; + + case 2: + case 3: + case 4: w2(0x24); a = r4(); w2(4); + return a; + + } + return -1; +} + +static void dstr_write_regr( PIA *pi, int cont, int regr, int val ) + +{ int r; + + r = regr + cont_map[cont]; + + w0(0x81); P1; + if (pi->mode >= 2) { w0(0x11); } else { w0(1); } + P2; w0(r); P1; + + switch (pi->mode) { + + case 0: + case 1: w0(val); w2(5); w2(7); w2(5); w2(4); + break; + + case 2: + case 3: + case 4: w4(val); + break; + } +} + +#define CCP(x) w0(0xff);w2(0xc);w2(4);\ + w0(0xaa);w0(0x55);w0(0);w0(0xff);w0(0x87);w0(0x78);\ + w0(x);w2(5);w2(4); + +static void dstr_connect ( PIA *pi ) + +{ pi->saved_r0 = r0(); + pi->saved_r2 = r2(); + w2(4); CCP(0xe0); w0(0xff); +} + +static void dstr_disconnect ( PIA *pi ) + +{ CCP(0x30); + w0(pi->saved_r0); + w2(pi->saved_r2); +} + +static void dstr_read_block( PIA *pi, char * buf, int count ) + +{ int k, a, b; + + w0(0x81); P1; + if (pi->mode) { w0(0x19); } else { w0(9); } + P2; w0(0x82); P1; P3; w0(0x20); P1; + + switch (pi->mode) { + + case 0: for (k=0;k<count;k++) { + w2(6); a = r1(); w2(4); + w2(6); b = r1(); w2(4); + buf[k] = j44(a,b); + } + break; + + case 1: w0(0); + for (k=0;k<count;k++) { + w2(0x26); buf[k] = r0(); w2(0x24); + } + w2(4); + break; + + case 2: w2(0x24); + for (k=0;k<count;k++) buf[k] = r4(); + w2(4); + break; + + case 3: w2(0x24); + for (k=0;k<count/2;k++) ((u16 *)buf)[k] = r4w(); + w2(4); + break; + + case 4: w2(0x24); + for (k=0;k<count/4;k++) ((u32 *)buf)[k] = r4l(); + w2(4); + break; + + } +} + +static void dstr_write_block( PIA *pi, char * buf, int count ) + +{ int k; + + w0(0x81); P1; + if (pi->mode) { w0(0x19); } else { w0(9); } + P2; w0(0x82); P1; P3; w0(0x20); P1; + + switch (pi->mode) { + + case 0: + case 1: for (k=0;k<count;k++) { + w2(5); w0(buf[k]); w2(7); + } + w2(5); w2(4); + break; + + case 2: w2(0xc5); + for (k=0;k<count;k++) w4(buf[k]); + w2(0xc4); + break; + + case 3: w2(0xc5); + for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]); + w2(0xc4); + break; + + case 4: w2(0xc5); + for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]); + w2(0xc4); + break; + + } +} + + +static void dstr_log_adapter( PIA *pi, char * scratch, int verbose ) + +{ char *mode_string[5] = {"4-bit","8-bit","EPP-8", + "EPP-16","EPP-32"}; + + printk("%s: dstr %s, DataStor EP2000 at 0x%x, ", + pi->device,DSTR_VERSION,pi->port); + printk("mode %d (%s), delay %d\n",pi->mode, + mode_string[pi->mode],pi->delay); + +} + +static struct pi_protocol dstr = { + .owner = 
THIS_MODULE, + .name = "dstr", + .max_mode = 5, + .epp_first = 2, + .default_delay = 1, + .max_units = 1, + .write_regr = dstr_write_regr, + .read_regr = dstr_read_regr, + .write_block = dstr_write_block, + .read_block = dstr_read_block, + .connect = dstr_connect, + .disconnect = dstr_disconnect, + .log_adapter = dstr_log_adapter, +}; + +static int __init dstr_init(void) +{ + return paride_register(&dstr); +} + +static void __exit dstr_exit(void) +{ + paride_unregister(&dstr); +} + +MODULE_LICENSE("GPL"); +module_init(dstr_init) +module_exit(dstr_exit) diff --git a/drivers/block/paride/epat.c b/drivers/block/paride/epat.c new file mode 100644 index 0000000..1bcdff7 --- /dev/null +++ b/drivers/block/paride/epat.c @@ -0,0 +1,340 @@ +/* + epat.c (c) 1997-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + This is the low level protocol driver for the EPAT parallel + to IDE adapter from Shuttle Technologies. This adapter is + used in many popular parallel port disk products such as the + SyQuest EZ drives, the Avatar Shark and the Imation SuperDisk. + +*/ + +/* Changes: + + 1.01 GRG 1998.05.06 init_proto, release_proto + 1.02 Joshua b. Jore CPP(renamed), epat_connect, epat_disconnect + +*/ + +#define EPAT_VERSION "1.02" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <asm/io.h> + +#include "paride.h" + +#define j44(a,b) (((a>>4)&0x0f)+(b&0xf0)) +#define j53(a,b) (((a>>3)&0x1f)+((b<<4)&0xe0)) + +static int epatc8; + +module_param(epatc8, int, 0); +MODULE_PARM_DESC(epatc8, "support for the Shuttle EP1284 chip, " + "used in any recent Imation SuperDisk (LS-120) drive."); + +/* cont = 0 IDE register file + cont = 1 IDE control registers + cont = 2 internal EPAT registers +*/ + +static int cont_map[3] = { 0x18, 0x10, 0 }; + +static void epat_write_regr( PIA *pi, int cont, int regr, int val) + +{ int r; + + r = regr + cont_map[cont]; + + switch (pi->mode) { + + case 0: + case 1: + case 2: w0(0x60+r); w2(1); w0(val); w2(4); + break; + + case 3: + case 4: + case 5: w3(0x40+r); w4(val); + break; + + } +} + +static int epat_read_regr( PIA *pi, int cont, int regr ) + +{ int a, b, r; + + r = regr + cont_map[cont]; + + switch (pi->mode) { + + case 0: w0(r); w2(1); w2(3); + a = r1(); w2(4); b = r1(); + return j44(a,b); + + case 1: w0(0x40+r); w2(1); w2(4); + a = r1(); b = r2(); w0(0xff); + return j53(a,b); + + case 2: w0(0x20+r); w2(1); w2(0x25); + a = r0(); w2(4); + return a; + + case 3: + case 4: + case 5: w3(r); w2(0x24); a = r4(); w2(4); + return a; + + } + return -1; /* never gets here */ +} + +static void epat_read_block( PIA *pi, char * buf, int count ) + +{ int k, ph, a, b; + + switch (pi->mode) { + + case 0: w0(7); w2(1); w2(3); w0(0xff); + ph = 0; + for(k=0;k<count;k++) { + if (k == count-1) w0(0xfd); + w2(6+ph); a = r1(); + if (a & 8) b = a; + else { w2(4+ph); b = r1(); } + buf[k] = j44(a,b); + ph = 1 - ph; + } + w0(0); w2(4); + break; + + case 1: w0(0x47); w2(1); w2(5); w0(0xff); + ph = 0; + for(k=0;k<count;k++) { + if (k == count-1) w0(0xfd); + w2(4+ph); + a = r1(); b = r2(); + buf[k] = j53(a,b); + ph = 1 - ph; + } + w0(0); w2(4); + break; + + case 2: w0(0x27); w2(1); w2(0x25); w0(0); + ph = 0; + for(k=0;k<count-1;k++) { + w2(0x24+ph); + buf[k] = r0(); + ph = 1 - ph; + } + w2(0x26); w2(0x27); buf[count-1] = r0(); + w2(0x25); w2(4); + break; + + case 3: w3(0x80); w2(0x24); + for(k=0;k<count-1;k++) buf[k] = r4(); + w2(4); w3(0xa0); 
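+		/* cases 3-5 stop the EPP burst one transfer early and
+		   fetch the final byte separately under address 0xa0 */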
w2(0x24); buf[count-1] = r4(); + w2(4); + break; + + case 4: w3(0x80); w2(0x24); + for(k=0;k<(count/2)-1;k++) ((u16 *)buf)[k] = r4w(); + buf[count-2] = r4(); + w2(4); w3(0xa0); w2(0x24); buf[count-1] = r4(); + w2(4); + break; + + case 5: w3(0x80); w2(0x24); + for(k=0;k<(count/4)-1;k++) ((u32 *)buf)[k] = r4l(); + for(k=count-4;k<count-1;k++) buf[k] = r4(); + w2(4); w3(0xa0); w2(0x24); buf[count-1] = r4(); + w2(4); + break; + + } +} + +static void epat_write_block( PIA *pi, char * buf, int count ) + +{ int ph, k; + + switch (pi->mode) { + + case 0: + case 1: + case 2: w0(0x67); w2(1); w2(5); + ph = 0; + for(k=0;k<count;k++) { + w0(buf[k]); + w2(4+ph); + ph = 1 - ph; + } + w2(7); w2(4); + break; + + case 3: w3(0xc0); + for(k=0;k<count;k++) w4(buf[k]); + w2(4); + break; + + case 4: w3(0xc0); + for(k=0;k<(count/2);k++) w4w(((u16 *)buf)[k]); + w2(4); + break; + + case 5: w3(0xc0); + for(k=0;k<(count/4);k++) w4l(((u32 *)buf)[k]); + w2(4); + break; + + } +} + +/* these macros access the EPAT registers in native addressing */ + +#define WR(r,v) epat_write_regr(pi,2,r,v) +#define RR(r) (epat_read_regr(pi,2,r)) + +/* and these access the IDE task file */ + +#define WRi(r,v) epat_write_regr(pi,0,r,v) +#define RRi(r) (epat_read_regr(pi,0,r)) + +/* FIXME: the CPP stuff should be fixed to handle multiple EPATs on a chain */ + +#define CPP(x) w2(4);w0(0x22);w0(0xaa);w0(0x55);w0(0);w0(0xff);\ + w0(0x87);w0(0x78);w0(x);w2(4);w2(5);w2(4);w0(0xff); + +static void epat_connect ( PIA *pi ) + +{ pi->saved_r0 = r0(); + pi->saved_r2 = r2(); + + /* Initialize the chip */ + CPP(0); + + if (epatc8) { + CPP(0x40);CPP(0xe0); + w0(0);w2(1);w2(4); + WR(0x8,0x12);WR(0xc,0x14);WR(0x12,0x10); + WR(0xe,0xf);WR(0xf,4); + /* WR(0xe,0xa);WR(0xf,4); */ + WR(0xe,0xd);WR(0xf,0); + /* CPP(0x30); */ + } + + /* Connect to the chip */ + CPP(0xe0); + w0(0);w2(1);w2(4); /* Idle into SPP */ + if (pi->mode >= 3) { + w0(0);w2(1);w2(4);w2(0xc); + /* Request EPP */ + w0(0x40);w2(6);w2(7);w2(4);w2(0xc);w2(4); + } + + if (!epatc8) { + WR(8,0x10); WR(0xc,0x14); WR(0xa,0x38); WR(0x12,0x10); + } +} + +static void epat_disconnect (PIA *pi) +{ CPP(0x30); + w0(pi->saved_r0); + w2(pi->saved_r2); +} + +static int epat_test_proto( PIA *pi, char * scratch, int verbose ) + +{ int k, j, f, cc; + int e[2] = {0,0}; + + epat_connect(pi); + cc = RR(0xd); + epat_disconnect(pi); + + epat_connect(pi); + for (j=0;j<2;j++) { + WRi(6,0xa0+j*0x10); + for (k=0;k<256;k++) { + WRi(2,k^0xaa); + WRi(3,k^0x55); + if (RRi(2) != (k^0xaa)) e[j]++; + } + } + epat_disconnect(pi); + + f = 0; + epat_connect(pi); + WR(0x13,1); WR(0x13,0); WR(0xa,0x11); + epat_read_block(pi,scratch,512); + + for (k=0;k<256;k++) { + if ((scratch[2*k] & 0xff) != k) f++; + if ((scratch[2*k+1] & 0xff) != (0xff-k)) f++; + } + epat_disconnect(pi); + + if (verbose) { + printk("%s: epat: port 0x%x, mode %d, ccr %x, test=(%d,%d,%d)\n", + pi->device,pi->port,pi->mode,cc,e[0],e[1],f); + } + + return (e[0] && e[1]) || f; +} + +static void epat_log_adapter( PIA *pi, char * scratch, int verbose ) + +{ int ver; + char *mode_string[6] = + {"4-bit","5/3","8-bit","EPP-8","EPP-16","EPP-32"}; + + epat_connect(pi); + WR(0xa,0x38); /* read the version code */ + ver = RR(0xb); + epat_disconnect(pi); + + printk("%s: epat %s, Shuttle EPAT chip %x at 0x%x, ", + pi->device,EPAT_VERSION,ver,pi->port); + printk("mode %d (%s), delay %d\n",pi->mode, + mode_string[pi->mode],pi->delay); + +} + +static struct pi_protocol epat = { + .owner = THIS_MODULE, + .name = "epat", + .max_mode = 6, + .epp_first = 3, + .default_delay = 1, + 
.max_units = 1, + .write_regr = epat_write_regr, + .read_regr = epat_read_regr, + .write_block = epat_write_block, + .read_block = epat_read_block, + .connect = epat_connect, + .disconnect = epat_disconnect, + .test_proto = epat_test_proto, + .log_adapter = epat_log_adapter, +}; + +static int __init epat_init(void) +{ +#ifdef CONFIG_PARIDE_EPATC8 + epatc8 = 1; +#endif + return paride_register(&epat); +} + +static void __exit epat_exit(void) +{ + paride_unregister(&epat); +} + +MODULE_LICENSE("GPL"); +module_init(epat_init) +module_exit(epat_exit) diff --git a/drivers/block/paride/epia.c b/drivers/block/paride/epia.c new file mode 100644 index 0000000..fb0e782 --- /dev/null +++ b/drivers/block/paride/epia.c @@ -0,0 +1,316 @@ +/* + epia.c (c) 1997-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + epia.c is a low-level protocol driver for Shuttle Technologies + EPIA parallel to IDE adapter chip. This device is now obsolete + and has been replaced with the EPAT chip, which is supported + by epat.c, however, some devices based on EPIA are still + available. + +*/ + +/* Changes: + + 1.01 GRG 1998.05.06 init_proto, release_proto + 1.02 GRG 1998.06.17 support older versions of EPIA + +*/ + +#define EPIA_VERSION "1.02" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <asm/io.h> + +#include "paride.h" + +/* mode codes: 0 nybble reads on port 1, 8-bit writes + 1 5/3 reads on ports 1 & 2, 8-bit writes + 2 8-bit reads and writes + 3 8-bit EPP mode + 4 16-bit EPP + 5 32-bit EPP +*/ + +#define j44(a,b) (((a>>4)&0x0f)+(b&0xf0)) +#define j53(a,b) (((a>>3)&0x1f)+((b<<4)&0xe0)) + +/* cont = 0 IDE register file + cont = 1 IDE control registers +*/ + +static int cont_map[2] = { 0, 0x80 }; + +static int epia_read_regr( PIA *pi, int cont, int regr ) + +{ int a, b, r; + + regr += cont_map[cont]; + + switch (pi->mode) { + + case 0: r = regr^0x39; + w0(r); w2(1); w2(3); w0(r); + a = r1(); w2(1); b = r1(); w2(4); + return j44(a,b); + + case 1: r = regr^0x31; + w0(r); w2(1); w0(r&0x37); + w2(3); w2(5); w0(r|0xf0); + a = r1(); b = r2(); w2(4); + return j53(a,b); + + case 2: r = regr^0x29; + w0(r); w2(1); w2(0X21); w2(0x23); + a = r0(); w2(4); + return a; + + case 3: + case 4: + case 5: w3(regr); w2(0x24); a = r4(); w2(4); + return a; + + } + return -1; +} + +static void epia_write_regr( PIA *pi, int cont, int regr, int val) + +{ int r; + + regr += cont_map[cont]; + + switch (pi->mode) { + + case 0: + case 1: + case 2: r = regr^0x19; + w0(r); w2(1); w0(val); w2(3); w2(4); + break; + + case 3: + case 4: + case 5: r = regr^0x40; + w3(r); w4(val); w2(4); + break; + } +} + +#define WR(r,v) epia_write_regr(pi,0,r,v) +#define RR(r) (epia_read_regr(pi,0,r)) + +/* The use of register 0x84 is entirely unclear - it seems to control + some EPP counters ... currently we know about 3 different block + sizes: the standard 512 byte reads and writes, 12 byte writes and + 2048 byte reads (the last two being used in the CDrom drivers. 
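+
+   From the code below: 0x84 is written with 3 for EPP reads longer than
+   512 bytes, with 1 for EPP writes shorter than 512 bytes, with 8 before
+   the 512 byte test read in epia_test_proto, and is reset to 0 when each
+   transfer completes.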
+*/ + +static void epia_connect ( PIA *pi ) + +{ pi->saved_r0 = r0(); + pi->saved_r2 = r2(); + + w2(4); w0(0xa0); w0(0x50); w0(0xc0); w0(0x30); w0(0xa0); w0(0); + w2(1); w2(4); + if (pi->mode >= 3) { + w0(0xa); w2(1); w2(4); w0(0x82); w2(4); w2(0xc); w2(4); + w2(0x24); w2(0x26); w2(4); + } + WR(0x86,8); +} + +static void epia_disconnect ( PIA *pi ) + +{ /* WR(0x84,0x10); */ + w0(pi->saved_r0); + w2(1); w2(4); + w0(pi->saved_r0); + w2(pi->saved_r2); +} + +static void epia_read_block( PIA *pi, char * buf, int count ) + +{ int k, ph, a, b; + + switch (pi->mode) { + + case 0: w0(0x81); w2(1); w2(3); w0(0xc1); + ph = 1; + for (k=0;k<count;k++) { + w2(2+ph); a = r1(); + w2(4+ph); b = r1(); + buf[k] = j44(a,b); + ph = 1 - ph; + } + w0(0); w2(4); + break; + + case 1: w0(0x91); w2(1); w0(0x10); w2(3); + w0(0x51); w2(5); w0(0xd1); + ph = 1; + for (k=0;k<count;k++) { + w2(4+ph); + a = r1(); b = r2(); + buf[k] = j53(a,b); + ph = 1 - ph; + } + w0(0); w2(4); + break; + + case 2: w0(0x89); w2(1); w2(0x23); w2(0x21); + ph = 1; + for (k=0;k<count;k++) { + w2(0x24+ph); + buf[k] = r0(); + ph = 1 - ph; + } + w2(6); w2(4); + break; + + case 3: if (count > 512) WR(0x84,3); + w3(0); w2(0x24); + for (k=0;k<count;k++) buf[k] = r4(); + w2(4); WR(0x84,0); + break; + + case 4: if (count > 512) WR(0x84,3); + w3(0); w2(0x24); + for (k=0;k<count/2;k++) ((u16 *)buf)[k] = r4w(); + w2(4); WR(0x84,0); + break; + + case 5: if (count > 512) WR(0x84,3); + w3(0); w2(0x24); + for (k=0;k<count/4;k++) ((u32 *)buf)[k] = r4l(); + w2(4); WR(0x84,0); + break; + + } +} + +static void epia_write_block( PIA *pi, char * buf, int count ) + +{ int ph, k, last, d; + + switch (pi->mode) { + + case 0: + case 1: + case 2: w0(0xa1); w2(1); w2(3); w2(1); w2(5); + ph = 0; last = 0x8000; + for (k=0;k<count;k++) { + d = buf[k]; + if (d != last) { last = d; w0(d); } + w2(4+ph); + ph = 1 - ph; + } + w2(7); w2(4); + break; + + case 3: if (count < 512) WR(0x84,1); + w3(0x40); + for (k=0;k<count;k++) w4(buf[k]); + if (count < 512) WR(0x84,0); + break; + + case 4: if (count < 512) WR(0x84,1); + w3(0x40); + for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]); + if (count < 512) WR(0x84,0); + break; + + case 5: if (count < 512) WR(0x84,1); + w3(0x40); + for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]); + if (count < 512) WR(0x84,0); + break; + + } + +} + +static int epia_test_proto( PIA *pi, char * scratch, int verbose ) + +{ int j, k, f; + int e[2] = {0,0}; + + epia_connect(pi); + for (j=0;j<2;j++) { + WR(6,0xa0+j*0x10); + for (k=0;k<256;k++) { + WR(2,k^0xaa); + WR(3,k^0x55); + if (RR(2) != (k^0xaa)) e[j]++; + } + WR(2,1); WR(3,1); + } + epia_disconnect(pi); + + f = 0; + epia_connect(pi); + WR(0x84,8); + epia_read_block(pi,scratch,512); + for (k=0;k<256;k++) { + if ((scratch[2*k] & 0xff) != ((k+1) & 0xff)) f++; + if ((scratch[2*k+1] & 0xff) != ((-2-k) & 0xff)) f++; + } + WR(0x84,0); + epia_disconnect(pi); + + if (verbose) { + printk("%s: epia: port 0x%x, mode %d, test=(%d,%d,%d)\n", + pi->device,pi->port,pi->mode,e[0],e[1],f); + } + + return (e[0] && e[1]) || f; + +} + + +static void epia_log_adapter( PIA *pi, char * scratch, int verbose ) + +{ char *mode_string[6] = {"4-bit","5/3","8-bit", + "EPP-8","EPP-16","EPP-32"}; + + printk("%s: epia %s, Shuttle EPIA at 0x%x, ", + pi->device,EPIA_VERSION,pi->port); + printk("mode %d (%s), delay %d\n",pi->mode, + mode_string[pi->mode],pi->delay); + +} + +static struct pi_protocol epia = { + .owner = THIS_MODULE, + .name = "epia", + .max_mode = 6, + .epp_first = 3, + .default_delay = 1, + .max_units = 1, + .write_regr = 
epia_write_regr, + .read_regr = epia_read_regr, + .write_block = epia_write_block, + .read_block = epia_read_block, + .connect = epia_connect, + .disconnect = epia_disconnect, + .test_proto = epia_test_proto, + .log_adapter = epia_log_adapter, +}; + +static int __init epia_init(void) +{ + return paride_register(&epia); +} + +static void __exit epia_exit(void) +{ + paride_unregister(&epia); +} + +MODULE_LICENSE("GPL"); +module_init(epia_init) +module_exit(epia_exit) diff --git a/drivers/block/paride/fit2.c b/drivers/block/paride/fit2.c new file mode 100644 index 0000000..3812837 --- /dev/null +++ b/drivers/block/paride/fit2.c @@ -0,0 +1,151 @@ +/* + fit2.c (c) 1998 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + fit2.c is a low-level protocol driver for the older version + of the Fidelity International Technology parallel port adapter. + This adapter is used in their TransDisk 2000 and older TransDisk + 3000 portable hard-drives. As far as I can tell, this device + supports 4-bit mode _only_. + + Newer models of the FIT products use an enhanced protocol. + The "fit3" protocol module should support current drives. + +*/ + +#define FIT2_VERSION "1.0" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <asm/io.h> + +#include "paride.h" + +#define j44(a,b) (((a>>4)&0x0f)|(b&0xf0)) + +/* cont = 0 - access the IDE register file + cont = 1 - access the IDE command set + +NB: The FIT adapter does not appear to use the control registers. +So, we map ALT_STATUS to STATUS and NO-OP writes to the device +control register - this means that IDE reset will not work on these +devices. + +*/ + +static void fit2_write_regr( PIA *pi, int cont, int regr, int val) + +{ if (cont == 1) return; + w2(0xc); w0(regr); w2(4); w0(val); w2(5); w0(0); w2(4); +} + +static int fit2_read_regr( PIA *pi, int cont, int regr ) + +{ int a, b, r; + + if (cont) { + if (regr != 6) return 0xff; + r = 7; + } else r = regr + 0x10; + + w2(0xc); w0(r); w2(4); w2(5); + w0(0); a = r1(); + w0(1); b = r1(); + w2(4); + + return j44(a,b); + +} + +static void fit2_read_block( PIA *pi, char * buf, int count ) + +{ int k, a, b, c, d; + + w2(0xc); w0(0x10); + + for (k=0;k<count/4;k++) { + + w2(4); w2(5); + w0(0); a = r1(); w0(1); b = r1(); + w0(3); c = r1(); w0(2); d = r1(); + buf[4*k+0] = j44(a,b); + buf[4*k+1] = j44(d,c); + + w2(4); w2(5); + a = r1(); w0(3); b = r1(); + w0(1); c = r1(); w0(0); d = r1(); + buf[4*k+2] = j44(d,c); + buf[4*k+3] = j44(a,b); + + } + + w2(4); + +} + +static void fit2_write_block( PIA *pi, char * buf, int count ) + +{ int k; + + + w2(0xc); w0(0); + for (k=0;k<count/2;k++) { + w2(4); w0(buf[2*k]); + w2(5); w0(buf[2*k+1]); + } + w2(4); +} + +static void fit2_connect ( PIA *pi ) + +{ pi->saved_r0 = r0(); + pi->saved_r2 = r2(); + w2(0xcc); +} + +static void fit2_disconnect ( PIA *pi ) + +{ w0(pi->saved_r0); + w2(pi->saved_r2); +} + +static void fit2_log_adapter( PIA *pi, char * scratch, int verbose ) + +{ printk("%s: fit2 %s, FIT 2000 adapter at 0x%x, delay %d\n", + pi->device,FIT2_VERSION,pi->port,pi->delay); + +} + +static struct pi_protocol fit2 = { + .owner = THIS_MODULE, + .name = "fit2", + .max_mode = 1, + .epp_first = 2, + .default_delay = 1, + .max_units = 1, + .write_regr = fit2_write_regr, + .read_regr = fit2_read_regr, + .write_block = fit2_write_block, + .read_block = fit2_read_block, + .connect = fit2_connect, + .disconnect = fit2_disconnect, + 
.log_adapter = fit2_log_adapter, +}; + +static int __init fit2_init(void) +{ + return paride_register(&fit2); +} + +static void __exit fit2_exit(void) +{ + paride_unregister(&fit2); +} + +MODULE_LICENSE("GPL"); +module_init(fit2_init) +module_exit(fit2_exit) diff --git a/drivers/block/paride/fit3.c b/drivers/block/paride/fit3.c new file mode 100644 index 0000000..275d269 --- /dev/null +++ b/drivers/block/paride/fit3.c @@ -0,0 +1,211 @@ +/* + fit3.c (c) 1998 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + fit3.c is a low-level protocol driver for newer models + of the Fidelity International Technology parallel port adapter. + This adapter is used in their TransDisk 3000 portable + hard-drives, as well as CD-ROM, PD-CD and other devices. + + The TD-2000 and certain older devices use a different protocol. + Try the fit2 protocol module with them. + + NB: The FIT adapters do not appear to support the control + registers. So, we map ALT_STATUS to STATUS and NO-OP writes + to the device control register - this means that IDE reset + will not work on these devices. + +*/ + +#define FIT3_VERSION "1.0" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <asm/io.h> + +#include "paride.h" + +#define j44(a,b) (((a>>3)&0x0f)|((b<<1)&0xf0)) + +#define w7(byte) {out_p(7,byte);} +#define r7() (in_p(7) & 0xff) + +/* cont = 0 - access the IDE register file + cont = 1 - access the IDE command set + +*/ + +static void fit3_write_regr( PIA *pi, int cont, int regr, int val) + +{ if (cont == 1) return; + + switch (pi->mode) { + + case 0: + case 1: w2(0xc); w0(regr); w2(0x8); w2(0xc); + w0(val); w2(0xd); + w0(0); w2(0xc); + break; + + case 2: w2(0xc); w0(regr); w2(0x8); w2(0xc); + w4(val); w4(0); + w2(0xc); + break; + + } +} + +static int fit3_read_regr( PIA *pi, int cont, int regr ) + +{ int a, b; + + if (cont) { + if (regr != 6) return 0xff; + regr = 7; + } + + switch (pi->mode) { + + case 0: w2(0xc); w0(regr + 0x10); w2(0x8); w2(0xc); + w2(0xd); a = r1(); + w2(0xf); b = r1(); + w2(0xc); + return j44(a,b); + + case 1: w2(0xc); w0(regr + 0x90); w2(0x8); w2(0xc); + w2(0xec); w2(0xee); w2(0xef); a = r0(); + w2(0xc); + return a; + + case 2: w2(0xc); w0(regr + 0x90); w2(0x8); w2(0xc); + w2(0xec); + a = r4(); b = r4(); + w2(0xc); + return a; + + } + return -1; + +} + +static void fit3_read_block( PIA *pi, char * buf, int count ) + +{ int k, a, b, c, d; + + switch (pi->mode) { + + case 0: w2(0xc); w0(0x10); w2(0x8); w2(0xc); + for (k=0;k<count/2;k++) { + w2(0xd); a = r1(); + w2(0xf); b = r1(); + w2(0xc); c = r1(); + w2(0xe); d = r1(); + buf[2*k ] = j44(a,b); + buf[2*k+1] = j44(c,d); + } + w2(0xc); + break; + + case 1: w2(0xc); w0(0x90); w2(0x8); w2(0xc); + w2(0xec); w2(0xee); + for (k=0;k<count/2;k++) { + w2(0xef); a = r0(); + w2(0xee); b = r0(); + buf[2*k ] = a; + buf[2*k+1] = b; + } + w2(0xec); + w2(0xc); + break; + + case 2: w2(0xc); w0(0x90); w2(0x8); w2(0xc); + w2(0xec); + for (k=0;k<count;k++) buf[k] = r4(); + w2(0xc); + break; + + } +} + +static void fit3_write_block( PIA *pi, char * buf, int count ) + +{ int k; + + switch (pi->mode) { + + case 0: + case 1: w2(0xc); w0(0); w2(0x8); w2(0xc); + for (k=0;k<count/2;k++) { + w0(buf[2*k ]); w2(0xd); + w0(buf[2*k+1]); w2(0xc); + } + break; + + case 2: w2(0xc); w0(0); w2(0x8); w2(0xc); + for (k=0;k<count;k++) w4(buf[k]); + w2(0xc); + break; + } +} + +static void fit3_connect ( PIA *pi ) + +{ pi->saved_r0 = 
r0(); + pi->saved_r2 = r2(); + w2(0xc); w0(0); w2(0xa); + if (pi->mode == 2) { + w2(0xc); w0(0x9); w2(0x8); w2(0xc); + } +} + +static void fit3_disconnect ( PIA *pi ) + +{ w2(0xc); w0(0xa); w2(0x8); w2(0xc); + w0(pi->saved_r0); + w2(pi->saved_r2); +} + +static void fit3_log_adapter( PIA *pi, char * scratch, int verbose ) + +{ char *mode_string[3] = {"4-bit","8-bit","EPP"}; + + printk("%s: fit3 %s, FIT 3000 adapter at 0x%x, " + "mode %d (%s), delay %d\n", + pi->device,FIT3_VERSION,pi->port, + pi->mode,mode_string[pi->mode],pi->delay); + +} + +static struct pi_protocol fit3 = { + .owner = THIS_MODULE, + .name = "fit3", + .max_mode = 3, + .epp_first = 2, + .default_delay = 1, + .max_units = 1, + .write_regr = fit3_write_regr, + .read_regr = fit3_read_regr, + .write_block = fit3_write_block, + .read_block = fit3_read_block, + .connect = fit3_connect, + .disconnect = fit3_disconnect, + .log_adapter = fit3_log_adapter, +}; + +static int __init fit3_init(void) +{ + return paride_register(&fit3); +} + +static void __exit fit3_exit(void) +{ + paride_unregister(&fit3); +} + +MODULE_LICENSE("GPL"); +module_init(fit3_init) +module_exit(fit3_exit) diff --git a/drivers/block/paride/friq.c b/drivers/block/paride/friq.c new file mode 100644 index 0000000..4f2ba24 --- /dev/null +++ b/drivers/block/paride/friq.c @@ -0,0 +1,276 @@ +/* + friq.c (c) 1998 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License + + friq.c is a low-level protocol driver for the Freecom "IQ" + parallel port IDE adapter. Early versions of this adapter + use the 'frpw' protocol. + + Freecom uses this adapter in a battery powered external + CD-ROM drive. It is also used in LS-120 drives by + Maxell and Panasonic, and other devices. + + The battery powered drive requires software support to + control the power to the drive. This module enables the + drive power when the high level driver (pcd) is loaded + and disables it when the module is unloaded. Note, if + the friq module is built in to the kernel, the power + will never be switched off, so other means should be + used to conserve battery power. 
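+
+   (The power handling is visible further down: CMD(0x3d) switches the
+   drive on in friq_test_proto, CMD(0x9e) disables the sleep timer once
+   the adapter has been logged, and CMD(0x1d)/CMD(0x1e) switch the power
+   off again when the protocol is released.)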
+ +*/ + +/* Changes: + + 1.01 GRG 1998.12.20 Added support for soft power switch +*/ + +#define FRIQ_VERSION "1.01" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <asm/io.h> + +#include "paride.h" + +#define CMD(x) w2(4);w0(0xff);w0(0xff);w0(0x73);w0(0x73);\ + w0(0xc9);w0(0xc9);w0(0x26);w0(0x26);w0(x);w0(x); + +#define j44(l,h) (((l>>4)&0x0f)|(h&0xf0)) + +/* cont = 0 - access the IDE register file + cont = 1 - access the IDE command set +*/ + +static int cont_map[2] = { 0x08, 0x10 }; + +static int friq_read_regr( PIA *pi, int cont, int regr ) + +{ int h,l,r; + + r = regr + cont_map[cont]; + + CMD(r); + w2(6); l = r1(); + w2(4); h = r1(); + w2(4); + + return j44(l,h); + +} + +static void friq_write_regr( PIA *pi, int cont, int regr, int val) + +{ int r; + + r = regr + cont_map[cont]; + + CMD(r); + w0(val); + w2(5);w2(7);w2(5);w2(4); +} + +static void friq_read_block_int( PIA *pi, char * buf, int count, int regr ) + +{ int h, l, k, ph; + + switch(pi->mode) { + + case 0: CMD(regr); + for (k=0;k<count;k++) { + w2(6); l = r1(); + w2(4); h = r1(); + buf[k] = j44(l,h); + } + w2(4); + break; + + case 1: ph = 2; + CMD(regr+0xc0); + w0(0xff); + for (k=0;k<count;k++) { + w2(0xa4 + ph); + buf[k] = r0(); + ph = 2 - ph; + } + w2(0xac); w2(0xa4); w2(4); + break; + + case 2: CMD(regr+0x80); + for (k=0;k<count-2;k++) buf[k] = r4(); + w2(0xac); w2(0xa4); + buf[count-2] = r4(); + buf[count-1] = r4(); + w2(4); + break; + + case 3: CMD(regr+0x80); + for (k=0;k<(count/2)-1;k++) ((u16 *)buf)[k] = r4w(); + w2(0xac); w2(0xa4); + buf[count-2] = r4(); + buf[count-1] = r4(); + w2(4); + break; + + case 4: CMD(regr+0x80); + for (k=0;k<(count/4)-1;k++) ((u32 *)buf)[k] = r4l(); + buf[count-4] = r4(); + buf[count-3] = r4(); + w2(0xac); w2(0xa4); + buf[count-2] = r4(); + buf[count-1] = r4(); + w2(4); + break; + + } +} + +static void friq_read_block( PIA *pi, char * buf, int count) + +{ friq_read_block_int(pi,buf,count,0x08); +} + +static void friq_write_block( PIA *pi, char * buf, int count ) + +{ int k; + + switch(pi->mode) { + + case 0: + case 1: CMD(8); w2(5); + for (k=0;k<count;k++) { + w0(buf[k]); + w2(7);w2(5); + } + w2(4); + break; + + case 2: CMD(0xc8); w2(5); + for (k=0;k<count;k++) w4(buf[k]); + w2(4); + break; + + case 3: CMD(0xc8); w2(5); + for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]); + w2(4); + break; + + case 4: CMD(0xc8); w2(5); + for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]); + w2(4); + break; + } +} + +static void friq_connect ( PIA *pi ) + +{ pi->saved_r0 = r0(); + pi->saved_r2 = r2(); + w2(4); +} + +static void friq_disconnect ( PIA *pi ) + +{ CMD(0x20); + w0(pi->saved_r0); + w2(pi->saved_r2); +} + +static int friq_test_proto( PIA *pi, char * scratch, int verbose ) + +{ int j, k, r; + int e[2] = {0,0}; + + pi->saved_r0 = r0(); + w0(0xff); udelay(20); CMD(0x3d); /* turn the power on */ + udelay(500); + w0(pi->saved_r0); + + friq_connect(pi); + for (j=0;j<2;j++) { + friq_write_regr(pi,0,6,0xa0+j*0x10); + for (k=0;k<256;k++) { + friq_write_regr(pi,0,2,k^0xaa); + friq_write_regr(pi,0,3,k^0x55); + if (friq_read_regr(pi,0,2) != (k^0xaa)) e[j]++; + } + } + friq_disconnect(pi); + + friq_connect(pi); + friq_read_block_int(pi,scratch,512,0x10); + r = 0; + for (k=0;k<128;k++) if (scratch[k] != k) r++; + friq_disconnect(pi); + + if (verbose) { + printk("%s: friq: port 0x%x, mode %d, test=(%d,%d,%d)\n", + pi->device,pi->port,pi->mode,e[0],e[1],r); + } + + return (r || (e[0] && e[1])); +} + + 
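+/*
+   Illustrative sketch, not part of the original driver: friq_test_proto
+   above uses the same probe pattern as most of the protocol modules in
+   this directory -- write complementary patterns to two harmless IDE
+   task-file registers, read one back, and count mismatches for each of
+   the two device selects.  Factored out over the read_regr/write_regr
+   callbacks (the function below is hypothetical), it would look like:
+*/
+#if 0
+static int pi_echo_test(PIA *pi,
+			int (*rd)(PIA *, int, int),
+			void (*wr)(PIA *, int, int, int))
+
+{	int j, k;
+	int e[2] = {0,0};
+
+	for (j=0;j<2;j++) {
+		wr(pi,0,6,0xa0+j*0x10);		/* select device j */
+		for (k=0;k<256;k++) {
+			wr(pi,0,2,k^0xaa);	/* sector count reg */
+			wr(pi,0,3,k^0x55);	/* sector number reg */
+			if (rd(pi,0,2) != (k^0xaa)) e[j]++;
+		}
+	}
+	return (e[0] && e[1]);	/* fail only if both selects misread */
+}
+#endif
+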
+static void friq_log_adapter( PIA *pi, char * scratch, int verbose ) + +{ char *mode_string[6] = {"4-bit","8-bit", + "EPP-8","EPP-16","EPP-32"}; + + printk("%s: friq %s, Freecom IQ ASIC-2 adapter at 0x%x, ", pi->device, + FRIQ_VERSION,pi->port); + printk("mode %d (%s), delay %d\n",pi->mode, + mode_string[pi->mode],pi->delay); + + pi->private = 1; + friq_connect(pi); + CMD(0x9e); /* disable sleep timer */ + friq_disconnect(pi); + +} + +static void friq_release_proto( PIA *pi) +{ + if (pi->private) { /* turn off the power */ + friq_connect(pi); + CMD(0x1d); CMD(0x1e); + friq_disconnect(pi); + pi->private = 0; + } +} + +static struct pi_protocol friq = { + .owner = THIS_MODULE, + .name = "friq", + .max_mode = 5, + .epp_first = 2, + .default_delay = 1, + .max_units = 1, + .write_regr = friq_write_regr, + .read_regr = friq_read_regr, + .write_block = friq_write_block, + .read_block = friq_read_block, + .connect = friq_connect, + .disconnect = friq_disconnect, + .test_proto = friq_test_proto, + .log_adapter = friq_log_adapter, + .release_proto = friq_release_proto, +}; + +static int __init friq_init(void) +{ + return paride_register(&friq); +} + +static void __exit friq_exit(void) +{ + paride_unregister(&friq); +} + +MODULE_LICENSE("GPL"); +module_init(friq_init) +module_exit(friq_exit) diff --git a/drivers/block/paride/frpw.c b/drivers/block/paride/frpw.c new file mode 100644 index 0000000..c3cde36 --- /dev/null +++ b/drivers/block/paride/frpw.c @@ -0,0 +1,313 @@ +/* + frpw.c (c) 1996-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License + + frpw.c is a low-level protocol driver for the Freecom "Power" + parallel port IDE adapter. + + Some applications of this adapter may require a "printer" reset + prior to loading the driver. This can be done by loading and + unloading the "lp" driver, or it can be done by this driver + if you define FRPW_HARD_RESET. The latter is not recommended + as it may upset devices on other ports. 
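+
+   (When FRPW_HARD_RESET is defined, frpw_test_pnp pulses the bus reset
+   line for 50 microseconds and then waits a further 1.5 seconds -- a
+   full parallel bus reset, which is why it can disturb whatever else is
+   attached.)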
+ +*/ + +/* Changes: + + 1.01 GRG 1998.05.06 init_proto, release_proto + fix chip detect + added EPP-16 and EPP-32 + 1.02 GRG 1998.09.23 added hard reset to initialisation process + 1.03 GRG 1998.12.14 made hard reset conditional + +*/ + +#define FRPW_VERSION "1.03" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <asm/io.h> + +#include "paride.h" + +#define cec4 w2(0xc);w2(0xe);w2(0xe);w2(0xc);w2(4);w2(4);w2(4); +#define j44(l,h) (((l>>4)&0x0f)|(h&0xf0)) + +/* cont = 0 - access the IDE register file + cont = 1 - access the IDE command set +*/ + +static int cont_map[2] = { 0x08, 0x10 }; + +static int frpw_read_regr( PIA *pi, int cont, int regr ) + +{ int h,l,r; + + r = regr + cont_map[cont]; + + w2(4); + w0(r); cec4; + w2(6); l = r1(); + w2(4); h = r1(); + w2(4); + + return j44(l,h); + +} + +static void frpw_write_regr( PIA *pi, int cont, int regr, int val) + +{ int r; + + r = regr + cont_map[cont]; + + w2(4); w0(r); cec4; + w0(val); + w2(5);w2(7);w2(5);w2(4); +} + +static void frpw_read_block_int( PIA *pi, char * buf, int count, int regr ) + +{ int h, l, k, ph; + + switch(pi->mode) { + + case 0: w2(4); w0(regr); cec4; + for (k=0;k<count;k++) { + w2(6); l = r1(); + w2(4); h = r1(); + buf[k] = j44(l,h); + } + w2(4); + break; + + case 1: ph = 2; + w2(4); w0(regr + 0xc0); cec4; + w0(0xff); + for (k=0;k<count;k++) { + w2(0xa4 + ph); + buf[k] = r0(); + ph = 2 - ph; + } + w2(0xac); w2(0xa4); w2(4); + break; + + case 2: w2(4); w0(regr + 0x80); cec4; + for (k=0;k<count;k++) buf[k] = r4(); + w2(0xac); w2(0xa4); + w2(4); + break; + + case 3: w2(4); w0(regr + 0x80); cec4; + for (k=0;k<count-2;k++) buf[k] = r4(); + w2(0xac); w2(0xa4); + buf[count-2] = r4(); + buf[count-1] = r4(); + w2(4); + break; + + case 4: w2(4); w0(regr + 0x80); cec4; + for (k=0;k<(count/2)-1;k++) ((u16 *)buf)[k] = r4w(); + w2(0xac); w2(0xa4); + buf[count-2] = r4(); + buf[count-1] = r4(); + w2(4); + break; + + case 5: w2(4); w0(regr + 0x80); cec4; + for (k=0;k<(count/4)-1;k++) ((u32 *)buf)[k] = r4l(); + buf[count-4] = r4(); + buf[count-3] = r4(); + w2(0xac); w2(0xa4); + buf[count-2] = r4(); + buf[count-1] = r4(); + w2(4); + break; + + } +} + +static void frpw_read_block( PIA *pi, char * buf, int count) + +{ frpw_read_block_int(pi,buf,count,0x08); +} + +static void frpw_write_block( PIA *pi, char * buf, int count ) + +{ int k; + + switch(pi->mode) { + + case 0: + case 1: + case 2: w2(4); w0(8); cec4; w2(5); + for (k=0;k<count;k++) { + w0(buf[k]); + w2(7);w2(5); + } + w2(4); + break; + + case 3: w2(4); w0(0xc8); cec4; w2(5); + for (k=0;k<count;k++) w4(buf[k]); + w2(4); + break; + + case 4: w2(4); w0(0xc8); cec4; w2(5); + for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]); + w2(4); + break; + + case 5: w2(4); w0(0xc8); cec4; w2(5); + for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]); + w2(4); + break; + } +} + +static void frpw_connect ( PIA *pi ) + +{ pi->saved_r0 = r0(); + pi->saved_r2 = r2(); + w2(4); +} + +static void frpw_disconnect ( PIA *pi ) + +{ w2(4); w0(0x20); cec4; + w0(pi->saved_r0); + w2(pi->saved_r2); +} + +/* Stub logic to see if PNP string is available - used to distinguish + between the Xilinx and ASIC implementations of the Freecom adapter. 
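+
+   The test samples status bit 0x40 under two control settings: the ASIC
+   reads the bit low and then high, the Xilinx does not, so the function
+   returns 1 for the ASIC and 0 for the Xilinx.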
+*/ + +static int frpw_test_pnp ( PIA *pi ) + +/* returns chip_type: 0 = Xilinx, 1 = ASIC */ + +{ int olddelay, a, b; + +#ifdef FRPW_HARD_RESET + w0(0); w2(8); udelay(50); w2(0xc); /* parallel bus reset */ + mdelay(1500); +#endif + + olddelay = pi->delay; + pi->delay = 10; + + pi->saved_r0 = r0(); + pi->saved_r2 = r2(); + + w2(4); w0(4); w2(6); w2(7); + a = r1() & 0xff; w2(4); b = r1() & 0xff; + w2(0xc); w2(0xe); w2(4); + + pi->delay = olddelay; + w0(pi->saved_r0); + w2(pi->saved_r2); + + return ((~a&0x40) && (b&0x40)); +} + +/* We use the pi->private to remember the result of the PNP test. + To make this work, private = port*2 + chip. Yes, I know it's + a hack :-( +*/ + +static int frpw_test_proto( PIA *pi, char * scratch, int verbose ) + +{ int j, k, r; + int e[2] = {0,0}; + + if ((pi->private>>1) != pi->port) + pi->private = frpw_test_pnp(pi) + 2*pi->port; + + if (((pi->private%2) == 0) && (pi->mode > 2)) { + if (verbose) + printk("%s: frpw: Xilinx does not support mode %d\n", + pi->device, pi->mode); + return 1; + } + + if (((pi->private%2) == 1) && (pi->mode == 2)) { + if (verbose) + printk("%s: frpw: ASIC does not support mode 2\n", + pi->device); + return 1; + } + + frpw_connect(pi); + for (j=0;j<2;j++) { + frpw_write_regr(pi,0,6,0xa0+j*0x10); + for (k=0;k<256;k++) { + frpw_write_regr(pi,0,2,k^0xaa); + frpw_write_regr(pi,0,3,k^0x55); + if (frpw_read_regr(pi,0,2) != (k^0xaa)) e[j]++; + } + } + frpw_disconnect(pi); + + frpw_connect(pi); + frpw_read_block_int(pi,scratch,512,0x10); + r = 0; + for (k=0;k<128;k++) if (scratch[k] != k) r++; + frpw_disconnect(pi); + + if (verbose) { + printk("%s: frpw: port 0x%x, chip %ld, mode %d, test=(%d,%d,%d)\n", + pi->device,pi->port,(pi->private%2),pi->mode,e[0],e[1],r); + } + + return (r || (e[0] && e[1])); +} + + +static void frpw_log_adapter( PIA *pi, char * scratch, int verbose ) + +{ char *mode_string[6] = {"4-bit","8-bit","EPP", + "EPP-8","EPP-16","EPP-32"}; + + printk("%s: frpw %s, Freecom (%s) adapter at 0x%x, ", pi->device, + FRPW_VERSION,((pi->private%2) == 0)?"Xilinx":"ASIC",pi->port); + printk("mode %d (%s), delay %d\n",pi->mode, + mode_string[pi->mode],pi->delay); + +} + +static struct pi_protocol frpw = { + .owner = THIS_MODULE, + .name = "frpw", + .max_mode = 6, + .epp_first = 2, + .default_delay = 2, + .max_units = 1, + .write_regr = frpw_write_regr, + .read_regr = frpw_read_regr, + .write_block = frpw_write_block, + .read_block = frpw_read_block, + .connect = frpw_connect, + .disconnect = frpw_disconnect, + .test_proto = frpw_test_proto, + .log_adapter = frpw_log_adapter, +}; + +static int __init frpw_init(void) +{ + return paride_register(&frpw); +} + +static void __exit frpw_exit(void) +{ + paride_unregister(&frpw); +} + +MODULE_LICENSE("GPL"); +module_init(frpw_init) +module_exit(frpw_exit) diff --git a/drivers/block/paride/kbic.c b/drivers/block/paride/kbic.c new file mode 100644 index 0000000..35999c4 --- /dev/null +++ b/drivers/block/paride/kbic.c @@ -0,0 +1,305 @@ +/* + kbic.c (c) 1997-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + This is a low-level driver for the KBIC-951A and KBIC-971A + parallel to IDE adapter chips from KingByte Information Systems. + + The chips are almost identical, however, the wakeup code + required for the 971A interferes with the correct operation of + the 951A, so this driver registers itself twice, once for + each chip. 
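+
+   (The two registrations share every operation except connect and
+   disconnect: the 971A needs the CCP() wakeup sequence in k971_connect,
+   while the 951A simply saves and restores the port state.)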
+ +*/ + +/* Changes: + + 1.01 GRG 1998.05.06 init_proto, release_proto + +*/ + +#define KBIC_VERSION "1.01" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <asm/io.h> + +#include "paride.h" + +#define r12w() (delay_p,inw(pi->port+1)&0xffff) + +#define j44(a,b) ((((a>>4)&0x0f)|(b&0xf0))^0x88) +#define j53(w) (((w>>3)&0x1f)|((w>>4)&0xe0)) + + +/* cont = 0 - access the IDE register file + cont = 1 - access the IDE command set +*/ + +static int cont_map[2] = { 0x80, 0x40 }; + +static int kbic_read_regr( PIA *pi, int cont, int regr ) + +{ int a, b, s; + + s = cont_map[cont]; + + switch (pi->mode) { + + case 0: w0(regr|0x18|s); w2(4); w2(6); w2(4); w2(1); w0(8); + a = r1(); w0(0x28); b = r1(); w2(4); + return j44(a,b); + + case 1: w0(regr|0x38|s); w2(4); w2(6); w2(4); w2(5); w0(8); + a = r12w(); w2(4); + return j53(a); + + case 2: w0(regr|0x08|s); w2(4); w2(6); w2(4); w2(0xa5); w2(0xa1); + a = r0(); w2(4); + return a; + + case 3: + case 4: + case 5: w0(0x20|s); w2(4); w2(6); w2(4); w3(regr); + a = r4(); b = r4(); w2(4); w2(0); w2(4); + return a; + + } + return -1; +} + +static void kbic_write_regr( PIA *pi, int cont, int regr, int val) + +{ int s; + + s = cont_map[cont]; + + switch (pi->mode) { + + case 0: + case 1: + case 2: w0(regr|0x10|s); w2(4); w2(6); w2(4); + w0(val); w2(5); w2(4); + break; + + case 3: + case 4: + case 5: w0(0x20|s); w2(4); w2(6); w2(4); w3(regr); + w4(val); w4(val); + w2(4); w2(0); w2(4); + break; + + } +} + +static void k951_connect ( PIA *pi ) + +{ pi->saved_r0 = r0(); + pi->saved_r2 = r2(); + w2(4); +} + +static void k951_disconnect ( PIA *pi ) + +{ w0(pi->saved_r0); + w2(pi->saved_r2); +} + +#define CCP(x) w2(0xc4);w0(0xaa);w0(0x55);w0(0);w0(0xff);w0(0x87);\ + w0(0x78);w0(x);w2(0xc5);w2(0xc4);w0(0xff); + +static void k971_connect ( PIA *pi ) + +{ pi->saved_r0 = r0(); + pi->saved_r2 = r2(); + CCP(0x20); + w2(4); +} + +static void k971_disconnect ( PIA *pi ) + +{ CCP(0x30); + w0(pi->saved_r0); + w2(pi->saved_r2); +} + +/* counts must be congruent to 0 MOD 4, but all known applications + have this property. 
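+   (Mode 1 moves four bytes per loop iteration and modes 4 and 5 transfer
+   16 and 32 bits at a time, so a count with an odd tail would be lost.)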
+*/ + +static void kbic_read_block( PIA *pi, char * buf, int count ) + +{ int k, a, b; + + switch (pi->mode) { + + case 0: w0(0x98); w2(4); w2(6); w2(4); + for (k=0;k<count/2;k++) { + w2(1); w0(8); a = r1(); + w0(0x28); b = r1(); + buf[2*k] = j44(a,b); + w2(5); b = r1(); + w0(8); a = r1(); + buf[2*k+1] = j44(a,b); + w2(4); + } + break; + + case 1: w0(0xb8); w2(4); w2(6); w2(4); + for (k=0;k<count/4;k++) { + w0(0xb8); + w2(4); w2(5); + w0(8); buf[4*k] = j53(r12w()); + w0(0xb8); buf[4*k+1] = j53(r12w()); + w2(4); w2(5); + buf[4*k+3] = j53(r12w()); + w0(8); buf[4*k+2] = j53(r12w()); + } + w2(4); + break; + + case 2: w0(0x88); w2(4); w2(6); w2(4); + for (k=0;k<count/2;k++) { + w2(0xa0); w2(0xa1); buf[2*k] = r0(); + w2(0xa5); buf[2*k+1] = r0(); + } + w2(4); + break; + + case 3: w0(0xa0); w2(4); w2(6); w2(4); w3(0); + for (k=0;k<count;k++) buf[k] = r4(); + w2(4); w2(0); w2(4); + break; + + case 4: w0(0xa0); w2(4); w2(6); w2(4); w3(0); + for (k=0;k<count/2;k++) ((u16 *)buf)[k] = r4w(); + w2(4); w2(0); w2(4); + break; + + case 5: w0(0xa0); w2(4); w2(6); w2(4); w3(0); + for (k=0;k<count/4;k++) ((u32 *)buf)[k] = r4l(); + w2(4); w2(0); w2(4); + break; + + + } +} + +static void kbic_write_block( PIA *pi, char * buf, int count ) + +{ int k; + + switch (pi->mode) { + + case 0: + case 1: + case 2: w0(0x90); w2(4); w2(6); w2(4); + for(k=0;k<count/2;k++) { + w0(buf[2*k+1]); w2(0); w2(4); + w0(buf[2*k]); w2(5); w2(4); + } + break; + + case 3: w0(0xa0); w2(4); w2(6); w2(4); w3(0); + for(k=0;k<count/2;k++) { + w4(buf[2*k+1]); + w4(buf[2*k]); + } + w2(4); w2(0); w2(4); + break; + + case 4: w0(0xa0); w2(4); w2(6); w2(4); w3(0); + for(k=0;k<count/2;k++) w4w(pi_swab16(buf,k)); + w2(4); w2(0); w2(4); + break; + + case 5: w0(0xa0); w2(4); w2(6); w2(4); w3(0); + for(k=0;k<count/4;k++) w4l(pi_swab32(buf,k)); + w2(4); w2(0); w2(4); + break; + + } + +} + +static void kbic_log_adapter( PIA *pi, char * scratch, + int verbose, char * chip ) + +{ char *mode_string[6] = {"4-bit","5/3","8-bit", + "EPP-8","EPP_16","EPP-32"}; + + printk("%s: kbic %s, KingByte %s at 0x%x, ", + pi->device,KBIC_VERSION,chip,pi->port); + printk("mode %d (%s), delay %d\n",pi->mode, + mode_string[pi->mode],pi->delay); + +} + +static void k951_log_adapter( PIA *pi, char * scratch, int verbose ) + +{ kbic_log_adapter(pi,scratch,verbose,"KBIC-951A"); +} + +static void k971_log_adapter( PIA *pi, char * scratch, int verbose ) + +{ kbic_log_adapter(pi,scratch,verbose,"KBIC-971A"); +} + +static struct pi_protocol k951 = { + .owner = THIS_MODULE, + .name = "k951", + .max_mode = 6, + .epp_first = 3, + .default_delay = 1, + .max_units = 1, + .write_regr = kbic_write_regr, + .read_regr = kbic_read_regr, + .write_block = kbic_write_block, + .read_block = kbic_read_block, + .connect = k951_connect, + .disconnect = k951_disconnect, + .log_adapter = k951_log_adapter, +}; + +static struct pi_protocol k971 = { + .owner = THIS_MODULE, + .name = "k971", + .max_mode = 6, + .epp_first = 3, + .default_delay = 1, + .max_units = 1, + .write_regr = kbic_write_regr, + .read_regr = kbic_read_regr, + .write_block = kbic_write_block, + .read_block = kbic_read_block, + .connect = k971_connect, + .disconnect = k971_disconnect, + .log_adapter = k971_log_adapter, +}; + +static int __init kbic_init(void) +{ + int rv; + + rv = paride_register(&k951); + if (rv < 0) + return rv; + rv = paride_register(&k971); + if (rv < 0) + paride_unregister(&k951); + return rv; +} + +static void __exit kbic_exit(void) +{ + paride_unregister(&k951); + paride_unregister(&k971); +} + 
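+/* kbic_init above registers both chip variants and unwinds the first
+   registration when the second one fails, so an error never leaves the
+   module half-registered.  A sketch of the same unwind chain extended
+   to a hypothetical third variant "k972":
+
+       rv = paride_register(&k972);
+       if (rv < 0) {
+               paride_unregister(&k971);
+               paride_unregister(&k951);
+       }
+       return rv;
+*/
+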
+MODULE_LICENSE("GPL"); +module_init(kbic_init) +module_exit(kbic_exit) diff --git a/drivers/block/paride/ktti.c b/drivers/block/paride/ktti.c new file mode 100644 index 0000000..117ab0e --- /dev/null +++ b/drivers/block/paride/ktti.c @@ -0,0 +1,128 @@ +/* + ktti.c (c) 1998 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + ktti.c is a low-level protocol driver for the KT Technology + parallel port adapter. This adapter is used in the "PHd" + portable hard-drives. As far as I can tell, this device + supports 4-bit mode _only_. + +*/ + +#define KTTI_VERSION "1.0" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <asm/io.h> + +#include "paride.h" + +#define j44(a,b) (((a>>4)&0x0f)|(b&0xf0)) + +/* cont = 0 - access the IDE register file + cont = 1 - access the IDE command set +*/ + +static int cont_map[2] = { 0x10, 0x08 }; + +static void ktti_write_regr( PIA *pi, int cont, int regr, int val) + +{ int r; + + r = regr + cont_map[cont]; + + w0(r); w2(0xb); w2(0xa); w2(3); w2(6); + w0(val); w2(3); w0(0); w2(6); w2(0xb); +} + +static int ktti_read_regr( PIA *pi, int cont, int regr ) + +{ int a, b, r; + + r = regr + cont_map[cont]; + + w0(r); w2(0xb); w2(0xa); w2(9); w2(0xc); w2(9); + a = r1(); w2(0xc); b = r1(); w2(9); w2(0xc); w2(9); + return j44(a,b); + +} + +static void ktti_read_block( PIA *pi, char * buf, int count ) + +{ int k, a, b; + + for (k=0;k<count/2;k++) { + w0(0x10); w2(0xb); w2(0xa); w2(9); w2(0xc); w2(9); + a = r1(); w2(0xc); b = r1(); w2(9); + buf[2*k] = j44(a,b); + a = r1(); w2(0xc); b = r1(); w2(9); + buf[2*k+1] = j44(a,b); + } +} + +static void ktti_write_block( PIA *pi, char * buf, int count ) + +{ int k; + + for (k=0;k<count/2;k++) { + w0(0x10); w2(0xb); w2(0xa); w2(3); w2(6); + w0(buf[2*k]); w2(3); + w0(buf[2*k+1]); w2(6); + w2(0xb); + } +} + +static void ktti_connect ( PIA *pi ) + +{ pi->saved_r0 = r0(); + pi->saved_r2 = r2(); + w2(0xb); w2(0xa); w0(0); w2(3); w2(6); +} + +static void ktti_disconnect ( PIA *pi ) + +{ w2(0xb); w2(0xa); w0(0xa0); w2(3); w2(4); + w0(pi->saved_r0); + w2(pi->saved_r2); +} + +static void ktti_log_adapter( PIA *pi, char * scratch, int verbose ) + +{ printk("%s: ktti %s, KT adapter at 0x%x, delay %d\n", + pi->device,KTTI_VERSION,pi->port,pi->delay); + +} + +static struct pi_protocol ktti = { + .owner = THIS_MODULE, + .name = "ktti", + .max_mode = 1, + .epp_first = 2, + .default_delay = 1, + .max_units = 1, + .write_regr = ktti_write_regr, + .read_regr = ktti_read_regr, + .write_block = ktti_write_block, + .read_block = ktti_read_block, + .connect = ktti_connect, + .disconnect = ktti_disconnect, + .log_adapter = ktti_log_adapter, +}; + +static int __init ktti_init(void) +{ + return paride_register(&ktti); +} + +static void __exit ktti_exit(void) +{ + paride_unregister(&ktti); +} + +MODULE_LICENSE("GPL"); +module_init(ktti_init) +module_exit(ktti_exit) diff --git a/drivers/block/paride/mkd b/drivers/block/paride/mkd new file mode 100644 index 0000000..971f099 --- /dev/null +++ b/drivers/block/paride/mkd @@ -0,0 +1,30 @@ +#!/bin/bash +# +# mkd -- a script to create the device special files for the PARIDE subsystem +# +# block devices: pd (45), pcd (46), pf (47) +# character devices: pt (96), pg (97) +# +function mkdev { + mknod $1 $2 $3 $4 ; chmod 0660 $1 ; chown root:disk $1 +} +# +function pd { + D=$( printf \\$( printf "x%03x" $[ $1 + 97 ] ) ) + mkdev pd$D b 45 $[ $1 * 16 ] + for P in 1 2 3 
4 5 6 7 8 9 10 11 12 13 14 15 + do mkdev pd$D$P b 45 $[ $1 * 16 + $P ] + done +} +# +cd /dev +# +for u in 0 1 2 3 ; do pd $u ; done +for u in 0 1 2 3 ; do mkdev pcd$u b 46 $u ; done +for u in 0 1 2 3 ; do mkdev pf$u b 47 $u ; done +for u in 0 1 2 3 ; do mkdev pt$u c 96 $u ; done +for u in 0 1 2 3 ; do mkdev npt$u c 96 $[ $u + 128 ] ; done +for u in 0 1 2 3 ; do mkdev pg$u c 97 $u ; done +# +# end of mkd + diff --git a/drivers/block/paride/on20.c b/drivers/block/paride/on20.c new file mode 100644 index 0000000..0173697 --- /dev/null +++ b/drivers/block/paride/on20.c @@ -0,0 +1,153 @@ +/* + on20.c (c) 1996-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + on20.c is a low-level protocol driver for the + Onspec 90c20 parallel to IDE adapter. +*/ + +/* Changes: + + 1.01 GRG 1998.05.06 init_proto, release_proto + +*/ + +#define ON20_VERSION "1.01" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <asm/io.h> + +#include "paride.h" + +#define op(f) w2(4);w0(f);w2(5);w2(0xd);w2(5);w2(0xd);w2(5);w2(4); +#define vl(v) w2(4);w0(v);w2(5);w2(7);w2(5);w2(4); + +#define j44(a,b) (((a>>4)&0x0f)|(b&0xf0)) + +/* cont = 0 - access the IDE register file + cont = 1 - access the IDE command set +*/ + +static int on20_read_regr( PIA *pi, int cont, int regr ) + +{ int h,l, r ; + + r = (regr<<2) + 1 + cont; + + op(1); vl(r); op(0); + + switch (pi->mode) { + + case 0: w2(4); w2(6); l = r1(); + w2(4); w2(6); h = r1(); + w2(4); w2(6); w2(4); w2(6); w2(4); + return j44(l,h); + + case 1: w2(4); w2(0x26); r = r0(); + w2(4); w2(0x26); w2(4); + return r; + + } + return -1; +} + +static void on20_write_regr( PIA *pi, int cont, int regr, int val ) + +{ int r; + + r = (regr<<2) + 1 + cont; + + op(1); vl(r); + op(0); vl(val); + op(0); vl(val); +} + +static void on20_connect ( PIA *pi) + +{ pi->saved_r0 = r0(); + pi->saved_r2 = r2(); + + w2(4);w0(0);w2(0xc);w2(4);w2(6);w2(4);w2(6);w2(4); + if (pi->mode) { op(2); vl(8); op(2); vl(9); } + else { op(2); vl(0); op(2); vl(8); } +} + +static void on20_disconnect ( PIA *pi ) + +{ w2(4);w0(7);w2(4);w2(0xc);w2(4); + w0(pi->saved_r0); + w2(pi->saved_r2); +} + +static void on20_read_block( PIA *pi, char * buf, int count ) + +{ int k, l, h; + + op(1); vl(1); op(0); + + for (k=0;k<count;k++) + if (pi->mode) { + w2(4); w2(0x26); buf[k] = r0(); + } else { + w2(6); l = r1(); w2(4); + w2(6); h = r1(); w2(4); + buf[k] = j44(l,h); + } + w2(4); +} + +static void on20_write_block( PIA *pi, char * buf, int count ) + +{ int k; + + op(1); vl(1); op(0); + + for (k=0;k<count;k++) { w2(5); w0(buf[k]); w2(7); } + w2(4); +} + +static void on20_log_adapter( PIA *pi, char * scratch, int verbose ) + +{ char *mode_string[2] = {"4-bit","8-bit"}; + + printk("%s: on20 %s, OnSpec 90c20 at 0x%x, ", + pi->device,ON20_VERSION,pi->port); + printk("mode %d (%s), delay %d\n",pi->mode, + mode_string[pi->mode],pi->delay); + +} + +static struct pi_protocol on20 = { + .owner = THIS_MODULE, + .name = "on20", + .max_mode = 2, + .epp_first = 2, + .default_delay = 1, + .max_units = 1, + .write_regr = on20_write_regr, + .read_regr = on20_read_regr, + .write_block = on20_write_block, + .read_block = on20_read_block, + .connect = on20_connect, + .disconnect = on20_disconnect, + .log_adapter = on20_log_adapter, +}; + +static int __init on20_init(void) +{ + return paride_register(&on20); +} + +static void __exit on20_exit(void) +{ + paride_unregister(&on20); +} + 
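+/* Register addressing sketch, derived from the routines above:
+   on20_read_regr() and on20_write_regr() build the 90c20 address byte
+   as (regr << 2) + 1 + cont, so the IDE status register (regr 7,
+   cont 0) is selected by
+
+       op(1); vl(0x1d); op(0);
+
+   with 0x1d == (7 << 2) + 1; the data then comes back as two nibbles
+   (mode 0) or one full byte (mode 1). */
+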
+MODULE_LICENSE("GPL"); +module_init(on20_init) +module_exit(on20_exit) diff --git a/drivers/block/paride/on26.c b/drivers/block/paride/on26.c new file mode 100644 index 0000000..95ba256 --- /dev/null +++ b/drivers/block/paride/on26.c @@ -0,0 +1,319 @@ +/* + on26.c (c) 1997-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + on26.c is a low-level protocol driver for the + OnSpec 90c26 parallel to IDE adapter chip. + +*/ + +/* Changes: + + 1.01 GRG 1998.05.06 init_proto, release_proto + 1.02 GRG 1998.09.23 updates for the -E rev chip + 1.03 GRG 1998.12.14 fix for slave drives + 1.04 GRG 1998.12.20 yet another bug fix + +*/ + +#define ON26_VERSION "1.04" + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <asm/io.h> + +#include "paride.h" + +/* mode codes: 0 nybble reads, 8-bit writes + 1 8-bit reads and writes + 2 8-bit EPP mode + 3 EPP-16 + 4 EPP-32 +*/ + +#define j44(a,b) (((a>>4)&0x0f)|(b&0xf0)) + +#define P1 w2(5);w2(0xd);w2(5);w2(0xd);w2(5);w2(4); +#define P2 w2(5);w2(7);w2(5);w2(4); + +/* cont = 0 - access the IDE register file + cont = 1 - access the IDE command set +*/ + +static int on26_read_regr( PIA *pi, int cont, int regr ) + +{ int a, b, r; + + r = (regr<<2) + 1 + cont; + + switch (pi->mode) { + + case 0: w0(1); P1; w0(r); P2; w0(0); P1; + w2(6); a = r1(); w2(4); + w2(6); b = r1(); w2(4); + w2(6); w2(4); w2(6); w2(4); + return j44(a,b); + + case 1: w0(1); P1; w0(r); P2; w0(0); P1; + w2(0x26); a = r0(); w2(4); w2(0x26); w2(4); + return a; + + case 2: + case 3: + case 4: w3(1); w3(1); w2(5); w4(r); w2(4); + w3(0); w3(0); w2(0x24); a = r4(); w2(4); + w2(0x24); (void)r4(); w2(4); + return a; + + } + return -1; +} + +static void on26_write_regr( PIA *pi, int cont, int regr, int val ) + +{ int r; + + r = (regr<<2) + 1 + cont; + + switch (pi->mode) { + + case 0: + case 1: w0(1); P1; w0(r); P2; w0(0); P1; + w0(val); P2; w0(val); P2; + break; + + case 2: + case 3: + case 4: w3(1); w3(1); w2(5); w4(r); w2(4); + w3(0); w3(0); + w2(5); w4(val); w2(4); + w2(5); w4(val); w2(4); + break; + } +} + +#define CCP(x) w0(0xfe);w0(0xaa);w0(0x55);w0(0);w0(0xff);\ + w0(0x87);w0(0x78);w0(x);w2(4);w2(5);w2(4);w0(0xff); + +static void on26_connect ( PIA *pi ) + +{ int x; + + pi->saved_r0 = r0(); + pi->saved_r2 = r2(); + + CCP(0x20); + x = 8; if (pi->mode) x = 9; + + w0(2); P1; w0(8); P2; + w0(2); P1; w0(x); P2; +} + +static void on26_disconnect ( PIA *pi ) + +{ if (pi->mode >= 2) { w3(4); w3(4); w3(4); w3(4); } + else { w0(4); P1; w0(4); P1; } + CCP(0x30); + w0(pi->saved_r0); + w2(pi->saved_r2); +} + +#define RESET_WAIT 200 + +static int on26_test_port( PIA *pi) /* hard reset */ + +{ int i, m, d, x=0, y=0; + + pi->saved_r0 = r0(); + pi->saved_r2 = r2(); + + d = pi->delay; + m = pi->mode; + pi->delay = 5; + pi->mode = 0; + + w2(0xc); + + CCP(0x30); CCP(0); + + w0(0xfe);w0(0xaa);w0(0x55);w0(0);w0(0xff); + i = ((r1() & 0xf0) << 4); w0(0x87); + i |= (r1() & 0xf0); w0(0x78); + w0(0x20);w2(4);w2(5); + i |= ((r1() & 0xf0) >> 4); + w2(4);w0(0xff); + + if (i == 0xb5f) { + + w0(2); P1; w0(0); P2; + w0(3); P1; w0(0); P2; + w0(2); P1; w0(8); P2; udelay(100); + w0(2); P1; w0(0xa); P2; udelay(100); + w0(2); P1; w0(8); P2; udelay(1000); + + on26_write_regr(pi,0,6,0xa0); + + for (i=0;i<RESET_WAIT;i++) { + on26_write_regr(pi,0,6,0xa0); + x = on26_read_regr(pi,0,7); + on26_write_regr(pi,0,6,0xb0); + y = on26_read_regr(pi,0,7); + if (!((x&0x80)||(y&0x80))) break; + 
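/* BSY (0x80) still set on the master or the slave; allow more time */ +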
mdelay(100); + } + + if (i == RESET_WAIT) + printk("on26: Device reset failed (%x,%x)\n",x,y); + + w0(4); P1; w0(4); P1; + } + + CCP(0x30); + + pi->delay = d; + pi->mode = m; + w0(pi->saved_r0); + w2(pi->saved_r2); + + return 5; +} + + +static void on26_read_block( PIA *pi, char * buf, int count ) + +{ int k, a, b; + + switch (pi->mode) { + + case 0: w0(1); P1; w0(1); P2; w0(2); P1; w0(0x18); P2; w0(0); P1; + udelay(10); + for (k=0;k<count;k++) { + w2(6); a = r1(); + w2(4); b = r1(); + buf[k] = j44(a,b); + } + w0(2); P1; w0(8); P2; + break; + + case 1: w0(1); P1; w0(1); P2; w0(2); P1; w0(0x19); P2; w0(0); P1; + udelay(10); + for (k=0;k<count/2;k++) { + w2(0x26); buf[2*k] = r0(); + w2(0x24); buf[2*k+1] = r0(); + } + w0(2); P1; w0(9); P2; + break; + + case 2: w3(1); w3(1); w2(5); w4(1); w2(4); + w3(0); w3(0); w2(0x24); + udelay(10); + for (k=0;k<count;k++) buf[k] = r4(); + w2(4); + break; + + case 3: w3(1); w3(1); w2(5); w4(1); w2(4); + w3(0); w3(0); w2(0x24); + udelay(10); + for (k=0;k<count/2;k++) ((u16 *)buf)[k] = r4w(); + w2(4); + break; + + case 4: w3(1); w3(1); w2(5); w4(1); w2(4); + w3(0); w3(0); w2(0x24); + udelay(10); + for (k=0;k<count/4;k++) ((u32 *)buf)[k] = r4l(); + w2(4); + break; + + } +} + +static void on26_write_block( PIA *pi, char * buf, int count ) + +{ int k; + + switch (pi->mode) { + + case 0: + case 1: w0(1); P1; w0(1); P2; + w0(2); P1; w0(0x18+pi->mode); P2; w0(0); P1; + udelay(10); + for (k=0;k<count/2;k++) { + w2(5); w0(buf[2*k]); + w2(7); w0(buf[2*k+1]); + } + w2(5); w2(4); + w0(2); P1; w0(8+pi->mode); P2; + break; + + case 2: w3(1); w3(1); w2(5); w4(1); w2(4); + w3(0); w3(0); w2(0xc5); + udelay(10); + for (k=0;k<count;k++) w4(buf[k]); + w2(0xc4); + break; + + case 3: w3(1); w3(1); w2(5); w4(1); w2(4); + w3(0); w3(0); w2(0xc5); + udelay(10); + for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]); + w2(0xc4); + break; + + case 4: w3(1); w3(1); w2(5); w4(1); w2(4); + w3(0); w3(0); w2(0xc5); + udelay(10); + for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]); + w2(0xc4); + break; + + } + +} + +static void on26_log_adapter( PIA *pi, char * scratch, int verbose ) + +{ char *mode_string[5] = {"4-bit","8-bit","EPP-8", + "EPP-16","EPP-32"}; + + printk("%s: on26 %s, OnSpec 90c26 at 0x%x, ", + pi->device,ON26_VERSION,pi->port); + printk("mode %d (%s), delay %d\n",pi->mode, + mode_string[pi->mode],pi->delay); + +} + +static struct pi_protocol on26 = { + .owner = THIS_MODULE, + .name = "on26", + .max_mode = 5, + .epp_first = 2, + .default_delay = 1, + .max_units = 1, + .write_regr = on26_write_regr, + .read_regr = on26_read_regr, + .write_block = on26_write_block, + .read_block = on26_read_block, + .connect = on26_connect, + .disconnect = on26_disconnect, + .test_port = on26_test_port, + .log_adapter = on26_log_adapter, +}; + +static int __init on26_init(void) +{ + return paride_register(&on26); +} + +static void __exit on26_exit(void) +{ + paride_unregister(&on26); +} + +MODULE_LICENSE("GPL"); +module_init(on26_init) +module_exit(on26_exit) diff --git a/drivers/block/paride/paride.c b/drivers/block/paride/paride.c new file mode 100644 index 0000000..48c50f1 --- /dev/null +++ b/drivers/block/paride/paride.c @@ -0,0 +1,434 @@ +/* + paride.c (c) 1997-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + This is the base module for the family of device drivers + that support parallel port IDE devices. 
+ +*/ + +/* Changes: + + 1.01 GRG 1998.05.03 Use spinlocks + 1.02 GRG 1998.05.05 init_proto, release_proto, ktti + 1.03 GRG 1998.08.15 eliminate compiler warning + 1.04 GRG 1998.11.28 added support for FRIQ + 1.05 TMW 2000.06.06 use parport_find_number instead of + parport_enumerate + 1.06 TMW 2001.03.26 more sane parport-or-not resource management +*/ + +#define PI_VERSION "1.06" + +#include <linux/module.h> +#include <linux/kmod.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/ioport.h> +#include <linux/string.h> +#include <linux/spinlock.h> +#include <linux/wait.h> +#include <linux/sched.h> /* TASK_* */ +#include <linux/parport.h> + +#include "paride.h" + +MODULE_LICENSE("GPL"); + +#define MAX_PROTOS 32 + +static struct pi_protocol *protocols[MAX_PROTOS]; + +static DEFINE_SPINLOCK(pi_spinlock); + +void pi_write_regr(PIA * pi, int cont, int regr, int val) +{ + pi->proto->write_regr(pi, cont, regr, val); +} + +EXPORT_SYMBOL(pi_write_regr); + +int pi_read_regr(PIA * pi, int cont, int regr) +{ + return pi->proto->read_regr(pi, cont, regr); +} + +EXPORT_SYMBOL(pi_read_regr); + +void pi_write_block(PIA * pi, char *buf, int count) +{ + pi->proto->write_block(pi, buf, count); +} + +EXPORT_SYMBOL(pi_write_block); + +void pi_read_block(PIA * pi, char *buf, int count) +{ + pi->proto->read_block(pi, buf, count); +} + +EXPORT_SYMBOL(pi_read_block); + +static void pi_wake_up(void *p) +{ + PIA *pi = (PIA *) p; + unsigned long flags; + void (*cont) (void) = NULL; + + spin_lock_irqsave(&pi_spinlock, flags); + + if (pi->claim_cont && !parport_claim(pi->pardev)) { + cont = pi->claim_cont; + pi->claim_cont = NULL; + pi->claimed = 1; + } + + spin_unlock_irqrestore(&pi_spinlock, flags); + + wake_up(&(pi->parq)); + + if (cont) + cont(); +} + +int pi_schedule_claimed(PIA * pi, void (*cont) (void)) +{ + unsigned long flags; + + spin_lock_irqsave(&pi_spinlock, flags); + if (pi->pardev && parport_claim(pi->pardev)) { + pi->claim_cont = cont; + spin_unlock_irqrestore(&pi_spinlock, flags); + return 0; + } + pi->claimed = 1; + spin_unlock_irqrestore(&pi_spinlock, flags); + return 1; +} +EXPORT_SYMBOL(pi_schedule_claimed); + +void pi_do_claimed(PIA * pi, void (*cont) (void)) +{ + if (pi_schedule_claimed(pi, cont)) + cont(); +} + +EXPORT_SYMBOL(pi_do_claimed); + +static void pi_claim(PIA * pi) +{ + if (pi->claimed) + return; + pi->claimed = 1; + if (pi->pardev) + wait_event(pi->parq, + !parport_claim((struct pardevice *) pi->pardev)); +} + +static void pi_unclaim(PIA * pi) +{ + pi->claimed = 0; + if (pi->pardev) + parport_release((struct pardevice *) (pi->pardev)); +} + +void pi_connect(PIA * pi) +{ + pi_claim(pi); + pi->proto->connect(pi); +} + +EXPORT_SYMBOL(pi_connect); + +void pi_disconnect(PIA * pi) +{ + pi->proto->disconnect(pi); + pi_unclaim(pi); +} + +EXPORT_SYMBOL(pi_disconnect); + +static void pi_unregister_parport(PIA * pi) +{ + if (pi->pardev) { + parport_unregister_device((struct pardevice *) (pi->pardev)); + pi->pardev = NULL; + } +} + +void pi_release(PIA * pi) +{ + pi_unregister_parport(pi); + if (pi->proto->release_proto) + pi->proto->release_proto(pi); + module_put(pi->proto->owner); +} + +EXPORT_SYMBOL(pi_release); + +static int default_test_proto(PIA * pi, char *scratch, int verbose) +{ + int j, k; + int e[2] = { 0, 0 }; + + pi->proto->connect(pi); + + for (j = 0; j < 2; j++) { + pi_write_regr(pi, 0, 6, 0xa0 + j * 0x10); + for (k = 0; k < 256; k++) { + pi_write_regr(pi, 0, 2, k ^ 0xaa); + pi_write_regr(pi, 0, 3, k ^ 0x55); + if (pi_read_regr(pi, 0, 2) != (k ^ 0xaa)) + 
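/* readback mismatch; count an error for this drive select */ +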
e[j]++; + } + } + pi->proto->disconnect(pi); + + if (verbose) + printk("%s: %s: port 0x%x, mode %d, test=(%d,%d)\n", + pi->device, pi->proto->name, pi->port, + pi->mode, e[0], e[1]); + + return (e[0] && e[1]); /* not here if both > 0 */ +} + +static int pi_test_proto(PIA * pi, char *scratch, int verbose) +{ + int res; + + pi_claim(pi); + if (pi->proto->test_proto) + res = pi->proto->test_proto(pi, scratch, verbose); + else + res = default_test_proto(pi, scratch, verbose); + pi_unclaim(pi); + + return res; +} + +int paride_register(PIP * pr) +{ + int k; + + for (k = 0; k < MAX_PROTOS; k++) + if (protocols[k] && !strcmp(pr->name, protocols[k]->name)) { + printk("paride: %s protocol already registered\n", + pr->name); + return -1; + } + k = 0; + while ((k < MAX_PROTOS) && (protocols[k])) + k++; + if (k == MAX_PROTOS) { + printk("paride: protocol table full\n"); + return -1; + } + protocols[k] = pr; + pr->index = k; + printk("paride: %s registered as protocol %d\n", pr->name, k); + return 0; +} + +EXPORT_SYMBOL(paride_register); + +void paride_unregister(PIP * pr) +{ + if (!pr) + return; + if (protocols[pr->index] != pr) { + printk("paride: %s not registered\n", pr->name); + return; + } + protocols[pr->index] = NULL; +} + +EXPORT_SYMBOL(paride_unregister); + +static int pi_register_parport(PIA * pi, int verbose) +{ + struct parport *port; + + port = parport_find_base(pi->port); + if (!port) + return 0; + + pi->pardev = parport_register_device(port, + pi->device, NULL, + pi_wake_up, NULL, 0, (void *) pi); + parport_put_port(port); + if (!pi->pardev) + return 0; + + init_waitqueue_head(&pi->parq); + + if (verbose) + printk("%s: 0x%x is %s\n", pi->device, pi->port, port->name); + + pi->parname = (char *) port->name; + + return 1; +} + +static int pi_probe_mode(PIA * pi, int max, char *scratch, int verbose) +{ + int best, range; + + if (pi->mode != -1) { + if (pi->mode >= max) + return 0; + range = 3; + if (pi->mode >= pi->proto->epp_first) + range = 8; + if ((range == 8) && (pi->port % 8)) + return 0; + pi->reserved = range; + return (!pi_test_proto(pi, scratch, verbose)); + } + best = -1; + for (pi->mode = 0; pi->mode < max; pi->mode++) { + range = 3; + if (pi->mode >= pi->proto->epp_first) + range = 8; + if ((range == 8) && (pi->port % 8)) + break; + pi->reserved = range; + if (!pi_test_proto(pi, scratch, verbose)) + best = pi->mode; + } + pi->mode = best; + return (best > -1); +} + +static int pi_probe_unit(PIA * pi, int unit, char *scratch, int verbose) +{ + int max, s, e; + + s = unit; + e = s + 1; + + if (s == -1) { + s = 0; + e = pi->proto->max_units; + } + + if (!pi_register_parport(pi, verbose)) + return 0; + + if (pi->proto->test_port) { + pi_claim(pi); + max = pi->proto->test_port(pi); + pi_unclaim(pi); + } else + max = pi->proto->max_mode; + + if (pi->proto->probe_unit) { + pi_claim(pi); + for (pi->unit = s; pi->unit < e; pi->unit++) + if (pi->proto->probe_unit(pi)) { + pi_unclaim(pi); + if (pi_probe_mode(pi, max, scratch, verbose)) + return 1; + pi_unregister_parport(pi); + return 0; + } + pi_unclaim(pi); + pi_unregister_parport(pi); + return 0; + } + + if (!pi_probe_mode(pi, max, scratch, verbose)) { + pi_unregister_parport(pi); + return 0; + } + return 1; + +} + +int pi_init(PIA * pi, int autoprobe, int port, int mode, + int unit, int protocol, int delay, char *scratch, + int devtype, int verbose, char *device) +{ + int p, k, s, e; + int lpts[7] = { 0x3bc, 0x378, 0x278, 0x268, 0x27c, 0x26c, 0 }; + + s = protocol; + e = s + 1; + + if (!protocols[0]) + 
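/* no protocol modules registered yet; try to load them on demand */ +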
request_module("paride_protocol"); + + if (autoprobe) { + s = 0; + e = MAX_PROTOS; + } else if ((s < 0) || (s >= MAX_PROTOS) || (port <= 0) || + (!protocols[s]) || (unit < 0) || + (unit >= protocols[s]->max_units)) { + printk("%s: Invalid parameters\n", device); + return 0; + } + + for (p = s; p < e; p++) { + struct pi_protocol *proto = protocols[p]; + if (!proto) + continue; + /* still racy */ + if (!try_module_get(proto->owner)) + continue; + pi->proto = proto; + pi->private = 0; + if (proto->init_proto && proto->init_proto(pi) < 0) { + pi->proto = NULL; + module_put(proto->owner); + continue; + } + if (delay == -1) + pi->delay = pi->proto->default_delay; + else + pi->delay = delay; + pi->devtype = devtype; + pi->device = device; + + pi->parname = NULL; + pi->pardev = NULL; + init_waitqueue_head(&pi->parq); + pi->claimed = 0; + pi->claim_cont = NULL; + + pi->mode = mode; + if (port != -1) { + pi->port = port; + if (pi_probe_unit(pi, unit, scratch, verbose)) + break; + pi->port = 0; + } else { + k = 0; + while ((pi->port = lpts[k++])) + if (pi_probe_unit + (pi, unit, scratch, verbose)) + break; + if (pi->port) + break; + } + if (pi->proto->release_proto) + pi->proto->release_proto(pi); + module_put(proto->owner); + } + + if (!pi->port) { + if (autoprobe) + printk("%s: Autoprobe failed\n", device); + else + printk("%s: Adapter not found\n", device); + return 0; + } + + if (pi->parname) + printk("%s: Sharing %s at 0x%x\n", pi->device, + pi->parname, pi->port); + + pi->proto->log_adapter(pi, scratch, verbose); + + return 1; +} + +EXPORT_SYMBOL(pi_init); diff --git a/drivers/block/paride/paride.h b/drivers/block/paride/paride.h new file mode 100644 index 0000000..2bddbf4 --- /dev/null +++ b/drivers/block/paride/paride.h @@ -0,0 +1,170 @@ +#ifndef __DRIVERS_PARIDE_H__ +#define __DRIVERS_PARIDE_H__ + +/* + paride.h (c) 1997-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GPL. + + This file defines the interface between the high-level parallel + IDE device drivers (pd, pf, pcd, pt) and the adapter chips. + +*/ + +/* Changes: + + 1.01 GRG 1998.05.05 init_proto, release_proto +*/ + +#define PARIDE_H_VERSION "1.01" + +/* Some adapters need to know what kind of device they are in + + Values for devtype: +*/ + +#define PI_PD 0 /* IDE disk */ +#define PI_PCD 1 /* ATAPI CDrom */ +#define PI_PF 2 /* ATAPI disk */ +#define PI_PT 3 /* ATAPI tape */ +#define PI_PG 4 /* ATAPI generic */ + +/* The paride module contains no state, instead the drivers allocate + a pi_adapter data structure and pass it to paride in every operation. + +*/ + +struct pi_adapter { + + struct pi_protocol *proto; /* adapter protocol */ + int port; /* base address of parallel port */ + int mode; /* transfer mode in use */ + int delay; /* adapter delay setting */ + int devtype; /* device type: PI_PD etc. 
*/ + char *device; /* name of driver */ + int unit; /* unit number for chained adapters */ + int saved_r0; /* saved port state */ + int saved_r2; /* saved port state */ + int reserved; /* number of ports reserved */ + unsigned long private; /* for protocol module */ + + wait_queue_head_t parq; /* semaphore for parport sharing */ + void *pardev; /* pointer to pardevice */ + char *parname; /* parport name */ + int claimed; /* parport has already been claimed */ + void (*claim_cont)(void); /* continuation for parport wait */ +}; + +typedef struct pi_adapter PIA; + +/* functions exported by paride to the high level drivers */ + +extern int pi_init(PIA *pi, + int autoprobe, /* 1 to autoprobe */ + int port, /* base port address */ + int mode, /* -1 for autoprobe */ + int unit, /* unit number, if supported */ + int protocol, /* protocol to use */ + int delay, /* -1 to use adapter specific default */ + char * scratch, /* address of 512 byte buffer */ + int devtype, /* device type: PI_PD, PI_PCD, etc ... */ + int verbose, /* log verbose data while probing */ + char *device /* name of the driver */ + ); /* returns 0 on failure, 1 on success */ + +extern void pi_release(PIA *pi); + +/* registers are addressed as (cont,regr) + + cont: 0 for command register file, 1 for control register(s) + regr: 0-7 for register number. + +*/ + +extern void pi_write_regr(PIA *pi, int cont, int regr, int val); + +extern int pi_read_regr(PIA *pi, int cont, int regr); + +extern void pi_write_block(PIA *pi, char * buf, int count); + +extern void pi_read_block(PIA *pi, char * buf, int count); + +extern void pi_connect(PIA *pi); + +extern void pi_disconnect(PIA *pi); + +extern void pi_do_claimed(PIA *pi, void (*cont)(void)); +extern int pi_schedule_claimed(PIA *pi, void (*cont)(void)); + +/* macros and functions exported to the protocol modules */ + +#define delay_p (pi->delay?udelay(pi->delay):(void)0) +#define out_p(offs,byte) outb(byte,pi->port+offs); delay_p; +#define in_p(offs) (delay_p,inb(pi->port+offs)) + +#define w0(byte) {out_p(0,byte);} +#define r0() (in_p(0) & 0xff) +#define w1(byte) {out_p(1,byte);} +#define r1() (in_p(1) & 0xff) +#define w2(byte) {out_p(2,byte);} +#define r2() (in_p(2) & 0xff) +#define w3(byte) {out_p(3,byte);} +#define w4(byte) {out_p(4,byte);} +#define r4() (in_p(4) & 0xff) +#define w4w(data) {outw(data,pi->port+4); delay_p;} +#define w4l(data) {outl(data,pi->port+4); delay_p;} +#define r4w() (delay_p,inw(pi->port+4)&0xffff) +#define r4l() (delay_p,inl(pi->port+4)&0xffffffff) + +static inline u16 pi_swab16( char *b, int k) + +{ union { u16 u; char t[2]; } r; + + r.t[0]=b[2*k+1]; r.t[1]=b[2*k]; + return r.u; +} + +static inline u32 pi_swab32( char *b, int k) + +{ union { u32 u; char f[4]; } r; + + r.f[0]=b[4*k+1]; r.f[1]=b[4*k]; + r.f[2]=b[4*k+3]; r.f[3]=b[4*k+2]; + return r.u; +} + +struct pi_protocol { + + char name[8]; /* name for this protocol */ + int index; /* index into protocol table */ + + int max_mode; /* max mode number */ + int epp_first; /* modes >= this use 8 ports */ + + int default_delay; /* delay parameter if not specified */ + int max_units; /* max chained units probed for */ + + void (*write_regr)(PIA *,int,int,int); + int (*read_regr)(PIA *,int,int); + void (*write_block)(PIA *,char *,int); + void (*read_block)(PIA *,char *,int); + + void (*connect)(PIA *); + void (*disconnect)(PIA *); + + int (*test_port)(PIA *); + int (*probe_unit)(PIA *); + int (*test_proto)(PIA *,char *,int); + void (*log_adapter)(PIA *,char *,int); + + int (*init_proto)(PIA *); + void 
(*release_proto)(PIA *); + struct module *owner; +}; + +typedef struct pi_protocol PIP; + +extern int paride_register( PIP * ); +extern void paride_unregister ( PIP * ); + +#endif /* __DRIVERS_PARIDE_H__ */ +/* end of paride.h */ diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c new file mode 100644 index 0000000..e91d4b4 --- /dev/null +++ b/drivers/block/paride/pcd.c @@ -0,0 +1,971 @@ +/* + pcd.c (c) 1997-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + This is a high-level driver for parallel port ATAPI CD-ROM + drives based on chips supported by the paride module. + + By default, the driver will autoprobe for a single parallel + port ATAPI CD-ROM drive, but if their individual parameters are + specified, the driver can handle up to 4 drives. + + The behaviour of the pcd driver can be altered by setting + some parameters from the insmod command line. The following + parameters are adjustable: + + drive0 These four arguments can be arrays of + drive1 1-6 integers as follows: + drive2 + drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<dly> + + Where, + + <prt> is the base of the parallel port address for + the corresponding drive. (required) + + <pro> is the protocol number for the adapter that + supports this drive. These numbers are + logged by 'paride' when the protocol modules + are initialised. (0 if not given) + + <uni> for those adapters that support chained + devices, this is the unit selector for the + chain of devices on the given port. It should + be zero for devices that don't support chaining. + (0 if not given) + + <mod> this can be -1 to choose the best mode, or one + of the mode numbers supported by the adapter. + (-1 if not given) + + <slv> ATAPI CD-ROMs can be jumpered to master or slave. + Set this to 0 to choose the master drive, 1 to + choose the slave, -1 (the default) to choose the + first drive found. + + <dly> some parallel ports require the driver to + go more slowly. -1 sets a default value that + should work with the chosen protocol. Otherwise, + set this to a small integer, the larger it is + the slower the port i/o. In some cases, setting + this to zero will speed up the device. (default -1) + + major You may use this parameter to override the + default major number (46) that this driver + will use. Be sure to change the device + name as well. + + name This parameter is a character string that + contains the name the kernel will use for this + device (in /proc output, for instance). + (default "pcd") + + verbose This parameter controls the amount of logging + that the driver will do. Set it to 0 for + normal operation, 1 to see autoprobe progress + messages, or 2 to see additional debugging + output. (default 0) + + nice This parameter controls the driver's use of + idle CPU time, at the expense of some speed. + + If this driver is built into the kernel, you can use + the following kernel command line parameters, with the same values + as the corresponding module parameters listed above: + + pcd.drive0 + pcd.drive1 + pcd.drive2 + pcd.drive3 + pcd.nice + + In addition, you can use the parameter pcd.disable to disable + the driver entirely. + +*/ + +/* Changes: + + 1.01 GRG 1998.01.24 Added test unit ready support + 1.02 GRG 1998.05.06 Changes to pcd_completion, ready_wait, + and loosen interpretation of ATAPI + standard for clearing error status. + Use spinlocks. Eliminate sti(). 
+ 1.03 GRG 1998.06.16 Eliminated an Ugh + 1.04 GRG 1998.08.15 Added extra debugging, improvements to + pcd_completion, use HZ in loop timing + 1.05 GRG 1998.08.16 Conformed to "Uniform CD-ROM" standard + 1.06 GRG 1998.08.19 Added audio ioctl support + 1.07 GRG 1998.09.24 Increased reset timeout, added jumbo support + +*/ + +#define PCD_VERSION "1.07" +#define PCD_MAJOR 46 +#define PCD_NAME "pcd" +#define PCD_UNITS 4 + +/* Here are things one can override from the insmod command. + Most are autoprobed by paride unless set here. Verbose is off + by default. + +*/ + +static int verbose = 0; +static int major = PCD_MAJOR; +static char *name = PCD_NAME; +static int nice = 0; +static int disable = 0; + +static int drive0[6] = { 0, 0, 0, -1, -1, -1 }; +static int drive1[6] = { 0, 0, 0, -1, -1, -1 }; +static int drive2[6] = { 0, 0, 0, -1, -1, -1 }; +static int drive3[6] = { 0, 0, 0, -1, -1, -1 }; + +static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3}; +static int pcd_drive_count; + +enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY}; + +/* end of parameters */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/cdrom.h> +#include <linux/spinlock.h> +#include <linux/blkdev.h> +#include <asm/uaccess.h> + +static DEFINE_SPINLOCK(pcd_lock); + +module_param(verbose, bool, 0644); +module_param(major, int, 0); +module_param(name, charp, 0); +module_param(nice, int, 0); +module_param_array(drive0, int, NULL, 0); +module_param_array(drive1, int, NULL, 0); +module_param_array(drive2, int, NULL, 0); +module_param_array(drive3, int, NULL, 0); + +#include "paride.h" +#include "pseudo.h" + +#define PCD_RETRIES 5 +#define PCD_TMO 800 /* timeout in jiffies */ +#define PCD_DELAY 50 /* spin delay in uS */ +#define PCD_READY_TMO 20 /* in seconds */ +#define PCD_RESET_TMO 100 /* in tenths of a second */ + +#define PCD_SPIN (1000000*PCD_TMO)/(HZ*PCD_DELAY) + +#define IDE_ERR 0x01 +#define IDE_DRQ 0x08 +#define IDE_READY 0x40 +#define IDE_BUSY 0x80 + +static int pcd_open(struct cdrom_device_info *cdi, int purpose); +static void pcd_release(struct cdrom_device_info *cdi); +static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr); +static int pcd_media_changed(struct cdrom_device_info *cdi, int slot_nr); +static int pcd_tray_move(struct cdrom_device_info *cdi, int position); +static int pcd_lock_door(struct cdrom_device_info *cdi, int lock); +static int pcd_drive_reset(struct cdrom_device_info *cdi); +static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn); +static int pcd_audio_ioctl(struct cdrom_device_info *cdi, + unsigned int cmd, void *arg); +static int pcd_packet(struct cdrom_device_info *cdi, + struct packet_command *cgc); + +static int pcd_detect(void); +static void pcd_probe_capabilities(void); +static void do_pcd_read_drq(void); +static void do_pcd_request(struct request_queue * q); +static void do_pcd_read(void); + +struct pcd_unit { + struct pi_adapter pia; /* interface to paride layer */ + struct pi_adapter *pi; + int drive; /* master/slave */ + int last_sense; /* result of last request sense */ + int changed; /* media change seen */ + int present; /* does this unit exist ? 
*/ + char *name; /* pcd0, pcd1, etc */ + struct cdrom_device_info info; /* uniform cdrom interface */ + struct gendisk *disk; +}; + +static struct pcd_unit pcd[PCD_UNITS]; + +static char pcd_scratch[64]; +static char pcd_buffer[2048]; /* raw block buffer */ +static int pcd_bufblk = -1; /* block in buffer, in CD units, + -1 for nothing there. See also + pd_unit. + */ + +/* the variables below are used mainly in the I/O request engine, which + processes only one request at a time. +*/ + +static struct pcd_unit *pcd_current; /* current request's drive */ +static struct request *pcd_req; +static int pcd_retries; /* retries on current request */ +static int pcd_busy; /* request being processed ? */ +static int pcd_sector; /* address of next requested sector */ +static int pcd_count; /* number of blocks still to do */ +static char *pcd_buf; /* buffer for request in progress */ + +static int pcd_warned; /* Have we logged a phase warning ? */ + +/* kernel glue structures */ + +static int pcd_block_open(struct block_device *bdev, fmode_t mode) +{ + struct pcd_unit *cd = bdev->bd_disk->private_data; + return cdrom_open(&cd->info, bdev, mode); +} + +static int pcd_block_release(struct gendisk *disk, fmode_t mode) +{ + struct pcd_unit *cd = disk->private_data; + cdrom_release(&cd->info, mode); + return 0; +} + +static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode, + unsigned cmd, unsigned long arg) +{ + struct pcd_unit *cd = bdev->bd_disk->private_data; + return cdrom_ioctl(&cd->info, bdev, mode, cmd, arg); +} + +static int pcd_block_media_changed(struct gendisk *disk) +{ + struct pcd_unit *cd = disk->private_data; + return cdrom_media_changed(&cd->info); +} + +static struct block_device_operations pcd_bdops = { + .owner = THIS_MODULE, + .open = pcd_block_open, + .release = pcd_block_release, + .locked_ioctl = pcd_block_ioctl, + .media_changed = pcd_block_media_changed, +}; + +static struct cdrom_device_ops pcd_dops = { + .open = pcd_open, + .release = pcd_release, + .drive_status = pcd_drive_status, + .media_changed = pcd_media_changed, + .tray_move = pcd_tray_move, + .lock_door = pcd_lock_door, + .get_mcn = pcd_get_mcn, + .reset = pcd_drive_reset, + .audio_ioctl = pcd_audio_ioctl, + .generic_packet = pcd_packet, + .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | + CDC_MCN | CDC_MEDIA_CHANGED | CDC_RESET | + CDC_PLAY_AUDIO | CDC_GENERIC_PACKET | CDC_CD_R | + CDC_CD_RW, +}; + +static void pcd_init_units(void) +{ + struct pcd_unit *cd; + int unit; + + pcd_drive_count = 0; + for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { + struct gendisk *disk = alloc_disk(1); + if (!disk) + continue; + cd->disk = disk; + cd->pi = &cd->pia; + cd->present = 0; + cd->last_sense = 0; + cd->changed = 1; + cd->drive = (*drives[unit])[D_SLV]; + if ((*drives[unit])[D_PRT]) + pcd_drive_count++; + + cd->name = &cd->info.name[0]; + snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, unit); + cd->info.ops = &pcd_dops; + cd->info.handle = cd; + cd->info.speed = 0; + cd->info.capacity = 1; + cd->info.mask = 0; + disk->major = major; + disk->first_minor = unit; + strcpy(disk->disk_name, cd->name); /* umm... 
*/ + disk->fops = &pcd_bdops; + } +} + +static int pcd_open(struct cdrom_device_info *cdi, int purpose) +{ + struct pcd_unit *cd = cdi->handle; + if (!cd->present) + return -ENODEV; + return 0; +} + +static void pcd_release(struct cdrom_device_info *cdi) +{ +} + +static inline int status_reg(struct pcd_unit *cd) +{ + return pi_read_regr(cd->pi, 1, 6); +} + +static inline int read_reg(struct pcd_unit *cd, int reg) +{ + return pi_read_regr(cd->pi, 0, reg); +} + +static inline void write_reg(struct pcd_unit *cd, int reg, int val) +{ + pi_write_regr(cd->pi, 0, reg, val); +} + +static int pcd_wait(struct pcd_unit *cd, int go, int stop, char *fun, char *msg) +{ + int j, r, e, s, p; + + j = 0; + while ((((r = status_reg(cd)) & go) || (stop && (!(r & stop)))) + && (j++ < PCD_SPIN)) + udelay(PCD_DELAY); + + if ((r & (IDE_ERR & stop)) || (j >= PCD_SPIN)) { + s = read_reg(cd, 7); + e = read_reg(cd, 1); + p = read_reg(cd, 2); + if (j >= PCD_SPIN) + e |= 0x100; + if (fun) + printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x" + " loop=%d phase=%d\n", + cd->name, fun, msg, r, s, e, j, p); + return (s << 8) + r; + } + return 0; +} + +static int pcd_command(struct pcd_unit *cd, char *cmd, int dlen, char *fun) +{ + pi_connect(cd->pi); + + write_reg(cd, 6, 0xa0 + 0x10 * cd->drive); + + if (pcd_wait(cd, IDE_BUSY | IDE_DRQ, 0, fun, "before command")) { + pi_disconnect(cd->pi); + return -1; + } + + write_reg(cd, 4, dlen % 256); + write_reg(cd, 5, dlen / 256); + write_reg(cd, 7, 0xa0); /* ATAPI packet command */ + + if (pcd_wait(cd, IDE_BUSY, IDE_DRQ, fun, "command DRQ")) { + pi_disconnect(cd->pi); + return -1; + } + + if (read_reg(cd, 2) != 1) { + printk("%s: %s: command phase error\n", cd->name, fun); + pi_disconnect(cd->pi); + return -1; + } + + pi_write_block(cd->pi, cmd, 12); + + return 0; +} + +static int pcd_completion(struct pcd_unit *cd, char *buf, char *fun) +{ + int r, d, p, n, k, j; + + r = -1; + k = 0; + j = 0; + + if (!pcd_wait(cd, IDE_BUSY, IDE_DRQ | IDE_READY | IDE_ERR, + fun, "completion")) { + r = 0; + while (read_reg(cd, 7) & IDE_DRQ) { + d = read_reg(cd, 4) + 256 * read_reg(cd, 5); + n = (d + 3) & 0xfffc; + p = read_reg(cd, 2) & 3; + + if ((p == 2) && (n > 0) && (j == 0)) { + pi_read_block(cd->pi, buf, n); + if (verbose > 1) + printk("%s: %s: Read %d bytes\n", + cd->name, fun, n); + r = 0; + j++; + } else { + if (verbose > 1) + printk + ("%s: %s: Unexpected phase %d, d=%d, k=%d\n", + cd->name, fun, p, d, k); + if ((verbose < 2) && !pcd_warned) { + pcd_warned = 1; + printk + ("%s: WARNING: ATAPI phase errors\n", + cd->name); + } + mdelay(1); + } + if (k++ > PCD_TMO) { + printk("%s: Stuck DRQ\n", cd->name); + break; + } + if (pcd_wait + (cd, IDE_BUSY, IDE_DRQ | IDE_READY | IDE_ERR, fun, + "completion")) { + r = -1; + break; + } + } + } + + pi_disconnect(cd->pi); + + return r; +} + +static void pcd_req_sense(struct pcd_unit *cd, char *fun) +{ + char rs_cmd[12] = { 0x03, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 }; + char buf[16]; + int r, c; + + r = pcd_command(cd, rs_cmd, 16, "Request sense"); + mdelay(1); + if (!r) + pcd_completion(cd, buf, "Request sense"); + + cd->last_sense = -1; + c = 2; + if (!r) { + if (fun) + printk("%s: %s: Sense key: %x, ASC: %x, ASQ: %x\n", + cd->name, fun, buf[2] & 0xf, buf[12], buf[13]); + c = buf[2] & 0xf; + cd->last_sense = + c | ((buf[12] & 0xff) << 8) | ((buf[13] & 0xff) << 16); + } + if ((c == 2) || (c == 6)) + cd->changed = 1; +} + +static int pcd_atapi(struct pcd_unit *cd, char *cmd, int dlen, char *buf, char *fun) +{ + int r; + + r = pcd_command(cd, cmd, dlen, fun); + 
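/* brief settle delay before polling the drive for completion */ +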
mdelay(1); + if (!r) + r = pcd_completion(cd, buf, fun); + if (r) + pcd_req_sense(cd, fun); + + return r; +} + +static int pcd_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) +{ + return pcd_atapi(cdi->handle, cgc->cmd, cgc->buflen, cgc->buffer, + "generic packet"); +} + +#define DBMSG(msg) ((verbose>1)?(msg):NULL) + +static int pcd_media_changed(struct cdrom_device_info *cdi, int slot_nr) +{ + struct pcd_unit *cd = cdi->handle; + int res = cd->changed; + if (res) + cd->changed = 0; + return res; +} + +static int pcd_lock_door(struct cdrom_device_info *cdi, int lock) +{ + char un_cmd[12] = { 0x1e, 0, 0, 0, lock, 0, 0, 0, 0, 0, 0, 0 }; + + return pcd_atapi(cdi->handle, un_cmd, 0, pcd_scratch, + lock ? "lock door" : "unlock door"); +} + +static int pcd_tray_move(struct cdrom_device_info *cdi, int position) +{ + char ej_cmd[12] = { 0x1b, 0, 0, 0, 3 - position, 0, 0, 0, 0, 0, 0, 0 }; + + return pcd_atapi(cdi->handle, ej_cmd, 0, pcd_scratch, + position ? "eject" : "close tray"); +} + +static void pcd_sleep(int cs) +{ + schedule_timeout_interruptible(cs); +} + +static int pcd_reset(struct pcd_unit *cd) +{ + int i, k, flg; + int expect[5] = { 1, 1, 1, 0x14, 0xeb }; + + pi_connect(cd->pi); + write_reg(cd, 6, 0xa0 + 0x10 * cd->drive); + write_reg(cd, 7, 8); + + pcd_sleep(20 * HZ / 1000); /* delay a bit */ + + k = 0; + while ((k++ < PCD_RESET_TMO) && (status_reg(cd) & IDE_BUSY)) + pcd_sleep(HZ / 10); + + flg = 1; + for (i = 0; i < 5; i++) + flg &= (read_reg(cd, i + 1) == expect[i]); + + if (verbose) { + printk("%s: Reset (%d) signature = ", cd->name, k); + for (i = 0; i < 5; i++) + printk("%3x", read_reg(cd, i + 1)); + if (!flg) + printk(" (incorrect)"); + printk("\n"); + } + + pi_disconnect(cd->pi); + return flg - 1; +} + +static int pcd_drive_reset(struct cdrom_device_info *cdi) +{ + return pcd_reset(cdi->handle); +} + +static int pcd_ready_wait(struct pcd_unit *cd, int tmo) +{ + char tr_cmd[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + int k, p; + + k = 0; + while (k < tmo) { + cd->last_sense = 0; + pcd_atapi(cd, tr_cmd, 0, NULL, DBMSG("test unit ready")); + p = cd->last_sense; + if (!p) + return 0; + if (!(((p & 0xffff) == 0x0402) || ((p & 0xff) == 6))) + return p; + k++; + pcd_sleep(HZ); + } + return 0x000020; /* timeout */ +} + +static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr) +{ + char rc_cmd[12] = { 0x25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + struct pcd_unit *cd = cdi->handle; + + if (pcd_ready_wait(cd, PCD_READY_TMO)) + return CDS_DRIVE_NOT_READY; + if (pcd_atapi(cd, rc_cmd, 8, pcd_scratch, DBMSG("check media"))) + return CDS_NO_DISC; + return CDS_DISC_OK; +} + +static int pcd_identify(struct pcd_unit *cd, char *id) +{ + int k, s; + char id_cmd[12] = { 0x12, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 }; + + pcd_bufblk = -1; + + s = pcd_atapi(cd, id_cmd, 36, pcd_buffer, "identify"); + + if (s) + return -1; + if ((pcd_buffer[0] & 0x1f) != 5) { + if (verbose) + printk("%s: %s is not a CD-ROM\n", + cd->name, cd->drive ? "Slave" : "Master"); + return -1; + } + memcpy(id, pcd_buffer + 16, 16); + id[16] = 0; + k = 16; + while ((k >= 0) && (id[k] <= 0x20)) { + id[k] = 0; + k--; + } + + printk("%s: %s: %s\n", cd->name, cd->drive ? 
"Slave" : "Master", id); + + return 0; +} + +/* + * returns 0, with id set if drive is detected + * -1, if drive detection failed + */ +static int pcd_probe(struct pcd_unit *cd, int ms, char *id) +{ + if (ms == -1) { + for (cd->drive = 0; cd->drive <= 1; cd->drive++) + if (!pcd_reset(cd) && !pcd_identify(cd, id)) + return 0; + } else { + cd->drive = ms; + if (!pcd_reset(cd) && !pcd_identify(cd, id)) + return 0; + } + return -1; +} + +static void pcd_probe_capabilities(void) +{ + int unit, r; + char buffer[32]; + char cmd[12] = { 0x5a, 1 << 3, 0x2a, 0, 0, 0, 0, 18, 0, 0, 0, 0 }; + struct pcd_unit *cd; + + for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { + if (!cd->present) + continue; + r = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities"); + if (r) + continue; + /* we should now have the cap page */ + if ((buffer[11] & 1) == 0) + cd->info.mask |= CDC_CD_R; + if ((buffer[11] & 2) == 0) + cd->info.mask |= CDC_CD_RW; + if ((buffer[12] & 1) == 0) + cd->info.mask |= CDC_PLAY_AUDIO; + if ((buffer[14] & 1) == 0) + cd->info.mask |= CDC_LOCK; + if ((buffer[14] & 8) == 0) + cd->info.mask |= CDC_OPEN_TRAY; + if ((buffer[14] >> 6) == 0) + cd->info.mask |= CDC_CLOSE_TRAY; + } +} + +static int pcd_detect(void) +{ + char id[18]; + int k, unit; + struct pcd_unit *cd; + + printk("%s: %s version %s, major %d, nice %d\n", + name, name, PCD_VERSION, major, nice); + + k = 0; + if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */ + cd = pcd; + if (pi_init(cd->pi, 1, -1, -1, -1, -1, -1, pcd_buffer, + PI_PCD, verbose, cd->name)) { + if (!pcd_probe(cd, -1, id) && cd->disk) { + cd->present = 1; + k++; + } else + pi_release(cd->pi); + } + } else { + for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { + int *conf = *drives[unit]; + if (!conf[D_PRT]) + continue; + if (!pi_init(cd->pi, 0, conf[D_PRT], conf[D_MOD], + conf[D_UNI], conf[D_PRO], conf[D_DLY], + pcd_buffer, PI_PCD, verbose, cd->name)) + continue; + if (!pcd_probe(cd, conf[D_SLV], id) && cd->disk) { + cd->present = 1; + k++; + } else + pi_release(cd->pi); + } + } + if (k) + return 0; + + printk("%s: No CD-ROM drive found\n", name); + for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) + put_disk(cd->disk); + return -1; +} + +/* I/O request processing */ +static struct request_queue *pcd_queue; + +static void do_pcd_request(struct request_queue * q) +{ + if (pcd_busy) + return; + while (1) { + pcd_req = elv_next_request(q); + if (!pcd_req) + return; + + if (rq_data_dir(pcd_req) == READ) { + struct pcd_unit *cd = pcd_req->rq_disk->private_data; + if (cd != pcd_current) + pcd_bufblk = -1; + pcd_current = cd; + pcd_sector = pcd_req->sector; + pcd_count = pcd_req->current_nr_sectors; + pcd_buf = pcd_req->buffer; + pcd_busy = 1; + ps_set_intr(do_pcd_read, NULL, 0, nice); + return; + } else + end_request(pcd_req, 0); + } +} + +static inline void next_request(int success) +{ + unsigned long saved_flags; + + spin_lock_irqsave(&pcd_lock, saved_flags); + end_request(pcd_req, success); + pcd_busy = 0; + do_pcd_request(pcd_queue); + spin_unlock_irqrestore(&pcd_lock, saved_flags); +} + +static int pcd_ready(void) +{ + return (((status_reg(pcd_current) & (IDE_BUSY | IDE_DRQ)) == IDE_DRQ)); +} + +static void pcd_transfer(void) +{ + + while (pcd_count && (pcd_sector / 4 == pcd_bufblk)) { + int o = (pcd_sector % 4) * 512; + memcpy(pcd_buf, pcd_buffer + o, 512); + pcd_count--; + pcd_buf += 512; + pcd_sector++; + } +} + +static void pcd_start(void) +{ + int b, i; + char rd_cmd[12] = { 0xa8, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0 }; + + 
pcd_bufblk = pcd_sector / 4; + b = pcd_bufblk; + for (i = 0; i < 4; i++) { + rd_cmd[5 - i] = b & 0xff; + b = b >> 8; + } + + if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) { + pcd_bufblk = -1; + next_request(0); + return; + } + + mdelay(1); + + ps_set_intr(do_pcd_read_drq, pcd_ready, PCD_TMO, nice); +} + +static void do_pcd_read(void) +{ + pcd_busy = 1; + pcd_retries = 0; + pcd_transfer(); + if (!pcd_count) { + next_request(1); + return; + } + + pi_do_claimed(pcd_current->pi, pcd_start); +} + +static void do_pcd_read_drq(void) +{ + unsigned long saved_flags; + + if (pcd_completion(pcd_current, pcd_buffer, "read block")) { + if (pcd_retries < PCD_RETRIES) { + mdelay(1); + pcd_retries++; + pi_do_claimed(pcd_current->pi, pcd_start); + return; + } + pcd_bufblk = -1; + next_request(0); + return; + } + + do_pcd_read(); + spin_lock_irqsave(&pcd_lock, saved_flags); + do_pcd_request(pcd_queue); + spin_unlock_irqrestore(&pcd_lock, saved_flags); +} + +/* the audio_ioctl stuff is adapted from sr_ioctl.c */ + +static int pcd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg) +{ + struct pcd_unit *cd = cdi->handle; + + switch (cmd) { + + case CDROMREADTOCHDR: + + { + char cmd[12] = + { GPCMD_READ_TOC_PMA_ATIP, 0, 0, 0, 0, 0, 0, 0, 12, + 0, 0, 0 }; + struct cdrom_tochdr *tochdr = + (struct cdrom_tochdr *) arg; + char buffer[32]; + int r; + + r = pcd_atapi(cd, cmd, 12, buffer, "read toc header"); + + tochdr->cdth_trk0 = buffer[2]; + tochdr->cdth_trk1 = buffer[3]; + + return r ? -EIO : 0; + } + + case CDROMREADTOCENTRY: + + { + char cmd[12] = + { GPCMD_READ_TOC_PMA_ATIP, 0, 0, 0, 0, 0, 0, 0, 12, + 0, 0, 0 }; + + struct cdrom_tocentry *tocentry = + (struct cdrom_tocentry *) arg; + unsigned char buffer[32]; + int r; + + cmd[1] = + (tocentry->cdte_format == CDROM_MSF ? 0x02 : 0); + cmd[6] = tocentry->cdte_track; + + r = pcd_atapi(cd, cmd, 12, buffer, "read toc entry"); + + tocentry->cdte_ctrl = buffer[5] & 0xf; + tocentry->cdte_adr = buffer[5] >> 4; + tocentry->cdte_datamode = + (tocentry->cdte_ctrl & 0x04) ? 1 : 0; + if (tocentry->cdte_format == CDROM_MSF) { + tocentry->cdte_addr.msf.minute = buffer[9]; + tocentry->cdte_addr.msf.second = buffer[10]; + tocentry->cdte_addr.msf.frame = buffer[11]; + } else + tocentry->cdte_addr.lba = + (((((buffer[8] << 8) + buffer[9]) << 8) + + buffer[10]) << 8) + buffer[11]; + + return r ? 
-EIO : 0; + } + + default: + + return -ENOSYS; + } +} + +static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn) +{ + char cmd[12] = + { GPCMD_READ_SUBCHANNEL, 0, 0x40, 2, 0, 0, 0, 0, 24, 0, 0, 0 }; + char buffer[32]; + + if (pcd_atapi(cdi->handle, cmd, 24, buffer, "get mcn")) + return -EIO; + + memcpy(mcn->medium_catalog_number, buffer + 9, 13); + mcn->medium_catalog_number[13] = 0; + + return 0; +} + +static int __init pcd_init(void) +{ + struct pcd_unit *cd; + int unit; + + if (disable) + return -EINVAL; + + pcd_init_units(); + + if (pcd_detect()) + return -ENODEV; + + /* get the atapi capabilities page */ + pcd_probe_capabilities(); + + if (register_blkdev(major, name)) { + for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) + put_disk(cd->disk); + return -EBUSY; + } + + pcd_queue = blk_init_queue(do_pcd_request, &pcd_lock); + if (!pcd_queue) { + unregister_blkdev(major, name); + for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) + put_disk(cd->disk); + return -ENOMEM; + } + + for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { + if (cd->present) { + register_cdrom(&cd->info); + cd->disk->private_data = cd; + cd->disk->queue = pcd_queue; + add_disk(cd->disk); + } + } + + return 0; +} + +static void __exit pcd_exit(void) +{ + struct pcd_unit *cd; + int unit; + + for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) { + if (cd->present) { + del_gendisk(cd->disk); + pi_release(cd->pi); + unregister_cdrom(&cd->info); + } + put_disk(cd->disk); + } + blk_cleanup_queue(pcd_queue); + unregister_blkdev(major, name); +} + +MODULE_LICENSE("GPL"); +module_init(pcd_init) +module_exit(pcd_exit) diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c new file mode 100644 index 0000000..9299455 --- /dev/null +++ b/drivers/block/paride/pd.c @@ -0,0 +1,946 @@ +/* + pd.c (c) 1997-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + This is the high-level driver for parallel port IDE hard + drives based on chips supported by the paride module. + + By default, the driver will autoprobe for a single parallel + port IDE drive, but if their individual parameters are + specified, the driver can handle up to 4 drives. + + The behaviour of the pd driver can be altered by setting + some parameters from the insmod command line. The following + parameters are adjustable: + + drive0 These four arguments can be arrays of + drive1 1-8 integers as follows: + drive2 + drive3 <prt>,<pro>,<uni>,<mod>,<geo>,<sby>,<dly>,<slv> + + Where, + + <prt> is the base of the parallel port address for + the corresponding drive. (required) + + <pro> is the protocol number for the adapter that + supports this drive. These numbers are + logged by 'paride' when the protocol modules + are initialised. (0 if not given) + + <uni> for those adapters that support chained + devices, this is the unit selector for the + chain of devices on the given port. It should + be zero for devices that don't support chaining. + (0 if not given) + + <mod> this can be -1 to choose the best mode, or one + of the mode numbers supported by the adapter. + (-1 if not given) + + <geo> this defaults to 0 to indicate that the driver + should use the CHS geometry provided by the drive + itself. If set to 1, the driver will provide + a logical geometry with 64 heads and 32 sectors + per track, to be consistent with most SCSI + drivers. (0 if not given) + + <sby> set this to zero to disable the power saving + standby mode, if needed. 
(1 if not given) + + <dly> some parallel ports require the driver to + go more slowly. -1 sets a default value that + should work with the chosen protocol. Otherwise, + set this to a small integer, the larger it is + the slower the port i/o. In some cases, setting + this to zero will speed up the device. (default -1) + + <slv> IDE disks can be jumpered to master or slave. + Set this to 0 to choose the master drive, 1 to + choose the slave, -1 (the default) to choose the + first drive found. + + + major You may use this parameter to override the + default major number (45) that this driver + will use. Be sure to change the device + name as well. + + name This parameter is a character string that + contains the name the kernel will use for this + device (in /proc output, for instance). + (default "pd") + + cluster The driver will attempt to aggregate requests + for adjacent blocks into larger multi-block + clusters. The maximum cluster size (in 512 + byte sectors) is set with this parameter. + (default 64) + + verbose This parameter controls the amount of logging + that the driver will do. Set it to 0 for + normal operation, 1 to see autoprobe progress + messages, or 2 to see additional debugging + output. (default 0) + + nice This parameter controls the driver's use of + idle CPU time, at the expense of some speed. + + If this driver is built into the kernel, you can use the + following kernel command line parameters, with the same values + as the corresponding module parameters listed above: + + pd.drive0 + pd.drive1 + pd.drive2 + pd.drive3 + pd.cluster + pd.nice + + In addition, you can use the parameter pd.disable to disable + the driver entirely. + +*/ + +/* Changes: + + 1.01 GRG 1997.01.24 Restored pd_reset() + Added eject ioctl + 1.02 GRG 1998.05.06 SMP spinlock changes, + Added slave support + 1.03 GRG 1998.06.16 Eliminate an Ugh. + 1.04 GRG 1998.08.15 Extra debugging, use HZ in loop timing + 1.05 GRG 1998.09.24 Added jumbo support + +*/ + +#define PD_VERSION "1.05" +#define PD_MAJOR 45 +#define PD_NAME "pd" +#define PD_UNITS 4 + +/* Here are things one can override from the insmod command. + Most are autoprobed by paride unless set here. Verbose is off + by default.
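+
+ For example (hypothetical values - substitute the port, protocol
+ and jumpering of your own adapter), a single master drive on a
+ parallel port at 0x378 using protocol module 0 could be loaded
+ with:
+
+ insmod pd drive0=0x378,0,0,-1,0,1,-1,0
+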
+ +*/ + +static int verbose = 0; +static int major = PD_MAJOR; +static char *name = PD_NAME; +static int cluster = 64; +static int nice = 0; +static int disable = 0; + +static int drive0[8] = { 0, 0, 0, -1, 0, 1, -1, -1 }; +static int drive1[8] = { 0, 0, 0, -1, 0, 1, -1, -1 }; +static int drive2[8] = { 0, 0, 0, -1, 0, 1, -1, -1 }; +static int drive3[8] = { 0, 0, 0, -1, 0, 1, -1, -1 }; + +static int (*drives[4])[8] = {&drive0, &drive1, &drive2, &drive3}; + +enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV}; + +/* end of parameters */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/delay.h> +#include <linux/hdreg.h> +#include <linux/cdrom.h> /* for the eject ioctl */ +#include <linux/blkdev.h> +#include <linux/blkpg.h> +#include <linux/kernel.h> +#include <asm/uaccess.h> +#include <linux/workqueue.h> + +static DEFINE_SPINLOCK(pd_lock); + +module_param(verbose, bool, 0); +module_param(major, int, 0); +module_param(name, charp, 0); +module_param(cluster, int, 0); +module_param(nice, int, 0); +module_param_array(drive0, int, NULL, 0); +module_param_array(drive1, int, NULL, 0); +module_param_array(drive2, int, NULL, 0); +module_param_array(drive3, int, NULL, 0); + +#include "paride.h" + +#define PD_BITS 4 + +/* numbers for "SCSI" geometry */ + +#define PD_LOG_HEADS 64 +#define PD_LOG_SECTS 32 + +#define PD_ID_OFF 54 +#define PD_ID_LEN 14 + +#define PD_MAX_RETRIES 5 +#define PD_TMO 800 /* interrupt timeout in jiffies */ +#define PD_SPIN_DEL 50 /* spin delay in micro-seconds */ + +#define PD_SPIN (1000000*PD_TMO)/(HZ*PD_SPIN_DEL) + +#define STAT_ERR 0x00001 +#define STAT_INDEX 0x00002 +#define STAT_ECC 0x00004 +#define STAT_DRQ 0x00008 +#define STAT_SEEK 0x00010 +#define STAT_WRERR 0x00020 +#define STAT_READY 0x00040 +#define STAT_BUSY 0x00080 + +#define ERR_AMNF 0x00100 +#define ERR_TK0NF 0x00200 +#define ERR_ABRT 0x00400 +#define ERR_MCR 0x00800 +#define ERR_IDNF 0x01000 +#define ERR_MC 0x02000 +#define ERR_UNC 0x04000 +#define ERR_TMO 0x10000 + +#define IDE_READ 0x20 +#define IDE_WRITE 0x30 +#define IDE_READ_VRFY 0x40 +#define IDE_INIT_DEV_PARMS 0x91 +#define IDE_STANDBY 0x96 +#define IDE_ACKCHANGE 0xdb +#define IDE_DOORLOCK 0xde +#define IDE_DOORUNLOCK 0xdf +#define IDE_IDENTIFY 0xec +#define IDE_EJECT 0xed + +#define PD_NAMELEN 8 + +struct pd_unit { + struct pi_adapter pia; /* interface to paride layer */ + struct pi_adapter *pi; + int access; /* count of active opens ... */ + int capacity; /* Size of this volume in sectors */ + int heads; /* physical geometry */ + int sectors; + int cylinders; + int can_lba; + int drive; /* master=0 slave=1 */ + int changed; /* Have we seen a disk change ? */ + int removable; /* removable media device ? */ + int standby; + int alt_geom; + char name[PD_NAMELEN]; /* pda, pdb, etc ... 
*/ + struct gendisk *gd; +}; + +static struct pd_unit pd[PD_UNITS]; + +static char pd_scratch[512]; /* scratch block buffer */ + +static char *pd_errs[17] = { "ERR", "INDEX", "ECC", "DRQ", "SEEK", "WRERR", + "READY", "BUSY", "AMNF", "TK0NF", "ABRT", "MCR", + "IDNF", "MC", "UNC", "???", "TMO" +}; + +static inline int status_reg(struct pd_unit *disk) +{ + return pi_read_regr(disk->pi, 1, 6); +} + +static inline int read_reg(struct pd_unit *disk, int reg) +{ + return pi_read_regr(disk->pi, 0, reg); +} + +static inline void write_status(struct pd_unit *disk, int val) +{ + pi_write_regr(disk->pi, 1, 6, val); +} + +static inline void write_reg(struct pd_unit *disk, int reg, int val) +{ + pi_write_regr(disk->pi, 0, reg, val); +} + +static inline u8 DRIVE(struct pd_unit *disk) +{ + return 0xa0+0x10*disk->drive; +} + +/* ide command interface */ + +static void pd_print_error(struct pd_unit *disk, char *msg, int status) +{ + int i; + + printk("%s: %s: status = 0x%x =", disk->name, msg, status); + for (i = 0; i < ARRAY_SIZE(pd_errs); i++) + if (status & (1 << i)) + printk(" %s", pd_errs[i]); + printk("\n"); +} + +static void pd_reset(struct pd_unit *disk) +{ /* called only for MASTER drive */ + write_status(disk, 4); + udelay(50); + write_status(disk, 0); + udelay(250); +} + +#define DBMSG(msg) ((verbose>1)?(msg):NULL) + +static int pd_wait_for(struct pd_unit *disk, int w, char *msg) +{ /* polled wait */ + int k, r, e; + + k = 0; + while (k < PD_SPIN) { + r = status_reg(disk); + k++; + if (((r & w) == w) && !(r & STAT_BUSY)) + break; + udelay(PD_SPIN_DEL); + } + e = (read_reg(disk, 1) << 8) + read_reg(disk, 7); + if (k >= PD_SPIN) + e |= ERR_TMO; + if ((e & (STAT_ERR | ERR_TMO)) && (msg != NULL)) + pd_print_error(disk, msg, e); + return e; +} + +static void pd_send_command(struct pd_unit *disk, int n, int s, int h, int c0, int c1, int func) +{ + write_reg(disk, 6, DRIVE(disk) + h); + write_reg(disk, 1, 0); /* the IDE task file */ + write_reg(disk, 2, n); + write_reg(disk, 3, s); + write_reg(disk, 4, c0); + write_reg(disk, 5, c1); + write_reg(disk, 7, func); + + udelay(1); +} + +static void pd_ide_command(struct pd_unit *disk, int func, int block, int count) +{ + int c1, c0, h, s; + + if (disk->can_lba) { + s = block & 255; + c0 = (block >>= 8) & 255; + c1 = (block >>= 8) & 255; + h = ((block >>= 8) & 15) + 0x40; + } else { + s = (block % disk->sectors) + 1; + h = (block /= disk->sectors) % disk->heads; + c0 = (block /= disk->heads) % 256; + c1 = (block >>= 8); + } + pd_send_command(disk, count, s, h, c0, c1, func); +} + +/* The i/o request engine */ + +enum action {Fail = 0, Ok = 1, Hold, Wait}; + +static struct request *pd_req; /* current request */ +static enum action (*phase)(void); + +static void run_fsm(void); + +static void ps_tq_int(struct work_struct *work); + +static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int); + +static void schedule_fsm(void) +{ + if (!nice) + schedule_delayed_work(&fsm_tq, 0); + else + schedule_delayed_work(&fsm_tq, nice-1); +} + +static void ps_tq_int(struct work_struct *work) +{ + run_fsm(); +} + +static enum action do_pd_io_start(void); +static enum action pd_special(void); +static enum action do_pd_read_start(void); +static enum action do_pd_write_start(void); +static enum action do_pd_read_drq(void); +static enum action do_pd_write_done(void); + +static struct request_queue *pd_queue; +static int pd_claimed; + +static struct pd_unit *pd_current; /* current request's drive */ +static PIA *pi_current; /* current request's PIA */ + +static void run_fsm(void) +{ + while (1) 
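+ /*
+ * One pass per loop iteration: claim the paride adapter
+ * (pd_claimed steps 0 -> claim scheduled, 1 -> claimed,
+ * 2 -> connected), run the current phase handler, then act on
+ * its result: Ok and Fail complete the request and start the
+ * next one, Hold reschedules the FSM, and Wait releases the
+ * adapter before the loop retries the phase.
+ */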
{ + enum action res; + unsigned long saved_flags; + int stop = 0; + + if (!phase) { + pd_current = pd_req->rq_disk->private_data; + pi_current = pd_current->pi; + phase = do_pd_io_start; + } + + switch (pd_claimed) { + case 0: + pd_claimed = 1; + if (!pi_schedule_claimed(pi_current, run_fsm)) + return; + case 1: + pd_claimed = 2; + pi_current->proto->connect(pi_current); + } + + switch(res = phase()) { + case Ok: case Fail: + pi_disconnect(pi_current); + pd_claimed = 0; + phase = NULL; + spin_lock_irqsave(&pd_lock, saved_flags); + end_request(pd_req, res); + pd_req = elv_next_request(pd_queue); + if (!pd_req) + stop = 1; + spin_unlock_irqrestore(&pd_lock, saved_flags); + if (stop) + return; + case Hold: + schedule_fsm(); + return; + case Wait: + pi_disconnect(pi_current); + pd_claimed = 0; + } + } +} + +static int pd_retries = 0; /* i/o error retry count */ +static int pd_block; /* address of next requested block */ +static int pd_count; /* number of blocks still to do */ +static int pd_run; /* sectors in current cluster */ +static int pd_cmd; /* current command READ/WRITE */ +static char *pd_buf; /* buffer for request in progress */ + +static enum action do_pd_io_start(void) +{ + if (blk_special_request(pd_req)) { + phase = pd_special; + return pd_special(); + } + + pd_cmd = rq_data_dir(pd_req); + if (pd_cmd == READ || pd_cmd == WRITE) { + pd_block = pd_req->sector; + pd_count = pd_req->current_nr_sectors; + if (pd_block + pd_count > get_capacity(pd_req->rq_disk)) + return Fail; + pd_run = pd_req->nr_sectors; + pd_buf = pd_req->buffer; + pd_retries = 0; + if (pd_cmd == READ) + return do_pd_read_start(); + else + return do_pd_write_start(); + } + return Fail; +} + +static enum action pd_special(void) +{ + enum action (*func)(struct pd_unit *) = pd_req->special; + return func(pd_current); +} + +static int pd_next_buf(void) +{ + unsigned long saved_flags; + + pd_count--; + pd_run--; + pd_buf += 512; + pd_block++; + if (!pd_run) + return 1; + if (pd_count) + return 0; + spin_lock_irqsave(&pd_lock, saved_flags); + end_request(pd_req, 1); + pd_count = pd_req->current_nr_sectors; + pd_buf = pd_req->buffer; + spin_unlock_irqrestore(&pd_lock, saved_flags); + return 0; +} + +static unsigned long pd_timeout; + +static enum action do_pd_read_start(void) +{ + if (pd_wait_for(pd_current, STAT_READY, "do_pd_read") & STAT_ERR) { + if (pd_retries < PD_MAX_RETRIES) { + pd_retries++; + return Wait; + } + return Fail; + } + pd_ide_command(pd_current, IDE_READ, pd_block, pd_run); + phase = do_pd_read_drq; + pd_timeout = jiffies + PD_TMO; + return Hold; +} + +static enum action do_pd_write_start(void) +{ + if (pd_wait_for(pd_current, STAT_READY, "do_pd_write") & STAT_ERR) { + if (pd_retries < PD_MAX_RETRIES) { + pd_retries++; + return Wait; + } + return Fail; + } + pd_ide_command(pd_current, IDE_WRITE, pd_block, pd_run); + while (1) { + if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_write_drq") & STAT_ERR) { + if (pd_retries < PD_MAX_RETRIES) { + pd_retries++; + return Wait; + } + return Fail; + } + pi_write_block(pd_current->pi, pd_buf, 512); + if (pd_next_buf()) + break; + } + phase = do_pd_write_done; + pd_timeout = jiffies + PD_TMO; + return Hold; +} + +static inline int pd_ready(void) +{ + return !(status_reg(pd_current) & STAT_BUSY); +} + +static enum action do_pd_read_drq(void) +{ + if (!pd_ready() && !time_after_eq(jiffies, pd_timeout)) + return Hold; + + while (1) { + if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_read_drq") & STAT_ERR) { + if (pd_retries < PD_MAX_RETRIES) { + pd_retries++; + phase = 
do_pd_read_start; + return Wait; + } + return Fail; + } + pi_read_block(pd_current->pi, pd_buf, 512); + if (pd_next_buf()) + break; + } + return Ok; +} + +static enum action do_pd_write_done(void) +{ + if (!pd_ready() && !time_after_eq(jiffies, pd_timeout)) + return Hold; + + if (pd_wait_for(pd_current, STAT_READY, "do_pd_write_done") & STAT_ERR) { + if (pd_retries < PD_MAX_RETRIES) { + pd_retries++; + phase = do_pd_write_start; + return Wait; + } + return Fail; + } + return Ok; +} + +/* special io requests */ + +/* According to the ATA standard, the default CHS geometry should be + available following a reset. Some Western Digital drives come up + in a mode where only LBA addresses are accepted until the device + parameters are initialised. +*/ + +static void pd_init_dev_parms(struct pd_unit *disk) +{ + pd_wait_for(disk, 0, DBMSG("before init_dev_parms")); + pd_send_command(disk, disk->sectors, 0, disk->heads - 1, 0, 0, + IDE_INIT_DEV_PARMS); + udelay(300); + pd_wait_for(disk, 0, "Initialise device parameters"); +} + +static enum action pd_door_lock(struct pd_unit *disk) +{ + if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) { + pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORLOCK); + pd_wait_for(disk, STAT_READY, "Lock done"); + } + return Ok; +} + +static enum action pd_door_unlock(struct pd_unit *disk) +{ + if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) { + pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK); + pd_wait_for(disk, STAT_READY, "Lock done"); + } + return Ok; +} + +static enum action pd_eject(struct pd_unit *disk) +{ + pd_wait_for(disk, 0, DBMSG("before unlock on eject")); + pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK); + pd_wait_for(disk, 0, DBMSG("after unlock on eject")); + pd_wait_for(disk, 0, DBMSG("before eject")); + pd_send_command(disk, 0, 0, 0, 0, 0, IDE_EJECT); + pd_wait_for(disk, 0, DBMSG("after eject")); + return Ok; +} + +static enum action pd_media_check(struct pd_unit *disk) +{ + int r = pd_wait_for(disk, STAT_READY, DBMSG("before media_check")); + if (!(r & STAT_ERR)) { + pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY); + r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after READ_VRFY")); + } else + disk->changed = 1; /* say changed if other error */ + if (r & ERR_MC) { + disk->changed = 1; + pd_send_command(disk, 1, 0, 0, 0, 0, IDE_ACKCHANGE); + pd_wait_for(disk, STAT_READY, DBMSG("RDY after ACKCHANGE")); + pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY); + r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after VRFY")); + } + return Ok; +} + +static void pd_standby_off(struct pd_unit *disk) +{ + pd_wait_for(disk, 0, DBMSG("before STANDBY")); + pd_send_command(disk, 0, 0, 0, 0, 0, IDE_STANDBY); + pd_wait_for(disk, 0, DBMSG("after STANDBY")); +} + +static enum action pd_identify(struct pd_unit *disk) +{ + int j; + char id[PD_ID_LEN + 1]; + +/* WARNING: here there may be dragons. reset() applies to both drives, + but we call it only on probing the MASTER. This should allow most + common configurations to work, but be warned that a reset can clear + settings on the SLAVE drive. 
+*/ + + if (disk->drive == 0) + pd_reset(disk); + + write_reg(disk, 6, DRIVE(disk)); + pd_wait_for(disk, 0, DBMSG("before IDENT")); + pd_send_command(disk, 1, 0, 0, 0, 0, IDE_IDENTIFY); + + if (pd_wait_for(disk, STAT_DRQ, DBMSG("IDENT DRQ")) & STAT_ERR) + return Fail; + pi_read_block(disk->pi, pd_scratch, 512); + disk->can_lba = pd_scratch[99] & 2; + disk->sectors = le16_to_cpu(*(__le16 *) (pd_scratch + 12)); + disk->heads = le16_to_cpu(*(__le16 *) (pd_scratch + 6)); + disk->cylinders = le16_to_cpu(*(__le16 *) (pd_scratch + 2)); + if (disk->can_lba) + disk->capacity = le32_to_cpu(*(__le32 *) (pd_scratch + 120)); + else + disk->capacity = disk->sectors * disk->heads * disk->cylinders; + + for (j = 0; j < PD_ID_LEN; j++) + id[j ^ 1] = pd_scratch[j + PD_ID_OFF]; + j = PD_ID_LEN - 1; + while ((j >= 0) && (id[j] <= 0x20)) + j--; + j++; + id[j] = 0; + + disk->removable = pd_scratch[0] & 0x80; + + printk("%s: %s, %s, %d blocks [%dM], (%d/%d/%d), %s media\n", + disk->name, id, + disk->drive ? "slave" : "master", + disk->capacity, disk->capacity / 2048, + disk->cylinders, disk->heads, disk->sectors, + disk->removable ? "removable" : "fixed"); + + if (disk->capacity) + pd_init_dev_parms(disk); + if (!disk->standby) + pd_standby_off(disk); + + return Ok; +} + +/* end of io request engine */ + +static void do_pd_request(struct request_queue * q) +{ + if (pd_req) + return; + pd_req = elv_next_request(q); + if (!pd_req) + return; + + schedule_fsm(); +} + +static int pd_special_command(struct pd_unit *disk, + enum action (*func)(struct pd_unit *disk)) +{ + struct request *rq; + int err = 0; + + rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT); + + rq->cmd_type = REQ_TYPE_SPECIAL; + rq->special = func; + + err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0); + + blk_put_request(rq); + return err; +} + +/* kernel glue structures */ + +static int pd_open(struct block_device *bdev, fmode_t mode) +{ + struct pd_unit *disk = bdev->bd_disk->private_data; + + disk->access++; + + if (disk->removable) { + pd_special_command(disk, pd_media_check); + pd_special_command(disk, pd_door_lock); + } + return 0; +} + +static int pd_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + struct pd_unit *disk = bdev->bd_disk->private_data; + + if (disk->alt_geom) { + geo->heads = PD_LOG_HEADS; + geo->sectors = PD_LOG_SECTS; + geo->cylinders = disk->capacity / (geo->heads * geo->sectors); + } else { + geo->heads = disk->heads; + geo->sectors = disk->sectors; + geo->cylinders = disk->cylinders; + } + + return 0; +} + +static int pd_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct pd_unit *disk = bdev->bd_disk->private_data; + + switch (cmd) { + case CDROMEJECT: + if (disk->access == 1) + pd_special_command(disk, pd_eject); + return 0; + default: + return -EINVAL; + } +} + +static int pd_release(struct gendisk *p, fmode_t mode) +{ + struct pd_unit *disk = p->private_data; + + if (!--disk->access && disk->removable) + pd_special_command(disk, pd_door_unlock); + + return 0; +} + +static int pd_check_media(struct gendisk *p) +{ + struct pd_unit *disk = p->private_data; + int r; + if (!disk->removable) + return 0; + pd_special_command(disk, pd_media_check); + r = disk->changed; + disk->changed = 0; + return r; +} + +static int pd_revalidate(struct gendisk *p) +{ + struct pd_unit *disk = p->private_data; + if (pd_special_command(disk, pd_identify) == 0) + set_capacity(p, disk->capacity); + else + set_capacity(p, 0); + return 0; +} + +static struct 
block_device_operations pd_fops = { + .owner = THIS_MODULE, + .open = pd_open, + .release = pd_release, + .locked_ioctl = pd_ioctl, + .getgeo = pd_getgeo, + .media_changed = pd_check_media, + .revalidate_disk= pd_revalidate +}; + +/* probing */ + +static void pd_probe_drive(struct pd_unit *disk) +{ + struct gendisk *p = alloc_disk(1 << PD_BITS); + if (!p) + return; + strcpy(p->disk_name, disk->name); + p->fops = &pd_fops; + p->major = major; + p->first_minor = (disk - pd) << PD_BITS; + disk->gd = p; + p->private_data = disk; + p->queue = pd_queue; + + if (disk->drive == -1) { + for (disk->drive = 0; disk->drive <= 1; disk->drive++) + if (pd_special_command(disk, pd_identify) == 0) + return; + } else if (pd_special_command(disk, pd_identify) == 0) + return; + disk->gd = NULL; + put_disk(p); +} + +static int pd_detect(void) +{ + int found = 0, unit, pd_drive_count = 0; + struct pd_unit *disk; + + for (unit = 0; unit < PD_UNITS; unit++) { + int *parm = *drives[unit]; + struct pd_unit *disk = pd + unit; + disk->pi = &disk->pia; + disk->access = 0; + disk->changed = 1; + disk->capacity = 0; + disk->drive = parm[D_SLV]; + snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a'+unit); + disk->alt_geom = parm[D_GEO]; + disk->standby = parm[D_SBY]; + if (parm[D_PRT]) + pd_drive_count++; + } + + if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */ + disk = pd; + if (pi_init(disk->pi, 1, -1, -1, -1, -1, -1, pd_scratch, + PI_PD, verbose, disk->name)) { + pd_probe_drive(disk); + if (!disk->gd) + pi_release(disk->pi); + } + + } else { + for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) { + int *parm = *drives[unit]; + if (!parm[D_PRT]) + continue; + if (pi_init(disk->pi, 0, parm[D_PRT], parm[D_MOD], + parm[D_UNI], parm[D_PRO], parm[D_DLY], + pd_scratch, PI_PD, verbose, disk->name)) { + pd_probe_drive(disk); + if (!disk->gd) + pi_release(disk->pi); + } + } + } + for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) { + if (disk->gd) { + set_capacity(disk->gd, disk->capacity); + add_disk(disk->gd); + found = 1; + } + } + if (!found) + printk("%s: no valid drive found\n", name); + return found; +} + +static int __init pd_init(void) +{ + if (disable) + goto out1; + + pd_queue = blk_init_queue(do_pd_request, &pd_lock); + if (!pd_queue) + goto out1; + + blk_queue_max_sectors(pd_queue, cluster); + + if (register_blkdev(major, name)) + goto out2; + + printk("%s: %s version %s, major %d, cluster %d, nice %d\n", + name, name, PD_VERSION, major, cluster, nice); + if (!pd_detect()) + goto out3; + + return 0; + +out3: + unregister_blkdev(major, name); +out2: + blk_cleanup_queue(pd_queue); +out1: + return -ENODEV; +} + +static void __exit pd_exit(void) +{ + struct pd_unit *disk; + int unit; + unregister_blkdev(major, name); + for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) { + struct gendisk *p = disk->gd; + if (p) { + disk->gd = NULL; + del_gendisk(p); + put_disk(p); + pi_release(disk->pi); + } + } + blk_cleanup_queue(pd_queue); +} + +MODULE_LICENSE("GPL"); +module_init(pd_init) +module_exit(pd_exit) diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c new file mode 100644 index 0000000..bef3b99 --- /dev/null +++ b/drivers/block/paride/pf.c @@ -0,0 +1,992 @@ +/* + pf.c (c) 1997-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + This is the high-level driver for parallel port ATAPI disk + drives based on chips supported by the paride module. 
+ + By default, the driver will autoprobe for a single parallel + port ATAPI disk drive, but if their individual parameters are + specified, the driver can handle up to 4 drives. + + The behaviour of the pf driver can be altered by setting + some parameters from the insmod command line. The following + parameters are adjustable: + + drive0 These four arguments can be arrays of + drive1 1-7 integers as follows: + drive2 + drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<lun>,<dly> + + Where, + + <prt> is the base of the parallel port address for + the corresponding drive. (required) + + <pro> is the protocol number for the adapter that + supports this drive. These numbers are + logged by 'paride' when the protocol modules + are initialised. (0 if not given) + + <uni> for those adapters that support chained + devices, this is the unit selector for the + chain of devices on the given port. It should + be zero for devices that don't support chaining. + (0 if not given) + + <mod> this can be -1 to choose the best mode, or one + of the mode numbers supported by the adapter. + (-1 if not given) + + <slv> ATAPI CD-ROMs can be jumpered to master or slave. + Set this to 0 to choose the master drive, 1 to + choose the slave, -1 (the default) to choose the + first drive found. + + <lun> Some ATAPI devices support multiple LUNs. + One example is the ATAPI PD/CD drive from + Matshita/Panasonic. This device has a + CD drive on LUN 0 and a PD drive on LUN 1. + By default, the driver will search for the + first LUN with a supported device. Set + this parameter to force it to use a specific + LUN. (default -1) + + <dly> some parallel ports require the driver to + go more slowly. -1 sets a default value that + should work with the chosen protocol. Otherwise, + set this to a small integer, the larger it is + the slower the port i/o. In some cases, setting + this to zero will speed up the device. (default -1) + + major You may use this parameter to override the + default major number (47) that this driver + will use. Be sure to change the device + name as well. + + name This parameter is a character string that + contains the name the kernel will use for this + device (in /proc output, for instance). + (default "pf"). + + cluster The driver will attempt to aggregate requests + for adjacent blocks into larger multi-block + clusters. The maximum cluster size (in 512 + byte sectors) is set with this parameter. + (default 64) + + verbose This parameter controls the amount of logging + that the driver will do. Set it to 0 for + normal operation, 1 to see autoprobe progress + messages, or 2 to see additional debugging + output. (default 0) + + nice This parameter controls the driver's use of + idle CPU time, at the expense of some speed. + + If this driver is built into the kernel, you can use the + following command line parameters, with the same values + as the corresponding module parameters listed above: + + pf.drive0 + pf.drive1 + pf.drive2 + pf.drive3 + pf.cluster + pf.nice + + In addition, you can use the parameter pf.disable to disable + the driver entirely. + +*/ + +/* Changes: + + 1.01 GRG 1998.05.03 Changes for SMP. Eliminate sti(). + Fix for drives that don't clear STAT_ERR + until after next CDB delivered. + Small change in pf_completion to round + up transfer size.
+ 1.02 GRG 1998.06.16 Eliminated an Ugh + 1.03 GRG 1998.08.16 Use HZ in loop timings, extra debugging + 1.04 GRG 1998.09.24 Added jumbo support + +*/ + +#define PF_VERSION "1.04" +#define PF_MAJOR 47 +#define PF_NAME "pf" +#define PF_UNITS 4 + +/* Here are things one can override from the insmod command. + Most are autoprobed by paride unless set here. Verbose is off + by default. + +*/ + +static int verbose = 0; +static int major = PF_MAJOR; +static char *name = PF_NAME; +static int cluster = 64; +static int nice = 0; +static int disable = 0; + +static int drive0[7] = { 0, 0, 0, -1, -1, -1, -1 }; +static int drive1[7] = { 0, 0, 0, -1, -1, -1, -1 }; +static int drive2[7] = { 0, 0, 0, -1, -1, -1, -1 }; +static int drive3[7] = { 0, 0, 0, -1, -1, -1, -1 }; + +static int (*drives[4])[7] = {&drive0, &drive1, &drive2, &drive3}; +static int pf_drive_count; + +enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY}; + +/* end of parameters */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/delay.h> +#include <linux/hdreg.h> +#include <linux/cdrom.h> +#include <linux/spinlock.h> +#include <linux/blkdev.h> +#include <linux/blkpg.h> +#include <asm/uaccess.h> + +static DEFINE_SPINLOCK(pf_spin_lock); + +module_param(verbose, bool, 0644); +module_param(major, int, 0); +module_param(name, charp, 0); +module_param(cluster, int, 0); +module_param(nice, int, 0); +module_param_array(drive0, int, NULL, 0); +module_param_array(drive1, int, NULL, 0); +module_param_array(drive2, int, NULL, 0); +module_param_array(drive3, int, NULL, 0); + +#include "paride.h" +#include "pseudo.h" + +/* constants for faking geometry numbers */ + +#define PF_FD_MAX 8192 /* use FD geometry under this size */ +#define PF_FD_HDS 2 +#define PF_FD_SPT 18 +#define PF_HD_HDS 64 +#define PF_HD_SPT 32 + +#define PF_MAX_RETRIES 5 +#define PF_TMO 800 /* interrupt timeout in jiffies */ +#define PF_SPIN_DEL 50 /* spin delay in micro-seconds */ + +#define PF_SPIN (1000000*PF_TMO)/(HZ*PF_SPIN_DEL) + +#define STAT_ERR 0x00001 +#define STAT_INDEX 0x00002 +#define STAT_ECC 0x00004 +#define STAT_DRQ 0x00008 +#define STAT_SEEK 0x00010 +#define STAT_WRERR 0x00020 +#define STAT_READY 0x00040 +#define STAT_BUSY 0x00080 + +#define ATAPI_REQ_SENSE 0x03 +#define ATAPI_LOCK 0x1e +#define ATAPI_DOOR 0x1b +#define ATAPI_MODE_SENSE 0x5a +#define ATAPI_CAPACITY 0x25 +#define ATAPI_IDENTIFY 0x12 +#define ATAPI_READ_10 0x28 +#define ATAPI_WRITE_10 0x2a + +static int pf_open(struct block_device *bdev, fmode_t mode); +static void do_pf_request(struct request_queue * q); +static int pf_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg); +static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo); + +static int pf_release(struct gendisk *disk, fmode_t mode); + +static int pf_detect(void); +static void do_pf_read(void); +static void do_pf_read_start(void); +static void do_pf_write(void); +static void do_pf_write_start(void); +static void do_pf_read_drq(void); +static void do_pf_write_done(void); + +#define PF_NM 0 +#define PF_RO 1 +#define PF_RW 2 + +#define PF_NAMELEN 8 + +struct pf_unit { + struct pi_adapter pia; /* interface to paride layer */ + struct pi_adapter *pi; + int removable; /* removable media device ? */ + int media_status; /* media present ? WP ? */ + int drive; /* drive */ + int lun; + int access; /* count of active opens ... */ + int present; /* device present ? */ + char name[PF_NAMELEN]; /* pf0, pf1, ... 
*/ + struct gendisk *disk; +}; + +static struct pf_unit units[PF_UNITS]; + +static int pf_identify(struct pf_unit *pf); +static void pf_lock(struct pf_unit *pf, int func); +static void pf_eject(struct pf_unit *pf); +static int pf_check_media(struct gendisk *disk); + +static char pf_scratch[512]; /* scratch block buffer */ + +/* the variables below are used mainly in the I/O request engine, which + processes only one request at a time. +*/ + +static int pf_retries = 0; /* i/o error retry count */ +static int pf_busy = 0; /* request being processed ? */ +static struct request *pf_req; /* current request */ +static int pf_block; /* address of next requested block */ +static int pf_count; /* number of blocks still to do */ +static int pf_run; /* sectors in current cluster */ +static int pf_cmd; /* current command READ/WRITE */ +static struct pf_unit *pf_current;/* unit of current request */ +static int pf_mask; /* stopper for pseudo-int */ +static char *pf_buf; /* buffer for request in progress */ + +/* kernel glue structures */ + +static struct block_device_operations pf_fops = { + .owner = THIS_MODULE, + .open = pf_open, + .release = pf_release, + .locked_ioctl = pf_ioctl, + .getgeo = pf_getgeo, + .media_changed = pf_check_media, +}; + +static void __init pf_init_units(void) +{ + struct pf_unit *pf; + int unit; + + pf_drive_count = 0; + for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) { + struct gendisk *disk = alloc_disk(1); + if (!disk) + continue; + pf->disk = disk; + pf->pi = &pf->pia; + pf->media_status = PF_NM; + pf->drive = (*drives[unit])[D_SLV]; + pf->lun = (*drives[unit])[D_LUN]; + snprintf(pf->name, PF_NAMELEN, "%s%d", name, unit); + disk->major = major; + disk->first_minor = unit; + strcpy(disk->disk_name, pf->name); + disk->fops = &pf_fops; + if (!(*drives[unit])[D_PRT]) + pf_drive_count++; + } +} + +static int pf_open(struct block_device *bdev, fmode_t mode) +{ + struct pf_unit *pf = bdev->bd_disk->private_data; + + pf_identify(pf); + + if (pf->media_status == PF_NM) + return -ENODEV; + + if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE)) + return -EROFS; + + pf->access++; + if (pf->removable) + pf_lock(pf, 1); + + return 0; +} + +static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + struct pf_unit *pf = bdev->bd_disk->private_data; + sector_t capacity = get_capacity(pf->disk); + + if (capacity < PF_FD_MAX) { + geo->cylinders = sector_div(capacity, PF_FD_HDS * PF_FD_SPT); + geo->heads = PF_FD_HDS; + geo->sectors = PF_FD_SPT; + } else { + geo->cylinders = sector_div(capacity, PF_HD_HDS * PF_HD_SPT); + geo->heads = PF_HD_HDS; + geo->sectors = PF_HD_SPT; + } + + return 0; +} + +static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) +{ + struct pf_unit *pf = bdev->bd_disk->private_data; + + if (cmd != CDROMEJECT) + return -EINVAL; + + if (pf->access != 1) + return -EBUSY; + pf_eject(pf); + return 0; +} + +static int pf_release(struct gendisk *disk, fmode_t mode) +{ + struct pf_unit *pf = disk->private_data; + + if (pf->access <= 0) + return -EINVAL; + + pf->access--; + + if (!pf->access && pf->removable) + pf_lock(pf, 0); + + return 0; + +} + +static int pf_check_media(struct gendisk *disk) +{ + return 1; +} + +static inline int status_reg(struct pf_unit *pf) +{ + return pi_read_regr(pf->pi, 1, 6); +} + +static inline int read_reg(struct pf_unit *pf, int reg) +{ + return pi_read_regr(pf->pi, 0, reg); +} + +static inline void write_reg(struct pf_unit *pf, int reg, int val) +{ + pi_write_regr(pf->pi, 0, 
reg, val); +} + +static int pf_wait(struct pf_unit *pf, int go, int stop, char *fun, char *msg) +{ + int j, r, e, s, p; + + j = 0; + while ((((r = status_reg(pf)) & go) || (stop && (!(r & stop)))) + && (j++ < PF_SPIN)) + udelay(PF_SPIN_DEL); + + if ((r & (STAT_ERR & stop)) || (j >= PF_SPIN)) { + s = read_reg(pf, 7); + e = read_reg(pf, 1); + p = read_reg(pf, 2); + if (j >= PF_SPIN) + e |= 0x100; + if (fun) + printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x" + " loop=%d phase=%d\n", + pf->name, fun, msg, r, s, e, j, p); + return (e << 8) + s; + } + return 0; +} + +static int pf_command(struct pf_unit *pf, char *cmd, int dlen, char *fun) +{ + pi_connect(pf->pi); + + write_reg(pf, 6, 0xa0+0x10*pf->drive); + + if (pf_wait(pf, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) { + pi_disconnect(pf->pi); + return -1; + } + + write_reg(pf, 4, dlen % 256); + write_reg(pf, 5, dlen / 256); + write_reg(pf, 7, 0xa0); /* ATAPI packet command */ + + if (pf_wait(pf, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) { + pi_disconnect(pf->pi); + return -1; + } + + if (read_reg(pf, 2) != 1) { + printk("%s: %s: command phase error\n", pf->name, fun); + pi_disconnect(pf->pi); + return -1; + } + + pi_write_block(pf->pi, cmd, 12); + + return 0; +} + +static int pf_completion(struct pf_unit *pf, char *buf, char *fun) +{ + int r, s, n; + + r = pf_wait(pf, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR, + fun, "completion"); + + if ((read_reg(pf, 2) & 2) && (read_reg(pf, 7) & STAT_DRQ)) { + n = (((read_reg(pf, 4) + 256 * read_reg(pf, 5)) + + 3) & 0xfffc); + pi_read_block(pf->pi, buf, n); + } + + s = pf_wait(pf, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done"); + + pi_disconnect(pf->pi); + + return (r ? r : s); +} + +static void pf_req_sense(struct pf_unit *pf, int quiet) +{ + char rs_cmd[12] = + { ATAPI_REQ_SENSE, pf->lun << 5, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 }; + char buf[16]; + int r; + + r = pf_command(pf, rs_cmd, 16, "Request sense"); + mdelay(1); + if (!r) + pf_completion(pf, buf, "Request sense"); + + if ((!r) && (!quiet)) + printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n", + pf->name, buf[2] & 0xf, buf[12], buf[13]); +} + +static int pf_atapi(struct pf_unit *pf, char *cmd, int dlen, char *buf, char *fun) +{ + int r; + + r = pf_command(pf, cmd, dlen, fun); + mdelay(1); + if (!r) + r = pf_completion(pf, buf, fun); + if (r) + pf_req_sense(pf, !fun); + + return r; +} + +static void pf_lock(struct pf_unit *pf, int func) +{ + char lo_cmd[12] = { ATAPI_LOCK, pf->lun << 5, 0, 0, func, 0, 0, 0, 0, 0, 0, 0 }; + + pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "lock" : "unlock"); +} + +static void pf_eject(struct pf_unit *pf) +{ + char ej_cmd[12] = { ATAPI_DOOR, pf->lun << 5, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 }; + + pf_lock(pf, 0); + pf_atapi(pf, ej_cmd, 0, pf_scratch, "eject"); +} + +#define PF_RESET_TMO 30 /* in tenths of a second */ + +static void pf_sleep(int cs) +{ + schedule_timeout_interruptible(cs); +} + +/* the ATAPI standard actually specifies the contents of all 7 registers + after a reset, but the specification is ambiguous concerning the last + two bytes, and different drives interpret the standard differently. 
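+ The expect[] values tested below (error and sector count/number
+ all 1, cylinder low/high 0x14/0xeb) are the signature an ATAPI
+ packet device presents after reset, so a mismatch normally means
+ no ATAPI device answered on that address.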
+ */ + +static int pf_reset(struct pf_unit *pf) +{ + int i, k, flg; + int expect[5] = { 1, 1, 1, 0x14, 0xeb }; + + pi_connect(pf->pi); + write_reg(pf, 6, 0xa0+0x10*pf->drive); + write_reg(pf, 7, 8); + + pf_sleep(20 * HZ / 1000); + + k = 0; + while ((k++ < PF_RESET_TMO) && (status_reg(pf) & STAT_BUSY)) + pf_sleep(HZ / 10); + + flg = 1; + for (i = 0; i < 5; i++) + flg &= (read_reg(pf, i + 1) == expect[i]); + + if (verbose) { + printk("%s: Reset (%d) signature = ", pf->name, k); + for (i = 0; i < 5; i++) + printk("%3x", read_reg(pf, i + 1)); + if (!flg) + printk(" (incorrect)"); + printk("\n"); + } + + pi_disconnect(pf->pi); + return flg - 1; +} + +static void pf_mode_sense(struct pf_unit *pf) +{ + char ms_cmd[12] = + { ATAPI_MODE_SENSE, pf->lun << 5, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0 }; + char buf[8]; + + pf_atapi(pf, ms_cmd, 8, buf, "mode sense"); + pf->media_status = PF_RW; + if (buf[3] & 0x80) + pf->media_status = PF_RO; +} + +static void xs(char *buf, char *targ, int offs, int len) +{ + int j, k, l; + + j = 0; + l = 0; + for (k = 0; k < len; k++) + if ((buf[k + offs] != 0x20) || (buf[k + offs] != l)) + l = targ[j++] = buf[k + offs]; + if (l == 0x20) + j--; + targ[j] = 0; +} + +static int xl(char *buf, int offs) +{ + int v, k; + + v = 0; + for (k = 0; k < 4; k++) + v = v * 256 + (buf[k + offs] & 0xff); + return v; +} + +static void pf_get_capacity(struct pf_unit *pf) +{ + char rc_cmd[12] = { ATAPI_CAPACITY, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + char buf[8]; + int bs; + + if (pf_atapi(pf, rc_cmd, 8, buf, "get capacity")) { + pf->media_status = PF_NM; + return; + } + set_capacity(pf->disk, xl(buf, 0) + 1); + bs = xl(buf, 4); + if (bs != 512) { + set_capacity(pf->disk, 0); + if (verbose) + printk("%s: Drive %d, LUN %d," + " unsupported block size %d\n", + pf->name, pf->drive, pf->lun, bs); + } +} + +static int pf_identify(struct pf_unit *pf) +{ + int dt, s; + char *ms[2] = { "master", "slave" }; + char mf[10], id[18]; + char id_cmd[12] = + { ATAPI_IDENTIFY, pf->lun << 5, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 }; + char buf[36]; + + s = pf_atapi(pf, id_cmd, 36, buf, "identify"); + if (s) + return -1; + + dt = buf[0] & 0x1f; + if ((dt != 0) && (dt != 7)) { + if (verbose) + printk("%s: Drive %d, LUN %d, unsupported type %d\n", + pf->name, pf->drive, pf->lun, dt); + return -1; + } + + xs(buf, mf, 8, 8); + xs(buf, id, 16, 16); + + pf->removable = (buf[1] & 0x80); + + pf_mode_sense(pf); + pf_mode_sense(pf); + pf_mode_sense(pf); + + pf_get_capacity(pf); + + printk("%s: %s %s, %s LUN %d, type %d", + pf->name, mf, id, ms[pf->drive], pf->lun, dt); + if (pf->removable) + printk(", removable"); + if (pf->media_status == PF_NM) + printk(", no media\n"); + else { + if (pf->media_status == PF_RO) + printk(", RO"); + printk(", %llu blocks\n", + (unsigned long long)get_capacity(pf->disk)); + } + return 0; +} + +/* returns 0, with id set if drive is detected + -1, if drive detection failed +*/ +static int pf_probe(struct pf_unit *pf) +{ + if (pf->drive == -1) { + for (pf->drive = 0; pf->drive <= 1; pf->drive++) + if (!pf_reset(pf)) { + if (pf->lun != -1) + return pf_identify(pf); + else + for (pf->lun = 0; pf->lun < 8; pf->lun++) + if (!pf_identify(pf)) + return 0; + } + } else { + if (pf_reset(pf)) + return -1; + if (pf->lun != -1) + return pf_identify(pf); + for (pf->lun = 0; pf->lun < 8; pf->lun++) + if (!pf_identify(pf)) + return 0; + } + return -1; +} + +static int pf_detect(void) +{ + struct pf_unit *pf = units; + int k, unit; + + printk("%s: %s version %s, major %d, cluster %d, nice %d\n", + name, name, 
PF_VERSION, major, cluster, nice); + + k = 0; + if (pf_drive_count == 0) { + if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF, + verbose, pf->name)) { + if (!pf_probe(pf) && pf->disk) { + pf->present = 1; + k++; + } else + pi_release(pf->pi); + } + + } else + for (unit = 0; unit < PF_UNITS; unit++, pf++) { + int *conf = *drives[unit]; + if (!conf[D_PRT]) + continue; + if (pi_init(pf->pi, 0, conf[D_PRT], conf[D_MOD], + conf[D_UNI], conf[D_PRO], conf[D_DLY], + pf_scratch, PI_PF, verbose, pf->name)) { + if (pf->disk && !pf_probe(pf)) { + pf->present = 1; + k++; + } else + pi_release(pf->pi); + } + } + if (k) + return 0; + + printk("%s: No ATAPI disk detected\n", name); + for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) + put_disk(pf->disk); + return -1; +} + +/* The i/o request engine */ + +static int pf_start(struct pf_unit *pf, int cmd, int b, int c) +{ + int i; + char io_cmd[12] = { cmd, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + for (i = 0; i < 4; i++) { + io_cmd[5 - i] = b & 0xff; + b = b >> 8; + } + + io_cmd[8] = c & 0xff; + io_cmd[7] = (c >> 8) & 0xff; + + i = pf_command(pf, io_cmd, c * 512, "start i/o"); + + mdelay(1); + + return i; +} + +static int pf_ready(void) +{ + return (((status_reg(pf_current) & (STAT_BUSY | pf_mask)) == pf_mask)); +} + +static struct request_queue *pf_queue; + +static void pf_end_request(int uptodate) +{ + if (pf_req) { + end_request(pf_req, uptodate); + pf_req = NULL; + } +} + +static void do_pf_request(struct request_queue * q) +{ + if (pf_busy) + return; +repeat: + pf_req = elv_next_request(q); + if (!pf_req) + return; + + pf_current = pf_req->rq_disk->private_data; + pf_block = pf_req->sector; + pf_run = pf_req->nr_sectors; + pf_count = pf_req->current_nr_sectors; + + if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { + pf_end_request(0); + goto repeat; + } + + pf_cmd = rq_data_dir(pf_req); + pf_buf = pf_req->buffer; + pf_retries = 0; + + pf_busy = 1; + if (pf_cmd == READ) + pi_do_claimed(pf_current->pi, do_pf_read); + else if (pf_cmd == WRITE) + pi_do_claimed(pf_current->pi, do_pf_write); + else { + pf_busy = 0; + pf_end_request(0); + goto repeat; + } +} + +static int pf_next_buf(void) +{ + unsigned long saved_flags; + + pf_count--; + pf_run--; + pf_buf += 512; + pf_block++; + if (!pf_run) + return 1; + if (!pf_count) { + spin_lock_irqsave(&pf_spin_lock, saved_flags); + pf_end_request(1); + pf_req = elv_next_request(pf_queue); + spin_unlock_irqrestore(&pf_spin_lock, saved_flags); + if (!pf_req) + return 1; + pf_count = pf_req->current_nr_sectors; + pf_buf = pf_req->buffer; + } + return 0; +} + +static inline void next_request(int success) +{ + unsigned long saved_flags; + + spin_lock_irqsave(&pf_spin_lock, saved_flags); + pf_end_request(success); + pf_busy = 0; + do_pf_request(pf_queue); + spin_unlock_irqrestore(&pf_spin_lock, saved_flags); +} + +/* detach from the calling context - in case the spinlock is held */ +static void do_pf_read(void) +{ + ps_set_intr(do_pf_read_start, NULL, 0, nice); +} + +static void do_pf_read_start(void) +{ + pf_busy = 1; + + if (pf_start(pf_current, ATAPI_READ_10, pf_block, pf_run)) { + pi_disconnect(pf_current->pi); + if (pf_retries < PF_MAX_RETRIES) { + pf_retries++; + pi_do_claimed(pf_current->pi, do_pf_read_start); + return; + } + next_request(0); + return; + } + pf_mask = STAT_DRQ; + ps_set_intr(do_pf_read_drq, pf_ready, PF_TMO, nice); +} + +static void do_pf_read_drq(void) +{ + while (1) { + if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR, + "read block", "completion") & 
STAT_ERR) { + pi_disconnect(pf_current->pi); + if (pf_retries < PF_MAX_RETRIES) { + pf_req_sense(pf_current, 0); + pf_retries++; + pi_do_claimed(pf_current->pi, do_pf_read_start); + return; + } + next_request(0); + return; + } + pi_read_block(pf_current->pi, pf_buf, 512); + if (pf_next_buf()) + break; + } + pi_disconnect(pf_current->pi); + next_request(1); +} + +static void do_pf_write(void) +{ + ps_set_intr(do_pf_write_start, NULL, 0, nice); +} + +static void do_pf_write_start(void) +{ + pf_busy = 1; + + if (pf_start(pf_current, ATAPI_WRITE_10, pf_block, pf_run)) { + pi_disconnect(pf_current->pi); + if (pf_retries < PF_MAX_RETRIES) { + pf_retries++; + pi_do_claimed(pf_current->pi, do_pf_write_start); + return; + } + next_request(0); + return; + } + + while (1) { + if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR, + "write block", "data wait") & STAT_ERR) { + pi_disconnect(pf_current->pi); + if (pf_retries < PF_MAX_RETRIES) { + pf_retries++; + pi_do_claimed(pf_current->pi, do_pf_write_start); + return; + } + next_request(0); + return; + } + pi_write_block(pf_current->pi, pf_buf, 512); + if (pf_next_buf()) + break; + } + pf_mask = 0; + ps_set_intr(do_pf_write_done, pf_ready, PF_TMO, nice); +} + +static void do_pf_write_done(void) +{ + if (pf_wait(pf_current, STAT_BUSY, 0, "write block", "done") & STAT_ERR) { + pi_disconnect(pf_current->pi); + if (pf_retries < PF_MAX_RETRIES) { + pf_retries++; + pi_do_claimed(pf_current->pi, do_pf_write_start); + return; + } + next_request(0); + return; + } + pi_disconnect(pf_current->pi); + next_request(1); +} + +static int __init pf_init(void) +{ /* preliminary initialisation */ + struct pf_unit *pf; + int unit; + + if (disable) + return -EINVAL; + + pf_init_units(); + + if (pf_detect()) + return -ENODEV; + pf_busy = 0; + + if (register_blkdev(major, name)) { + for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) + put_disk(pf->disk); + return -EBUSY; + } + pf_queue = blk_init_queue(do_pf_request, &pf_spin_lock); + if (!pf_queue) { + unregister_blkdev(major, name); + for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) + put_disk(pf->disk); + return -ENOMEM; + } + + blk_queue_max_phys_segments(pf_queue, cluster); + blk_queue_max_hw_segments(pf_queue, cluster); + + for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { + struct gendisk *disk = pf->disk; + + if (!pf->present) + continue; + disk->private_data = pf; + disk->queue = pf_queue; + add_disk(disk); + } + return 0; +} + +static void __exit pf_exit(void) +{ + struct pf_unit *pf; + int unit; + unregister_blkdev(major, name); + for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { + if (!pf->present) + continue; + del_gendisk(pf->disk); + put_disk(pf->disk); + pi_release(pf->pi); + } + blk_cleanup_queue(pf_queue); +} + +MODULE_LICENSE("GPL"); +module_init(pf_init) +module_exit(pf_exit) diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c new file mode 100644 index 0000000..9dfa271 --- /dev/null +++ b/drivers/block/paride/pg.c @@ -0,0 +1,722 @@ +/* + pg.c (c) 1998 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + The pg driver provides a simple character device interface for + sending ATAPI commands to a device. With the exception of the + ATAPI reset operation, all operations are performed by a pair + of read and write operations to the appropriate /dev/pgN device. + A write operation delivers a command and any outbound data in + a single buffer. 
Normally, the write will succeed unless + the device is offline or malfunctioning, or there is already another + command pending. If the write succeeds, it should be followed + immediately by a read operation, to obtain any returned data and + status information. A read will fail if there is no operation + in progress. + + As a special case, the device can be reset with a write operation, + and in this case, no following read is expected, or permitted. + + There are no ioctl() operations. Any single operation + may transfer at most PG_MAX_DATA bytes. Note that the driver must + copy the data through an internal buffer. In keeping with all + current ATAPI devices, command packets are assumed to be exactly + 12 bytes in length. + + To permit future changes to this interface, the headers in the + read and write buffers contain a single character "magic" flag. + Currently this flag must be the character "P". + + By default, the driver will autoprobe for a single parallel + port ATAPI device, but if their individual parameters are + specified, the driver can handle up to 4 devices. + + To use this device, you must have the following device + special files defined: + + /dev/pg0 c 97 0 + /dev/pg1 c 97 1 + /dev/pg2 c 97 2 + /dev/pg3 c 97 3 + + (You'll need to change the 97 to something else if you use + the 'major' parameter to install the driver on a different + major number.) + + The behaviour of the pg driver can be altered by setting + some parameters from the insmod command line. The following + parameters are adjustable: + + drive0 These four arguments can be arrays of + drive1 1-6 integers as follows: + drive2 + drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<dly> + + Where, + + <prt> is the base of the parallel port address for + the corresponding drive. (required) + + <pro> is the protocol number for the adapter that + supports this drive. These numbers are + logged by 'paride' when the protocol modules + are initialised. (0 if not given) + + <uni> for those adapters that support chained + devices, this is the unit selector for the + chain of devices on the given port. It should + be zero for devices that don't support chaining. + (0 if not given) + + <mod> this can be -1 to choose the best mode, or one + of the mode numbers supported by the adapter. + (-1 if not given) + + <slv> ATAPI devices can be jumpered to master or slave. + Set this to 0 to choose the master drive, 1 to + choose the slave, -1 (the default) to choose the + first drive found. + + <dly> some parallel ports require the driver to + go more slowly. -1 sets a default value that + should work with the chosen protocol. Otherwise, + set this to a small integer, the larger it is + the slower the port i/o. In some cases, setting + this to zero will speed up the device. (default -1) + + major You may use this parameter to override the + default major number (97) that this driver + will use. Be sure to change the device + name as well. + + name This parameter is a character string that + contains the name the kernel will use for this + device (in /proc output, for instance). + (default "pg"). + + verbose This parameter controls the amount of logging + that is done by the driver. Set it to 0 for + quiet operation, to 1 to enable progress + messages while the driver probes for devices, + or to 2 for full debug logging.
(default 0) + + If this driver is built into the kernel, you can use + the following command line parameters, with the same values + as the corresponding module parameters listed above: + + pg.drive0 + pg.drive1 + pg.drive2 + pg.drive3 + + In addition, you can use the parameter pg.disable to disable + the driver entirely. + +*/ + +/* Changes: + + 1.01 GRG 1998.06.16 Bug fixes + 1.02 GRG 1998.09.24 Added jumbo support + +*/ + +#define PG_VERSION "1.02" +#define PG_MAJOR 97 +#define PG_NAME "pg" +#define PG_UNITS 4 + +#ifndef PI_PG +#define PI_PG 4 +#endif + +/* Here are things one can override from the insmod command. + Most are autoprobed by paride unless set here. Verbose is 0 + by default. + +*/ + +static int verbose = 0; +static int major = PG_MAJOR; +static char *name = PG_NAME; +static int disable = 0; + +static int drive0[6] = { 0, 0, 0, -1, -1, -1 }; +static int drive1[6] = { 0, 0, 0, -1, -1, -1 }; +static int drive2[6] = { 0, 0, 0, -1, -1, -1 }; +static int drive3[6] = { 0, 0, 0, -1, -1, -1 }; + +static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3}; +static int pg_drive_count; + +enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY}; + +/* end of parameters */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/mtio.h> +#include <linux/pg.h> +#include <linux/device.h> +#include <linux/sched.h> /* current, TASK_* */ +#include <linux/smp_lock.h> +#include <linux/jiffies.h> + +#include <asm/uaccess.h> + +module_param(verbose, bool, 0644); +module_param(major, int, 0); +module_param(name, charp, 0); +module_param_array(drive0, int, NULL, 0); +module_param_array(drive1, int, NULL, 0); +module_param_array(drive2, int, NULL, 0); +module_param_array(drive3, int, NULL, 0); + +#include "paride.h" + +#define PG_SPIN_DEL 50 /* spin delay in micro-seconds */ +#define PG_SPIN 200 +#define PG_TMO HZ +#define PG_RESET_TMO 10*HZ + +#define STAT_ERR 0x01 +#define STAT_INDEX 0x02 +#define STAT_ECC 0x04 +#define STAT_DRQ 0x08 +#define STAT_SEEK 0x10 +#define STAT_WRERR 0x20 +#define STAT_READY 0x40 +#define STAT_BUSY 0x80 + +#define ATAPI_IDENTIFY 0x12 + +static int pg_open(struct inode *inode, struct file *file); +static int pg_release(struct inode *inode, struct file *file); +static ssize_t pg_read(struct file *filp, char __user *buf, + size_t count, loff_t * ppos); +static ssize_t pg_write(struct file *filp, const char __user *buf, + size_t count, loff_t * ppos); +static int pg_detect(void); + +#define PG_NAMELEN 8 + +struct pg { + struct pi_adapter pia; /* interface to paride layer */ + struct pi_adapter *pi; + int busy; /* write done, read expected */ + int start; /* jiffies at command start */ + int dlen; /* transfer size requested */ + unsigned long timeout; /* timeout requested */ + int status; /* last sense key */ + int drive; /* drive */ + unsigned long access; /* count of active opens ... */ + int present; /* device present ? */ + char *bufptr; + char name[PG_NAMELEN]; /* pg0, pg1, ... 
*/ +}; + +static struct pg devices[PG_UNITS]; + +static int pg_identify(struct pg *dev, int log); + +static char pg_scratch[512]; /* scratch block buffer */ + +static struct class *pg_class; + +/* kernel glue structures */ + +static const struct file_operations pg_fops = { + .owner = THIS_MODULE, + .read = pg_read, + .write = pg_write, + .open = pg_open, + .release = pg_release, +}; + +static void pg_init_units(void) +{ + int unit; + + pg_drive_count = 0; + for (unit = 0; unit < PG_UNITS; unit++) { + int *parm = *drives[unit]; + struct pg *dev = &devices[unit]; + dev->pi = &dev->pia; + clear_bit(0, &dev->access); + dev->busy = 0; + dev->present = 0; + dev->bufptr = NULL; + dev->drive = parm[D_SLV]; + snprintf(dev->name, PG_NAMELEN, "%s%c", name, 'a'+unit); + if (parm[D_PRT]) + pg_drive_count++; + } +} + +static inline int status_reg(struct pg *dev) +{ + return pi_read_regr(dev->pi, 1, 6); +} + +static inline int read_reg(struct pg *dev, int reg) +{ + return pi_read_regr(dev->pi, 0, reg); +} + +static inline void write_reg(struct pg *dev, int reg, int val) +{ + pi_write_regr(dev->pi, 0, reg, val); +} + +static inline u8 DRIVE(struct pg *dev) +{ + return 0xa0+0x10*dev->drive; +} + +static void pg_sleep(int cs) +{ + schedule_timeout_interruptible(cs); +} + +static int pg_wait(struct pg *dev, int go, int stop, unsigned long tmo, char *msg) +{ + int j, r, e, s, p, to; + + dev->status = 0; + + j = 0; + while ((((r = status_reg(dev)) & go) || (stop && (!(r & stop)))) + && time_before(jiffies, tmo)) { + if (j++ < PG_SPIN) + udelay(PG_SPIN_DEL); + else + pg_sleep(1); + } + + to = time_after_eq(jiffies, tmo); + + if ((r & (STAT_ERR & stop)) || to) { + s = read_reg(dev, 7); + e = read_reg(dev, 1); + p = read_reg(dev, 2); + if (verbose > 1) + printk("%s: %s: stat=0x%x err=0x%x phase=%d%s\n", + dev->name, msg, s, e, p, to ? " timeout" : ""); + if (to) + e |= 0x100; + dev->status = (e >> 4) & 0xff; + return -1; + } + return 0; +} + +static int pg_command(struct pg *dev, char *cmd, int dlen, unsigned long tmo) +{ + int k; + + pi_connect(dev->pi); + + write_reg(dev, 6, DRIVE(dev)); + + if (pg_wait(dev, STAT_BUSY | STAT_DRQ, 0, tmo, "before command")) + goto fail; + + write_reg(dev, 4, dlen % 256); + write_reg(dev, 5, dlen / 256); + write_reg(dev, 7, 0xa0); /* ATAPI packet command */ + + if (pg_wait(dev, STAT_BUSY, STAT_DRQ, tmo, "command DRQ")) + goto fail; + + if (read_reg(dev, 2) != 1) { + printk("%s: command phase error\n", dev->name); + goto fail; + } + + pi_write_block(dev->pi, cmd, 12); + + if (verbose > 1) { + printk("%s: Command sent, dlen=%d packet= ", dev->name, dlen); + for (k = 0; k < 12; k++) + printk("%02x ", cmd[k] & 0xff); + printk("\n"); + } + return 0; +fail: + pi_disconnect(dev->pi); + return -1; +} + +static int pg_completion(struct pg *dev, char *buf, unsigned long tmo) +{ + int r, d, n, p; + + r = pg_wait(dev, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR, + tmo, "completion"); + + dev->dlen = 0; + + while (read_reg(dev, 7) & STAT_DRQ) { + d = (read_reg(dev, 4) + 256 * read_reg(dev, 5)); + n = ((d + 3) & 0xfffc); + p = read_reg(dev, 2) & 3; + if (p == 0) + pi_write_block(dev->pi, buf, n); + if (p == 2) + pi_read_block(dev->pi, buf, n); + if (verbose > 1) + printk("%s: %s %d bytes\n", dev->name, + p ? 
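+/* phase (reg 2): 2 = data-in, i.e. a read from the device */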
"Read" : "Write", n); + dev->dlen += (1 - p) * d; + buf += d; + r = pg_wait(dev, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR, + tmo, "completion"); + } + + pi_disconnect(dev->pi); + + return r; +} + +static int pg_reset(struct pg *dev) +{ + int i, k, err; + int expect[5] = { 1, 1, 1, 0x14, 0xeb }; + int got[5]; + + pi_connect(dev->pi); + write_reg(dev, 6, DRIVE(dev)); + write_reg(dev, 7, 8); + + pg_sleep(20 * HZ / 1000); + + k = 0; + while ((k++ < PG_RESET_TMO) && (status_reg(dev) & STAT_BUSY)) + pg_sleep(1); + + for (i = 0; i < 5; i++) + got[i] = read_reg(dev, i + 1); + + err = memcmp(expect, got, sizeof(got)) ? -1 : 0; + + if (verbose) { + printk("%s: Reset (%d) signature = ", dev->name, k); + for (i = 0; i < 5; i++) + printk("%3x", got[i]); + if (err) + printk(" (incorrect)"); + printk("\n"); + } + + pi_disconnect(dev->pi); + return err; +} + +static void xs(char *buf, char *targ, int len) +{ + char l = '\0'; + int k; + + for (k = 0; k < len; k++) { + char c = *buf++; + if (c != ' ' || c != l) + l = *targ++ = c; + } + if (l == ' ') + targ--; + *targ = '\0'; +} + +static int pg_identify(struct pg *dev, int log) +{ + int s; + char *ms[2] = { "master", "slave" }; + char mf[10], id[18]; + char id_cmd[12] = { ATAPI_IDENTIFY, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 }; + char buf[36]; + + s = pg_command(dev, id_cmd, 36, jiffies + PG_TMO); + if (s) + return -1; + s = pg_completion(dev, buf, jiffies + PG_TMO); + if (s) + return -1; + + if (log) { + xs(buf + 8, mf, 8); + xs(buf + 16, id, 16); + printk("%s: %s %s, %s\n", dev->name, mf, id, ms[dev->drive]); + } + + return 0; +} + +/* + * returns 0, with id set if drive is detected + * -1, if drive detection failed + */ +static int pg_probe(struct pg *dev) +{ + if (dev->drive == -1) { + for (dev->drive = 0; dev->drive <= 1; dev->drive++) + if (!pg_reset(dev)) + return pg_identify(dev, 1); + } else { + if (!pg_reset(dev)) + return pg_identify(dev, 1); + } + return -1; +} + +static int pg_detect(void) +{ + struct pg *dev = &devices[0]; + int k, unit; + + printk("%s: %s version %s, major %d\n", name, name, PG_VERSION, major); + + k = 0; + if (pg_drive_count == 0) { + if (pi_init(dev->pi, 1, -1, -1, -1, -1, -1, pg_scratch, + PI_PG, verbose, dev->name)) { + if (!pg_probe(dev)) { + dev->present = 1; + k++; + } else + pi_release(dev->pi); + } + + } else + for (unit = 0; unit < PG_UNITS; unit++, dev++) { + int *parm = *drives[unit]; + if (!parm[D_PRT]) + continue; + if (pi_init(dev->pi, 0, parm[D_PRT], parm[D_MOD], + parm[D_UNI], parm[D_PRO], parm[D_DLY], + pg_scratch, PI_PG, verbose, dev->name)) { + if (!pg_probe(dev)) { + dev->present = 1; + k++; + } else + pi_release(dev->pi); + } + } + + if (k) + return 0; + + printk("%s: No ATAPI device detected\n", name); + return -1; +} + +static int pg_open(struct inode *inode, struct file *file) +{ + int unit = iminor(inode) & 0x7f; + struct pg *dev = &devices[unit]; + int ret = 0; + + lock_kernel(); + if ((unit >= PG_UNITS) || (!dev->present)) { + ret = -ENODEV; + goto out; + } + + if (test_and_set_bit(0, &dev->access)) { + ret = -EBUSY; + goto out; + } + + if (dev->busy) { + pg_reset(dev); + dev->busy = 0; + } + + pg_identify(dev, (verbose > 1)); + + dev->bufptr = kmalloc(PG_MAX_DATA, GFP_KERNEL); + if (dev->bufptr == NULL) { + clear_bit(0, &dev->access); + printk("%s: buffer allocation failed\n", dev->name); + ret = -ENOMEM; + goto out; + } + + file->private_data = dev; + +out: + unlock_kernel(); + return ret; +} + +static int pg_release(struct inode *inode, struct file *file) +{ + struct pg *dev = file->private_data; 
+ + kfree(dev->bufptr); + dev->bufptr = NULL; + clear_bit(0, &dev->access); + + return 0; +} + +static ssize_t pg_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) +{ + struct pg *dev = filp->private_data; + struct pg_write_hdr hdr; + int hs = sizeof (hdr); + + if (dev->busy) + return -EBUSY; + if (count < hs) + return -EINVAL; + + if (copy_from_user(&hdr, buf, hs)) + return -EFAULT; + + if (hdr.magic != PG_MAGIC) + return -EINVAL; + if (hdr.dlen > PG_MAX_DATA) + return -EINVAL; + if ((count - hs) > PG_MAX_DATA) + return -EINVAL; + + if (hdr.func == PG_RESET) { + if (count != hs) + return -EINVAL; + if (pg_reset(dev)) + return -EIO; + return count; + } + + if (hdr.func != PG_COMMAND) + return -EINVAL; + + dev->start = jiffies; + dev->timeout = hdr.timeout * HZ + HZ / 2 + jiffies; + + if (pg_command(dev, hdr.packet, hdr.dlen, jiffies + PG_TMO)) { + if (dev->status & 0x10) + return -ETIME; + return -EIO; + } + + dev->busy = 1; + + if (copy_from_user(dev->bufptr, buf + hs, count - hs)) + return -EFAULT; + return count; +} + +static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) +{ + struct pg *dev = filp->private_data; + struct pg_read_hdr hdr; + int hs = sizeof (hdr); + int copy; + + if (!dev->busy) + return -EINVAL; + if (count < hs) + return -EINVAL; + + dev->busy = 0; + + if (pg_completion(dev, dev->bufptr, dev->timeout)) + if (dev->status & 0x10) + return -ETIME; + + hdr.magic = PG_MAGIC; + hdr.dlen = dev->dlen; + copy = 0; + + if (hdr.dlen < 0) { + hdr.dlen = -1 * hdr.dlen; + copy = hdr.dlen; + if (copy > (count - hs)) + copy = count - hs; + } + + hdr.duration = (jiffies - dev->start + HZ / 2) / HZ; + hdr.scsi = dev->status & 0x0f; + + if (copy_to_user(buf, &hdr, hs)) + return -EFAULT; + if (copy > 0) + if (copy_to_user(buf + hs, dev->bufptr, copy)) + return -EFAULT; + return copy + hs; +} + +static int __init pg_init(void) +{ + int unit; + int err; + + if (disable){ + err = -EINVAL; + goto out; + } + + pg_init_units(); + + if (pg_detect()) { + err = -ENODEV; + goto out; + } + + err = register_chrdev(major, name, &pg_fops); + if (err < 0) { + printk("pg_init: unable to get major number %d\n", major); + for (unit = 0; unit < PG_UNITS; unit++) { + struct pg *dev = &devices[unit]; + if (dev->present) + pi_release(dev->pi); + } + goto out; + } + major = err; /* In case the user specified `major=0' (dynamic) */ + pg_class = class_create(THIS_MODULE, "pg"); + if (IS_ERR(pg_class)) { + err = PTR_ERR(pg_class); + goto out_chrdev; + } + for (unit = 0; unit < PG_UNITS; unit++) { + struct pg *dev = &devices[unit]; + if (dev->present) + device_create(pg_class, NULL, MKDEV(major, unit), NULL, + "pg%u", unit); + } + err = 0; + goto out; + +out_chrdev: + unregister_chrdev(major, "pg"); +out: + return err; +} + +static void __exit pg_exit(void) +{ + int unit; + + for (unit = 0; unit < PG_UNITS; unit++) { + struct pg *dev = &devices[unit]; + if (dev->present) + device_destroy(pg_class, MKDEV(major, unit)); + } + class_destroy(pg_class); + unregister_chrdev(major, name); + + for (unit = 0; unit < PG_UNITS; unit++) { + struct pg *dev = &devices[unit]; + if (dev->present) + pi_release(dev->pi); + } +} + +MODULE_LICENSE("GPL"); +module_init(pg_init) +module_exit(pg_exit) diff --git a/drivers/block/paride/ppc6lnx.c b/drivers/block/paride/ppc6lnx.c new file mode 100644 index 0000000..5e5521d --- /dev/null +++ b/drivers/block/paride/ppc6lnx.c @@ -0,0 +1,726 @@ +/* + ppc6lnx.c (c) 2001 Micro Solutions Inc. 
+ Released under the terms of the GNU General Public license + + ppc6lnx.c is a par of the protocol driver for the Micro Solutions + "BACKPACK" parallel port IDE adapter + (Works on Series 6 drives) + +*/ + +//*************************************************************************** + +// PPC 6 Code in C sanitized for LINUX +// Original x86 ASM by Ron, Converted to C by Clive + +//*************************************************************************** + + +#define port_stb 1 +#define port_afd 2 +#define cmd_stb port_afd +#define port_init 4 +#define data_stb port_init +#define port_sel 8 +#define port_int 16 +#define port_dir 0x20 + +#define ECR_EPP 0x80 +#define ECR_BI 0x20 + +//*************************************************************************** + +// 60772 Commands + +#define ACCESS_REG 0x00 +#define ACCESS_PORT 0x40 + +#define ACCESS_READ 0x00 +#define ACCESS_WRITE 0x20 + +// 60772 Command Prefix + +#define CMD_PREFIX_SET 0xe0 // Special command that modifies the next command's operation +#define CMD_PREFIX_RESET 0xc0 // Resets current cmd modifier reg bits + #define PREFIX_IO16 0x01 // perform 16-bit wide I/O + #define PREFIX_FASTWR 0x04 // enable PPC mode fast-write + #define PREFIX_BLK 0x08 // enable block transfer mode + +// 60772 Registers + +#define REG_STATUS 0x00 // status register + #define STATUS_IRQA 0x01 // Peripheral IRQA line + #define STATUS_EEPROM_DO 0x40 // Serial EEPROM data bit +#define REG_VERSION 0x01 // PPC version register (read) +#define REG_HWCFG 0x02 // Hardware Config register +#define REG_RAMSIZE 0x03 // Size of RAM Buffer + #define RAMSIZE_128K 0x02 +#define REG_EEPROM 0x06 // EEPROM control register + #define EEPROM_SK 0x01 // eeprom SK bit + #define EEPROM_DI 0x02 // eeprom DI bit + #define EEPROM_CS 0x04 // eeprom CS bit + #define EEPROM_EN 0x08 // eeprom output enable +#define REG_BLKSIZE 0x08 // Block transfer len (24 bit) + +//*************************************************************************** + +typedef struct ppc_storage { + u16 lpt_addr; // LPT base address + u8 ppc_id; + u8 mode; // operating mode + // 0 = PPC Uni SW + // 1 = PPC Uni FW + // 2 = PPC Bi SW + // 3 = PPC Bi FW + // 4 = EPP Byte + // 5 = EPP Word + // 6 = EPP Dword + u8 ppc_flags; + u8 org_data; // original LPT data port contents + u8 org_ctrl; // original LPT control port contents + u8 cur_ctrl; // current control port contents +} Interface; + +//*************************************************************************** + +// ppc_flags + +#define fifo_wait 0x10 + +//*************************************************************************** + +// DONT CHANGE THESE LEST YOU BREAK EVERYTHING - BIT FIELD DEPENDENCIES + +#define PPCMODE_UNI_SW 0 +#define PPCMODE_UNI_FW 1 +#define PPCMODE_BI_SW 2 +#define PPCMODE_BI_FW 3 +#define PPCMODE_EPP_BYTE 4 +#define PPCMODE_EPP_WORD 5 +#define PPCMODE_EPP_DWORD 6 + +//*************************************************************************** + +static int ppc6_select(Interface *ppc); +static void ppc6_deselect(Interface *ppc); +static void ppc6_send_cmd(Interface *ppc, u8 cmd); +static void ppc6_wr_data_byte(Interface *ppc, u8 data); +static u8 ppc6_rd_data_byte(Interface *ppc); +static u8 ppc6_rd_port(Interface *ppc, u8 port); +static void ppc6_wr_port(Interface *ppc, u8 port, u8 data); +static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count); +static void ppc6_wait_for_fifo(Interface *ppc); +static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count); +static void ppc6_rd_port16_blk(Interface *ppc, u8 
port, u8 *data, long length); +static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length); +static void ppc6_wr_extout(Interface *ppc, u8 regdata); +static int ppc6_open(Interface *ppc); +static void ppc6_close(Interface *ppc); + +//*************************************************************************** + +static int ppc6_select(Interface *ppc) +{ + u8 i, j, k; + + i = inb(ppc->lpt_addr + 1); + + if (i & 1) + outb(i, ppc->lpt_addr + 1); + + ppc->org_data = inb(ppc->lpt_addr); + + ppc->org_ctrl = inb(ppc->lpt_addr + 2) & 0x5F; // readback ctrl + + ppc->cur_ctrl = ppc->org_ctrl; + + ppc->cur_ctrl |= port_sel; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + if (ppc->org_data == 'b') + outb('x', ppc->lpt_addr); + + outb('b', ppc->lpt_addr); + outb('p', ppc->lpt_addr); + outb(ppc->ppc_id, ppc->lpt_addr); + outb(~ppc->ppc_id,ppc->lpt_addr); + + ppc->cur_ctrl &= ~port_sel; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + ppc->cur_ctrl = (ppc->cur_ctrl & port_int) | port_init; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + i = ppc->mode & 0x0C; + + if (i == 0) + i = (ppc->mode & 2) | 1; + + outb(i, ppc->lpt_addr); + + ppc->cur_ctrl |= port_sel; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + // DELAY + + ppc->cur_ctrl |= port_afd; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + j = ((i & 0x08) << 4) | ((i & 0x07) << 3); + + k = inb(ppc->lpt_addr + 1) & 0xB8; + + if (j == k) + { + ppc->cur_ctrl &= ~port_afd; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + k = (inb(ppc->lpt_addr + 1) & 0xB8) ^ 0xB8; + + if (j == k) + { + if (i & 4) // EPP + ppc->cur_ctrl &= ~(port_sel | port_init); + else // PPC/ECP + ppc->cur_ctrl &= ~port_sel; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + return(1); + } + } + + outb(ppc->org_ctrl, ppc->lpt_addr + 2); + + outb(ppc->org_data, ppc->lpt_addr); + + return(0); // FAIL +} + +//*************************************************************************** + +static void ppc6_deselect(Interface *ppc) +{ + if (ppc->mode & 4) // EPP + ppc->cur_ctrl |= port_init; + else // PPC/ECP + ppc->cur_ctrl |= port_sel; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + outb(ppc->org_data, ppc->lpt_addr); + + outb((ppc->org_ctrl | port_sel), ppc->lpt_addr + 2); + + outb(ppc->org_ctrl, ppc->lpt_addr + 2); +} + +//*************************************************************************** + +static void ppc6_send_cmd(Interface *ppc, u8 cmd) +{ + switch(ppc->mode) + { + case PPCMODE_UNI_SW : + case PPCMODE_UNI_FW : + case PPCMODE_BI_SW : + case PPCMODE_BI_FW : + { + outb(cmd, ppc->lpt_addr); + + ppc->cur_ctrl ^= cmd_stb; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + break; + } + + case PPCMODE_EPP_BYTE : + case PPCMODE_EPP_WORD : + case PPCMODE_EPP_DWORD : + { + outb(cmd, ppc->lpt_addr + 3); + + break; + } + } +} + +//*************************************************************************** + +static void ppc6_wr_data_byte(Interface *ppc, u8 data) +{ + switch(ppc->mode) + { + case PPCMODE_UNI_SW : + case PPCMODE_UNI_FW : + case PPCMODE_BI_SW : + case PPCMODE_BI_FW : + { + outb(data, ppc->lpt_addr); + + ppc->cur_ctrl ^= data_stb; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + break; + } + + case PPCMODE_EPP_BYTE : + case PPCMODE_EPP_WORD : + case PPCMODE_EPP_DWORD : + { + outb(data, ppc->lpt_addr + 4); + + break; + } + } +} + +//*************************************************************************** + +static u8 ppc6_rd_data_byte(Interface *ppc) +{ + u8 data = 0; + + switch(ppc->mode) + { + case PPCMODE_UNI_SW : + case PPCMODE_UNI_FW : + { + ppc->cur_ctrl = 
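+			// drop port_stb and toggle data_stb; the status port then carries the first half of the byte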
(ppc->cur_ctrl & ~port_stb) ^ data_stb; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + // DELAY + + data = inb(ppc->lpt_addr + 1); + + data = ((data & 0x80) >> 1) | ((data & 0x38) >> 3); + + ppc->cur_ctrl |= port_stb; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + // DELAY + + data |= inb(ppc->lpt_addr + 1) & 0xB8; + + break; + } + + case PPCMODE_BI_SW : + case PPCMODE_BI_FW : + { + ppc->cur_ctrl |= port_dir; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + ppc->cur_ctrl = (ppc->cur_ctrl | port_stb) ^ data_stb; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + data = inb(ppc->lpt_addr); + + ppc->cur_ctrl &= ~port_stb; + + outb(ppc->cur_ctrl,ppc->lpt_addr + 2); + + ppc->cur_ctrl &= ~port_dir; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + break; + } + + case PPCMODE_EPP_BYTE : + case PPCMODE_EPP_WORD : + case PPCMODE_EPP_DWORD : + { + outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2); + + data = inb(ppc->lpt_addr + 4); + + outb(ppc->cur_ctrl,ppc->lpt_addr + 2); + + break; + } + } + + return(data); +} + +//*************************************************************************** + +static u8 ppc6_rd_port(Interface *ppc, u8 port) +{ + ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_READ)); + + return(ppc6_rd_data_byte(ppc)); +} + +//*************************************************************************** + +static void ppc6_wr_port(Interface *ppc, u8 port, u8 data) +{ + ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_WRITE)); + + ppc6_wr_data_byte(ppc, data); +} + +//*************************************************************************** + +static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count) +{ + switch(ppc->mode) + { + case PPCMODE_UNI_SW : + case PPCMODE_UNI_FW : + { + while(count) + { + u8 d; + + ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + // DELAY + + d = inb(ppc->lpt_addr + 1); + + d = ((d & 0x80) >> 1) | ((d & 0x38) >> 3); + + ppc->cur_ctrl |= port_stb; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + // DELAY + + d |= inb(ppc->lpt_addr + 1) & 0xB8; + + *data++ = d; + count--; + } + + break; + } + + case PPCMODE_BI_SW : + case PPCMODE_BI_FW : + { + ppc->cur_ctrl |= port_dir; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + ppc->cur_ctrl |= port_stb; + + while(count) + { + ppc->cur_ctrl ^= data_stb; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + *data++ = inb(ppc->lpt_addr); + count--; + } + + ppc->cur_ctrl &= ~port_stb; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + ppc->cur_ctrl &= ~port_dir; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + break; + } + + case PPCMODE_EPP_BYTE : + { + outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2); + + // DELAY + + while(count) + { + *data++ = inb(ppc->lpt_addr + 4); + count--; + } + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + break; + } + + case PPCMODE_EPP_WORD : + { + outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2); + + // DELAY + + while(count > 1) + { + *((u16 *)data) = inw(ppc->lpt_addr + 4); + data += 2; + count -= 2; + } + + while(count) + { + *data++ = inb(ppc->lpt_addr + 4); + count--; + } + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + break; + } + + case PPCMODE_EPP_DWORD : + { + outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2); + + // DELAY + + while(count > 3) + { + *((u32 *)data) = inl(ppc->lpt_addr + 4); + data += 4; + count -= 4; + } + + while(count) + { + *data++ = inb(ppc->lpt_addr + 4); + count--; + } + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + break; + } + } + +} + 
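+//***************************************************************************
+
+// Illustrative sketch only (not in the original driver): in the UNI modes
+// the data port cannot be reversed, so ppc6_rd_data_byte() and
+// ppc6_rd_data_blk() rebuild each byte from two status-port reads.  The
+// recombination they perform, written out as a stand-alone helper:
+
+#if 0
+static u8 ppc6_uni_recombine(u8 first, u8 second)
+{
+	// the first read yields bits 6 and 2..0 after shifting,
+	// the second read yields bits 7, 5, 4 and 3 directly (mask 0xB8)
+	return (((first & 0x80) >> 1) | ((first & 0x38) >> 3)) |
+	       (second & 0xB8);
+}
+#endif
+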
+//*************************************************************************** + +static void ppc6_wait_for_fifo(Interface *ppc) +{ + int i; + + if (ppc->ppc_flags & fifo_wait) + { + for(i=0; i<20; i++) + inb(ppc->lpt_addr + 1); + } +} + +//*************************************************************************** + +static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count) +{ + switch(ppc->mode) + { + case PPCMODE_UNI_SW : + case PPCMODE_BI_SW : + { + while(count--) + { + outb(*data++, ppc->lpt_addr); + + ppc->cur_ctrl ^= data_stb; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + } + + break; + } + + case PPCMODE_UNI_FW : + case PPCMODE_BI_FW : + { + u8 this, last; + + ppc6_send_cmd(ppc,(CMD_PREFIX_SET | PREFIX_FASTWR)); + + ppc->cur_ctrl |= port_stb; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + last = *data; + + outb(last, ppc->lpt_addr); + + while(count) + { + this = *data++; + count--; + + if (this == last) + { + ppc->cur_ctrl ^= data_stb; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + } + else + { + outb(this, ppc->lpt_addr); + + last = this; + } + } + + ppc->cur_ctrl &= ~port_stb; + + outb(ppc->cur_ctrl, ppc->lpt_addr + 2); + + ppc6_send_cmd(ppc,(CMD_PREFIX_RESET | PREFIX_FASTWR)); + + break; + } + + case PPCMODE_EPP_BYTE : + { + while(count) + { + outb(*data++,ppc->lpt_addr + 4); + count--; + } + + ppc6_wait_for_fifo(ppc); + + break; + } + + case PPCMODE_EPP_WORD : + { + while(count > 1) + { + outw(*((u16 *)data),ppc->lpt_addr + 4); + data += 2; + count -= 2; + } + + while(count) + { + outb(*data++,ppc->lpt_addr + 4); + count--; + } + + ppc6_wait_for_fifo(ppc); + + break; + } + + case PPCMODE_EPP_DWORD : + { + while(count > 3) + { + outl(*((u32 *)data),ppc->lpt_addr + 4); + data += 4; + count -= 4; + } + + while(count) + { + outb(*data++,ppc->lpt_addr + 4); + count--; + } + + ppc6_wait_for_fifo(ppc); + + break; + } + } +} + +//*************************************************************************** + +static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length) +{ + length = length << 1; + + ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE)); + ppc6_wr_data_byte(ppc,(u8)length); + ppc6_wr_data_byte(ppc,(u8)(length >> 8)); + ppc6_wr_data_byte(ppc,0); + + ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK)); + + ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_READ)); + + ppc6_rd_data_blk(ppc, data, length); + + ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK)); +} + +//*************************************************************************** + +static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length) +{ + length = length << 1; + + ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE)); + ppc6_wr_data_byte(ppc,(u8)length); + ppc6_wr_data_byte(ppc,(u8)(length >> 8)); + ppc6_wr_data_byte(ppc,0); + + ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK)); + + ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_WRITE)); + + ppc6_wr_data_blk(ppc, data, length); + + ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK)); +} + +//*************************************************************************** + +static void ppc6_wr_extout(Interface *ppc, u8 regdata) +{ + ppc6_send_cmd(ppc,(REG_VERSION | ACCESS_REG | ACCESS_WRITE)); + + ppc6_wr_data_byte(ppc, (u8)((regdata & 0x03) << 6)); +} + +//*************************************************************************** + +static int ppc6_open(Interface *ppc) +{ + int ret; + + ret = ppc6_select(ppc); + + if (ret == 0) + 
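+		// select handshake failed; ppc6_select() already restored the port state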
return(ret); + + ppc->ppc_flags &= ~fifo_wait; + + ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_WRITE | REG_RAMSIZE)); + ppc6_wr_data_byte(ppc, RAMSIZE_128K); + + ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_READ | REG_VERSION)); + + if ((ppc6_rd_data_byte(ppc) & 0x3F) == 0x0C) + ppc->ppc_flags |= fifo_wait; + + return(ret); +} + +//*************************************************************************** + +static void ppc6_close(Interface *ppc) +{ + ppc6_deselect(ppc); +} + +//*************************************************************************** + diff --git a/drivers/block/paride/pseudo.h b/drivers/block/paride/pseudo.h new file mode 100644 index 0000000..bc37032 --- /dev/null +++ b/drivers/block/paride/pseudo.h @@ -0,0 +1,102 @@ +/* + pseudo.h (c) 1997-8 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. + + This is the "pseudo-interrupt" logic for parallel port drivers. + + This module is #included into each driver. It makes one + function available: + + ps_set_intr( void (*continuation)(void), + int (*ready)(void), + int timeout, + int nice ) + + Which will arrange for ready() to be evaluated frequently and + when either it returns true, or timeout jiffies have passed, + continuation() will be invoked. + + If nice is 1, the test will done approximately once a + jiffy. If nice is 0, the test will also be done whenever + the scheduler runs (by adding it to a task queue). If + nice is greater than 1, the test will be done once every + (nice-1) jiffies. + +*/ + +/* Changes: + + 1.01 1998.05.03 Switched from cli()/sti() to spinlocks + 1.02 1998.12.14 Added support for nice > 1 +*/ + +#define PS_VERSION "1.02" + +#include <linux/sched.h> +#include <linux/workqueue.h> + +static void ps_tq_int(struct work_struct *work); + +static void (* ps_continuation)(void); +static int (* ps_ready)(void); +static unsigned long ps_timeout; +static int ps_tq_active = 0; +static int ps_nice = 0; + +static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused))); + +static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int); + +static void ps_set_intr(void (*continuation)(void), + int (*ready)(void), + int timeout, int nice) +{ + unsigned long flags; + + spin_lock_irqsave(&ps_spinlock,flags); + + ps_continuation = continuation; + ps_ready = ready; + ps_timeout = jiffies + timeout; + ps_nice = nice; + + if (!ps_tq_active) { + ps_tq_active = 1; + if (!ps_nice) + schedule_delayed_work(&ps_tq, 0); + else + schedule_delayed_work(&ps_tq, ps_nice-1); + } + spin_unlock_irqrestore(&ps_spinlock,flags); +} + +static void ps_tq_int(struct work_struct *work) +{ + void (*con)(void); + unsigned long flags; + + spin_lock_irqsave(&ps_spinlock,flags); + + con = ps_continuation; + ps_tq_active = 0; + + if (!con) { + spin_unlock_irqrestore(&ps_spinlock,flags); + return; + } + if (!ps_ready || ps_ready() || time_after_eq(jiffies, ps_timeout)) { + ps_continuation = NULL; + spin_unlock_irqrestore(&ps_spinlock,flags); + con(); + return; + } + ps_tq_active = 1; + if (!ps_nice) + schedule_delayed_work(&ps_tq, 0); + else + schedule_delayed_work(&ps_tq, ps_nice-1); + spin_unlock_irqrestore(&ps_spinlock,flags); +} + +/* end of pseudo.h */ + diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c new file mode 100644 index 0000000..1e4006e --- /dev/null +++ b/drivers/block/paride/pt.c @@ -0,0 +1,1012 @@ +/* + pt.c (c) 1998 Grant R. Guenther <grant@torque.net> + Under the terms of the GNU General Public License. 
+ + This is the high-level driver for parallel port ATAPI tape + drives based on chips supported by the paride module. + + The driver implements both rewinding and non-rewinding + devices, filemarks, and the rewind ioctl. It allocates + a small internal "bounce buffer" for each open device, but + otherwise expects buffering and blocking to be done at the + user level. As with most block-structured tapes, short + writes are padded to full tape blocks, so reading back a file + may return more data than was actually written. + + By default, the driver will autoprobe for a single parallel + port ATAPI tape drive, but if their individual parameters are + specified, the driver can handle up to 4 drives. + + The rewinding devices are named /dev/pt0, /dev/pt1, ... + while the non-rewinding devices are /dev/npt0, /dev/npt1, etc. + + The behaviour of the pt driver can be altered by setting + some parameters from the insmod command line. The following + parameters are adjustable: + + drive0 These four arguments can be arrays of + drive1 1-6 integers as follows: + drive2 + drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<dly> + + Where, + + <prt> is the base of the parallel port address for + the corresponding drive. (required) + + <pro> is the protocol number for the adapter that + supports this drive. These numbers are + logged by 'paride' when the protocol modules + are initialised. (0 if not given) + + <uni> for those adapters that support chained + devices, this is the unit selector for the + chain of devices on the given port. It should + be zero for devices that don't support chaining. + (0 if not given) + + <mod> this can be -1 to choose the best mode, or one + of the mode numbers supported by the adapter. + (-1 if not given) + + <slv> ATAPI devices can be jumpered to master or slave. + Set this to 0 to choose the master drive, 1 to + choose the slave, -1 (the default) to choose the + first drive found. + + <dly> some parallel ports require the driver to + go more slowly. -1 sets a default value that + should work with the chosen protocol. Otherwise, + set this to a small integer, the larger it is + the slower the port i/o. In some cases, setting + this to zero will speed up the device. (default -1) + + major You may use this parameter to overide the + default major number (96) that this driver + will use. Be sure to change the device + name as well. + + name This parameter is a character string that + contains the name the kernel will use for this + device (in /proc output, for instance). + (default "pt"). + + verbose This parameter controls the amount of logging + that the driver will do. Set it to 0 for + normal operation, 1 to see autoprobe progress + messages, or 2 to see additional debugging + output. (default 0) + + If this driver is built into the kernel, you can use + the following command line parameters, with the same values + as the corresponding module parameters listed above: + + pt.drive0 + pt.drive1 + pt.drive2 + pt.drive3 + + In addition, you can use the parameter pt.disable to disable + the driver entirely. + +*/ + +/* Changes: + + 1.01 GRG 1998.05.06 Round up transfer size, fix ready_wait, + loosed interpretation of ATAPI standard + for clearing error status. + Eliminate sti(); + 1.02 GRG 1998.06.16 Eliminate an Ugh. 
+ 1.03 GRG 1998.08.15 Adjusted PT_TMO, use HZ in loop timing, + extra debugging + 1.04 GRG 1998.09.24 Repair minor coding error, added jumbo support + +*/ + +#define PT_VERSION "1.04" +#define PT_MAJOR 96 +#define PT_NAME "pt" +#define PT_UNITS 4 + +/* Here are things one can override from the insmod command. + Most are autoprobed by paride unless set here. Verbose is on + by default. + +*/ + +static int verbose = 0; +static int major = PT_MAJOR; +static char *name = PT_NAME; +static int disable = 0; + +static int drive0[6] = { 0, 0, 0, -1, -1, -1 }; +static int drive1[6] = { 0, 0, 0, -1, -1, -1 }; +static int drive2[6] = { 0, 0, 0, -1, -1, -1 }; +static int drive3[6] = { 0, 0, 0, -1, -1, -1 }; + +static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3}; + +#define D_PRT 0 +#define D_PRO 1 +#define D_UNI 2 +#define D_MOD 3 +#define D_SLV 4 +#define D_DLY 5 + +#define DU (*drives[unit]) + +/* end of parameters */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/mtio.h> +#include <linux/device.h> +#include <linux/sched.h> /* current, TASK_*, schedule_timeout() */ +#include <linux/smp_lock.h> + +#include <asm/uaccess.h> + +module_param(verbose, bool, 0); +module_param(major, int, 0); +module_param(name, charp, 0); +module_param_array(drive0, int, NULL, 0); +module_param_array(drive1, int, NULL, 0); +module_param_array(drive2, int, NULL, 0); +module_param_array(drive3, int, NULL, 0); + +#include "paride.h" + +#define PT_MAX_RETRIES 5 +#define PT_TMO 3000 /* interrupt timeout in jiffies */ +#define PT_SPIN_DEL 50 /* spin delay in micro-seconds */ +#define PT_RESET_TMO 30 /* 30 seconds */ +#define PT_READY_TMO 60 /* 60 seconds */ +#define PT_REWIND_TMO 1200 /* 20 minutes */ + +#define PT_SPIN ((1000000/(HZ*PT_SPIN_DEL))*PT_TMO) + +#define STAT_ERR 0x00001 +#define STAT_INDEX 0x00002 +#define STAT_ECC 0x00004 +#define STAT_DRQ 0x00008 +#define STAT_SEEK 0x00010 +#define STAT_WRERR 0x00020 +#define STAT_READY 0x00040 +#define STAT_BUSY 0x00080 +#define STAT_SENSE 0x1f000 + +#define ATAPI_TEST_READY 0x00 +#define ATAPI_REWIND 0x01 +#define ATAPI_REQ_SENSE 0x03 +#define ATAPI_READ_6 0x08 +#define ATAPI_WRITE_6 0x0a +#define ATAPI_WFM 0x10 +#define ATAPI_IDENTIFY 0x12 +#define ATAPI_MODE_SENSE 0x1a +#define ATAPI_LOG_SENSE 0x4d + +static int pt_open(struct inode *inode, struct file *file); +static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +static int pt_release(struct inode *inode, struct file *file); +static ssize_t pt_read(struct file *filp, char __user *buf, + size_t count, loff_t * ppos); +static ssize_t pt_write(struct file *filp, const char __user *buf, + size_t count, loff_t * ppos); +static int pt_detect(void); + +/* bits in tape->flags */ + +#define PT_MEDIA 1 +#define PT_WRITE_OK 2 +#define PT_REWIND 4 +#define PT_WRITING 8 +#define PT_READING 16 +#define PT_EOF 32 + +#define PT_NAMELEN 8 +#define PT_BUFSIZE 16384 + +struct pt_unit { + struct pi_adapter pia; /* interface to paride layer */ + struct pi_adapter *pi; + int flags; /* various state flags */ + int last_sense; /* result of last request sense */ + int drive; /* drive */ + atomic_t available; /* 1 if access is available 0 otherwise */ + int bs; /* block size */ + int capacity; /* Size of tape in KB */ + int present; /* device present ? */ + char *bufptr; + char name[PT_NAMELEN]; /* pf0, pf1, ... 
*/ +}; + +static int pt_identify(struct pt_unit *tape); + +static struct pt_unit pt[PT_UNITS]; + +static char pt_scratch[512]; /* scratch block buffer */ + +/* kernel glue structures */ + +static const struct file_operations pt_fops = { + .owner = THIS_MODULE, + .read = pt_read, + .write = pt_write, + .unlocked_ioctl = pt_ioctl, + .open = pt_open, + .release = pt_release, +}; + +/* sysfs class support */ +static struct class *pt_class; + +static inline int status_reg(struct pi_adapter *pi) +{ + return pi_read_regr(pi, 1, 6); +} + +static inline int read_reg(struct pi_adapter *pi, int reg) +{ + return pi_read_regr(pi, 0, reg); +} + +static inline void write_reg(struct pi_adapter *pi, int reg, int val) +{ + pi_write_regr(pi, 0, reg, val); +} + +static inline u8 DRIVE(struct pt_unit *tape) +{ + return 0xa0+0x10*tape->drive; +} + +static int pt_wait(struct pt_unit *tape, int go, int stop, char *fun, char *msg) +{ + int j, r, e, s, p; + struct pi_adapter *pi = tape->pi; + + j = 0; + while ((((r = status_reg(pi)) & go) || (stop && (!(r & stop)))) + && (j++ < PT_SPIN)) + udelay(PT_SPIN_DEL); + + if ((r & (STAT_ERR & stop)) || (j >= PT_SPIN)) { + s = read_reg(pi, 7); + e = read_reg(pi, 1); + p = read_reg(pi, 2); + if (j >= PT_SPIN) + e |= 0x100; + if (fun) + printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x" + " loop=%d phase=%d\n", + tape->name, fun, msg, r, s, e, j, p); + return (e << 8) + s; + } + return 0; +} + +static int pt_command(struct pt_unit *tape, char *cmd, int dlen, char *fun) +{ + struct pi_adapter *pi = tape->pi; + pi_connect(pi); + + write_reg(pi, 6, DRIVE(tape)); + + if (pt_wait(tape, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) { + pi_disconnect(pi); + return -1; + } + + write_reg(pi, 4, dlen % 256); + write_reg(pi, 5, dlen / 256); + write_reg(pi, 7, 0xa0); /* ATAPI packet command */ + + if (pt_wait(tape, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) { + pi_disconnect(pi); + return -1; + } + + if (read_reg(pi, 2) != 1) { + printk("%s: %s: command phase error\n", tape->name, fun); + pi_disconnect(pi); + return -1; + } + + pi_write_block(pi, cmd, 12); + + return 0; +} + +static int pt_completion(struct pt_unit *tape, char *buf, char *fun) +{ + struct pi_adapter *pi = tape->pi; + int r, s, n, p; + + r = pt_wait(tape, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR, + fun, "completion"); + + if (read_reg(pi, 7) & STAT_DRQ) { + n = (((read_reg(pi, 4) + 256 * read_reg(pi, 5)) + + 3) & 0xfffc); + p = read_reg(pi, 2) & 3; + if (p == 0) + pi_write_block(pi, buf, n); + if (p == 2) + pi_read_block(pi, buf, n); + } + + s = pt_wait(tape, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done"); + + pi_disconnect(pi); + + return (r ? 
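+		/* report the completion-wait failure if any, else the data-done status */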
r : s); +} + +static void pt_req_sense(struct pt_unit *tape, int quiet) +{ + char rs_cmd[12] = { ATAPI_REQ_SENSE, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 }; + char buf[16]; + int r; + + r = pt_command(tape, rs_cmd, 16, "Request sense"); + mdelay(1); + if (!r) + pt_completion(tape, buf, "Request sense"); + + tape->last_sense = -1; + if (!r) { + if (!quiet) + printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n", + tape->name, buf[2] & 0xf, buf[12], buf[13]); + tape->last_sense = (buf[2] & 0xf) | ((buf[12] & 0xff) << 8) + | ((buf[13] & 0xff) << 16); + } +} + +static int pt_atapi(struct pt_unit *tape, char *cmd, int dlen, char *buf, char *fun) +{ + int r; + + r = pt_command(tape, cmd, dlen, fun); + mdelay(1); + if (!r) + r = pt_completion(tape, buf, fun); + if (r) + pt_req_sense(tape, !fun); + + return r; +} + +static void pt_sleep(int cs) +{ + schedule_timeout_interruptible(cs); +} + +static int pt_poll_dsc(struct pt_unit *tape, int pause, int tmo, char *msg) +{ + struct pi_adapter *pi = tape->pi; + int k, e, s; + + k = 0; + e = 0; + s = 0; + while (k < tmo) { + pt_sleep(pause); + k++; + pi_connect(pi); + write_reg(pi, 6, DRIVE(tape)); + s = read_reg(pi, 7); + e = read_reg(pi, 1); + pi_disconnect(pi); + if (s & (STAT_ERR | STAT_SEEK)) + break; + } + if ((k >= tmo) || (s & STAT_ERR)) { + if (k >= tmo) + printk("%s: %s DSC timeout\n", tape->name, msg); + else + printk("%s: %s stat=0x%x err=0x%x\n", tape->name, msg, s, + e); + pt_req_sense(tape, 0); + return 0; + } + return 1; +} + +static void pt_media_access_cmd(struct pt_unit *tape, int tmo, char *cmd, char *fun) +{ + if (pt_command(tape, cmd, 0, fun)) { + pt_req_sense(tape, 0); + return; + } + pi_disconnect(tape->pi); + pt_poll_dsc(tape, HZ, tmo, fun); +} + +static void pt_rewind(struct pt_unit *tape) +{ + char rw_cmd[12] = { ATAPI_REWIND, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + pt_media_access_cmd(tape, PT_REWIND_TMO, rw_cmd, "rewind"); +} + +static void pt_write_fm(struct pt_unit *tape) +{ + char wm_cmd[12] = { ATAPI_WFM, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 }; + + pt_media_access_cmd(tape, PT_TMO, wm_cmd, "write filemark"); +} + +#define DBMSG(msg) ((verbose>1)?(msg):NULL) + +static int pt_reset(struct pt_unit *tape) +{ + struct pi_adapter *pi = tape->pi; + int i, k, flg; + int expect[5] = { 1, 1, 1, 0x14, 0xeb }; + + pi_connect(pi); + write_reg(pi, 6, DRIVE(tape)); + write_reg(pi, 7, 8); + + pt_sleep(20 * HZ / 1000); + + k = 0; + while ((k++ < PT_RESET_TMO) && (status_reg(pi) & STAT_BUSY)) + pt_sleep(HZ / 10); + + flg = 1; + for (i = 0; i < 5; i++) + flg &= (read_reg(pi, i + 1) == expect[i]); + + if (verbose) { + printk("%s: Reset (%d) signature = ", tape->name, k); + for (i = 0; i < 5; i++) + printk("%3x", read_reg(pi, i + 1)); + if (!flg) + printk(" (incorrect)"); + printk("\n"); + } + + pi_disconnect(pi); + return flg - 1; +} + +static int pt_ready_wait(struct pt_unit *tape, int tmo) +{ + char tr_cmd[12] = { ATAPI_TEST_READY, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + int k, p; + + k = 0; + while (k < tmo) { + tape->last_sense = 0; + pt_atapi(tape, tr_cmd, 0, NULL, DBMSG("test unit ready")); + p = tape->last_sense; + if (!p) + return 0; + if (!(((p & 0xffff) == 0x0402) || ((p & 0xff) == 6))) + return p; + k++; + pt_sleep(HZ); + } + return 0x000020; /* timeout */ +} + +static void xs(char *buf, char *targ, int offs, int len) +{ + int j, k, l; + + j = 0; + l = 0; + for (k = 0; k < len; k++) + if ((buf[k + offs] != 0x20) || (buf[k + offs] != l)) + l = targ[j++] = buf[k + offs]; + if (l == 0x20) + j--; + targ[j] = 0; +} + +static int xn(char *buf, int offs, int 
size) +{ + int v, k; + + v = 0; + for (k = 0; k < size; k++) + v = v * 256 + (buf[k + offs] & 0xff); + return v; +} + +static int pt_identify(struct pt_unit *tape) +{ + int dt, s; + char *ms[2] = { "master", "slave" }; + char mf[10], id[18]; + char id_cmd[12] = { ATAPI_IDENTIFY, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 }; + char ms_cmd[12] = + { ATAPI_MODE_SENSE, 0, 0x2a, 0, 36, 0, 0, 0, 0, 0, 0, 0 }; + char ls_cmd[12] = + { ATAPI_LOG_SENSE, 0, 0x71, 0, 0, 0, 0, 0, 36, 0, 0, 0 }; + char buf[36]; + + s = pt_atapi(tape, id_cmd, 36, buf, "identify"); + if (s) + return -1; + + dt = buf[0] & 0x1f; + if (dt != 1) { + if (verbose) + printk("%s: Drive %d, unsupported type %d\n", + tape->name, tape->drive, dt); + return -1; + } + + xs(buf, mf, 8, 8); + xs(buf, id, 16, 16); + + tape->flags = 0; + tape->capacity = 0; + tape->bs = 0; + + if (!pt_ready_wait(tape, PT_READY_TMO)) + tape->flags |= PT_MEDIA; + + if (!pt_atapi(tape, ms_cmd, 36, buf, "mode sense")) { + if (!(buf[2] & 0x80)) + tape->flags |= PT_WRITE_OK; + tape->bs = xn(buf, 10, 2); + } + + if (!pt_atapi(tape, ls_cmd, 36, buf, "log sense")) + tape->capacity = xn(buf, 24, 4); + + printk("%s: %s %s, %s", tape->name, mf, id, ms[tape->drive]); + if (!(tape->flags & PT_MEDIA)) + printk(", no media\n"); + else { + if (!(tape->flags & PT_WRITE_OK)) + printk(", RO"); + printk(", blocksize %d, %d MB\n", tape->bs, tape->capacity / 1024); + } + + return 0; +} + + +/* + * returns 0, with id set if drive is detected + * -1, if drive detection failed + */ +static int pt_probe(struct pt_unit *tape) +{ + if (tape->drive == -1) { + for (tape->drive = 0; tape->drive <= 1; tape->drive++) + if (!pt_reset(tape)) + return pt_identify(tape); + } else { + if (!pt_reset(tape)) + return pt_identify(tape); + } + return -1; +} + +static int pt_detect(void) +{ + struct pt_unit *tape; + int specified = 0, found = 0; + int unit; + + printk("%s: %s version %s, major %d\n", name, name, PT_VERSION, major); + + specified = 0; + for (unit = 0; unit < PT_UNITS; unit++) { + struct pt_unit *tape = &pt[unit]; + tape->pi = &tape->pia; + atomic_set(&tape->available, 1); + tape->flags = 0; + tape->last_sense = 0; + tape->present = 0; + tape->bufptr = NULL; + tape->drive = DU[D_SLV]; + snprintf(tape->name, PT_NAMELEN, "%s%d", name, unit); + if (!DU[D_PRT]) + continue; + specified++; + if (pi_init(tape->pi, 0, DU[D_PRT], DU[D_MOD], DU[D_UNI], + DU[D_PRO], DU[D_DLY], pt_scratch, PI_PT, + verbose, tape->name)) { + if (!pt_probe(tape)) { + tape->present = 1; + found++; + } else + pi_release(tape->pi); + } + } + if (specified == 0) { + tape = pt; + if (pi_init(tape->pi, 1, -1, -1, -1, -1, -1, pt_scratch, + PI_PT, verbose, tape->name)) { + if (!pt_probe(tape)) { + tape->present = 1; + found++; + } else + pi_release(tape->pi); + } + + } + if (found) + return 0; + + printk("%s: No ATAPI tape drive detected\n", name); + return -1; +} + +static int pt_open(struct inode *inode, struct file *file) +{ + int unit = iminor(inode) & 0x7F; + struct pt_unit *tape = pt + unit; + int err; + + lock_kernel(); + if (unit >= PT_UNITS || (!tape->present)) { + unlock_kernel(); + return -ENODEV; + } + + err = -EBUSY; + if (!atomic_dec_and_test(&tape->available)) + goto out; + + pt_identify(tape); + + err = -ENODEV; + if (!(tape->flags & PT_MEDIA)) + goto out; + + err = -EROFS; + if ((!(tape->flags & PT_WRITE_OK)) && (file->f_mode & FMODE_WRITE)) + goto out; + + if (!(iminor(inode) & 128)) + tape->flags |= PT_REWIND; + + err = -ENOMEM; + tape->bufptr = kmalloc(PT_BUFSIZE, GFP_KERNEL); + if (tape->bufptr == NULL) { + 
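+		/* err is already -ENOMEM; the exit path below gives the unit back */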
printk("%s: buffer allocation failed\n", tape->name); + goto out; + } + + file->private_data = tape; + unlock_kernel(); + return 0; + +out: + atomic_inc(&tape->available); + unlock_kernel(); + return err; +} + +static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct pt_unit *tape = file->private_data; + struct mtop __user *p = (void __user *)arg; + struct mtop mtop; + + switch (cmd) { + case MTIOCTOP: + if (copy_from_user(&mtop, p, sizeof(struct mtop))) + return -EFAULT; + + switch (mtop.mt_op) { + + case MTREW: + lock_kernel(); + pt_rewind(tape); + unlock_kernel(); + return 0; + + case MTWEOF: + lock_kernel(); + pt_write_fm(tape); + unlock_kernel(); + return 0; + + default: + /* FIXME: rate limit ?? */ + printk(KERN_DEBUG "%s: Unimplemented mt_op %d\n", tape->name, + mtop.mt_op); + return -EINVAL; + } + + default: + return -ENOTTY; + } +} + +static int +pt_release(struct inode *inode, struct file *file) +{ + struct pt_unit *tape = file->private_data; + + if (atomic_read(&tape->available) > 1) + return -EINVAL; + + if (tape->flags & PT_WRITING) + pt_write_fm(tape); + + if (tape->flags & PT_REWIND) + pt_rewind(tape); + + kfree(tape->bufptr); + tape->bufptr = NULL; + + atomic_inc(&tape->available); + + return 0; + +} + +static ssize_t pt_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) +{ + struct pt_unit *tape = filp->private_data; + struct pi_adapter *pi = tape->pi; + char rd_cmd[12] = { ATAPI_READ_6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + int k, n, r, p, s, t, b; + + if (!(tape->flags & (PT_READING | PT_WRITING))) { + tape->flags |= PT_READING; + if (pt_atapi(tape, rd_cmd, 0, NULL, "start read-ahead")) + return -EIO; + } else if (tape->flags & PT_WRITING) + return -EIO; + + if (tape->flags & PT_EOF) + return 0; + + t = 0; + + while (count > 0) { + + if (!pt_poll_dsc(tape, HZ / 100, PT_TMO, "read")) + return -EIO; + + n = count; + if (n > 32768) + n = 32768; /* max per command */ + b = (n - 1 + tape->bs) / tape->bs; + n = b * tape->bs; /* rounded up to even block */ + + rd_cmd[4] = b; + + r = pt_command(tape, rd_cmd, n, "read"); + + mdelay(1); + + if (r) { + pt_req_sense(tape, 0); + return -EIO; + } + + while (1) { + + r = pt_wait(tape, STAT_BUSY, + STAT_DRQ | STAT_ERR | STAT_READY, + DBMSG("read DRQ"), ""); + + if (r & STAT_SENSE) { + pi_disconnect(pi); + pt_req_sense(tape, 0); + return -EIO; + } + + if (r) + tape->flags |= PT_EOF; + + s = read_reg(pi, 7); + + if (!(s & STAT_DRQ)) + break; + + n = (read_reg(pi, 4) + 256 * read_reg(pi, 5)); + p = (read_reg(pi, 2) & 3); + if (p != 2) { + pi_disconnect(pi); + printk("%s: Phase error on read: %d\n", tape->name, + p); + return -EIO; + } + + while (n > 0) { + k = n; + if (k > PT_BUFSIZE) + k = PT_BUFSIZE; + pi_read_block(pi, tape->bufptr, k); + n -= k; + b = k; + if (b > count) + b = count; + if (copy_to_user(buf + t, tape->bufptr, b)) { + pi_disconnect(pi); + return -EFAULT; + } + t += b; + count -= b; + } + + } + pi_disconnect(pi); + if (tape->flags & PT_EOF) + break; + } + + return t; + +} + +static ssize_t pt_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) +{ + struct pt_unit *tape = filp->private_data; + struct pi_adapter *pi = tape->pi; + char wr_cmd[12] = { ATAPI_WRITE_6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + int k, n, r, p, s, t, b; + + if (!(tape->flags & PT_WRITE_OK)) + return -EROFS; + + if (!(tape->flags & (PT_READING | PT_WRITING))) { + tape->flags |= PT_WRITING; + if (pt_atapi + (tape, wr_cmd, 0, NULL, "start buffer-available mode")) + return -EIO; + 
} else if (tape->flags & PT_READING) + return -EIO; + + if (tape->flags & PT_EOF) + return -ENOSPC; + + t = 0; + + while (count > 0) { + + if (!pt_poll_dsc(tape, HZ / 100, PT_TMO, "write")) + return -EIO; + + n = count; + if (n > 32768) + n = 32768; /* max per command */ + b = (n - 1 + tape->bs) / tape->bs; + n = b * tape->bs; /* rounded up to even block */ + + wr_cmd[4] = b; + + r = pt_command(tape, wr_cmd, n, "write"); + + mdelay(1); + + if (r) { /* error delivering command only */ + pt_req_sense(tape, 0); + return -EIO; + } + + while (1) { + + r = pt_wait(tape, STAT_BUSY, + STAT_DRQ | STAT_ERR | STAT_READY, + DBMSG("write DRQ"), NULL); + + if (r & STAT_SENSE) { + pi_disconnect(pi); + pt_req_sense(tape, 0); + return -EIO; + } + + if (r) + tape->flags |= PT_EOF; + + s = read_reg(pi, 7); + + if (!(s & STAT_DRQ)) + break; + + n = (read_reg(pi, 4) + 256 * read_reg(pi, 5)); + p = (read_reg(pi, 2) & 3); + if (p != 0) { + pi_disconnect(pi); + printk("%s: Phase error on write: %d \n", + tape->name, p); + return -EIO; + } + + while (n > 0) { + k = n; + if (k > PT_BUFSIZE) + k = PT_BUFSIZE; + b = k; + if (b > count) + b = count; + if (copy_from_user(tape->bufptr, buf + t, b)) { + pi_disconnect(pi); + return -EFAULT; + } + pi_write_block(pi, tape->bufptr, k); + t += b; + count -= b; + n -= k; + } + + } + pi_disconnect(pi); + if (tape->flags & PT_EOF) + break; + } + + return t; +} + +static int __init pt_init(void) +{ + int unit; + int err; + + if (disable) { + err = -EINVAL; + goto out; + } + + if (pt_detect()) { + err = -ENODEV; + goto out; + } + + err = register_chrdev(major, name, &pt_fops); + if (err < 0) { + printk("pt_init: unable to get major number %d\n", major); + for (unit = 0; unit < PT_UNITS; unit++) + if (pt[unit].present) + pi_release(pt[unit].pi); + goto out; + } + major = err; + pt_class = class_create(THIS_MODULE, "pt"); + if (IS_ERR(pt_class)) { + err = PTR_ERR(pt_class); + goto out_chrdev; + } + + for (unit = 0; unit < PT_UNITS; unit++) + if (pt[unit].present) { + device_create(pt_class, NULL, MKDEV(major, unit), NULL, + "pt%d", unit); + device_create(pt_class, NULL, MKDEV(major, unit + 128), + NULL, "pt%dn", unit); + } + goto out; + +out_chrdev: + unregister_chrdev(major, "pt"); +out: + return err; +} + +static void __exit pt_exit(void) +{ + int unit; + for (unit = 0; unit < PT_UNITS; unit++) + if (pt[unit].present) { + device_destroy(pt_class, MKDEV(major, unit)); + device_destroy(pt_class, MKDEV(major, unit + 128)); + } + class_destroy(pt_class); + unregister_chrdev(major, name); + for (unit = 0; unit < PT_UNITS; unit++) + if (pt[unit].present) + pi_release(pt[unit].pi); +} + +MODULE_LICENSE("GPL"); +module_init(pt_init) +module_exit(pt_exit) diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c new file mode 100644 index 0000000..dc7a8c3 --- /dev/null +++ b/drivers/block/pktcdvd.c @@ -0,0 +1,3128 @@ +/* + * Copyright (C) 2000 Jens Axboe <axboe@suse.de> + * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com> + * Copyright (C) 2006 Thomas Maier <balagi@justmail.de> + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and + * DVD-RAM devices. + * + * Theory of operation: + * + * At the lowest level, there is the standard driver for the CD/DVD device, + * typically ide-cd.c or sr.c. 
This driver can handle read and write requests, + * but it doesn't know anything about the special restrictions that apply to + * packet writing. One restriction is that write requests must be aligned to + * packet boundaries on the physical media, and the size of a write request + * must be equal to the packet size. Another restriction is that a + * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read + * command, if the previous command was a write. + * + * The purpose of the packet writing driver is to hide these restrictions from + * higher layers, such as file systems, and present a block device that can be + * randomly read and written using 2kB-sized blocks. + * + * The lowest layer in the packet writing driver is the packet I/O scheduler. + * Its data is defined by the struct packet_iosched and includes two bio + * queues with pending read and write requests. These queues are processed + * by the pkt_iosched_process_queue() function. The write requests in this + * queue are already properly aligned and sized. This layer is responsible for + * issuing the flush cache commands and scheduling the I/O in a good order. + * + * The next layer transforms unaligned write requests to aligned writes. This + * transformation requires reading missing pieces of data from the underlying + * block device, assembling the pieces to full packets and queuing them to the + * packet I/O scheduler. + * + * At the top layer there is a custom make_request_fn function that forwards + * read requests directly to the iosched queue and puts write requests in the + * unaligned write queue. A kernel thread performs the necessary read + * gathering to convert the unaligned writes to aligned writes and then feeds + * them to the packet I/O scheduler. + * + *************************************************************************/ + +#include <linux/pktcdvd.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/errno.h> +#include <linux/spinlock.h> +#include <linux/file.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/miscdevice.h> +#include <linux/freezer.h> +#include <linux/mutex.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_ioctl.h> +#include <scsi/scsi.h> +#include <linux/debugfs.h> +#include <linux/device.h> + +#include <asm/uaccess.h> + +#define DRIVER_NAME "pktcdvd" + +#if PACKET_DEBUG +#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args) +#else +#define DPRINTK(fmt, args...) +#endif + +#if PACKET_DEBUG > 1 +#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args) +#else +#define VPRINTK(fmt, args...) +#endif + +#define MAX_SPEED 0xffff + +#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1)) + +static struct pktcdvd_device *pkt_devs[MAX_WRITERS]; +static struct proc_dir_entry *pkt_proc; +static int pktdev_major; +static int write_congestion_on = PKT_WRITE_CONGESTION_ON; +static int write_congestion_off = PKT_WRITE_CONGESTION_OFF; +static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */ +static mempool_t *psd_pool; + +static struct class *class_pktcdvd = NULL; /* /sys/class/pktcdvd */ +static struct dentry *pkt_debugfs_root = NULL; /* /debug/pktcdvd */ + +/* forward declaration */ +static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev); +static int pkt_remove_dev(dev_t pkt_dev); +static int pkt_seq_show(struct seq_file *m, void *p); + + + +/* + * create and register a pktcdvd kernel object. 
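+ * Returns the new object on success, or NULL when allocation or kobject
+ * registration fails (in the latter case the kobject reference is dropped).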
+ */ +static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd, + const char* name, + struct kobject* parent, + struct kobj_type* ktype) +{ + struct pktcdvd_kobj *p; + int error; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return NULL; + p->pd = pd; + error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name); + if (error) { + kobject_put(&p->kobj); + return NULL; + } + kobject_uevent(&p->kobj, KOBJ_ADD); + return p; +} +/* + * remove a pktcdvd kernel object. + */ +static void pkt_kobj_remove(struct pktcdvd_kobj *p) +{ + if (p) + kobject_put(&p->kobj); +} +/* + * default release function for pktcdvd kernel objects. + */ +static void pkt_kobj_release(struct kobject *kobj) +{ + kfree(to_pktcdvdkobj(kobj)); +} + + +/********************************************************** + * + * sysfs interface for pktcdvd + * by (C) 2006 Thomas Maier <balagi@justmail.de> + * + **********************************************************/ + +#define DEF_ATTR(_obj,_name,_mode) \ + static struct attribute _obj = { .name = _name, .mode = _mode } + +/********************************************************** + /sys/class/pktcdvd/pktcdvd[0-7]/ + stat/reset + stat/packets_started + stat/packets_finished + stat/kb_written + stat/kb_read + stat/kb_read_gather + write_queue/size + write_queue/congestion_off + write_queue/congestion_on + **********************************************************/ + +DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200); +DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444); +DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444); +DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444); +DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444); +DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444); + +static struct attribute *kobj_pkt_attrs_stat[] = { + &kobj_pkt_attr_st1, + &kobj_pkt_attr_st2, + &kobj_pkt_attr_st3, + &kobj_pkt_attr_st4, + &kobj_pkt_attr_st5, + &kobj_pkt_attr_st6, + NULL +}; + +DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444); +DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644); +DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on", 0644); + +static struct attribute *kobj_pkt_attrs_wqueue[] = { + &kobj_pkt_attr_wq1, + &kobj_pkt_attr_wq2, + &kobj_pkt_attr_wq3, + NULL +}; + +static ssize_t kobj_pkt_show(struct kobject *kobj, + struct attribute *attr, char *data) +{ + struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd; + int n = 0; + int v; + if (strcmp(attr->name, "packets_started") == 0) { + n = sprintf(data, "%lu\n", pd->stats.pkt_started); + + } else if (strcmp(attr->name, "packets_finished") == 0) { + n = sprintf(data, "%lu\n", pd->stats.pkt_ended); + + } else if (strcmp(attr->name, "kb_written") == 0) { + n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1); + + } else if (strcmp(attr->name, "kb_read") == 0) { + n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1); + + } else if (strcmp(attr->name, "kb_read_gather") == 0) { + n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1); + + } else if (strcmp(attr->name, "size") == 0) { + spin_lock(&pd->lock); + v = pd->bio_queue_size; + spin_unlock(&pd->lock); + n = sprintf(data, "%d\n", v); + + } else if (strcmp(attr->name, "congestion_off") == 0) { + spin_lock(&pd->lock); + v = pd->write_congestion_off; + spin_unlock(&pd->lock); + n = sprintf(data, "%d\n", v); + + } else if (strcmp(attr->name, "congestion_on") == 0) { + spin_lock(&pd->lock); + v = pd->write_congestion_on; + spin_unlock(&pd->lock); + n = sprintf(data, "%d\n", v); + } + return n; +} + +static void init_write_congestion_marks(int* lo, int* hi) +{ + if (*hi > 0) { + *hi = max(*hi, 
500); + *hi = min(*hi, 1000000); + if (*lo <= 0) + *lo = *hi - 100; + else { + *lo = min(*lo, *hi - 100); + *lo = max(*lo, 100); + } + } else { + *hi = -1; + *lo = -1; + } +} + +static ssize_t kobj_pkt_store(struct kobject *kobj, + struct attribute *attr, + const char *data, size_t len) +{ + struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd; + int val; + + if (strcmp(attr->name, "reset") == 0 && len > 0) { + pd->stats.pkt_started = 0; + pd->stats.pkt_ended = 0; + pd->stats.secs_w = 0; + pd->stats.secs_rg = 0; + pd->stats.secs_r = 0; + + } else if (strcmp(attr->name, "congestion_off") == 0 + && sscanf(data, "%d", &val) == 1) { + spin_lock(&pd->lock); + pd->write_congestion_off = val; + init_write_congestion_marks(&pd->write_congestion_off, + &pd->write_congestion_on); + spin_unlock(&pd->lock); + + } else if (strcmp(attr->name, "congestion_on") == 0 + && sscanf(data, "%d", &val) == 1) { + spin_lock(&pd->lock); + pd->write_congestion_on = val; + init_write_congestion_marks(&pd->write_congestion_off, + &pd->write_congestion_on); + spin_unlock(&pd->lock); + } + return len; +} + +static struct sysfs_ops kobj_pkt_ops = { + .show = kobj_pkt_show, + .store = kobj_pkt_store +}; +static struct kobj_type kobj_pkt_type_stat = { + .release = pkt_kobj_release, + .sysfs_ops = &kobj_pkt_ops, + .default_attrs = kobj_pkt_attrs_stat +}; +static struct kobj_type kobj_pkt_type_wqueue = { + .release = pkt_kobj_release, + .sysfs_ops = &kobj_pkt_ops, + .default_attrs = kobj_pkt_attrs_wqueue +}; + +static void pkt_sysfs_dev_new(struct pktcdvd_device *pd) +{ + if (class_pktcdvd) { + pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL, + "%s", pd->name); + if (IS_ERR(pd->dev)) + pd->dev = NULL; + } + if (pd->dev) { + pd->kobj_stat = pkt_kobj_create(pd, "stat", + &pd->dev->kobj, + &kobj_pkt_type_stat); + pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue", + &pd->dev->kobj, + &kobj_pkt_type_wqueue); + } +} + +static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd) +{ + pkt_kobj_remove(pd->kobj_stat); + pkt_kobj_remove(pd->kobj_wqueue); + if (class_pktcdvd) + device_destroy(class_pktcdvd, pd->pkt_dev); +} + + +/******************************************************************** + /sys/class/pktcdvd/ + add map block device + remove unmap packet dev + device_map show mappings + *******************************************************************/ + +static void class_pktcdvd_release(struct class *cls) +{ + kfree(cls); +} +static ssize_t class_pktcdvd_show_map(struct class *c, char *data) +{ + int n = 0; + int idx; + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + for (idx = 0; idx < MAX_WRITERS; idx++) { + struct pktcdvd_device *pd = pkt_devs[idx]; + if (!pd) + continue; + n += sprintf(data+n, "%s %u:%u %u:%u\n", + pd->name, + MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev), + MAJOR(pd->bdev->bd_dev), + MINOR(pd->bdev->bd_dev)); + } + mutex_unlock(&ctl_mutex); + return n; +} + +static ssize_t class_pktcdvd_store_add(struct class *c, const char *buf, + size_t count) +{ + unsigned int major, minor; + + if (sscanf(buf, "%u:%u", &major, &minor) == 2) { + /* pkt_setup_dev() expects caller to hold reference to self */ + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + pkt_setup_dev(MKDEV(major, minor), NULL); + + module_put(THIS_MODULE); + + return count; + } + + return -EINVAL; +} + +static ssize_t class_pktcdvd_store_remove(struct class *c, const char *buf, + size_t count) +{ + unsigned int major, minor; + if (sscanf(buf, "%u:%u", &major, &minor) == 2) { + pkt_remove_dev(MKDEV(major, minor)); + return 
count; + } + return -EINVAL; +} + +static struct class_attribute class_pktcdvd_attrs[] = { + __ATTR(add, 0200, NULL, class_pktcdvd_store_add), + __ATTR(remove, 0200, NULL, class_pktcdvd_store_remove), + __ATTR(device_map, 0444, class_pktcdvd_show_map, NULL), + __ATTR_NULL +}; + + +static int pkt_sysfs_init(void) +{ + int ret = 0; + + /* + * create control files in sysfs + * /sys/class/pktcdvd/... + */ + class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL); + if (!class_pktcdvd) + return -ENOMEM; + class_pktcdvd->name = DRIVER_NAME; + class_pktcdvd->owner = THIS_MODULE; + class_pktcdvd->class_release = class_pktcdvd_release; + class_pktcdvd->class_attrs = class_pktcdvd_attrs; + ret = class_register(class_pktcdvd); + if (ret) { + kfree(class_pktcdvd); + class_pktcdvd = NULL; + printk(DRIVER_NAME": failed to create class pktcdvd\n"); + return ret; + } + return 0; +} + +static void pkt_sysfs_cleanup(void) +{ + if (class_pktcdvd) + class_destroy(class_pktcdvd); + class_pktcdvd = NULL; +} + +/******************************************************************** + entries in debugfs + + /debugfs/pktcdvd[0-7]/ + info + + *******************************************************************/ + +static int pkt_debugfs_seq_show(struct seq_file *m, void *p) +{ + return pkt_seq_show(m, p); +} + +static int pkt_debugfs_fops_open(struct inode *inode, struct file *file) +{ + return single_open(file, pkt_debugfs_seq_show, inode->i_private); +} + +static const struct file_operations debug_fops = { + .open = pkt_debugfs_fops_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static void pkt_debugfs_dev_new(struct pktcdvd_device *pd) +{ + if (!pkt_debugfs_root) + return; + pd->dfs_f_info = NULL; + pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root); + if (IS_ERR(pd->dfs_d_root)) { + pd->dfs_d_root = NULL; + return; + } + pd->dfs_f_info = debugfs_create_file("info", S_IRUGO, + pd->dfs_d_root, pd, &debug_fops); + if (IS_ERR(pd->dfs_f_info)) { + pd->dfs_f_info = NULL; + return; + } +} + +static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd) +{ + if (!pkt_debugfs_root) + return; + if (pd->dfs_f_info) + debugfs_remove(pd->dfs_f_info); + pd->dfs_f_info = NULL; + if (pd->dfs_d_root) + debugfs_remove(pd->dfs_d_root); + pd->dfs_d_root = NULL; +} + +static void pkt_debugfs_init(void) +{ + pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL); + if (IS_ERR(pkt_debugfs_root)) { + pkt_debugfs_root = NULL; + return; + } +} + +static void pkt_debugfs_cleanup(void) +{ + if (!pkt_debugfs_root) + return; + debugfs_remove(pkt_debugfs_root); + pkt_debugfs_root = NULL; +} + +/* ----------------------------------------------------------*/ + + +static void pkt_bio_finished(struct pktcdvd_device *pd) +{ + BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0); + if (atomic_dec_and_test(&pd->cdrw.pending_bios)) { + VPRINTK(DRIVER_NAME": queue empty\n"); + atomic_set(&pd->iosched.attention, 1); + wake_up(&pd->wqueue); + } +} + +static void pkt_bio_destructor(struct bio *bio) +{ + kfree(bio->bi_io_vec); + kfree(bio); +} + +static struct bio *pkt_bio_alloc(int nr_iovecs) +{ + struct bio_vec *bvl = NULL; + struct bio *bio; + + bio = kmalloc(sizeof(struct bio), GFP_KERNEL); + if (!bio) + goto no_bio; + bio_init(bio); + + bvl = kcalloc(nr_iovecs, sizeof(struct bio_vec), GFP_KERNEL); + if (!bvl) + goto no_bvl; + + bio->bi_max_vecs = nr_iovecs; + bio->bi_io_vec = bvl; + bio->bi_destructor = pkt_bio_destructor; + + return bio; + + no_bvl: + kfree(bio); + no_bio: + 
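+	/* nothing more to free: either no bio was allocated, or it was freed above */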
return NULL; +} + +/* + * Allocate a packet_data struct + */ +static struct packet_data *pkt_alloc_packet_data(int frames) +{ + int i; + struct packet_data *pkt; + + pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL); + if (!pkt) + goto no_pkt; + + pkt->frames = frames; + pkt->w_bio = pkt_bio_alloc(frames); + if (!pkt->w_bio) + goto no_bio; + + for (i = 0; i < frames / FRAMES_PER_PAGE; i++) { + pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO); + if (!pkt->pages[i]) + goto no_page; + } + + spin_lock_init(&pkt->lock); + + for (i = 0; i < frames; i++) { + struct bio *bio = pkt_bio_alloc(1); + if (!bio) + goto no_rd_bio; + pkt->r_bios[i] = bio; + } + + return pkt; + +no_rd_bio: + for (i = 0; i < frames; i++) { + struct bio *bio = pkt->r_bios[i]; + if (bio) + bio_put(bio); + } + +no_page: + for (i = 0; i < frames / FRAMES_PER_PAGE; i++) + if (pkt->pages[i]) + __free_page(pkt->pages[i]); + bio_put(pkt->w_bio); +no_bio: + kfree(pkt); +no_pkt: + return NULL; +} + +/* + * Free a packet_data struct + */ +static void pkt_free_packet_data(struct packet_data *pkt) +{ + int i; + + for (i = 0; i < pkt->frames; i++) { + struct bio *bio = pkt->r_bios[i]; + if (bio) + bio_put(bio); + } + for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++) + __free_page(pkt->pages[i]); + bio_put(pkt->w_bio); + kfree(pkt); +} + +static void pkt_shrink_pktlist(struct pktcdvd_device *pd) +{ + struct packet_data *pkt, *next; + + BUG_ON(!list_empty(&pd->cdrw.pkt_active_list)); + + list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) { + pkt_free_packet_data(pkt); + } + INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); +} + +static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets) +{ + struct packet_data *pkt; + + BUG_ON(!list_empty(&pd->cdrw.pkt_free_list)); + + while (nr_packets > 0) { + pkt = pkt_alloc_packet_data(pd->settings.size >> 2); + if (!pkt) { + pkt_shrink_pktlist(pd); + return 0; + } + pkt->id = nr_packets; + pkt->pd = pd; + list_add(&pkt->list, &pd->cdrw.pkt_free_list); + nr_packets--; + } + return 1; +} + +static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node) +{ + struct rb_node *n = rb_next(&node->rb_node); + if (!n) + return NULL; + return rb_entry(n, struct pkt_rb_node, rb_node); +} + +static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node) +{ + rb_erase(&node->rb_node, &pd->bio_queue); + mempool_free(node, pd->rb_pool); + pd->bio_queue_size--; + BUG_ON(pd->bio_queue_size < 0); +} + +/* + * Find the first node in the pd->bio_queue rb tree with a starting sector >= s. + */ +static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s) +{ + struct rb_node *n = pd->bio_queue.rb_node; + struct rb_node *next; + struct pkt_rb_node *tmp; + + if (!n) { + BUG_ON(pd->bio_queue_size > 0); + return NULL; + } + + for (;;) { + tmp = rb_entry(n, struct pkt_rb_node, rb_node); + if (s <= tmp->bio->bi_sector) + next = n->rb_left; + else + next = n->rb_right; + if (!next) + break; + n = next; + } + + if (s > tmp->bio->bi_sector) { + tmp = pkt_rbtree_next(tmp); + if (!tmp) + return NULL; + } + BUG_ON(s > tmp->bio->bi_sector); + return tmp; +} + +/* + * Insert a node into the pd->bio_queue rb tree. 
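+ * Nodes are keyed by the bio's starting sector; equal keys go to the
+ * right, so bios that start at the same sector keep their queueing
+ * order. pd->lock must be held.
+ */
+
+/*
+ * Illustrative sketch (hypothetical helper, not part of this driver):
+ * the usual insertion pattern, as used by pkt_make_request() below.
+ */
+#if 0
+static void pkt_queue_node(struct pktcdvd_device *pd, struct bio *bio)
+{
+	struct pkt_rb_node *node = mempool_alloc(pd->rb_pool, GFP_NOIO);
+
+	node->bio = bio;
+	spin_lock(&pd->lock);
+	pkt_rbtree_insert(pd, node);
+	spin_unlock(&pd->lock);
+}
+#endif
+
+/* Link the node into the tree and rebalance.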
+ */ +static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node) +{ + struct rb_node **p = &pd->bio_queue.rb_node; + struct rb_node *parent = NULL; + sector_t s = node->bio->bi_sector; + struct pkt_rb_node *tmp; + + while (*p) { + parent = *p; + tmp = rb_entry(parent, struct pkt_rb_node, rb_node); + if (s < tmp->bio->bi_sector) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + rb_link_node(&node->rb_node, parent, p); + rb_insert_color(&node->rb_node, &pd->bio_queue); + pd->bio_queue_size++; +} + +/* + * Add a bio to a single linked list defined by its head and tail pointers. + */ +static void pkt_add_list_last(struct bio *bio, struct bio **list_head, struct bio **list_tail) +{ + bio->bi_next = NULL; + if (*list_tail) { + BUG_ON((*list_head) == NULL); + (*list_tail)->bi_next = bio; + (*list_tail) = bio; + } else { + BUG_ON((*list_head) != NULL); + (*list_head) = bio; + (*list_tail) = bio; + } +} + +/* + * Remove and return the first bio from a single linked list defined by its + * head and tail pointers. + */ +static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio **list_tail) +{ + struct bio *bio; + + if (*list_head == NULL) + return NULL; + + bio = *list_head; + *list_head = bio->bi_next; + if (*list_head == NULL) + *list_tail = NULL; + + bio->bi_next = NULL; + return bio; +} + +/* + * Send a packet_command to the underlying block device and + * wait for completion. + */ +static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc) +{ + struct request_queue *q = bdev_get_queue(pd->bdev); + struct request *rq; + int ret = 0; + + rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? + WRITE : READ, __GFP_WAIT); + + if (cgc->buflen) { + if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT)) + goto out; + } + + rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]); + memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE); + + rq->timeout = 60*HZ; + rq->cmd_type = REQ_TYPE_BLOCK_PC; + rq->cmd_flags |= REQ_HARDBARRIER; + if (cgc->quiet) + rq->cmd_flags |= REQ_QUIET; + + blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0); + if (rq->errors) + ret = -EIO; +out: + blk_put_request(rq); + return ret; +} + +/* + * A generic sense dump / resolve mechanism should be implemented across + * all ATAPI + SCSI devices. + */ +static void pkt_dump_sense(struct packet_command *cgc) +{ + static char *info[9] = { "No sense", "Recovered error", "Not ready", + "Medium error", "Hardware error", "Illegal request", + "Unit attention", "Data protect", "Blank check" }; + int i; + struct request_sense *sense = cgc->sense; + + printk(DRIVER_NAME":"); + for (i = 0; i < CDROM_PACKET_SIZE; i++) + printk(" %02x", cgc->cmd[i]); + printk(" - "); + + if (sense == NULL) { + printk("no sense\n"); + return; + } + + printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq); + + if (sense->sense_key > 8) { + printk(" (INVALID)\n"); + return; + } + + printk(" (%s)\n", info[sense->sense_key]); +} + +/* + * flush the drive cache to media + */ +static int pkt_flush_cache(struct pktcdvd_device *pd) +{ + struct packet_command cgc; + + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); + cgc.cmd[0] = GPCMD_FLUSH_CACHE; + cgc.quiet = 1; + + /* + * the IMMED bit -- we default to not setting it, although that + * would allow a much faster close, this is safer + */ +#if 0 + cgc.cmd[1] = 1 << 1; +#endif + return pkt_generic_packet(pd, &cgc); +} + +/* + * speed is given as the normal factor, e.g. 
4 for 4x + */ +static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, + unsigned write_speed, unsigned read_speed) +{ + struct packet_command cgc; + struct request_sense sense; + int ret; + + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); + cgc.sense = &sense; + cgc.cmd[0] = GPCMD_SET_SPEED; + cgc.cmd[2] = (read_speed >> 8) & 0xff; + cgc.cmd[3] = read_speed & 0xff; + cgc.cmd[4] = (write_speed >> 8) & 0xff; + cgc.cmd[5] = write_speed & 0xff; + + if ((ret = pkt_generic_packet(pd, &cgc))) + pkt_dump_sense(&cgc); + + return ret; +} + +/* + * Queue a bio for processing by the low-level CD device. Must be called + * from process context. + */ +static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) +{ + spin_lock(&pd->iosched.lock); + if (bio_data_dir(bio) == READ) { + pkt_add_list_last(bio, &pd->iosched.read_queue, + &pd->iosched.read_queue_tail); + } else { + pkt_add_list_last(bio, &pd->iosched.write_queue, + &pd->iosched.write_queue_tail); + } + spin_unlock(&pd->iosched.lock); + + atomic_set(&pd->iosched.attention, 1); + wake_up(&pd->wqueue); +} + +/* + * Process the queued read/write requests. This function handles special + * requirements for CDRW drives: + * - A cache flush command must be inserted before a read request if the + * previous request was a write. + * - Switching between reading and writing is slow, so don't do it more often + * than necessary. + * - Optimize for throughput at the expense of latency. This means that streaming + * writes will never be interrupted by a read, but if the drive has to seek + * before the next write, switch to reading instead if there are any pending + * read requests. + * - Set the read speed according to current usage pattern. When only reading + * from the device, it's best to use the highest possible read speed, but + * when switching often between reading and writing, it's better to have the + * same read and write speeds. 
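+ *
+ * As a worked example (illustrative, assuming the defaults): once
+ * HI_SPEED_SWITCH kB of reads have gone by with no intervening write,
+ * the read speed is bumped to MAX_SPEED; the first queued write resets
+ * the counter and pulls the read speed back down to the write speed.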
+ */ +static void pkt_iosched_process_queue(struct pktcdvd_device *pd) +{ + + if (atomic_read(&pd->iosched.attention) == 0) + return; + atomic_set(&pd->iosched.attention, 0); + + for (;;) { + struct bio *bio; + int reads_queued, writes_queued; + + spin_lock(&pd->iosched.lock); + reads_queued = (pd->iosched.read_queue != NULL); + writes_queued = (pd->iosched.write_queue != NULL); + spin_unlock(&pd->iosched.lock); + + if (!reads_queued && !writes_queued) + break; + + if (pd->iosched.writing) { + int need_write_seek = 1; + spin_lock(&pd->iosched.lock); + bio = pd->iosched.write_queue; + spin_unlock(&pd->iosched.lock); + if (bio && (bio->bi_sector == pd->iosched.last_write)) + need_write_seek = 0; + if (need_write_seek && reads_queued) { + if (atomic_read(&pd->cdrw.pending_bios) > 0) { + VPRINTK(DRIVER_NAME": write, waiting\n"); + break; + } + pkt_flush_cache(pd); + pd->iosched.writing = 0; + } + } else { + if (!reads_queued && writes_queued) { + if (atomic_read(&pd->cdrw.pending_bios) > 0) { + VPRINTK(DRIVER_NAME": read, waiting\n"); + break; + } + pd->iosched.writing = 1; + } + } + + spin_lock(&pd->iosched.lock); + if (pd->iosched.writing) { + bio = pkt_get_list_first(&pd->iosched.write_queue, + &pd->iosched.write_queue_tail); + } else { + bio = pkt_get_list_first(&pd->iosched.read_queue, + &pd->iosched.read_queue_tail); + } + spin_unlock(&pd->iosched.lock); + + if (!bio) + continue; + + if (bio_data_dir(bio) == READ) + pd->iosched.successive_reads += bio->bi_size >> 10; + else { + pd->iosched.successive_reads = 0; + pd->iosched.last_write = bio->bi_sector + bio_sectors(bio); + } + if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) { + if (pd->read_speed == pd->write_speed) { + pd->read_speed = MAX_SPEED; + pkt_set_speed(pd, pd->write_speed, pd->read_speed); + } + } else { + if (pd->read_speed != pd->write_speed) { + pd->read_speed = pd->write_speed; + pkt_set_speed(pd, pd->write_speed, pd->read_speed); + } + } + + atomic_inc(&pd->cdrw.pending_bios); + generic_make_request(bio); + } +} + +/* + * Special care is needed if the underlying block device has a small + * max_phys_segments value. 
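+ */
+
+/*
+ * Illustrative sketch (hypothetical helper, not part of this driver):
+ * the segment arithmetic applied below, assuming 4kB pages and a packet
+ * size given in 512-byte sectors. A 32-frame (64kB) packet needs 32
+ * segments at one CD_FRAMESIZE frame each, but only 16 when the frames
+ * are packed into pages.
+ */
+#if 0
+static int pkt_segments_per_packet(int pkt_sectors, int frame_per_seg)
+{
+	/* one segment per 2kB frame, or one per page */
+	return (pkt_sectors << 9) / (frame_per_seg ? CD_FRAMESIZE : PAGE_SIZE);
+}
+#endif
+
+/* Pick a segment strategy that fits within q->max_phys_segments.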
+ */ +static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) +{ + if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) { + /* + * The cdrom device can handle one segment/frame + */ + clear_bit(PACKET_MERGE_SEGS, &pd->flags); + return 0; + } else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) { + /* + * We can handle this case at the expense of some extra memory + * copies during write operations + */ + set_bit(PACKET_MERGE_SEGS, &pd->flags); + return 0; + } else { + printk(DRIVER_NAME": cdrom max_phys_segments too small\n"); + return -EIO; + } +} + +/* + * Copy CD_FRAMESIZE bytes from src_bio into a destination page + */ +static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct page *dst_page, int dst_offs) +{ + unsigned int copy_size = CD_FRAMESIZE; + + while (copy_size > 0) { + struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg); + void *vfrom = kmap_atomic(src_bvl->bv_page, KM_USER0) + + src_bvl->bv_offset + offs; + void *vto = page_address(dst_page) + dst_offs; + int len = min_t(int, copy_size, src_bvl->bv_len - offs); + + BUG_ON(len < 0); + memcpy(vto, vfrom, len); + kunmap_atomic(vfrom, KM_USER0); + + seg++; + offs = 0; + dst_offs += len; + copy_size -= len; + } +} + +/* + * Copy all data for this packet to pkt->pages[], so that + * a) The number of required segments for the write bio is minimized, which + * is necessary for some scsi controllers. + * b) The data can be used as cache to avoid read requests if we receive a + * new write request for the same zone. + */ +static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec) +{ + int f, p, offs; + + /* Copy all data to pkt->pages[] */ + p = 0; + offs = 0; + for (f = 0; f < pkt->frames; f++) { + if (bvec[f].bv_page != pkt->pages[p]) { + void *vfrom = kmap_atomic(bvec[f].bv_page, KM_USER0) + bvec[f].bv_offset; + void *vto = page_address(pkt->pages[p]) + offs; + memcpy(vto, vfrom, CD_FRAMESIZE); + kunmap_atomic(vfrom, KM_USER0); + bvec[f].bv_page = pkt->pages[p]; + bvec[f].bv_offset = offs; + } else { + BUG_ON(bvec[f].bv_offset != offs); + } + offs += CD_FRAMESIZE; + if (offs >= PAGE_SIZE) { + offs = 0; + p++; + } + } +} + +static void pkt_end_io_read(struct bio *bio, int err) +{ + struct packet_data *pkt = bio->bi_private; + struct pktcdvd_device *pd = pkt->pd; + BUG_ON(!pd); + + VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio, + (unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err); + + if (err) + atomic_inc(&pkt->io_errors); + if (atomic_dec_and_test(&pkt->io_wait)) { + atomic_inc(&pkt->run_sm); + wake_up(&pd->wqueue); + } + pkt_bio_finished(pd); +} + +static void pkt_end_io_packet_write(struct bio *bio, int err) +{ + struct packet_data *pkt = bio->bi_private; + struct pktcdvd_device *pd = pkt->pd; + BUG_ON(!pd); + + VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err); + + pd->stats.pkt_ended++; + + pkt_bio_finished(pd); + atomic_dec(&pkt->io_wait); + atomic_inc(&pkt->run_sm); + wake_up(&pd->wqueue); +} + +/* + * Schedule reads for the holes in a packet + */ +static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) +{ + int frames_read = 0; + struct bio *bio; + int f; + char written[PACKET_MAX_SIZE]; + + BUG_ON(!pkt->orig_bios); + + atomic_set(&pkt->io_wait, 0); + atomic_set(&pkt->io_errors, 0); + + /* + * Figure out which frames we need to read before we can write. 
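+	 * A frame already covered by a queued bio needs no read; only
+	 * the remaining holes are fetched from the disc below.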
+ */ + memset(written, 0, sizeof(written)); + spin_lock(&pkt->lock); + for (bio = pkt->orig_bios; bio; bio = bio->bi_next) { + int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); + int num_frames = bio->bi_size / CD_FRAMESIZE; + pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); + BUG_ON(first_frame < 0); + BUG_ON(first_frame + num_frames > pkt->frames); + for (f = first_frame; f < first_frame + num_frames; f++) + written[f] = 1; + } + spin_unlock(&pkt->lock); + + if (pkt->cache_valid) { + VPRINTK("pkt_gather_data: zone %llx cached\n", + (unsigned long long)pkt->sector); + goto out_account; + } + + /* + * Schedule reads for missing parts of the packet. + */ + for (f = 0; f < pkt->frames; f++) { + struct bio_vec *vec; + + int p, offset; + if (written[f]) + continue; + bio = pkt->r_bios[f]; + vec = bio->bi_io_vec; + bio_init(bio); + bio->bi_max_vecs = 1; + bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); + bio->bi_bdev = pd->bdev; + bio->bi_end_io = pkt_end_io_read; + bio->bi_private = pkt; + bio->bi_io_vec = vec; + bio->bi_destructor = pkt_bio_destructor; + + p = (f * CD_FRAMESIZE) / PAGE_SIZE; + offset = (f * CD_FRAMESIZE) % PAGE_SIZE; + VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n", + f, pkt->pages[p], offset); + if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset)) + BUG(); + + atomic_inc(&pkt->io_wait); + bio->bi_rw = READ; + pkt_queue_bio(pd, bio); + frames_read++; + } + +out_account: + VPRINTK("pkt_gather_data: need %d frames for zone %llx\n", + frames_read, (unsigned long long)pkt->sector); + pd->stats.pkt_started++; + pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9); +} + +/* + * Find a packet matching zone, or the least recently used packet if + * there is no match. + */ +static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone) +{ + struct packet_data *pkt; + + list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) { + if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) { + list_del_init(&pkt->list); + if (pkt->sector != zone) + pkt->cache_valid = 0; + return pkt; + } + } + BUG(); + return NULL; +} + +static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt) +{ + if (pkt->cache_valid) { + list_add(&pkt->list, &pd->cdrw.pkt_free_list); + } else { + list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list); + } +} + +/* + * recover a failed write, query for relocation if possible + * + * returns 1 if recovery is possible, or 0 if not + * + */ +static int pkt_start_recovery(struct packet_data *pkt) +{ + /* + * FIXME. We need help from the file system to implement + * recovery handling. 
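+	 * The disabled code below sketches the idea: ask the filesystem,
+	 * through a relocate_blocks() superblock hook, to move the zone
+	 * to a fresh location, then retry the write at the new sector.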
+ */ + return 0; +#if 0 + struct request *rq = pkt->rq; + struct pktcdvd_device *pd = rq->rq_disk->private_data; + struct block_device *pkt_bdev; + struct super_block *sb = NULL; + unsigned long old_block, new_block; + sector_t new_sector; + + pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev)); + if (pkt_bdev) { + sb = get_super(pkt_bdev); + bdput(pkt_bdev); + } + + if (!sb) + return 0; + + if (!sb->s_op || !sb->s_op->relocate_blocks) + goto out; + + old_block = pkt->sector / (CD_FRAMESIZE >> 9); + if (sb->s_op->relocate_blocks(sb, old_block, &new_block)) + goto out; + + new_sector = new_block * (CD_FRAMESIZE >> 9); + pkt->sector = new_sector; + + pkt->bio->bi_sector = new_sector; + pkt->bio->bi_next = NULL; + pkt->bio->bi_flags = 1 << BIO_UPTODATE; + pkt->bio->bi_idx = 0; + + BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW)); + BUG_ON(pkt->bio->bi_vcnt != pkt->frames); + BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE); + BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write); + BUG_ON(pkt->bio->bi_private != pkt); + + drop_super(sb); + return 1; + +out: + drop_super(sb); + return 0; +#endif +} + +static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state) +{ +#if PACKET_DEBUG > 1 + static const char *state_name[] = { + "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED" + }; + enum packet_data_state old_state = pkt->state; + VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector, + state_name[old_state], state_name[state]); +#endif + pkt->state = state; +} + +/* + * Scan the work queue to see if we can start a new packet. + * returns non-zero if any work was done. + */ +static int pkt_handle_queue(struct pktcdvd_device *pd) +{ + struct packet_data *pkt, *p; + struct bio *bio = NULL; + sector_t zone = 0; /* Suppress gcc warning */ + struct pkt_rb_node *node, *first_node; + struct rb_node *n; + int wakeup; + + VPRINTK("handle_queue\n"); + + atomic_set(&pd->scan_queue, 0); + + if (list_empty(&pd->cdrw.pkt_free_list)) { + VPRINTK("handle_queue: no pkt\n"); + return 0; + } + + /* + * Try to find a zone we are not already working on. + */ + spin_lock(&pd->lock); + first_node = pkt_rbtree_find(pd, pd->current_sector); + if (!first_node) { + n = rb_first(&pd->bio_queue); + if (n) + first_node = rb_entry(n, struct pkt_rb_node, rb_node); + } + node = first_node; + while (node) { + bio = node->bio; + zone = ZONE(bio->bi_sector, pd); + list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) { + if (p->sector == zone) { + bio = NULL; + goto try_next_bio; + } + } + break; +try_next_bio: + node = pkt_rbtree_next(node); + if (!node) { + n = rb_first(&pd->bio_queue); + if (n) + node = rb_entry(n, struct pkt_rb_node, rb_node); + } + if (node == first_node) + node = NULL; + } + spin_unlock(&pd->lock); + if (!bio) { + VPRINTK("handle_queue: no bio\n"); + return 0; + } + + pkt = pkt_get_packet_data(pd, zone); + + pd->current_sector = zone + pd->settings.size; + pkt->sector = zone; + BUG_ON(pkt->frames != pd->settings.size >> 2); + pkt->write_size = 0; + + /* + * Scan work queue for bios in the same zone and link them + * to this packet. 
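+	 * Each matching bio is pulled off the rb tree and appended to
+	 * pkt->orig_bios; write_size accumulates the frames they cover.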
+ */ + spin_lock(&pd->lock); + VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone); + while ((node = pkt_rbtree_find(pd, zone)) != NULL) { + bio = node->bio; + VPRINTK("pkt_handle_queue: found zone=%llx\n", + (unsigned long long)ZONE(bio->bi_sector, pd)); + if (ZONE(bio->bi_sector, pd) != zone) + break; + pkt_rbtree_erase(pd, node); + spin_lock(&pkt->lock); + pkt_add_list_last(bio, &pkt->orig_bios, &pkt->orig_bios_tail); + pkt->write_size += bio->bi_size / CD_FRAMESIZE; + spin_unlock(&pkt->lock); + } + /* check write congestion marks, and if bio_queue_size is + below, wake up any waiters */ + wakeup = (pd->write_congestion_on > 0 + && pd->bio_queue_size <= pd->write_congestion_off); + spin_unlock(&pd->lock); + if (wakeup) + clear_bdi_congested(&pd->disk->queue->backing_dev_info, WRITE); + + pkt->sleep_time = max(PACKET_WAIT_TIME, 1); + pkt_set_state(pkt, PACKET_WAITING_STATE); + atomic_set(&pkt->run_sm, 1); + + spin_lock(&pd->cdrw.active_list_lock); + list_add(&pkt->list, &pd->cdrw.pkt_active_list); + spin_unlock(&pd->cdrw.active_list_lock); + + return 1; +} + +/* + * Assemble a bio to write one packet and queue the bio for processing + * by the underlying block device. + */ +static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) +{ + struct bio *bio; + int f; + int frames_write; + struct bio_vec *bvec = pkt->w_bio->bi_io_vec; + + for (f = 0; f < pkt->frames; f++) { + bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE]; + bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE; + } + + /* + * Fill-in bvec with data from orig_bios. + */ + frames_write = 0; + spin_lock(&pkt->lock); + for (bio = pkt->orig_bios; bio; bio = bio->bi_next) { + int segment = bio->bi_idx; + int src_offs = 0; + int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); + int num_frames = bio->bi_size / CD_FRAMESIZE; + BUG_ON(first_frame < 0); + BUG_ON(first_frame + num_frames > pkt->frames); + for (f = first_frame; f < first_frame + num_frames; f++) { + struct bio_vec *src_bvl = bio_iovec_idx(bio, segment); + + while (src_offs >= src_bvl->bv_len) { + src_offs -= src_bvl->bv_len; + segment++; + BUG_ON(segment >= bio->bi_vcnt); + src_bvl = bio_iovec_idx(bio, segment); + } + + if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) { + bvec[f].bv_page = src_bvl->bv_page; + bvec[f].bv_offset = src_bvl->bv_offset + src_offs; + } else { + pkt_copy_bio_data(bio, segment, src_offs, + bvec[f].bv_page, bvec[f].bv_offset); + } + src_offs += CD_FRAMESIZE; + frames_write++; + } + } + pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE); + spin_unlock(&pkt->lock); + + VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n", + frames_write, (unsigned long long)pkt->sector); + BUG_ON(frames_write != pkt->write_size); + + if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) { + pkt_make_local_copy(pkt, bvec); + pkt->cache_valid = 1; + } else { + pkt->cache_valid = 0; + } + + /* Start the write request */ + bio_init(pkt->w_bio); + pkt->w_bio->bi_max_vecs = PACKET_MAX_SIZE; + pkt->w_bio->bi_sector = pkt->sector; + pkt->w_bio->bi_bdev = pd->bdev; + pkt->w_bio->bi_end_io = pkt_end_io_packet_write; + pkt->w_bio->bi_private = pkt; + pkt->w_bio->bi_io_vec = bvec; + pkt->w_bio->bi_destructor = pkt_bio_destructor; + for (f = 0; f < pkt->frames; f++) + if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset)) + BUG(); + VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt); + + atomic_set(&pkt->io_wait, 1); + pkt->w_bio->bi_rw = WRITE; 
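+	/* hand the assembled packet to the io scheduler as a single write bio */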
+ pkt_queue_bio(pd, pkt->w_bio); +} + +static void pkt_finish_packet(struct packet_data *pkt, int uptodate) +{ + struct bio *bio, *next; + + if (!uptodate) + pkt->cache_valid = 0; + + /* Finish all bios corresponding to this packet */ + bio = pkt->orig_bios; + while (bio) { + next = bio->bi_next; + bio->bi_next = NULL; + bio_endio(bio, uptodate ? 0 : -EIO); + bio = next; + } + pkt->orig_bios = pkt->orig_bios_tail = NULL; +} + +static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) +{ + int uptodate; + + VPRINTK("run_state_machine: pkt %d\n", pkt->id); + + for (;;) { + switch (pkt->state) { + case PACKET_WAITING_STATE: + if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0)) + return; + + pkt->sleep_time = 0; + pkt_gather_data(pd, pkt); + pkt_set_state(pkt, PACKET_READ_WAIT_STATE); + break; + + case PACKET_READ_WAIT_STATE: + if (atomic_read(&pkt->io_wait) > 0) + return; + + if (atomic_read(&pkt->io_errors) > 0) { + pkt_set_state(pkt, PACKET_RECOVERY_STATE); + } else { + pkt_start_write(pd, pkt); + } + break; + + case PACKET_WRITE_WAIT_STATE: + if (atomic_read(&pkt->io_wait) > 0) + return; + + if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) { + pkt_set_state(pkt, PACKET_FINISHED_STATE); + } else { + pkt_set_state(pkt, PACKET_RECOVERY_STATE); + } + break; + + case PACKET_RECOVERY_STATE: + if (pkt_start_recovery(pkt)) { + pkt_start_write(pd, pkt); + } else { + VPRINTK("No recovery possible\n"); + pkt_set_state(pkt, PACKET_FINISHED_STATE); + } + break; + + case PACKET_FINISHED_STATE: + uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags); + pkt_finish_packet(pkt, uptodate); + return; + + default: + BUG(); + break; + } + } +} + +static void pkt_handle_packets(struct pktcdvd_device *pd) +{ + struct packet_data *pkt, *next; + + VPRINTK("pkt_handle_packets\n"); + + /* + * Run state machine for active packets + */ + list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { + if (atomic_read(&pkt->run_sm) > 0) { + atomic_set(&pkt->run_sm, 0); + pkt_run_state_machine(pd, pkt); + } + } + + /* + * Move no longer active packets to the free list + */ + spin_lock(&pd->cdrw.active_list_lock); + list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) { + if (pkt->state == PACKET_FINISHED_STATE) { + list_del(&pkt->list); + pkt_put_packet_data(pd, pkt); + pkt_set_state(pkt, PACKET_IDLE_STATE); + atomic_set(&pd->scan_queue, 1); + } + } + spin_unlock(&pd->cdrw.active_list_lock); +} + +static void pkt_count_states(struct pktcdvd_device *pd, int *states) +{ + struct packet_data *pkt; + int i; + + for (i = 0; i < PACKET_NUM_STATES; i++) + states[i] = 0; + + spin_lock(&pd->cdrw.active_list_lock); + list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { + states[pkt->state]++; + } + spin_unlock(&pd->cdrw.active_list_lock); +} + +/* + * kcdrwd is woken up when writes have been queued for one of our + * registered devices + */ +static int kcdrwd(void *foobar) +{ + struct pktcdvd_device *pd = foobar; + struct packet_data *pkt; + long min_sleep_time, residue; + + set_user_nice(current, -20); + set_freezable(); + + for (;;) { + DECLARE_WAITQUEUE(wait, current); + + /* + * Wait until there is something to do + */ + add_wait_queue(&pd->wqueue, &wait); + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + + /* Check if we need to run pkt_handle_queue */ + if (atomic_read(&pd->scan_queue) > 0) + goto work_to_do; + + /* Check if we need to run the state machine for some packet */ + list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { + if 
(atomic_read(&pkt->run_sm) > 0) + goto work_to_do; + } + + /* Check if we need to process the iosched queues */ + if (atomic_read(&pd->iosched.attention) != 0) + goto work_to_do; + + /* Otherwise, go to sleep */ + if (PACKET_DEBUG > 1) { + int states[PACKET_NUM_STATES]; + pkt_count_states(pd, states); + VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", + states[0], states[1], states[2], states[3], + states[4], states[5]); + } + + min_sleep_time = MAX_SCHEDULE_TIMEOUT; + list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { + if (pkt->sleep_time && pkt->sleep_time < min_sleep_time) + min_sleep_time = pkt->sleep_time; + } + + generic_unplug_device(bdev_get_queue(pd->bdev)); + + VPRINTK("kcdrwd: sleeping\n"); + residue = schedule_timeout(min_sleep_time); + VPRINTK("kcdrwd: wake up\n"); + + /* make swsusp happy with our thread */ + try_to_freeze(); + + list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { + if (!pkt->sleep_time) + continue; + pkt->sleep_time -= min_sleep_time - residue; + if (pkt->sleep_time <= 0) { + pkt->sleep_time = 0; + atomic_inc(&pkt->run_sm); + } + } + + if (kthread_should_stop()) + break; + } +work_to_do: + set_current_state(TASK_RUNNING); + remove_wait_queue(&pd->wqueue, &wait); + + if (kthread_should_stop()) + break; + + /* + * if pkt_handle_queue returns true, we can queue + * another request. + */ + while (pkt_handle_queue(pd)) + ; + + /* + * Handle packet state machine + */ + pkt_handle_packets(pd); + + /* + * Handle iosched queues + */ + pkt_iosched_process_queue(pd); + } + + return 0; +} + +static void pkt_print_settings(struct pktcdvd_device *pd) +{ + printk(DRIVER_NAME": %s packets, ", pd->settings.fp ? "Fixed" : "Variable"); + printk("%u blocks, ", pd->settings.size >> 2); + printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? 
'1' : '2'); +} + +static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control) +{ + memset(cgc->cmd, 0, sizeof(cgc->cmd)); + + cgc->cmd[0] = GPCMD_MODE_SENSE_10; + cgc->cmd[2] = page_code | (page_control << 6); + cgc->cmd[7] = cgc->buflen >> 8; + cgc->cmd[8] = cgc->buflen & 0xff; + cgc->data_direction = CGC_DATA_READ; + return pkt_generic_packet(pd, cgc); +} + +static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc) +{ + memset(cgc->cmd, 0, sizeof(cgc->cmd)); + memset(cgc->buffer, 0, 2); + cgc->cmd[0] = GPCMD_MODE_SELECT_10; + cgc->cmd[1] = 0x10; /* PF */ + cgc->cmd[7] = cgc->buflen >> 8; + cgc->cmd[8] = cgc->buflen & 0xff; + cgc->data_direction = CGC_DATA_WRITE; + return pkt_generic_packet(pd, cgc); +} + +static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di) +{ + struct packet_command cgc; + int ret; + + /* set up command and get the disc info */ + init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ); + cgc.cmd[0] = GPCMD_READ_DISC_INFO; + cgc.cmd[8] = cgc.buflen = 2; + cgc.quiet = 1; + + if ((ret = pkt_generic_packet(pd, &cgc))) + return ret; + + /* not all drives have the same disc_info length, so requeue + * packet with the length the drive tells us it can supply + */ + cgc.buflen = be16_to_cpu(di->disc_information_length) + + sizeof(di->disc_information_length); + + if (cgc.buflen > sizeof(disc_information)) + cgc.buflen = sizeof(disc_information); + + cgc.cmd[8] = cgc.buflen; + return pkt_generic_packet(pd, &cgc); +} + +static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti) +{ + struct packet_command cgc; + int ret; + + init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ); + cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO; + cgc.cmd[1] = type & 3; + cgc.cmd[4] = (track & 0xff00) >> 8; + cgc.cmd[5] = track & 0xff; + cgc.cmd[8] = 8; + cgc.quiet = 1; + + if ((ret = pkt_generic_packet(pd, &cgc))) + return ret; + + cgc.buflen = be16_to_cpu(ti->track_information_length) + + sizeof(ti->track_information_length); + + if (cgc.buflen > sizeof(track_information)) + cgc.buflen = sizeof(track_information); + + cgc.cmd[8] = cgc.buflen; + return pkt_generic_packet(pd, &cgc); +} + +static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd, + long *last_written) +{ + disc_information di; + track_information ti; + __u32 last_track; + int ret = -1; + + if ((ret = pkt_get_disc_info(pd, &di))) + return ret; + + last_track = (di.last_track_msb << 8) | di.last_track_lsb; + if ((ret = pkt_get_track_info(pd, last_track, 1, &ti))) + return ret; + + /* if this track is blank, try the previous. */ + if (ti.blank) { + last_track--; + if ((ret = pkt_get_track_info(pd, last_track, 1, &ti))) + return ret; + } + + /* if last recorded field is valid, return it. 
*/ + if (ti.lra_v) { + *last_written = be32_to_cpu(ti.last_rec_address); + } else { + /* make it up instead */ + *last_written = be32_to_cpu(ti.track_start) + + be32_to_cpu(ti.track_size); + if (ti.free_blocks) + *last_written -= (be32_to_cpu(ti.free_blocks) + 7); + } + return 0; +} + +/* + * write mode select package based on pd->settings + */ +static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) +{ + struct packet_command cgc; + struct request_sense sense; + write_param_page *wp; + char buffer[128]; + int ret, size; + + /* doesn't apply to DVD+RW or DVD-RAM */ + if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12)) + return 0; + + memset(buffer, 0, sizeof(buffer)); + init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ); + cgc.sense = &sense; + if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) { + pkt_dump_sense(&cgc); + return ret; + } + + size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff)); + pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff); + if (size > sizeof(buffer)) + size = sizeof(buffer); + + /* + * now get it all + */ + init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ); + cgc.sense = &sense; + if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) { + pkt_dump_sense(&cgc); + return ret; + } + + /* + * write page is offset header + block descriptor length + */ + wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset]; + + wp->fp = pd->settings.fp; + wp->track_mode = pd->settings.track_mode; + wp->write_type = pd->settings.write_type; + wp->data_block_type = pd->settings.block_mode; + + wp->multi_session = 0; + +#ifdef PACKET_USE_LS + wp->link_size = 7; + wp->ls_v = 1; +#endif + + if (wp->data_block_type == PACKET_BLOCK_MODE1) { + wp->session_format = 0; + wp->subhdr2 = 0x20; + } else if (wp->data_block_type == PACKET_BLOCK_MODE2) { + wp->session_format = 0x20; + wp->subhdr2 = 8; +#if 0 + wp->mcn[0] = 0x80; + memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1); +#endif + } else { + /* + * paranoia + */ + printk(DRIVER_NAME": write mode wrong %d\n", wp->data_block_type); + return 1; + } + wp->packet_size = cpu_to_be32(pd->settings.size >> 2); + + cgc.buflen = cgc.cmd[8] = size; + if ((ret = pkt_mode_select(pd, &cgc))) { + pkt_dump_sense(&cgc); + return ret; + } + + pkt_print_settings(pd); + return 0; +} + +/* + * 1 -- we can write to this track, 0 -- we can't + */ +static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti) +{ + switch (pd->mmc3_profile) { + case 0x1a: /* DVD+RW */ + case 0x12: /* DVD-RAM */ + /* The track is always writable on DVD+RW/DVD-RAM */ + return 1; + default: + break; + } + + if (!ti->packet || !ti->fp) + return 0; + + /* + * "good" settings as per Mt Fuji. + */ + if (ti->rt == 0 && ti->blank == 0) + return 1; + + if (ti->rt == 0 && ti->blank == 1) + return 1; + + if (ti->rt == 1 && ti->blank == 0) + return 1; + + printk(DRIVER_NAME": bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); + return 0; +} + +/* + * 1 -- we can write to this disc, 0 -- we can't + */ +static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) +{ + switch (pd->mmc3_profile) { + case 0x0a: /* CD-RW */ + case 0xffff: /* MMC3 not supported */ + break; + case 0x1a: /* DVD+RW */ + case 0x13: /* DVD-RW */ + case 0x12: /* DVD-RAM */ + return 1; + default: + VPRINTK(DRIVER_NAME": Wrong disc profile (%x)\n", pd->mmc3_profile); + return 0; + } + + /* + * for disc type 0xff we should probably reserve a new track. 
+ * but i'm not sure, should we leave this to user apps? probably. + */ + if (di->disc_type == 0xff) { + printk(DRIVER_NAME": Unknown disc. No track?\n"); + return 0; + } + + if (di->disc_type != 0x20 && di->disc_type != 0) { + printk(DRIVER_NAME": Wrong disc type (%x)\n", di->disc_type); + return 0; + } + + if (di->erasable == 0) { + printk(DRIVER_NAME": Disc not erasable\n"); + return 0; + } + + if (di->border_status == PACKET_SESSION_RESERVED) { + printk(DRIVER_NAME": Can't write to last track (reserved)\n"); + return 0; + } + + return 1; +} + +static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) +{ + struct packet_command cgc; + unsigned char buf[12]; + disc_information di; + track_information ti; + int ret, track; + + init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ); + cgc.cmd[0] = GPCMD_GET_CONFIGURATION; + cgc.cmd[8] = 8; + ret = pkt_generic_packet(pd, &cgc); + pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7]; + + memset(&di, 0, sizeof(disc_information)); + memset(&ti, 0, sizeof(track_information)); + + if ((ret = pkt_get_disc_info(pd, &di))) { + printk("failed get_disc\n"); + return ret; + } + + if (!pkt_writable_disc(pd, &di)) + return -EROFS; + + pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR; + + track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */ + if ((ret = pkt_get_track_info(pd, track, 1, &ti))) { + printk(DRIVER_NAME": failed get_track\n"); + return ret; + } + + if (!pkt_writable_track(pd, &ti)) { + printk(DRIVER_NAME": can't write to this track\n"); + return -EROFS; + } + + /* + * we keep packet size in 512 byte units, makes it easier to + * deal with request calculations. + */ + pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2; + if (pd->settings.size == 0) { + printk(DRIVER_NAME": detected zero packet size!\n"); + return -ENXIO; + } + if (pd->settings.size > PACKET_MAX_SECTORS) { + printk(DRIVER_NAME": packet size is too big\n"); + return -EROFS; + } + pd->settings.fp = ti.fp; + pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1); + + if (ti.nwa_v) { + pd->nwa = be32_to_cpu(ti.next_writable); + set_bit(PACKET_NWA_VALID, &pd->flags); + } + + /* + * in theory we could use lra on -RW media as well and just zero + * blocks that haven't been written yet, but in practice that + * is just a no-go. we'll use that for -R, naturally. 
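+	 * Either way PACKET_LRA_VALID ends up set below; on -RW media
+	 * pd->lra is simply pinned to 0xffffffff.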
+ */ + if (ti.lra_v) { + pd->lra = be32_to_cpu(ti.last_rec_address); + set_bit(PACKET_LRA_VALID, &pd->flags); + } else { + pd->lra = 0xffffffff; + set_bit(PACKET_LRA_VALID, &pd->flags); + } + + /* + * fine for now + */ + pd->settings.link_loss = 7; + pd->settings.write_type = 0; /* packet */ + pd->settings.track_mode = ti.track_mode; + + /* + * mode1 or mode2 disc + */ + switch (ti.data_mode) { + case PACKET_MODE1: + pd->settings.block_mode = PACKET_BLOCK_MODE1; + break; + case PACKET_MODE2: + pd->settings.block_mode = PACKET_BLOCK_MODE2; + break; + default: + printk(DRIVER_NAME": unknown data mode\n"); + return -EROFS; + } + return 0; +} + +/* + * enable/disable write caching on drive + */ +static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd, + int set) +{ + struct packet_command cgc; + struct request_sense sense; + unsigned char buf[64]; + int ret; + + init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ); + cgc.sense = &sense; + cgc.buflen = pd->mode_offset + 12; + + /* + * caching mode page might not be there, so quiet this command + */ + cgc.quiet = 1; + + if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0))) + return ret; + + buf[pd->mode_offset + 10] |= (!!set << 2); + + cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff)); + ret = pkt_mode_select(pd, &cgc); + if (ret) { + printk(DRIVER_NAME": write caching control failed\n"); + pkt_dump_sense(&cgc); + } else if (!ret && set) + printk(DRIVER_NAME": enabled write caching on %s\n", pd->name); + return ret; +} + +static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag) +{ + struct packet_command cgc; + + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); + cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL; + cgc.cmd[4] = lockflag ? 1 : 0; + return pkt_generic_packet(pd, &cgc); +} + +/* + * Returns drive maximum write speed + */ +static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd, + unsigned *write_speed) +{ + struct packet_command cgc; + struct request_sense sense; + unsigned char buf[256+18]; + unsigned char *cap_buf; + int ret, offset; + + cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset]; + init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN); + cgc.sense = &sense; + + ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); + if (ret) { + cgc.buflen = pd->mode_offset + cap_buf[1] + 2 + + sizeof(struct mode_page_header); + ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); + if (ret) { + pkt_dump_sense(&cgc); + return ret; + } + } + + offset = 20; /* Obsoleted field, used by older drives */ + if (cap_buf[1] >= 28) + offset = 28; /* Current write speed selected */ + if (cap_buf[1] >= 30) { + /* If the drive reports at least one "Logical Unit Write + * Speed Performance Descriptor Block", use the information + * in the first block. 
(contains the highest speed) + */ + int num_spdb = (cap_buf[30] << 8) + cap_buf[31]; + if (num_spdb > 0) + offset = 34; + } + + *write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1]; + return 0; +} + +/* These tables from cdrecord - I don't have orange book */ +/* standard speed CD-RW (1-4x) */ +static char clv_to_speed[16] = { + /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */ + 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; +/* high speed CD-RW (-10x) */ +static char hs_clv_to_speed[16] = { + /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */ + 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; +/* ultra high speed CD-RW */ +static char us_clv_to_speed[16] = { + /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */ + 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0 +}; + +/* + * reads the maximum media speed from ATIP + */ +static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, + unsigned *speed) +{ + struct packet_command cgc; + struct request_sense sense; + unsigned char buf[64]; + unsigned int size, st, sp; + int ret; + + init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ); + cgc.sense = &sense; + cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP; + cgc.cmd[1] = 2; + cgc.cmd[2] = 4; /* READ ATIP */ + cgc.cmd[8] = 2; + ret = pkt_generic_packet(pd, &cgc); + if (ret) { + pkt_dump_sense(&cgc); + return ret; + } + size = ((unsigned int) buf[0]<<8) + buf[1] + 2; + if (size > sizeof(buf)) + size = sizeof(buf); + + init_cdrom_command(&cgc, buf, size, CGC_DATA_READ); + cgc.sense = &sense; + cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP; + cgc.cmd[1] = 2; + cgc.cmd[2] = 4; + cgc.cmd[8] = size; + ret = pkt_generic_packet(pd, &cgc); + if (ret) { + pkt_dump_sense(&cgc); + return ret; + } + + if (!(buf[6] & 0x40)) { + printk(DRIVER_NAME": Disc type is not CD-RW\n"); + return 1; + } + if (!(buf[6] & 0x4)) { + printk(DRIVER_NAME": A1 values on media are not valid, maybe not CDRW?\n"); + return 1; + } + + st = (buf[6] >> 3) & 0x7; /* disc sub-type */ + + sp = buf[16] & 0xf; /* max speed from ATIP A1 field */ + + /* Info from cdrecord */ + switch (st) { + case 0: /* standard speed */ + *speed = clv_to_speed[sp]; + break; + case 1: /* high speed */ + *speed = hs_clv_to_speed[sp]; + break; + case 2: /* ultra high speed */ + *speed = us_clv_to_speed[sp]; + break; + default: + printk(DRIVER_NAME": Unknown disc sub-type %d\n",st); + return 1; + } + if (*speed) { + printk(DRIVER_NAME": Max. 
media speed: %d\n",*speed); + return 0; + } else { + printk(DRIVER_NAME": Unknown speed %d for sub-type %d\n",sp,st); + return 1; + } +} + +static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) +{ + struct packet_command cgc; + struct request_sense sense; + int ret; + + VPRINTK(DRIVER_NAME": Performing OPC\n"); + + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); + cgc.sense = &sense; + cgc.timeout = 60*HZ; + cgc.cmd[0] = GPCMD_SEND_OPC; + cgc.cmd[1] = 1; + if ((ret = pkt_generic_packet(pd, &cgc))) + pkt_dump_sense(&cgc); + return ret; +} + +static int pkt_open_write(struct pktcdvd_device *pd) +{ + int ret; + unsigned int write_speed, media_write_speed, read_speed; + + if ((ret = pkt_probe_settings(pd))) { + VPRINTK(DRIVER_NAME": %s failed probe\n", pd->name); + return ret; + } + + if ((ret = pkt_set_write_settings(pd))) { + DPRINTK(DRIVER_NAME": %s failed saving write settings\n", pd->name); + return -EIO; + } + + pkt_write_caching(pd, USE_WCACHING); + + if ((ret = pkt_get_max_speed(pd, &write_speed))) + write_speed = 16 * 177; + switch (pd->mmc3_profile) { + case 0x13: /* DVD-RW */ + case 0x1a: /* DVD+RW */ + case 0x12: /* DVD-RAM */ + DPRINTK(DRIVER_NAME": write speed %ukB/s\n", write_speed); + break; + default: + if ((ret = pkt_media_speed(pd, &media_write_speed))) + media_write_speed = 16; + write_speed = min(write_speed, media_write_speed * 177); + DPRINTK(DRIVER_NAME": write speed %ux\n", write_speed / 176); + break; + } + read_speed = write_speed; + + if ((ret = pkt_set_speed(pd, write_speed, read_speed))) { + DPRINTK(DRIVER_NAME": %s couldn't set write speed\n", pd->name); + return -EIO; + } + pd->write_speed = write_speed; + pd->read_speed = read_speed; + + if ((ret = pkt_perform_opc(pd))) { + DPRINTK(DRIVER_NAME": %s Optimum Power Calibration failed\n", pd->name); + } + + return 0; +} + +/* + * called at open time. + */ +static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) +{ + int ret; + long lba; + struct request_queue *q; + + /* + * We need to re-open the cdrom device without O_NONBLOCK to be able + * to read/write from/to it. It is already opened in O_NONBLOCK mode + * so bdget() can't fail. + */ + bdget(pd->bdev->bd_dev); + if ((ret = blkdev_get(pd->bdev, FMODE_READ))) + goto out; + + if ((ret = bd_claim(pd->bdev, pd))) + goto out_putdev; + + if ((ret = pkt_get_last_written(pd, &lba))) { + printk(DRIVER_NAME": pkt_get_last_written failed\n"); + goto out_unclaim; + } + + set_capacity(pd->disk, lba << 2); + set_capacity(pd->bdev->bd_disk, lba << 2); + bd_set_size(pd->bdev, (loff_t)lba << 11); + + q = bdev_get_queue(pd->bdev); + if (write) { + if ((ret = pkt_open_write(pd))) + goto out_unclaim; + /* + * Some CDRW drives can not handle writes larger than one packet, + * even if the size is a multiple of the packet size. + */ + spin_lock_irq(q->queue_lock); + blk_queue_max_sectors(q, pd->settings.size); + spin_unlock_irq(q->queue_lock); + set_bit(PACKET_WRITABLE, &pd->flags); + } else { + pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); + clear_bit(PACKET_WRITABLE, &pd->flags); + } + + if ((ret = pkt_set_segment_merging(pd, q))) + goto out_unclaim; + + if (write) { + if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) { + printk(DRIVER_NAME": not enough memory for buffers\n"); + ret = -ENOMEM; + goto out_unclaim; + } + printk(DRIVER_NAME": %lukB available on disc\n", lba << 1); + } + + return 0; + +out_unclaim: + bd_release(pd->bdev); +out_putdev: + blkdev_put(pd->bdev, FMODE_READ); +out: + return ret; +} + +/* + * called when the device is closed. 
makes sure that the device flushes + * the internal cache before we close. + */ +static void pkt_release_dev(struct pktcdvd_device *pd, int flush) +{ + if (flush && pkt_flush_cache(pd)) + DPRINTK(DRIVER_NAME": %s not flushing cache\n", pd->name); + + pkt_lock_door(pd, 0); + + pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); + bd_release(pd->bdev); + blkdev_put(pd->bdev, FMODE_READ); + + pkt_shrink_pktlist(pd); +} + +static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor) +{ + if (dev_minor >= MAX_WRITERS) + return NULL; + return pkt_devs[dev_minor]; +} + +static int pkt_open(struct block_device *bdev, fmode_t mode) +{ + struct pktcdvd_device *pd = NULL; + int ret; + + VPRINTK(DRIVER_NAME": entering open\n"); + + mutex_lock(&ctl_mutex); + pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev)); + if (!pd) { + ret = -ENODEV; + goto out; + } + BUG_ON(pd->refcnt < 0); + + pd->refcnt++; + if (pd->refcnt > 1) { + if ((mode & FMODE_WRITE) && + !test_bit(PACKET_WRITABLE, &pd->flags)) { + ret = -EBUSY; + goto out_dec; + } + } else { + ret = pkt_open_dev(pd, mode & FMODE_WRITE); + if (ret) + goto out_dec; + /* + * needed here as well, since ext2 (among others) may change + * the blocksize at mount time + */ + set_blocksize(bdev, CD_FRAMESIZE); + } + + mutex_unlock(&ctl_mutex); + return 0; + +out_dec: + pd->refcnt--; +out: + VPRINTK(DRIVER_NAME": failed open (%d)\n", ret); + mutex_unlock(&ctl_mutex); + return ret; +} + +static int pkt_close(struct gendisk *disk, fmode_t mode) +{ + struct pktcdvd_device *pd = disk->private_data; + int ret = 0; + + mutex_lock(&ctl_mutex); + pd->refcnt--; + BUG_ON(pd->refcnt < 0); + if (pd->refcnt == 0) { + int flush = test_bit(PACKET_WRITABLE, &pd->flags); + pkt_release_dev(pd, flush); + } + mutex_unlock(&ctl_mutex); + return ret; +} + + +static void pkt_end_io_read_cloned(struct bio *bio, int err) +{ + struct packet_stacked_data *psd = bio->bi_private; + struct pktcdvd_device *pd = psd->pd; + + bio_put(bio); + bio_endio(psd->bio, err); + mempool_free(psd, psd_pool); + pkt_bio_finished(pd); +} + +static int pkt_make_request(struct request_queue *q, struct bio *bio) +{ + struct pktcdvd_device *pd; + char b[BDEVNAME_SIZE]; + sector_t zone; + struct packet_data *pkt; + int was_empty, blocked_bio; + struct pkt_rb_node *node; + + pd = q->queuedata; + if (!pd) { + printk(DRIVER_NAME": %s incorrect request queue\n", bdevname(bio->bi_bdev, b)); + goto end_io; + } + + /* + * Clone READ bios so we can have our own bi_end_io callback. 
+ */ + if (bio_data_dir(bio) == READ) { + struct bio *cloned_bio = bio_clone(bio, GFP_NOIO); + struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO); + + psd->pd = pd; + psd->bio = bio; + cloned_bio->bi_bdev = pd->bdev; + cloned_bio->bi_private = psd; + cloned_bio->bi_end_io = pkt_end_io_read_cloned; + pd->stats.secs_r += bio->bi_size >> 9; + pkt_queue_bio(pd, cloned_bio); + return 0; + } + + if (!test_bit(PACKET_WRITABLE, &pd->flags)) { + printk(DRIVER_NAME": WRITE for ro device %s (%llu)\n", + pd->name, (unsigned long long)bio->bi_sector); + goto end_io; + } + + if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) { + printk(DRIVER_NAME": wrong bio size\n"); + goto end_io; + } + + blk_queue_bounce(q, &bio); + + zone = ZONE(bio->bi_sector, pd); + VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n", + (unsigned long long)bio->bi_sector, + (unsigned long long)(bio->bi_sector + bio_sectors(bio))); + + /* Check if we have to split the bio */ + { + struct bio_pair *bp; + sector_t last_zone; + int first_sectors; + + last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd); + if (last_zone != zone) { + BUG_ON(last_zone != zone + pd->settings.size); + first_sectors = last_zone - bio->bi_sector; + bp = bio_split(bio, first_sectors); + BUG_ON(!bp); + pkt_make_request(q, &bp->bio1); + pkt_make_request(q, &bp->bio2); + bio_pair_release(bp); + return 0; + } + } + + /* + * If we find a matching packet in state WAITING or READ_WAIT, we can + * just append this bio to that packet. + */ + spin_lock(&pd->cdrw.active_list_lock); + blocked_bio = 0; + list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { + if (pkt->sector == zone) { + spin_lock(&pkt->lock); + if ((pkt->state == PACKET_WAITING_STATE) || + (pkt->state == PACKET_READ_WAIT_STATE)) { + pkt_add_list_last(bio, &pkt->orig_bios, + &pkt->orig_bios_tail); + pkt->write_size += bio->bi_size / CD_FRAMESIZE; + if ((pkt->write_size >= pkt->frames) && + (pkt->state == PACKET_WAITING_STATE)) { + atomic_inc(&pkt->run_sm); + wake_up(&pd->wqueue); + } + spin_unlock(&pkt->lock); + spin_unlock(&pd->cdrw.active_list_lock); + return 0; + } else { + blocked_bio = 1; + } + spin_unlock(&pkt->lock); + } + } + spin_unlock(&pd->cdrw.active_list_lock); + + /* + * Test if there is enough room left in the bio work queue + * (queue size >= congestion on mark). + * If not, wait till the work queue size is below the congestion off mark. + */ + spin_lock(&pd->lock); + if (pd->write_congestion_on > 0 + && pd->bio_queue_size >= pd->write_congestion_on) { + set_bdi_congested(&q->backing_dev_info, WRITE); + do { + spin_unlock(&pd->lock); + congestion_wait(WRITE, HZ); + spin_lock(&pd->lock); + } while(pd->bio_queue_size > pd->write_congestion_off); + } + spin_unlock(&pd->lock); + + /* + * No matching packet found. Store the bio in the work queue. + */ + node = mempool_alloc(pd->rb_pool, GFP_NOIO); + node->bio = bio; + spin_lock(&pd->lock); + BUG_ON(pd->bio_queue_size < 0); + was_empty = (pd->bio_queue_size == 0); + pkt_rbtree_insert(pd, node); + spin_unlock(&pd->lock); + + /* + * Wake up the worker thread. + */ + atomic_set(&pd->scan_queue, 1); + if (was_empty) { + /* This wake_up is required for correct operation */ + wake_up(&pd->wqueue); + } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) { + /* + * This wake up is not required for correct operation, + * but improves performance in some cases. 
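+		 * (With free packets available and no active packet
+		 * blocking this zone, kcdrwd may be able to start on
+		 * the new bio right away.)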
+ */ + wake_up(&pd->wqueue); + } + return 0; +end_io: + bio_io_error(bio); + return 0; +} + + + +static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd, + struct bio_vec *bvec) +{ + struct pktcdvd_device *pd = q->queuedata; + sector_t zone = ZONE(bmd->bi_sector, pd); + int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size; + int remaining = (pd->settings.size << 9) - used; + int remaining2; + + /* + * A bio <= PAGE_SIZE must be allowed. If it crosses a packet + * boundary, pkt_make_request() will split the bio. + */ + remaining2 = PAGE_SIZE - bmd->bi_size; + remaining = max(remaining, remaining2); + + BUG_ON(remaining < 0); + return remaining; +} + +static void pkt_init_queue(struct pktcdvd_device *pd) +{ + struct request_queue *q = pd->disk->queue; + + blk_queue_make_request(q, pkt_make_request); + blk_queue_hardsect_size(q, CD_FRAMESIZE); + blk_queue_max_sectors(q, PACKET_MAX_SECTORS); + blk_queue_merge_bvec(q, pkt_merge_bvec); + q->queuedata = pd; +} + +static int pkt_seq_show(struct seq_file *m, void *p) +{ + struct pktcdvd_device *pd = m->private; + char *msg; + char bdev_buf[BDEVNAME_SIZE]; + int states[PACKET_NUM_STATES]; + + seq_printf(m, "Writer %s mapped to %s:\n", pd->name, + bdevname(pd->bdev, bdev_buf)); + + seq_printf(m, "\nSettings:\n"); + seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2); + + if (pd->settings.write_type == 0) + msg = "Packet"; + else + msg = "Unknown"; + seq_printf(m, "\twrite type:\t\t%s\n", msg); + + seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable"); + seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss); + + seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode); + + if (pd->settings.block_mode == PACKET_BLOCK_MODE1) + msg = "Mode 1"; + else if (pd->settings.block_mode == PACKET_BLOCK_MODE2) + msg = "Mode 2"; + else + msg = "Unknown"; + seq_printf(m, "\tblock mode:\t\t%s\n", msg); + + seq_printf(m, "\nStatistics:\n"); + seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started); + seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended); + seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1); + seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1); + seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1); + + seq_printf(m, "\nMisc:\n"); + seq_printf(m, "\treference count:\t%d\n", pd->refcnt); + seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags); + seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed); + seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed); + seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset); + seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset); + + seq_printf(m, "\nQueue state:\n"); + seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size); + seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios)); + seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector); + + pkt_count_states(pd, states); + seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", + states[0], states[1], states[2], states[3], states[4], states[5]); + + seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n", + pd->write_congestion_off, + pd->write_congestion_on); + return 0; +} + +static int pkt_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, pkt_seq_show, PDE(inode)->data); +} + +static const struct file_operations pkt_proc_fops = { + .open = pkt_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = 
single_release +}; + +static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) +{ + int i; + int ret = 0; + char b[BDEVNAME_SIZE]; + struct block_device *bdev; + + if (pd->pkt_dev == dev) { + printk(DRIVER_NAME": Recursive setup not allowed\n"); + return -EBUSY; + } + for (i = 0; i < MAX_WRITERS; i++) { + struct pktcdvd_device *pd2 = pkt_devs[i]; + if (!pd2) + continue; + if (pd2->bdev->bd_dev == dev) { + printk(DRIVER_NAME": %s already setup\n", bdevname(pd2->bdev, b)); + return -EBUSY; + } + if (pd2->pkt_dev == dev) { + printk(DRIVER_NAME": Can't chain pktcdvd devices\n"); + return -EBUSY; + } + } + + bdev = bdget(dev); + if (!bdev) + return -ENOMEM; + ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY); + if (ret) + return ret; + + /* This is safe, since we have a reference from open(). */ + __module_get(THIS_MODULE); + + pd->bdev = bdev; + set_blocksize(bdev, CD_FRAMESIZE); + + pkt_init_queue(pd); + + atomic_set(&pd->cdrw.pending_bios, 0); + pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name); + if (IS_ERR(pd->cdrw.thread)) { + printk(DRIVER_NAME": can't start kernel thread\n"); + ret = -ENOMEM; + goto out_mem; + } + + proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd); + DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b)); + return 0; + +out_mem: + blkdev_put(bdev, FMODE_READ | FMODE_NDELAY); + /* This is safe: open() is still holding a reference. */ + module_put(THIS_MODULE); + return ret; +} + +static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) +{ + struct pktcdvd_device *pd = bdev->bd_disk->private_data; + + VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, + MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); + + switch (cmd) { + case CDROMEJECT: + /* + * The door gets locked when the device is opened, so we + * have to unlock it or else the eject command fails. + */ + if (pd->refcnt == 1) + pkt_lock_door(pd, 0); + /* fallthru */ + /* + * forward selected CDROM ioctls to CD-ROM, for UDF + */ + case CDROMMULTISESSION: + case CDROMREADTOCENTRY: + case CDROM_LAST_WRITTEN: + case CDROM_SEND_PACKET: + case SCSI_IOCTL_SEND_COMMAND: + return __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg); + + default: + VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd); + return -ENOTTY; + } + + return 0; +} + +static int pkt_media_changed(struct gendisk *disk) +{ + struct pktcdvd_device *pd = disk->private_data; + struct gendisk *attached_disk; + + if (!pd) + return 0; + if (!pd->bdev) + return 0; + attached_disk = pd->bdev->bd_disk; + if (!attached_disk) + return 0; + return attached_disk->fops->media_changed(attached_disk); +} + +static struct block_device_operations pktcdvd_ops = { + .owner = THIS_MODULE, + .open = pkt_open, + .release = pkt_close, + .locked_ioctl = pkt_ioctl, + .media_changed = pkt_media_changed, +}; + +/* + * Set up mapping from pktcdvd device to CD-ROM device. 
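+ */
+
+/*
+ * Illustrative sketch (user-space view, not part of this driver): roughly
+ * how a tool such as pktsetup reaches pkt_setup_dev() through the misc
+ * control device registered as pkt_misc; ctl_fd and cdrom_dev are assumed
+ * to be prepared by the caller.
+ */
+#if 0
+	struct pkt_ctrl_command c = { .command = PKT_CTRL_CMD_SETUP };
+
+	c.dev = cdrom_dev;			/* u32, new_encode_dev() format */
+	ioctl(ctl_fd, PACKET_CTRL_CMD, &c);
+	/* on success, c.pkt_dev holds the new pktcdvd device number */
+#endif
+
+/* Allocate a pktcdvd_device, attach it to the CD-ROM and register the disk.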
+ */ +static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) +{ + int idx; + int ret = -ENOMEM; + struct pktcdvd_device *pd; + struct gendisk *disk; + + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + for (idx = 0; idx < MAX_WRITERS; idx++) + if (!pkt_devs[idx]) + break; + if (idx == MAX_WRITERS) { + printk(DRIVER_NAME": max %d writers supported\n", MAX_WRITERS); + ret = -EBUSY; + goto out_mutex; + } + + pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL); + if (!pd) + goto out_mutex; + + pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE, + sizeof(struct pkt_rb_node)); + if (!pd->rb_pool) + goto out_mem; + + INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); + INIT_LIST_HEAD(&pd->cdrw.pkt_active_list); + spin_lock_init(&pd->cdrw.active_list_lock); + + spin_lock_init(&pd->lock); + spin_lock_init(&pd->iosched.lock); + sprintf(pd->name, DRIVER_NAME"%d", idx); + init_waitqueue_head(&pd->wqueue); + pd->bio_queue = RB_ROOT; + + pd->write_congestion_on = write_congestion_on; + pd->write_congestion_off = write_congestion_off; + + disk = alloc_disk(1); + if (!disk) + goto out_mem; + pd->disk = disk; + disk->major = pktdev_major; + disk->first_minor = idx; + disk->fops = &pktcdvd_ops; + disk->flags = GENHD_FL_REMOVABLE; + strcpy(disk->disk_name, pd->name); + disk->private_data = pd; + disk->queue = blk_alloc_queue(GFP_KERNEL); + if (!disk->queue) + goto out_mem2; + + pd->pkt_dev = MKDEV(pktdev_major, idx); + ret = pkt_new_dev(pd, dev); + if (ret) + goto out_new_dev; + + add_disk(disk); + + pkt_sysfs_dev_new(pd); + pkt_debugfs_dev_new(pd); + + pkt_devs[idx] = pd; + if (pkt_dev) + *pkt_dev = pd->pkt_dev; + + mutex_unlock(&ctl_mutex); + return 0; + +out_new_dev: + blk_cleanup_queue(disk->queue); +out_mem2: + put_disk(disk); +out_mem: + if (pd->rb_pool) + mempool_destroy(pd->rb_pool); + kfree(pd); +out_mutex: + mutex_unlock(&ctl_mutex); + printk(DRIVER_NAME": setup of pktcdvd device failed\n"); + return ret; +} + +/* + * Tear down mapping from pktcdvd device to CD-ROM device. + */ +static int pkt_remove_dev(dev_t pkt_dev) +{ + struct pktcdvd_device *pd; + int idx; + int ret = 0; + + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + for (idx = 0; idx < MAX_WRITERS; idx++) { + pd = pkt_devs[idx]; + if (pd && (pd->pkt_dev == pkt_dev)) + break; + } + if (idx == MAX_WRITERS) { + DPRINTK(DRIVER_NAME": dev not setup\n"); + ret = -ENXIO; + goto out; + } + + if (pd->refcnt > 0) { + ret = -EBUSY; + goto out; + } + if (!IS_ERR(pd->cdrw.thread)) + kthread_stop(pd->cdrw.thread); + + pkt_devs[idx] = NULL; + + pkt_debugfs_dev_remove(pd); + pkt_sysfs_dev_remove(pd); + + blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY); + + remove_proc_entry(pd->name, pkt_proc); + DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name); + + del_gendisk(pd->disk); + blk_cleanup_queue(pd->disk->queue); + put_disk(pd->disk); + + mempool_destroy(pd->rb_pool); + kfree(pd); + + /* This is safe: open() is still holding a reference. 
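+	 * It balances the __module_get() that pkt_new_dev() took when the
+	 * mapping was created, and is dropped only after the kthread, proc
+	 * entry and bdev reference have all been torn down above.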
*/ + module_put(THIS_MODULE); + +out: + mutex_unlock(&ctl_mutex); + return ret; +} + +static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd) +{ + struct pktcdvd_device *pd; + + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index); + if (pd) { + ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev); + ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev); + } else { + ctrl_cmd->dev = 0; + ctrl_cmd->pkt_dev = 0; + } + ctrl_cmd->num_devices = MAX_WRITERS; + + mutex_unlock(&ctl_mutex); +} + +static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct pkt_ctrl_command ctrl_cmd; + int ret = 0; + dev_t pkt_dev = 0; + + if (cmd != PACKET_CTRL_CMD) + return -ENOTTY; + + if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command))) + return -EFAULT; + + switch (ctrl_cmd.command) { + case PKT_CTRL_CMD_SETUP: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev); + ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev); + break; + case PKT_CTRL_CMD_TEARDOWN: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev)); + break; + case PKT_CTRL_CMD_STATUS: + pkt_get_status(&ctrl_cmd); + break; + default: + return -ENOTTY; + } + + if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command))) + return -EFAULT; + return ret; +} + + +static const struct file_operations pkt_ctl_fops = { + .ioctl = pkt_ctl_ioctl, + .owner = THIS_MODULE, +}; + +static struct miscdevice pkt_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = DRIVER_NAME, + .fops = &pkt_ctl_fops +}; + +static int __init pkt_init(void) +{ + int ret; + + mutex_init(&ctl_mutex); + + psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE, + sizeof(struct packet_stacked_data)); + if (!psd_pool) + return -ENOMEM; + + ret = register_blkdev(pktdev_major, DRIVER_NAME); + if (ret < 0) { + printk(DRIVER_NAME": Unable to register block device\n"); + goto out2; + } + if (!pktdev_major) + pktdev_major = ret; + + ret = pkt_sysfs_init(); + if (ret) + goto out; + + pkt_debugfs_init(); + + ret = misc_register(&pkt_misc); + if (ret) { + printk(DRIVER_NAME": Unable to register misc device\n"); + goto out_misc; + } + + pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL); + + return 0; + +out_misc: + pkt_debugfs_cleanup(); + pkt_sysfs_cleanup(); +out: + unregister_blkdev(pktdev_major, DRIVER_NAME); +out2: + mempool_destroy(psd_pool); + return ret; +} + +static void __exit pkt_exit(void) +{ + remove_proc_entry("driver/"DRIVER_NAME, NULL); + misc_deregister(&pkt_misc); + + pkt_debugfs_cleanup(); + pkt_sysfs_cleanup(); + + unregister_blkdev(pktdev_major, DRIVER_NAME); + mempool_destroy(psd_pool); +} + +MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives"); +MODULE_AUTHOR("Jens Axboe <axboe@suse.de>"); +MODULE_LICENSE("GPL"); + +module_init(pkt_init); +module_exit(pkt_exit); diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c new file mode 100644 index 0000000..936466f --- /dev/null +++ b/drivers/block/ps3disk.c @@ -0,0 +1,606 @@ +/* + * PS3 Disk Storage Driver + * + * Copyright (C) 2007 Sony Computer Entertainment Inc. + * Copyright 2007 Sony Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published + * by the Free Software Foundation; version 2 of the License. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <linux/ata.h> +#include <linux/blkdev.h> + +#include <asm/lv1call.h> +#include <asm/ps3stor.h> +#include <asm/firmware.h> + + +#define DEVICE_NAME "ps3disk" + +#define BOUNCE_SIZE (64*1024) + +#define PS3DISK_MAX_DISKS 16 +#define PS3DISK_MINORS 16 + + +#define PS3DISK_NAME "ps3d%c" + + +struct ps3disk_private { + spinlock_t lock; /* Request queue spinlock */ + struct request_queue *queue; + struct gendisk *gendisk; + unsigned int blocking_factor; + struct request *req; + u64 raw_capacity; + unsigned char model[ATA_ID_PROD_LEN+1]; +}; + + +#define LV1_STORAGE_SEND_ATA_COMMAND (2) +#define LV1_STORAGE_ATA_HDDOUT (0x23) + +struct lv1_ata_cmnd_block { + u16 features; + u16 sector_count; + u16 LBA_low; + u16 LBA_mid; + u16 LBA_high; + u8 device; + u8 command; + u32 is_ext; + u32 proto; + u32 in_out; + u32 size; + u64 buffer; + u32 arglen; +}; + +enum lv1_ata_proto { + NON_DATA_PROTO = 0, + PIO_DATA_IN_PROTO = 1, + PIO_DATA_OUT_PROTO = 2, + DMA_PROTO = 3 +}; + +enum lv1_ata_in_out { + DIR_WRITE = 0, /* memory -> device */ + DIR_READ = 1 /* device -> memory */ +}; + +static int ps3disk_major; + + +static struct block_device_operations ps3disk_fops = { + .owner = THIS_MODULE, +}; + + +static void ps3disk_scatter_gather(struct ps3_storage_device *dev, + struct request *req, int gather) +{ + unsigned int offset = 0; + struct req_iterator iter; + struct bio_vec *bvec; + unsigned int i = 0; + size_t size; + void *buf; + + rq_for_each_segment(bvec, req, iter) { + unsigned long flags; + dev_dbg(&dev->sbd.core, + "%s:%u: bio %u: %u segs %u sectors from %lu\n", + __func__, __LINE__, i, bio_segments(iter.bio), + bio_sectors(iter.bio), iter.bio->bi_sector); + + size = bvec->bv_len; + buf = bvec_kmap_irq(bvec, &flags); + if (gather) + memcpy(dev->bounce_buf+offset, buf, size); + else + memcpy(buf, dev->bounce_buf+offset, size); + offset += size; + flush_kernel_dcache_page(bvec->bv_page); + bvec_kunmap_irq(bvec, &flags); + i++; + } +} + +static int ps3disk_submit_request_sg(struct ps3_storage_device *dev, + struct request *req) +{ + struct ps3disk_private *priv = dev->sbd.core.driver_data; + int write = rq_data_dir(req), res; + const char *op = write ? 
"write" : "read"; + u64 start_sector, sectors; + unsigned int region_id = dev->regions[dev->region_idx].id; + +#ifdef DEBUG + unsigned int n = 0; + struct bio_vec *bv; + struct req_iterator iter; + + rq_for_each_segment(bv, req, iter) + n++; + dev_dbg(&dev->sbd.core, + "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n", + __func__, __LINE__, op, n, req->nr_sectors, + req->hard_nr_sectors); +#endif + + start_sector = req->sector * priv->blocking_factor; + sectors = req->nr_sectors * priv->blocking_factor; + dev_dbg(&dev->sbd.core, "%s:%u: %s %lu sectors starting at %lu\n", + __func__, __LINE__, op, sectors, start_sector); + + if (write) { + ps3disk_scatter_gather(dev, req, 1); + + res = lv1_storage_write(dev->sbd.dev_id, region_id, + start_sector, sectors, 0, + dev->bounce_lpar, &dev->tag); + } else { + res = lv1_storage_read(dev->sbd.dev_id, region_id, + start_sector, sectors, 0, + dev->bounce_lpar, &dev->tag); + } + if (res) { + dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__, + __LINE__, op, res); + end_request(req, 0); + return 0; + } + + priv->req = req; + return 1; +} + +static int ps3disk_submit_flush_request(struct ps3_storage_device *dev, + struct request *req) +{ + struct ps3disk_private *priv = dev->sbd.core.driver_data; + u64 res; + + dev_dbg(&dev->sbd.core, "%s:%u: flush request\n", __func__, __LINE__); + + res = lv1_storage_send_device_command(dev->sbd.dev_id, + LV1_STORAGE_ATA_HDDOUT, 0, 0, 0, + 0, &dev->tag); + if (res) { + dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%lx\n", + __func__, __LINE__, res); + end_request(req, 0); + return 0; + } + + priv->req = req; + return 1; +} + +static void ps3disk_do_request(struct ps3_storage_device *dev, + struct request_queue *q) +{ + struct request *req; + + dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); + + while ((req = elv_next_request(q))) { + if (blk_fs_request(req)) { + if (ps3disk_submit_request_sg(dev, req)) + break; + } else if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && + req->cmd[0] == REQ_LB_OP_FLUSH) { + if (ps3disk_submit_flush_request(dev, req)) + break; + } else { + blk_dump_rq_flags(req, DEVICE_NAME " bad request"); + end_request(req, 0); + continue; + } + } +} + +static void ps3disk_request(struct request_queue *q) +{ + struct ps3_storage_device *dev = q->queuedata; + struct ps3disk_private *priv = dev->sbd.core.driver_data; + + if (priv->req) { + dev_dbg(&dev->sbd.core, "%s:%u busy\n", __func__, __LINE__); + return; + } + + ps3disk_do_request(dev, q); +} + +static irqreturn_t ps3disk_interrupt(int irq, void *data) +{ + struct ps3_storage_device *dev = data; + struct ps3disk_private *priv; + struct request *req; + int res, read, error; + u64 tag, status; + unsigned long num_sectors; + const char *op; + + res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status); + + if (tag != dev->tag) + dev_err(&dev->sbd.core, + "%s:%u: tag mismatch, got %lx, expected %lx\n", + __func__, __LINE__, tag, dev->tag); + + if (res) { + dev_err(&dev->sbd.core, "%s:%u: res=%d status=0x%lx\n", + __func__, __LINE__, res, status); + return IRQ_HANDLED; + } + + priv = dev->sbd.core.driver_data; + req = priv->req; + if (!req) { + dev_dbg(&dev->sbd.core, + "%s:%u non-block layer request completed\n", __func__, + __LINE__); + dev->lv1_status = status; + complete(&dev->done); + return IRQ_HANDLED; + } + + if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && + req->cmd[0] == REQ_LB_OP_FLUSH) { + read = 0; + num_sectors = req->hard_cur_sectors; + op = "flush"; + } else { + read = !rq_data_dir(req); + num_sectors 
= req->nr_sectors; + op = read ? "read" : "write"; + } + if (status) { + dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__, + __LINE__, op, status); + error = -EIO; + } else { + dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__, + __LINE__, op); + error = 0; + if (read) + ps3disk_scatter_gather(dev, req, 0); + } + + spin_lock(&priv->lock); + __blk_end_request(req, error, num_sectors << 9); + priv->req = NULL; + ps3disk_do_request(dev, priv->queue); + spin_unlock(&priv->lock); + + return IRQ_HANDLED; +} + +static int ps3disk_sync_cache(struct ps3_storage_device *dev) +{ + u64 res; + + dev_dbg(&dev->sbd.core, "%s:%u: sync cache\n", __func__, __LINE__); + + res = ps3stor_send_command(dev, LV1_STORAGE_ATA_HDDOUT, 0, 0, 0, 0); + if (res) { + dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%lx\n", + __func__, __LINE__, res); + return -EIO; + } + return 0; +} + + +/* ATA helpers copied from drivers/ata/libata-core.c */ + +static void swap_buf_le16(u16 *buf, unsigned int buf_words) +{ +#ifdef __BIG_ENDIAN + unsigned int i; + + for (i = 0; i < buf_words; i++) + buf[i] = le16_to_cpu(buf[i]); +#endif /* __BIG_ENDIAN */ +} + +static u64 ata_id_n_sectors(const u16 *id) +{ + if (ata_id_has_lba(id)) { + if (ata_id_has_lba48(id)) + return ata_id_u64(id, 100); + else + return ata_id_u32(id, 60); + } else { + if (ata_id_current_chs_valid(id)) + return ata_id_u32(id, 57); + else + return id[1] * id[3] * id[6]; + } +} + +static void ata_id_string(const u16 *id, unsigned char *s, unsigned int ofs, + unsigned int len) +{ + unsigned int c; + + while (len > 0) { + c = id[ofs] >> 8; + *s = c; + s++; + + c = id[ofs] & 0xff; + *s = c; + s++; + + ofs++; + len -= 2; + } +} + +static void ata_id_c_string(const u16 *id, unsigned char *s, unsigned int ofs, + unsigned int len) +{ + unsigned char *p; + + WARN_ON(!(len & 1)); + + ata_id_string(id, s, ofs, len - 1); + + p = s + strnlen(s, len - 1); + while (p > s && p[-1] == ' ') + p--; + *p = '\0'; +} + +static int ps3disk_identify(struct ps3_storage_device *dev) +{ + struct ps3disk_private *priv = dev->sbd.core.driver_data; + struct lv1_ata_cmnd_block ata_cmnd; + u16 *id = dev->bounce_buf; + u64 res; + + dev_dbg(&dev->sbd.core, "%s:%u: identify disk\n", __func__, __LINE__); + + memset(&ata_cmnd, 0, sizeof(struct lv1_ata_cmnd_block)); + ata_cmnd.command = ATA_CMD_ID_ATA; + ata_cmnd.sector_count = 1; + ata_cmnd.size = ata_cmnd.arglen = ATA_ID_WORDS * 2; + ata_cmnd.buffer = dev->bounce_lpar; + ata_cmnd.proto = PIO_DATA_IN_PROTO; + ata_cmnd.in_out = DIR_READ; + + res = ps3stor_send_command(dev, LV1_STORAGE_SEND_ATA_COMMAND, + ps3_mm_phys_to_lpar(__pa(&ata_cmnd)), + sizeof(ata_cmnd), ata_cmnd.buffer, + ata_cmnd.arglen); + if (res) { + dev_err(&dev->sbd.core, "%s:%u: identify disk failed 0x%lx\n", + __func__, __LINE__, res); + return -EIO; + } + + swap_buf_le16(id, ATA_ID_WORDS); + + /* All we're interested in are raw capacity and model name */ + priv->raw_capacity = ata_id_n_sectors(id); + ata_id_c_string(id, priv->model, ATA_ID_PROD, sizeof(priv->model)); + return 0; +} + +static void ps3disk_prepare_flush(struct request_queue *q, struct request *req) +{ + struct ps3_storage_device *dev = q->queuedata; + + dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); + + req->cmd_type = REQ_TYPE_LINUX_BLOCK; + req->cmd[0] = REQ_LB_OP_FLUSH; +} + +static unsigned long ps3disk_mask; + +static DEFINE_MUTEX(ps3disk_mask_mutex); + +static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev) +{ + struct ps3_storage_device *dev = 
to_ps3_storage_device(&_dev->core); + struct ps3disk_private *priv; + int error; + unsigned int devidx; + struct request_queue *queue; + struct gendisk *gendisk; + + if (dev->blk_size < 512) { + dev_err(&dev->sbd.core, + "%s:%u: cannot handle block size %lu\n", __func__, + __LINE__, dev->blk_size); + return -EINVAL; + } + + BUILD_BUG_ON(PS3DISK_MAX_DISKS > BITS_PER_LONG); + mutex_lock(&ps3disk_mask_mutex); + devidx = find_first_zero_bit(&ps3disk_mask, PS3DISK_MAX_DISKS); + if (devidx >= PS3DISK_MAX_DISKS) { + dev_err(&dev->sbd.core, "%s:%u: Too many disks\n", __func__, + __LINE__); + mutex_unlock(&ps3disk_mask_mutex); + return -ENOSPC; + } + __set_bit(devidx, &ps3disk_mask); + mutex_unlock(&ps3disk_mask_mutex); + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + error = -ENOMEM; + goto fail; + } + + dev->sbd.core.driver_data = priv; + spin_lock_init(&priv->lock); + + dev->bounce_size = BOUNCE_SIZE; + dev->bounce_buf = kmalloc(BOUNCE_SIZE, GFP_DMA); + if (!dev->bounce_buf) { + error = -ENOMEM; + goto fail_free_priv; + } + + error = ps3stor_setup(dev, ps3disk_interrupt); + if (error) + goto fail_free_bounce; + + ps3disk_identify(dev); + + queue = blk_init_queue(ps3disk_request, &priv->lock); + if (!queue) { + dev_err(&dev->sbd.core, "%s:%u: blk_init_queue failed\n", + __func__, __LINE__); + error = -ENOMEM; + goto fail_teardown; + } + + priv->queue = queue; + queue->queuedata = dev; + + blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH); + + blk_queue_max_sectors(queue, dev->bounce_size >> 9); + blk_queue_segment_boundary(queue, -1UL); + blk_queue_dma_alignment(queue, dev->blk_size-1); + blk_queue_hardsect_size(queue, dev->blk_size); + + blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH, + ps3disk_prepare_flush); + + blk_queue_max_phys_segments(queue, -1); + blk_queue_max_hw_segments(queue, -1); + blk_queue_max_segment_size(queue, dev->bounce_size); + + gendisk = alloc_disk(PS3DISK_MINORS); + if (!gendisk) { + dev_err(&dev->sbd.core, "%s:%u: alloc_disk failed\n", __func__, + __LINE__); + error = -ENOMEM; + goto fail_cleanup_queue; + } + + priv->gendisk = gendisk; + gendisk->major = ps3disk_major; + gendisk->first_minor = devidx * PS3DISK_MINORS; + gendisk->fops = &ps3disk_fops; + gendisk->queue = queue; + gendisk->private_data = dev; + gendisk->driverfs_dev = &dev->sbd.core; + snprintf(gendisk->disk_name, sizeof(gendisk->disk_name), PS3DISK_NAME, + devidx+'a'); + priv->blocking_factor = dev->blk_size >> 9; + set_capacity(gendisk, + dev->regions[dev->region_idx].size*priv->blocking_factor); + + dev_info(&dev->sbd.core, + "%s is a %s (%lu MiB total, %lu MiB for OtherOS)\n", + gendisk->disk_name, priv->model, priv->raw_capacity >> 11, + get_capacity(gendisk) >> 11); + + add_disk(gendisk); + return 0; + +fail_cleanup_queue: + blk_cleanup_queue(queue); +fail_teardown: + ps3stor_teardown(dev); +fail_free_bounce: + kfree(dev->bounce_buf); +fail_free_priv: + kfree(priv); + dev->sbd.core.driver_data = NULL; +fail: + mutex_lock(&ps3disk_mask_mutex); + __clear_bit(devidx, &ps3disk_mask); + mutex_unlock(&ps3disk_mask_mutex); + return error; +} + +static int ps3disk_remove(struct ps3_system_bus_device *_dev) +{ + struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core); + struct ps3disk_private *priv = dev->sbd.core.driver_data; + + mutex_lock(&ps3disk_mask_mutex); + __clear_bit(MINOR(disk_devt(priv->gendisk)) / PS3DISK_MINORS, + &ps3disk_mask); + mutex_unlock(&ps3disk_mask_mutex); + del_gendisk(priv->gendisk); + blk_cleanup_queue(priv->queue); + put_disk(priv->gendisk); + 
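+	/* The gendisk is gone; flush the drive's write cache before
+	 * shutting down the hypervisor storage channel. */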
+	dev_notice(&dev->sbd.core, "Synchronizing disk cache\n");
+	ps3disk_sync_cache(dev);
+	ps3stor_teardown(dev);
+	kfree(dev->bounce_buf);
+	kfree(priv);
+	dev->sbd.core.driver_data = NULL;
+	return 0;
+}
+
+static struct ps3_system_bus_driver ps3disk = {
+	.match_id	= PS3_MATCH_ID_STOR_DISK,
+	.core.name	= DEVICE_NAME,
+	.core.owner	= THIS_MODULE,
+	.probe		= ps3disk_probe,
+	.remove		= ps3disk_remove,
+	.shutdown	= ps3disk_remove,
+};
+
+
+static int __init ps3disk_init(void)
+{
+	int error;
+
+	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+		return -ENODEV;
+
+	error = register_blkdev(0, DEVICE_NAME);
+	if (error <= 0) {
+		printk(KERN_ERR "%s:%u: register_blkdev failed %d\n", __func__,
+		       __LINE__, error);
+		return error;
+	}
+	ps3disk_major = error;
+
+	pr_info("%s:%u: registered block device major %d\n", __func__,
+		__LINE__, ps3disk_major);
+
+	error = ps3_system_bus_driver_register(&ps3disk);
+	if (error)
+		unregister_blkdev(ps3disk_major, DEVICE_NAME);
+
+	return error;
+}
+
+static void __exit ps3disk_exit(void)
+{
+	ps3_system_bus_driver_unregister(&ps3disk);
+	unregister_blkdev(ps3disk_major, DEVICE_NAME);
+}
+
+module_init(ps3disk_init);
+module_exit(ps3disk_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PS3 Disk Storage Driver");
+MODULE_AUTHOR("Sony Corporation");
+MODULE_ALIAS(PS3_MODULE_ALIAS_STOR_DISK);
diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
new file mode 100644
index 0000000..a0b403a
--- /dev/null
+++ b/drivers/block/smart1,2.h
@@ -0,0 +1,278 @@
+/*
+ *    Disk Array driver for Compaq SMART2 Controllers
+ *    Copyright 1998 Compaq Computer Corporation
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ *    If you want to make changes, improve or add functionality to this
+ *    driver, you'll probably need the Compaq Array Controller Interface
+ *    Specification (Document number ECG086/1198)
+ */
+
+/*
+ * This file contains the controller communication implementation for
+ * Compaq SMART-1 and SMART-2 controllers.  To the best of my knowledge,
+ * this should support:
+ *
+ *  PCI:
+ *  SMART-2/P, SMART-2DH, SMART-2SL, SMART-221, SMART-3100ES, SMART-3200
+ *  Integrated SMART Array Controller, SMART-4200, SMART-4250ES
+ *
+ *  EISA:
+ *  SMART-2/E, SMART, IAES, IDA-2, IDA
+ */
+
+/*
+ * Memory mapped FIFO interface (SMART 42xx cards)
+ */
+static void smart4_submit_command(ctlr_info_t *h, cmdlist_t *c)
+{
+	writel(c->busaddr, h->vaddr + S42XX_REQUEST_PORT_OFFSET);
+}
+
+/*
+ *  This card is the opposite of the other cards.
+ *   0 turns interrupts on...
+ *   0x08 turns them off...
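+ *   i.e. smart4_intr_mask(h, 1) writes 0 to unmask interrupts, and
+ *   smart4_intr_mask(h, 0) writes S42XX_INTR_OFF to mask them.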
+ */
+static void smart4_intr_mask(ctlr_info_t *h, unsigned long val)
+{
+	if (val)
+	{ /* Turn interrupts on */
+		writel(0, h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
+	} else /* Turn them off */
+	{
+		writel( S42XX_INTR_OFF,
+			h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
+	}
+}
+
+/*
+ *  For older cards FIFO Full = 0.
+ *  On this card 0 means there is room, anything else FIFO Full.
+ *
+ */
+static unsigned long smart4_fifo_full(ctlr_info_t *h)
+{
+
+	return (!readl(h->vaddr + S42XX_REQUEST_PORT_OFFSET));
+}
+
+/* This type of controller returns -1 if the fifo is empty,
+ * Not 0 like the others.
+ * And we need to let it know we read a value out
+ */
+static unsigned long smart4_completed(ctlr_info_t *h)
+{
+	long register_value
+		= readl(h->vaddr + S42XX_REPLY_PORT_OFFSET);
+
+	/* Fifo is empty */
+	if( register_value == 0xffffffff)
+		return 0;
+
+	/* Need to let it know we got the reply */
+	/* We do this by writing a 0 to the port we just read from */
+	writel(0, h->vaddr + S42XX_REPLY_PORT_OFFSET);
+
+	return ((unsigned long) register_value);
+}
+
+ /*
+ *  This hardware returns interrupt pending at a different place and
+ *  it does not tell us if the fifo is empty, so we will have to check
+ *  that by getting a 0 back from the command_completed call.
+ */
+static unsigned long smart4_intr_pending(ctlr_info_t *h)
+{
+	unsigned long register_value =
+		readl(h->vaddr + S42XX_INTR_STATUS);
+
+	if( register_value & S42XX_INTR_PENDING)
+		return FIFO_NOT_EMPTY;
+	return 0 ;
+}
+
+static struct access_method smart4_access = {
+	smart4_submit_command,
+	smart4_intr_mask,
+	smart4_fifo_full,
+	smart4_intr_pending,
+	smart4_completed,
+};
+
+/*
+ * Memory mapped FIFO interface (PCI SMART2 and SMART 3xxx cards)
+ */
+static void smart2_submit_command(ctlr_info_t *h, cmdlist_t *c)
+{
+	writel(c->busaddr, h->vaddr + COMMAND_FIFO);
+}
+
+static void smart2_intr_mask(ctlr_info_t *h, unsigned long val)
+{
+	writel(val, h->vaddr + INTR_MASK);
+}
+
+static unsigned long smart2_fifo_full(ctlr_info_t *h)
+{
+	return readl(h->vaddr + COMMAND_FIFO);
+}
+
+static unsigned long smart2_completed(ctlr_info_t *h)
+{
+	return readl(h->vaddr + COMMAND_COMPLETE_FIFO);
+}
+
+static unsigned long smart2_intr_pending(ctlr_info_t *h)
+{
+	return readl(h->vaddr + INTR_PENDING);
+}
+
+static struct access_method smart2_access = {
+	smart2_submit_command,
+	smart2_intr_mask,
+	smart2_fifo_full,
+	smart2_intr_pending,
+	smart2_completed,
+};
+
+/*
+ *  IO access for SMART-2/E cards
+ */
+static void smart2e_submit_command(ctlr_info_t *h, cmdlist_t *c)
+{
+	outl(c->busaddr, h->io_mem_addr + COMMAND_FIFO);
+}
+
+static void smart2e_intr_mask(ctlr_info_t *h, unsigned long val)
+{
+	outl(val, h->io_mem_addr + INTR_MASK);
+}
+
+static unsigned long smart2e_fifo_full(ctlr_info_t *h)
+{
+	return inl(h->io_mem_addr + COMMAND_FIFO);
+}
+
+static unsigned long smart2e_completed(ctlr_info_t *h)
+{
+	return inl(h->io_mem_addr + COMMAND_COMPLETE_FIFO);
+}
+
+static unsigned long smart2e_intr_pending(ctlr_info_t *h)
+{
+	return inl(h->io_mem_addr + INTR_PENDING);
+}
+
+static struct access_method smart2e_access = {
+	smart2e_submit_command,
+	smart2e_intr_mask,
+	smart2e_fifo_full,
+	smart2e_intr_pending,
+	smart2e_completed,
+};
+
+/*
+ *  IO access for older SMART-1 type cards
+ */
+#define SMART1_SYSTEM_MASK	0xC8E
+#define SMART1_SYSTEM_DOORBELL	0xC8F
+#define SMART1_LOCAL_MASK	0xC8C
+#define SMART1_LOCAL_DOORBELL	0xC8D
+#define SMART1_INTR_MASK	0xC89
+#define SMART1_LISTADDR		0xC90
+#define SMART1_LISTLEN		0xC94
+#define SMART1_TAG		0xC97
+#define SMART1_COMPLETE_ADDR 0xC98 +#define SMART1_LISTSTATUS 0xC9E + +#define CHANNEL_BUSY 0x01 +#define CHANNEL_CLEAR 0x02 + +static void smart1_submit_command(ctlr_info_t *h, cmdlist_t *c) +{ + /* + * This __u16 is actually a bunch of control flags on SMART + * and below. We want them all to be zero. + */ + c->hdr.size = 0; + + outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_SYSTEM_DOORBELL); + + outl(c->busaddr, h->io_mem_addr + SMART1_LISTADDR); + outw(c->size, h->io_mem_addr + SMART1_LISTLEN); + + outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL); +} + +static void smart1_intr_mask(ctlr_info_t *h, unsigned long val) +{ + if (val == 1) { + outb(0xFD, h->io_mem_addr + SMART1_SYSTEM_DOORBELL); + outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL); + outb(0x01, h->io_mem_addr + SMART1_INTR_MASK); + outb(0x01, h->io_mem_addr + SMART1_SYSTEM_MASK); + } else { + outb(0, h->io_mem_addr + 0xC8E); + } +} + +static unsigned long smart1_fifo_full(ctlr_info_t *h) +{ + unsigned char chan; + chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_CLEAR; + return chan; +} + +static unsigned long smart1_completed(ctlr_info_t *h) +{ + unsigned char status; + unsigned long cmd; + + if (inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY) { + outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_SYSTEM_DOORBELL); + + cmd = inl(h->io_mem_addr + SMART1_COMPLETE_ADDR); + status = inb(h->io_mem_addr + SMART1_LISTSTATUS); + + outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_LOCAL_DOORBELL); + + /* + * this is x86 (actually compaq x86) only, so it's ok + */ + if (cmd) ((cmdlist_t*)bus_to_virt(cmd))->req.hdr.rcode = status; + } else { + cmd = 0; + } + return cmd; +} + +static unsigned long smart1_intr_pending(ctlr_info_t *h) +{ + unsigned char chan; + chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY; + return chan; +} + +static struct access_method smart1_access = { + smart1_submit_command, + smart1_intr_mask, + smart1_fifo_full, + smart1_intr_pending, + smart1_completed, +}; diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c new file mode 100644 index 0000000..953c0b8 --- /dev/null +++ b/drivers/block/sunvdc.c @@ -0,0 +1,886 @@ +/* sunvdc.c: Sun LDOM Virtual Disk Client. + * + * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/blkdev.h> +#include <linux/hdreg.h> +#include <linux/genhd.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/scatterlist.h> + +#include <asm/vio.h> +#include <asm/ldc.h> + +#define DRV_MODULE_NAME "sunvdc" +#define PFX DRV_MODULE_NAME ": " +#define DRV_MODULE_VERSION "1.0" +#define DRV_MODULE_RELDATE "June 25, 2007" + +static char version[] __devinitdata = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; +MODULE_AUTHOR("David S. 
Miller (davem@davemloft.net)"); +MODULE_DESCRIPTION("Sun LDOM virtual disk client driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +#define VDC_TX_RING_SIZE 256 + +#define WAITING_FOR_LINK_UP 0x01 +#define WAITING_FOR_TX_SPACE 0x02 +#define WAITING_FOR_GEN_CMD 0x04 +#define WAITING_FOR_ANY -1 + +struct vdc_req_entry { + struct request *req; +}; + +struct vdc_port { + struct vio_driver_state vio; + + struct gendisk *disk; + + struct vdc_completion *cmp; + + u64 req_id; + u64 seq; + struct vdc_req_entry rq_arr[VDC_TX_RING_SIZE]; + + unsigned long ring_cookies; + + u64 max_xfer_size; + u32 vdisk_block_size; + + /* The server fills these in for us in the disk attribute + * ACK packet. + */ + u64 operations; + u32 vdisk_size; + u8 vdisk_type; + + char disk_name[32]; + + struct vio_disk_geom geom; + struct vio_disk_vtoc label; +}; + +static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio) +{ + return container_of(vio, struct vdc_port, vio); +} + +/* Ordered from largest major to lowest */ +static struct vio_version vdc_versions[] = { + { .major = 1, .minor = 0 }, +}; + +#define VDCBLK_NAME "vdisk" +static int vdc_major; +#define PARTITION_SHIFT 3 + +static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr) +{ + return vio_dring_avail(dr, VDC_TX_RING_SIZE); +} + +static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + struct gendisk *disk = bdev->bd_disk; + struct vdc_port *port = disk->private_data; + + geo->heads = (u8) port->geom.num_hd; + geo->sectors = (u8) port->geom.num_sec; + geo->cylinders = port->geom.num_cyl; + + return 0; +} + +static struct block_device_operations vdc_fops = { + .owner = THIS_MODULE, + .getgeo = vdc_getgeo, +}; + +static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for) +{ + if (vio->cmp && + (waiting_for == -1 || + vio->cmp->waiting_for == waiting_for)) { + vio->cmp->err = err; + complete(&vio->cmp->com); + vio->cmp = NULL; + } +} + +static void vdc_handshake_complete(struct vio_driver_state *vio) +{ + vdc_finish(vio, 0, WAITING_FOR_LINK_UP); +} + +static int vdc_handle_unknown(struct vdc_port *port, void *arg) +{ + struct vio_msg_tag *pkt = arg; + + printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n", + pkt->type, pkt->stype, pkt->stype_env, pkt->sid); + printk(KERN_ERR PFX "Resetting connection.\n"); + + ldc_disconnect(port->vio.lp); + + return -ECONNRESET; +} + +static int vdc_send_attr(struct vio_driver_state *vio) +{ + struct vdc_port *port = to_vdc_port(vio); + struct vio_disk_attr_info pkt; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.tag.type = VIO_TYPE_CTRL; + pkt.tag.stype = VIO_SUBTYPE_INFO; + pkt.tag.stype_env = VIO_ATTR_INFO; + pkt.tag.sid = vio_send_sid(vio); + + pkt.xfer_mode = VIO_DRING_MODE; + pkt.vdisk_block_size = port->vdisk_block_size; + pkt.max_xfer_size = port->max_xfer_size; + + viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%lu]\n", + pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size); + + return vio_ldc_send(&port->vio, &pkt, sizeof(pkt)); +} + +static int vdc_handle_attr(struct vio_driver_state *vio, void *arg) +{ + struct vdc_port *port = to_vdc_port(vio); + struct vio_disk_attr_info *pkt = arg; + + viodbg(HS, "GOT ATTR stype[0x%x] ops[%lx] disk_size[%lu] disk_type[%x] " + "xfer_mode[0x%x] blksz[%u] max_xfer[%lu]\n", + pkt->tag.stype, pkt->operations, + pkt->vdisk_size, pkt->vdisk_type, + pkt->xfer_mode, pkt->vdisk_block_size, + pkt->max_xfer_size); + + if (pkt->tag.stype == VIO_SUBTYPE_ACK) { + switch (pkt->vdisk_type) 
{ + case VD_DISK_TYPE_DISK: + case VD_DISK_TYPE_SLICE: + break; + + default: + printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n", + vio->name, pkt->vdisk_type); + return -ECONNRESET; + } + + if (pkt->vdisk_block_size > port->vdisk_block_size) { + printk(KERN_ERR PFX "%s: BLOCK size increased " + "%u --> %u\n", + vio->name, + port->vdisk_block_size, pkt->vdisk_block_size); + return -ECONNRESET; + } + + port->operations = pkt->operations; + port->vdisk_size = pkt->vdisk_size; + port->vdisk_type = pkt->vdisk_type; + if (pkt->max_xfer_size < port->max_xfer_size) + port->max_xfer_size = pkt->max_xfer_size; + port->vdisk_block_size = pkt->vdisk_block_size; + return 0; + } else { + printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name); + + return -ECONNRESET; + } +} + +static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc) +{ + int err = desc->status; + + vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD); +} + +static void vdc_end_request(struct request *req, int error, int num_sectors) +{ + __blk_end_request(req, error, num_sectors << 9); +} + +static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr, + unsigned int index) +{ + struct vio_disk_desc *desc = vio_dring_entry(dr, index); + struct vdc_req_entry *rqe = &port->rq_arr[index]; + struct request *req; + + if (unlikely(desc->hdr.state != VIO_DESC_DONE)) + return; + + ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies); + desc->hdr.state = VIO_DESC_FREE; + dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1); + + req = rqe->req; + if (req == NULL) { + vdc_end_special(port, desc); + return; + } + + rqe->req = NULL; + + vdc_end_request(req, (desc->status ? -EIO : 0), desc->size >> 9); + + if (blk_queue_stopped(port->disk->queue)) + blk_start_queue(port->disk->queue); +} + +static int vdc_ack(struct vdc_port *port, void *msgbuf) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct vio_dring_data *pkt = msgbuf; + + if (unlikely(pkt->dring_ident != dr->ident || + pkt->start_idx != pkt->end_idx || + pkt->start_idx >= VDC_TX_RING_SIZE)) + return 0; + + vdc_end_one(port, dr, pkt->start_idx); + + return 0; +} + +static int vdc_nack(struct vdc_port *port, void *msgbuf) +{ + /* XXX Implement me XXX */ + return 0; +} + +static void vdc_event(void *arg, int event) +{ + struct vdc_port *port = arg; + struct vio_driver_state *vio = &port->vio; + unsigned long flags; + int err; + + spin_lock_irqsave(&vio->lock, flags); + + if (unlikely(event == LDC_EVENT_RESET || + event == LDC_EVENT_UP)) { + vio_link_state_change(vio, event); + spin_unlock_irqrestore(&vio->lock, flags); + return; + } + + if (unlikely(event != LDC_EVENT_DATA_READY)) { + printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event); + spin_unlock_irqrestore(&vio->lock, flags); + return; + } + + err = 0; + while (1) { + union { + struct vio_msg_tag tag; + u64 raw[8]; + } msgbuf; + + err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); + if (unlikely(err < 0)) { + if (err == -ECONNRESET) + vio_conn_reset(vio); + break; + } + if (err == 0) + break; + viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", + msgbuf.tag.type, + msgbuf.tag.stype, + msgbuf.tag.stype_env, + msgbuf.tag.sid); + err = vio_validate_sid(vio, &msgbuf.tag); + if (err < 0) + break; + + if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { + if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) + err = vdc_ack(port, &msgbuf); + else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) + err = vdc_nack(port, &msgbuf); + else + err = vdc_handle_unknown(port, &msgbuf); + } else if (msgbuf.tag.type == 
VIO_TYPE_CTRL) { + err = vio_control_pkt_engine(vio, &msgbuf); + } else { + err = vdc_handle_unknown(port, &msgbuf); + } + if (err < 0) + break; + } + if (err < 0) + vdc_finish(&port->vio, err, WAITING_FOR_ANY); + spin_unlock_irqrestore(&vio->lock, flags); +} + +static int __vdc_tx_trigger(struct vdc_port *port) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct vio_dring_data hdr = { + .tag = { + .type = VIO_TYPE_DATA, + .stype = VIO_SUBTYPE_INFO, + .stype_env = VIO_DRING_DATA, + .sid = vio_send_sid(&port->vio), + }, + .dring_ident = dr->ident, + .start_idx = dr->prod, + .end_idx = dr->prod, + }; + int err, delay; + + hdr.seq = dr->snd_nxt; + delay = 1; + do { + err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); + if (err > 0) { + dr->snd_nxt++; + break; + } + udelay(delay); + if ((delay <<= 1) > 128) + delay = 128; + } while (err == -EAGAIN); + + return err; +} + +static int __send_request(struct request *req) +{ + struct vdc_port *port = req->rq_disk->private_data; + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct scatterlist sg[port->ring_cookies]; + struct vdc_req_entry *rqe; + struct vio_disk_desc *desc; + unsigned int map_perm; + int nsg, err, i; + u64 len; + u8 op; + + map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO; + + if (rq_data_dir(req) == READ) { + map_perm |= LDC_MAP_W; + op = VD_OP_BREAD; + } else { + map_perm |= LDC_MAP_R; + op = VD_OP_BWRITE; + } + + sg_init_table(sg, port->ring_cookies); + nsg = blk_rq_map_sg(req->q, req, sg); + + len = 0; + for (i = 0; i < nsg; i++) + len += sg[i].length; + + if (unlikely(vdc_tx_dring_avail(dr) < 1)) { + blk_stop_queue(port->disk->queue); + err = -ENOMEM; + goto out; + } + + desc = vio_dring_cur(dr); + + err = ldc_map_sg(port->vio.lp, sg, nsg, + desc->cookies, port->ring_cookies, + map_perm); + if (err < 0) { + printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err); + return err; + } + + rqe = &port->rq_arr[dr->prod]; + rqe->req = req; + + desc->hdr.ack = VIO_ACK_ENABLE; + desc->req_id = port->req_id; + desc->operation = op; + if (port->vdisk_type == VD_DISK_TYPE_DISK) { + desc->slice = 0xff; + } else { + desc->slice = 0; + } + desc->status = ~0; + desc->offset = (req->sector << 9) / port->vdisk_block_size; + desc->size = len; + desc->ncookies = err; + + /* This has to be a non-SMP write barrier because we are writing + * to memory which is shared with the peer LDOM. 
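+	 * The descriptor body (cookies, operation, offset, size) must be
+	 * globally visible before hdr.state is set to VIO_DESC_READY,
+	 * because the server side treats the descriptor as live once it
+	 * observes READY; smp_wmb() would collapse to a compiler barrier
+	 * on UP kernels, which is not sufficient here.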
+	 */
+	wmb();
+	desc->hdr.state = VIO_DESC_READY;
+
+	err = __vdc_tx_trigger(port);
+	if (err < 0) {
+		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
+	} else {
+		port->req_id++;
+		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+	}
+out:
+
+	return err;
+}
+
+static void do_vdc_request(struct request_queue *q)
+{
+	while (1) {
+		struct request *req = elv_next_request(q);
+
+		if (!req)
+			break;
+
+		blkdev_dequeue_request(req);
+		if (__send_request(req) < 0)
+			vdc_end_request(req, -EIO, req->hard_nr_sectors);
+	}
+}
+
+static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
+{
+	struct vio_dring_state *dr;
+	struct vio_completion comp;
+	struct vio_disk_desc *desc;
+	unsigned int map_perm;
+	unsigned long flags;
+	int op_len, err;
+	void *req_buf;
+
+	if (!(((u64)1 << ((u64)op - 1)) & port->operations))
+		return -EOPNOTSUPP;
+
+	switch (op) {
+	case VD_OP_BREAD:
+	case VD_OP_BWRITE:
+	default:
+		return -EINVAL;
+
+	case VD_OP_FLUSH:
+		op_len = 0;
+		map_perm = 0;
+		break;
+
+	case VD_OP_GET_WCE:
+		op_len = sizeof(u32);
+		map_perm = LDC_MAP_W;
+		break;
+
+	case VD_OP_SET_WCE:
+		op_len = sizeof(u32);
+		map_perm = LDC_MAP_R;
+		break;
+
+	case VD_OP_GET_VTOC:
+		op_len = sizeof(struct vio_disk_vtoc);
+		map_perm = LDC_MAP_W;
+		break;
+
+	case VD_OP_SET_VTOC:
+		op_len = sizeof(struct vio_disk_vtoc);
+		map_perm = LDC_MAP_R;
+		break;
+
+	case VD_OP_GET_DISKGEOM:
+		op_len = sizeof(struct vio_disk_geom);
+		map_perm = LDC_MAP_W;
+		break;
+
+	case VD_OP_SET_DISKGEOM:
+		op_len = sizeof(struct vio_disk_geom);
+		map_perm = LDC_MAP_R;
+		break;
+
+	case VD_OP_SCSICMD:
+		op_len = 16;
+		map_perm = LDC_MAP_RW;
+		break;
+
+	case VD_OP_GET_DEVID:
+		op_len = sizeof(struct vio_disk_devid);
+		map_perm = LDC_MAP_W;
+		break;
+
+	case VD_OP_GET_EFI:
+	case VD_OP_SET_EFI:
+		return -EOPNOTSUPP;
+		break;
+	};
+
+	map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
+
+	op_len = (op_len + 7) & ~7;
+	req_buf = kzalloc(op_len, GFP_KERNEL);
+	if (!req_buf)
+		return -ENOMEM;
+
+	if (len > op_len)
+		len = op_len;
+
+	if (map_perm & LDC_MAP_R)
+		memcpy(req_buf, buf, len);
+
+	spin_lock_irqsave(&port->vio.lock, flags);
+
+	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+	/* XXX If we want to use this code generically we have to
+	 * XXX handle TX ring exhaustion etc.
+	 */
+	desc = vio_dring_cur(dr);
+
+	err = ldc_map_single(port->vio.lp, req_buf, op_len,
+			     desc->cookies, port->ring_cookies,
+			     map_perm);
+	if (err < 0) {
+		spin_unlock_irqrestore(&port->vio.lock, flags);
+		kfree(req_buf);
+		return err;
+	}
+
+	init_completion(&comp.com);
+	comp.waiting_for = WAITING_FOR_GEN_CMD;
+	port->vio.cmp = &comp;
+
+	desc->hdr.ack = VIO_ACK_ENABLE;
+	desc->req_id = port->req_id;
+	desc->operation = op;
+	desc->slice = 0;
+	desc->status = ~0;
+	desc->offset = 0;
+	desc->size = op_len;
+	desc->ncookies = err;
+
+	/* This has to be a non-SMP write barrier because we are writing
+	 * to memory which is shared with the peer LDOM.
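+	 * As in __send_request() above, the barrier orders the descriptor
+	 * body against the VIO_DESC_READY store.  This synchronous path
+	 * then sleeps on comp.com; vdc_end_special() fills in comp.err
+	 * from the descriptor status when the ACK interrupt arrives.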
+	 */
+	wmb();
+	desc->hdr.state = VIO_DESC_READY;
+
+	err = __vdc_tx_trigger(port);
+	if (err >= 0) {
+		port->req_id++;
+		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
+		spin_unlock_irqrestore(&port->vio.lock, flags);
+
+		wait_for_completion(&comp.com);
+		err = comp.err;
+	} else {
+		port->vio.cmp = NULL;
+		spin_unlock_irqrestore(&port->vio.lock, flags);
+	}
+
+	if (map_perm & LDC_MAP_W)
+		memcpy(buf, req_buf, len);
+
+	kfree(req_buf);
+
+	return err;
+}
+
+static int __devinit vdc_alloc_tx_ring(struct vdc_port *port)
+{
+	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+	unsigned long len, entry_size;
+	int ncookies;
+	void *dring;
+
+	entry_size = sizeof(struct vio_disk_desc) +
+		(sizeof(struct ldc_trans_cookie) * port->ring_cookies);
+	len = (VDC_TX_RING_SIZE * entry_size);
+
+	ncookies = VIO_MAX_RING_COOKIES;
+	dring = ldc_alloc_exp_dring(port->vio.lp, len,
+				    dr->cookies, &ncookies,
+				    (LDC_MAP_SHADOW |
+				     LDC_MAP_DIRECT |
+				     LDC_MAP_RW));
+	if (IS_ERR(dring))
+		return PTR_ERR(dring);
+
+	dr->base = dring;
+	dr->entry_size = entry_size;
+	dr->num_entries = VDC_TX_RING_SIZE;
+	dr->prod = dr->cons = 0;
+	dr->pending = VDC_TX_RING_SIZE;
+	dr->ncookies = ncookies;
+
+	return 0;
+}
+
+static void vdc_free_tx_ring(struct vdc_port *port)
+{
+	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+
+	if (dr->base) {
+		ldc_free_exp_dring(port->vio.lp, dr->base,
+				   (dr->entry_size * dr->num_entries),
+				   dr->cookies, dr->ncookies);
+		dr->base = NULL;
+		dr->entry_size = 0;
+		dr->num_entries = 0;
+		dr->pending = 0;
+		dr->ncookies = 0;
+	}
+}
+
+static int probe_disk(struct vdc_port *port)
+{
+	struct vio_completion comp;
+	struct request_queue *q;
+	struct gendisk *g;
+	int err;
+
+	init_completion(&comp.com);
+	comp.err = 0;
+	comp.waiting_for = WAITING_FOR_LINK_UP;
+	port->vio.cmp = &comp;
+
+	vio_port_up(&port->vio);
+
+	wait_for_completion(&comp.com);
+	if (comp.err)
+		return comp.err;
+
+	err = generic_request(port, VD_OP_GET_VTOC,
+			      &port->label, sizeof(port->label));
+	if (err < 0) {
+		printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
+		return err;
+	}
+
+	err = generic_request(port, VD_OP_GET_DISKGEOM,
+			      &port->geom, sizeof(port->geom));
+	if (err < 0) {
+		printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
+		       "error %d\n", err);
+		return err;
+	}
+
+	port->vdisk_size = ((u64)port->geom.num_cyl *
+			    (u64)port->geom.num_hd *
+			    (u64)port->geom.num_sec);
+
+	q = blk_init_queue(do_vdc_request, &port->vio.lock);
+	if (!q) {
+		printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
+		       port->vio.name);
+		return -ENOMEM;
+	}
+	g = alloc_disk(1 << PARTITION_SHIFT);
+	if (!g) {
+		printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
+		       port->vio.name);
+		blk_cleanup_queue(q);
+		return -ENOMEM;
+	}
+
+	port->disk = g;
+
+	blk_queue_max_hw_segments(q, port->ring_cookies);
+	blk_queue_max_phys_segments(q, port->ring_cookies);
+	blk_queue_max_sectors(q, port->max_xfer_size);
+	g->major = vdc_major;
+	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
+	strcpy(g->disk_name, port->disk_name);
+
+	g->fops = &vdc_fops;
+	g->queue = q;
+	g->private_data = port;
+	g->driverfs_dev = &port->vio.vdev->dev;
+
+	set_capacity(g, port->vdisk_size);
+
+	printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n",
+	       g->disk_name,
+	       port->vdisk_size, (port->vdisk_size >> (20 - 9)));
+
+	add_disk(g);
+
+	return 0;
+}
+
+static struct ldc_channel_config vdc_ldc_cfg = {
+	.event		= vdc_event,
+	.mtu		= 64,
+	.mode		= LDC_MODE_UNRELIABLE,
+};
+
+static struct vio_driver_ops vdc_vio_ops = {
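+	/* Handshake hooks called from the generic VIO control-packet
+	 * engine: vdc_send_attr() advertises our block size and transfer
+	 * limits, vdc_handle_attr() validates the server's reply, and
+	 * handshake_complete lets probe_disk() proceed via vdc_finish(). */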
.send_attr = vdc_send_attr, + .handle_attr = vdc_handle_attr, + .handshake_complete = vdc_handshake_complete, +}; + +static void __devinit print_version(void) +{ + static int version_printed; + + if (version_printed++ == 0) + printk(KERN_INFO "%s", version); +} + +static int __devinit vdc_port_probe(struct vio_dev *vdev, + const struct vio_device_id *id) +{ + struct mdesc_handle *hp; + struct vdc_port *port; + int err; + + print_version(); + + hp = mdesc_grab(); + + err = -ENODEV; + if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) { + printk(KERN_ERR PFX "Port id [%lu] too large.\n", + vdev->dev_no); + goto err_out_release_mdesc; + } + + port = kzalloc(sizeof(*port), GFP_KERNEL); + err = -ENOMEM; + if (!port) { + printk(KERN_ERR PFX "Cannot allocate vdc_port.\n"); + goto err_out_release_mdesc; + } + + if (vdev->dev_no >= 26) + snprintf(port->disk_name, sizeof(port->disk_name), + VDCBLK_NAME "%c%c", + 'a' + ((int)vdev->dev_no / 26) - 1, + 'a' + ((int)vdev->dev_no % 26)); + else + snprintf(port->disk_name, sizeof(port->disk_name), + VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26)); + + err = vio_driver_init(&port->vio, vdev, VDEV_DISK, + vdc_versions, ARRAY_SIZE(vdc_versions), + &vdc_vio_ops, port->disk_name); + if (err) + goto err_out_free_port; + + port->vdisk_block_size = 512; + port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size); + port->ring_cookies = ((port->max_xfer_size * + port->vdisk_block_size) / PAGE_SIZE) + 2; + + err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port); + if (err) + goto err_out_free_port; + + err = vdc_alloc_tx_ring(port); + if (err) + goto err_out_free_ldc; + + err = probe_disk(port); + if (err) + goto err_out_free_tx_ring; + + dev_set_drvdata(&vdev->dev, port); + + mdesc_release(hp); + + return 0; + +err_out_free_tx_ring: + vdc_free_tx_ring(port); + +err_out_free_ldc: + vio_ldc_free(&port->vio); + +err_out_free_port: + kfree(port); + +err_out_release_mdesc: + mdesc_release(hp); + return err; +} + +static int vdc_port_remove(struct vio_dev *vdev) +{ + struct vdc_port *port = dev_get_drvdata(&vdev->dev); + + if (port) { + del_timer_sync(&port->vio.timer); + + vdc_free_tx_ring(port); + vio_ldc_free(&port->vio); + + dev_set_drvdata(&vdev->dev, NULL); + + kfree(port); + } + return 0; +} + +static const struct vio_device_id vdc_port_match[] = { + { + .type = "vdc-port", + }, + {}, +}; +MODULE_DEVICE_TABLE(vio, vdc_port_match); + +static struct vio_driver vdc_port_driver = { + .id_table = vdc_port_match, + .probe = vdc_port_probe, + .remove = vdc_port_remove, + .driver = { + .name = "vdc_port", + .owner = THIS_MODULE, + } +}; + +static int __init vdc_init(void) +{ + int err; + + err = register_blkdev(0, VDCBLK_NAME); + if (err < 0) + goto out_err; + + vdc_major = err; + + err = vio_register_driver(&vdc_port_driver); + if (err) + goto out_unregister_blkdev; + + return 0; + +out_unregister_blkdev: + unregister_blkdev(vdc_major, VDCBLK_NAME); + vdc_major = 0; + +out_err: + return err; +} + +static void __exit vdc_exit(void) +{ + vio_unregister_driver(&vdc_port_driver); + unregister_blkdev(vdc_major, VDCBLK_NAME); +} + +module_init(vdc_init); +module_exit(vdc_exit); diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c new file mode 100644 index 0000000..6129653 --- /dev/null +++ b/drivers/block/swim3.c @@ -0,0 +1,1188 @@ +/* + * Driver for the SWIM3 (Super Woz Integrated Machine 3) + * floppy controller found on Power Macintoshes. + * + * Copyright (C) 1996 Paul Mackerras. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +/* + * TODO: + * handle 2 drives + * handle GCR disks + */ + +#include <linux/stddef.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/timer.h> +#include <linux/delay.h> +#include <linux/fd.h> +#include <linux/ioctl.h> +#include <linux/blkdev.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <asm/io.h> +#include <asm/dbdma.h> +#include <asm/prom.h> +#include <asm/uaccess.h> +#include <asm/mediabay.h> +#include <asm/machdep.h> +#include <asm/pmac_feature.h> + +static struct request_queue *swim3_queue; +static struct gendisk *disks[2]; +static struct request *fd_req; + +#define MAX_FLOPPIES 2 + +enum swim_state { + idle, + locating, + seeking, + settling, + do_transfer, + jogging, + available, + revalidating, + ejecting +}; + +#define REG(x) unsigned char x; char x ## _pad[15]; + +/* + * The names for these registers mostly represent speculation on my part. + * It will be interesting to see how close they are to the names Apple uses. + */ +struct swim3 { + REG(data); + REG(timer); /* counts down at 1MHz */ + REG(error); + REG(mode); + REG(select); /* controls CA0, CA1, CA2 and LSTRB signals */ + REG(setup); + REG(control); /* writing bits clears them */ + REG(status); /* writing bits sets them in control */ + REG(intr); + REG(nseek); /* # tracks to seek */ + REG(ctrack); /* current track number */ + REG(csect); /* current sector number */ + REG(gap3); /* size of gap 3 in track format */ + REG(sector); /* sector # to read or write */ + REG(nsect); /* # sectors to read or write */ + REG(intr_enable); +}; + +#define control_bic control +#define control_bis status + +/* Bits in select register */ +#define CA_MASK 7 +#define LSTRB 8 + +/* Bits in control register */ +#define DO_SEEK 0x80 +#define FORMAT 0x40 +#define SELECT 0x20 +#define WRITE_SECTORS 0x10 +#define DO_ACTION 0x08 +#define DRIVE2_ENABLE 0x04 +#define DRIVE_ENABLE 0x02 +#define INTR_ENABLE 0x01 + +/* Bits in status register */ +#define FIFO_1BYTE 0x80 +#define FIFO_2BYTE 0x40 +#define ERROR 0x20 +#define DATA 0x08 +#define RDDATA 0x04 +#define INTR_PENDING 0x02 +#define MARK_BYTE 0x01 + +/* Bits in intr and intr_enable registers */ +#define ERROR_INTR 0x20 +#define DATA_CHANGED 0x10 +#define TRANSFER_DONE 0x08 +#define SEEN_SECTOR 0x04 +#define SEEK_DONE 0x02 +#define TIMER_DONE 0x01 + +/* Bits in error register */ +#define ERR_DATA_CRC 0x80 +#define ERR_ADDR_CRC 0x40 +#define ERR_OVERRUN 0x04 +#define ERR_UNDERRUN 0x01 + +/* Bits in setup register */ +#define S_SW_RESET 0x80 +#define S_GCR_WRITE 0x40 +#define S_IBM_DRIVE 0x20 +#define S_TEST_MODE 0x10 +#define S_FCLK_DIV2 0x08 +#define S_GCR 0x04 +#define S_COPY_PROT 0x02 +#define S_INV_WDATA 0x01 + +/* Select values for swim3_action */ +#define SEEK_POSITIVE 0 +#define SEEK_NEGATIVE 4 +#define STEP 1 +#define MOTOR_ON 2 +#define MOTOR_OFF 6 +#define INDEX 3 +#define EJECT 7 +#define SETMFM 9 +#define SETGCR 13 + +/* Select values for swim3_select and swim3_readbit */ +#define STEP_DIR 0 +#define STEPPING 1 +#define MOTOR_ON 2 +#define RELAX 3 /* also eject in progress */ +#define READ_DATA_0 4 +#define TWOMEG_DRIVE 5 +#define SINGLE_SIDED 6 /* drive or diskette is 4MB type? 
*/ +#define DRIVE_PRESENT 7 +#define DISK_IN 8 +#define WRITE_PROT 9 +#define TRACK_ZERO 10 +#define TACHO 11 +#define READ_DATA_1 12 +#define MFM_MODE 13 +#define SEEK_COMPLETE 14 +#define ONEMEG_MEDIA 15 + +/* Definitions of values used in writing and formatting */ +#define DATA_ESCAPE 0x99 +#define GCR_SYNC_EXC 0x3f +#define GCR_SYNC_CONV 0x80 +#define GCR_FIRST_MARK 0xd5 +#define GCR_SECOND_MARK 0xaa +#define GCR_ADDR_MARK "\xd5\xaa\x00" +#define GCR_DATA_MARK "\xd5\xaa\x0b" +#define GCR_SLIP_BYTE "\x27\xaa" +#define GCR_SELF_SYNC "\x3f\xbf\x1e\x34\x3c\x3f" + +#define DATA_99 "\x99\x99" +#define MFM_ADDR_MARK "\x99\xa1\x99\xa1\x99\xa1\x99\xfe" +#define MFM_INDEX_MARK "\x99\xc2\x99\xc2\x99\xc2\x99\xfc" +#define MFM_GAP_LEN 12 + +struct floppy_state { + enum swim_state state; + spinlock_t lock; + struct swim3 __iomem *swim3; /* hardware registers */ + struct dbdma_regs __iomem *dma; /* DMA controller registers */ + int swim3_intr; /* interrupt number for SWIM3 */ + int dma_intr; /* interrupt number for DMA channel */ + int cur_cyl; /* cylinder head is on, or -1 */ + int cur_sector; /* last sector we saw go past */ + int req_cyl; /* the cylinder for the current r/w request */ + int head; /* head number ditto */ + int req_sector; /* sector number ditto */ + int scount; /* # sectors we're transferring at present */ + int retries; + int settle_time; + int secpercyl; /* disk geometry information */ + int secpertrack; + int total_secs; + int write_prot; /* 1 if write-protected, 0 if not, -1 dunno */ + struct dbdma_cmd *dma_cmd; + int ref_count; + int expect_cyl; + struct timer_list timeout; + int timeout_pending; + int ejected; + wait_queue_head_t wait; + int wanted; + struct device_node* media_bay; /* NULL when not in bay */ + char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)]; +}; + +static struct floppy_state floppy_states[MAX_FLOPPIES]; +static int floppy_count = 0; +static DEFINE_SPINLOCK(swim3_lock); + +static unsigned short write_preamble[] = { + 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, /* gap field */ + 0, 0, 0, 0, 0, 0, /* sync field */ + 0x99a1, 0x99a1, 0x99a1, 0x99fb, /* data address mark */ + 0x990f /* no escape for 512 bytes */ +}; + +static unsigned short write_postamble[] = { + 0x9904, /* insert CRC */ + 0x4e4e, 0x4e4e, + 0x9908, /* stop writing */ + 0, 0, 0, 0, 0, 0 +}; + +static void swim3_select(struct floppy_state *fs, int sel); +static void swim3_action(struct floppy_state *fs, int action); +static int swim3_readbit(struct floppy_state *fs, int bit); +static void do_fd_request(struct request_queue * q); +static void start_request(struct floppy_state *fs); +static void set_timeout(struct floppy_state *fs, int nticks, + void (*proc)(unsigned long)); +static void scan_track(struct floppy_state *fs); +static void seek_track(struct floppy_state *fs, int n); +static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count); +static void setup_transfer(struct floppy_state *fs); +static void act(struct floppy_state *fs); +static void scan_timeout(unsigned long data); +static void seek_timeout(unsigned long data); +static void settle_timeout(unsigned long data); +static void xfer_timeout(unsigned long data); +static irqreturn_t swim3_interrupt(int irq, void *dev_id); +/*static void fd_dma_interrupt(int irq, void *dev_id);*/ +static int grab_drive(struct floppy_state *fs, enum swim_state state, + int interruptible); +static void release_drive(struct floppy_state *fs); +static int fd_eject(struct floppy_state *fs); +static int floppy_ioctl(struct block_device *bdev, fmode_t 
mode, + unsigned int cmd, unsigned long param); +static int floppy_open(struct block_device *bdev, fmode_t mode); +static int floppy_release(struct gendisk *disk, fmode_t mode); +static int floppy_check_change(struct gendisk *disk); +static int floppy_revalidate(struct gendisk *disk); + +static void swim3_select(struct floppy_state *fs, int sel) +{ + struct swim3 __iomem *sw = fs->swim3; + + out_8(&sw->select, RELAX); + if (sel & 8) + out_8(&sw->control_bis, SELECT); + else + out_8(&sw->control_bic, SELECT); + out_8(&sw->select, sel & CA_MASK); +} + +static void swim3_action(struct floppy_state *fs, int action) +{ + struct swim3 __iomem *sw = fs->swim3; + + swim3_select(fs, action); + udelay(1); + out_8(&sw->select, sw->select | LSTRB); + udelay(2); + out_8(&sw->select, sw->select & ~LSTRB); + udelay(1); +} + +static int swim3_readbit(struct floppy_state *fs, int bit) +{ + struct swim3 __iomem *sw = fs->swim3; + int stat; + + swim3_select(fs, bit); + udelay(1); + stat = in_8(&sw->status); + return (stat & DATA) == 0; +} + +static void do_fd_request(struct request_queue * q) +{ + int i; + for(i=0;i<floppy_count;i++) + { +#ifdef CONFIG_PMAC_MEDIABAY + if (floppy_states[i].media_bay && + check_media_bay(floppy_states[i].media_bay, MB_FD)) + continue; +#endif /* CONFIG_PMAC_MEDIABAY */ + start_request(&floppy_states[i]); + } +} + +static void start_request(struct floppy_state *fs) +{ + struct request *req; + unsigned long x; + + if (fs->state == idle && fs->wanted) { + fs->state = available; + wake_up(&fs->wait); + return; + } + while (fs->state == idle && (req = elv_next_request(swim3_queue))) { +#if 0 + printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n", + req->rq_disk->disk_name, req->cmd, + (long)req->sector, req->nr_sectors, req->buffer); + printk(" errors=%d current_nr_sectors=%ld\n", + req->errors, req->current_nr_sectors); +#endif + + if (req->sector < 0 || req->sector >= fs->total_secs) { + end_request(req, 0); + continue; + } + if (req->current_nr_sectors == 0) { + end_request(req, 1); + continue; + } + if (fs->ejected) { + end_request(req, 0); + continue; + } + + if (rq_data_dir(req) == WRITE) { + if (fs->write_prot < 0) + fs->write_prot = swim3_readbit(fs, WRITE_PROT); + if (fs->write_prot) { + end_request(req, 0); + continue; + } + } + + /* Do not remove the cast. 
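+		 * A plain '/' on the 64-bit value would pull in a libgcc
+		 * divide helper that the kernel does not link against;
+		 * 64-bit divides have to go through do_div() instead.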
req->sector is now a sector_t and + * can be 64 bits, but it will never go past 32 bits for this + * driver anyway, so we can safely cast it down and not have + * to do a 64/32 division + */ + fs->req_cyl = ((long)req->sector) / fs->secpercyl; + x = ((long)req->sector) % fs->secpercyl; + fs->head = x / fs->secpertrack; + fs->req_sector = x % fs->secpertrack + 1; + fd_req = req; + fs->state = do_transfer; + fs->retries = 0; + + act(fs); + } +} + +static void set_timeout(struct floppy_state *fs, int nticks, + void (*proc)(unsigned long)) +{ + unsigned long flags; + + spin_lock_irqsave(&fs->lock, flags); + if (fs->timeout_pending) + del_timer(&fs->timeout); + fs->timeout.expires = jiffies + nticks; + fs->timeout.function = proc; + fs->timeout.data = (unsigned long) fs; + add_timer(&fs->timeout); + fs->timeout_pending = 1; + spin_unlock_irqrestore(&fs->lock, flags); +} + +static inline void scan_track(struct floppy_state *fs) +{ + struct swim3 __iomem *sw = fs->swim3; + + swim3_select(fs, READ_DATA_0); + in_8(&sw->intr); /* clear SEEN_SECTOR bit */ + in_8(&sw->error); + out_8(&sw->intr_enable, SEEN_SECTOR); + out_8(&sw->control_bis, DO_ACTION); + /* enable intr when track found */ + set_timeout(fs, HZ, scan_timeout); /* enable timeout */ +} + +static inline void seek_track(struct floppy_state *fs, int n) +{ + struct swim3 __iomem *sw = fs->swim3; + + if (n >= 0) { + swim3_action(fs, SEEK_POSITIVE); + sw->nseek = n; + } else { + swim3_action(fs, SEEK_NEGATIVE); + sw->nseek = -n; + } + fs->expect_cyl = (fs->cur_cyl >= 0)? fs->cur_cyl + n: -1; + swim3_select(fs, STEP); + in_8(&sw->error); + /* enable intr when seek finished */ + out_8(&sw->intr_enable, SEEK_DONE); + out_8(&sw->control_bis, DO_SEEK); + set_timeout(fs, 3*HZ, seek_timeout); /* enable timeout */ + fs->settle_time = 0; +} + +static inline void init_dma(struct dbdma_cmd *cp, int cmd, + void *buf, int count) +{ + st_le16(&cp->req_count, count); + st_le16(&cp->command, cmd); + st_le32(&cp->phy_addr, virt_to_bus(buf)); + cp->xfer_status = 0; +} + +static inline void setup_transfer(struct floppy_state *fs) +{ + int n; + struct swim3 __iomem *sw = fs->swim3; + struct dbdma_cmd *cp = fs->dma_cmd; + struct dbdma_regs __iomem *dr = fs->dma; + + if (fd_req->current_nr_sectors <= 0) { + printk(KERN_ERR "swim3: transfer 0 sectors?\n"); + return; + } + if (rq_data_dir(fd_req) == WRITE) + n = 1; + else { + n = fs->secpertrack - fs->req_sector + 1; + if (n > fd_req->current_nr_sectors) + n = fd_req->current_nr_sectors; + } + fs->scount = n; + swim3_select(fs, fs->head? 
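+/*
+ * Transfer sizing, in brief: req_sector is 1-based within the track, so
+ * a read grabs everything from the requested sector through the end of
+ * the track (secpertrack - req_sector + 1 sectors, clamped to what the
+ * request wants), while a write always moves exactly one sector.  A
+ * minimal model of that computation, assuming 18 sectors per track:
+ *
+ *	int sectors_this_io(int req_sector, int wanted, int writing)
+ *	{
+ *		int n = writing ? 1 : 18 - req_sector + 1;
+ *		if (!writing && n > wanted)
+ *			n = wanted;
+ *		return n;
+ *	}
+ */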
READ_DATA_1: READ_DATA_0); + out_8(&sw->sector, fs->req_sector); + out_8(&sw->nsect, n); + out_8(&sw->gap3, 0); + out_le32(&dr->cmdptr, virt_to_bus(cp)); + if (rq_data_dir(fd_req) == WRITE) { + /* Set up 3 dma commands: write preamble, data, postamble */ + init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble)); + ++cp; + init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512); + ++cp; + init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble)); + } else { + init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512); + } + ++cp; + out_le16(&cp->command, DBDMA_STOP); + out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); + in_8(&sw->error); + out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); + if (rq_data_dir(fd_req) == WRITE) + out_8(&sw->control_bis, WRITE_SECTORS); + in_8(&sw->intr); + out_le32(&dr->control, (RUN << 16) | RUN); + /* enable intr when transfer complete */ + out_8(&sw->intr_enable, TRANSFER_DONE); + out_8(&sw->control_bis, DO_ACTION); + set_timeout(fs, 2*HZ, xfer_timeout); /* enable timeout */ +} + +static void act(struct floppy_state *fs) +{ + for (;;) { + switch (fs->state) { + case idle: + return; /* XXX shouldn't get here */ + + case locating: + if (swim3_readbit(fs, TRACK_ZERO)) { + fs->cur_cyl = 0; + if (fs->req_cyl == 0) + fs->state = do_transfer; + else + fs->state = seeking; + break; + } + scan_track(fs); + return; + + case seeking: + if (fs->cur_cyl < 0) { + fs->expect_cyl = -1; + fs->state = locating; + break; + } + if (fs->req_cyl == fs->cur_cyl) { + printk("whoops, seeking 0\n"); + fs->state = do_transfer; + break; + } + seek_track(fs, fs->req_cyl - fs->cur_cyl); + return; + + case settling: + /* check for SEEK_COMPLETE after 30ms */ + fs->settle_time = (HZ + 32) / 33; + set_timeout(fs, fs->settle_time, settle_timeout); + return; + + case do_transfer: + if (fs->cur_cyl != fs->req_cyl) { + if (fs->retries > 5) { + end_request(fd_req, 0); + fs->state = idle; + return; + } + fs->state = seeking; + break; + } + setup_transfer(fs); + return; + + case jogging: + seek_track(fs, -5); + return; + + default: + printk(KERN_ERR"swim3: unknown state %d\n", fs->state); + return; + } + } +} + +static void scan_timeout(unsigned long data) +{ + struct floppy_state *fs = (struct floppy_state *) data; + struct swim3 __iomem *sw = fs->swim3; + + fs->timeout_pending = 0; + out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); + out_8(&sw->select, RELAX); + out_8(&sw->intr_enable, 0); + fs->cur_cyl = -1; + if (fs->retries > 5) { + end_request(fd_req, 0); + fs->state = idle; + start_request(fs); + } else { + fs->state = jogging; + act(fs); + } +} + +static void seek_timeout(unsigned long data) +{ + struct floppy_state *fs = (struct floppy_state *) data; + struct swim3 __iomem *sw = fs->swim3; + + fs->timeout_pending = 0; + out_8(&sw->control_bic, DO_SEEK); + out_8(&sw->select, RELAX); + out_8(&sw->intr_enable, 0); + printk(KERN_ERR "swim3: seek timeout\n"); + end_request(fd_req, 0); + fs->state = idle; + start_request(fs); +} + +static void settle_timeout(unsigned long data) +{ + struct floppy_state *fs = (struct floppy_state *) data; + struct swim3 __iomem *sw = fs->swim3; + + fs->timeout_pending = 0; + if (swim3_readbit(fs, SEEK_COMPLETE)) { + out_8(&sw->select, RELAX); + fs->state = locating; + act(fs); + return; + } + out_8(&sw->select, RELAX); + if (fs->settle_time < 2*HZ) { + ++fs->settle_time; + set_timeout(fs, 1, settle_timeout); + return; + } + printk(KERN_ERR "swim3: seek settle timeout\n"); + end_request(fd_req, 0); + fs->state = idle; + start_request(fs); +} + +static 
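+/*
+ * For reference, the DBDMA program that setup_transfer() builds for a
+ * write is (a sketch of the descriptors, not additional driver code):
+ *
+ *	cmd[0]: OUTPUT_MORE  write_preamble   gap + sync + address mark
+ *	cmd[1]: OUTPUT_MORE  request buffer   512 bytes of sector data
+ *	cmd[2]: OUTPUT_LAST  write_postamble  CRC insert + stop-write
+ *	cmd[3]: DBDMA_STOP
+ *
+ * A read is shorter: one INPUT_LAST descriptor covering n * 512 bytes,
+ * then DBDMA_STOP.
+ */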
void xfer_timeout(unsigned long data) +{ + struct floppy_state *fs = (struct floppy_state *) data; + struct swim3 __iomem *sw = fs->swim3; + struct dbdma_regs __iomem *dr = fs->dma; + struct dbdma_cmd *cp = fs->dma_cmd; + unsigned long s; + int n; + + fs->timeout_pending = 0; + out_le32(&dr->control, RUN << 16); + /* We must wait a bit for dbdma to stop */ + for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++) + udelay(1); + out_8(&sw->intr_enable, 0); + out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION); + out_8(&sw->select, RELAX); + if (rq_data_dir(fd_req) == WRITE) + ++cp; + if (ld_le16(&cp->xfer_status) != 0) + s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9); + else + s = 0; + fd_req->sector += s; + fd_req->current_nr_sectors -= s; + printk(KERN_ERR "swim3: timeout %sing sector %ld\n", + (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector); + end_request(fd_req, 0); + fs->state = idle; + start_request(fs); +} + +static irqreturn_t swim3_interrupt(int irq, void *dev_id) +{ + struct floppy_state *fs = (struct floppy_state *) dev_id; + struct swim3 __iomem *sw = fs->swim3; + int intr, err, n; + int stat, resid; + struct dbdma_regs __iomem *dr; + struct dbdma_cmd *cp; + + intr = in_8(&sw->intr); + err = (intr & ERROR_INTR)? in_8(&sw->error): 0; + if ((intr & ERROR_INTR) && fs->state != do_transfer) + printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n", + fs->state, rq_data_dir(fd_req), intr, err); + switch (fs->state) { + case locating: + if (intr & SEEN_SECTOR) { + out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS); + out_8(&sw->select, RELAX); + out_8(&sw->intr_enable, 0); + del_timer(&fs->timeout); + fs->timeout_pending = 0; + if (sw->ctrack == 0xff) { + printk(KERN_ERR "swim3: seen sector but cyl=ff?\n"); + fs->cur_cyl = -1; + if (fs->retries > 5) { + end_request(fd_req, 0); + fs->state = idle; + start_request(fs); + } else { + fs->state = jogging; + act(fs); + } + break; + } + fs->cur_cyl = sw->ctrack; + fs->cur_sector = sw->csect; + if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl) + printk(KERN_ERR "swim3: expected cyl %d, got %d\n", + fs->expect_cyl, fs->cur_cyl); + fs->state = do_transfer; + act(fs); + } + break; + case seeking: + case jogging: + if (sw->nseek == 0) { + out_8(&sw->control_bic, DO_SEEK); + out_8(&sw->select, RELAX); + out_8(&sw->intr_enable, 0); + del_timer(&fs->timeout); + fs->timeout_pending = 0; + if (fs->state == seeking) + ++fs->retries; + fs->state = settling; + act(fs); + } + break; + case settling: + out_8(&sw->intr_enable, 0); + del_timer(&fs->timeout); + fs->timeout_pending = 0; + act(fs); + break; + case do_transfer: + if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0) + break; + out_8(&sw->intr_enable, 0); + out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION); + out_8(&sw->select, RELAX); + del_timer(&fs->timeout); + fs->timeout_pending = 0; + dr = fs->dma; + cp = fs->dma_cmd; + if (rq_data_dir(fd_req) == WRITE) + ++cp; + /* + * Check that the main data transfer has finished. + * On writing, the swim3 sometimes doesn't use + * up all the bytes of the postamble, so we can still + * see DMA active here. That doesn't matter as long + * as all the sector data has been transferred. 
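+ * (For scale: res_count holds the bytes left over in the data
+ * descriptor, so the number of sectors actually transferred works out
+ * to scount - (res_count + 511) / 512 -- the same rounding that
+ * xfer_timeout() above uses when it advances the request past a
+ * partial transfer.)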
+ */ + if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) { + /* wait a little while for DMA to complete */ + for (n = 0; n < 100; ++n) { + if (cp->xfer_status != 0) + break; + udelay(1); + barrier(); + } + } + /* turn off DMA */ + out_le32(&dr->control, (RUN | PAUSE) << 16); + stat = ld_le16(&cp->xfer_status); + resid = ld_le16(&cp->res_count); + if (intr & ERROR_INTR) { + n = fs->scount - 1 - resid / 512; + if (n > 0) { + fd_req->sector += n; + fd_req->current_nr_sectors -= n; + fd_req->buffer += n * 512; + fs->req_sector += n; + } + if (fs->retries < 5) { + ++fs->retries; + act(fs); + } else { + printk("swim3: error %sing block %ld (err=%x)\n", + rq_data_dir(fd_req) == WRITE? "writ": "read", + (long)fd_req->sector, err); + end_request(fd_req, 0); + fs->state = idle; + } + } else { + if ((stat & ACTIVE) == 0 || resid != 0) { + /* musta been an error */ + printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid); + printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n", + fs->state, rq_data_dir(fd_req), intr, err); + end_request(fd_req, 0); + fs->state = idle; + start_request(fs); + break; + } + fd_req->sector += fs->scount; + fd_req->current_nr_sectors -= fs->scount; + fd_req->buffer += fs->scount * 512; + if (fd_req->current_nr_sectors <= 0) { + end_request(fd_req, 1); + fs->state = idle; + } else { + fs->req_sector += fs->scount; + if (fs->req_sector > fs->secpertrack) { + fs->req_sector -= fs->secpertrack; + if (++fs->head > 1) { + fs->head = 0; + ++fs->req_cyl; + } + } + act(fs); + } + } + if (fs->state == idle) + start_request(fs); + break; + default: + printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state); + } + return IRQ_HANDLED; +} + +/* +static void fd_dma_interrupt(int irq, void *dev_id) +{ +} +*/ + +static int grab_drive(struct floppy_state *fs, enum swim_state state, + int interruptible) +{ + unsigned long flags; + + spin_lock_irqsave(&fs->lock, flags); + if (fs->state != idle) { + ++fs->wanted; + while (fs->state != available) { + if (interruptible && signal_pending(current)) { + --fs->wanted; + spin_unlock_irqrestore(&fs->lock, flags); + return -EINTR; + } + interruptible_sleep_on(&fs->wait); + } + --fs->wanted; + } + fs->state = state; + spin_unlock_irqrestore(&fs->lock, flags); + return 0; +} + +static void release_drive(struct floppy_state *fs) +{ + unsigned long flags; + + spin_lock_irqsave(&fs->lock, flags); + fs->state = idle; + start_request(fs); + spin_unlock_irqrestore(&fs->lock, flags); +} + +static int fd_eject(struct floppy_state *fs) +{ + int err, n; + + err = grab_drive(fs, ejecting, 1); + if (err) + return err; + swim3_action(fs, EJECT); + for (n = 20; n > 0; --n) { + if (signal_pending(current)) { + err = -EINTR; + break; + } + swim3_select(fs, RELAX); + schedule_timeout_interruptible(1); + if (swim3_readbit(fs, DISK_IN) == 0) + break; + } + swim3_select(fs, RELAX); + udelay(150); + fs->ejected = 1; + release_drive(fs); + return err; +} + +static struct floppy_struct floppy_type = + { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */ + +static int floppy_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long param) +{ + struct floppy_state *fs = bdev->bd_disk->private_data; + int err; + + if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN)) + return -EPERM; + +#ifdef CONFIG_PMAC_MEDIABAY + if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) + return -ENXIO; +#endif + + switch (cmd) { + case FDEJECT: + if (fs->ref_count != 1) + return -EBUSY; + err = fd_eject(fs); + return err; + case 
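+/*
+ * The floppy_type entry above is the standard 1.44MB geometry: 2880
+ * sectors, 18 sectors per track, 2 heads, 80 cylinders.  As a sanity
+ * check (illustration only):
+ *
+ *	80 * 2 * 18 == 2880 sectors
+ *	2880 * 512  == 1474560 bytes == 1440 KiB
+ */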
FDGETPRM: + if (copy_to_user((void __user *) param, &floppy_type, + sizeof(struct floppy_struct))) + return -EFAULT; + return 0; + } + return -ENOTTY; +} + +static int floppy_open(struct block_device *bdev, fmode_t mode) +{ + struct floppy_state *fs = bdev->bd_disk->private_data; + struct swim3 __iomem *sw = fs->swim3; + int n, err = 0; + + if (fs->ref_count == 0) { +#ifdef CONFIG_PMAC_MEDIABAY + if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) + return -ENXIO; +#endif + out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2); + out_8(&sw->control_bic, 0xff); + out_8(&sw->mode, 0x95); + udelay(10); + out_8(&sw->intr_enable, 0); + out_8(&sw->control_bis, DRIVE_ENABLE | INTR_ENABLE); + swim3_action(fs, MOTOR_ON); + fs->write_prot = -1; + fs->cur_cyl = -1; + for (n = 0; n < 2 * HZ; ++n) { + if (n >= HZ/30 && swim3_readbit(fs, SEEK_COMPLETE)) + break; + if (signal_pending(current)) { + err = -EINTR; + break; + } + swim3_select(fs, RELAX); + schedule_timeout_interruptible(1); + } + if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0 + || swim3_readbit(fs, DISK_IN) == 0)) + err = -ENXIO; + swim3_action(fs, SETMFM); + swim3_select(fs, RELAX); + + } else if (fs->ref_count == -1 || mode & FMODE_EXCL) + return -EBUSY; + + if (err == 0 && (mode & FMODE_NDELAY) == 0 + && (mode & (FMODE_READ|FMODE_WRITE))) { + check_disk_change(bdev); + if (fs->ejected) + err = -ENXIO; + } + + if (err == 0 && (mode & FMODE_WRITE)) { + if (fs->write_prot < 0) + fs->write_prot = swim3_readbit(fs, WRITE_PROT); + if (fs->write_prot) + err = -EROFS; + } + + if (err) { + if (fs->ref_count == 0) { + swim3_action(fs, MOTOR_OFF); + out_8(&sw->control_bic, DRIVE_ENABLE | INTR_ENABLE); + swim3_select(fs, RELAX); + } + return err; + } + + if (mode & FMODE_EXCL) + fs->ref_count = -1; + else + ++fs->ref_count; + + return 0; +} + +static int floppy_release(struct gendisk *disk, fmode_t mode) +{ + struct floppy_state *fs = disk->private_data; + struct swim3 __iomem *sw = fs->swim3; + if (fs->ref_count > 0 && --fs->ref_count == 0) { + swim3_action(fs, MOTOR_OFF); + out_8(&sw->control_bic, 0xff); + swim3_select(fs, RELAX); + } + return 0; +} + +static int floppy_check_change(struct gendisk *disk) +{ + struct floppy_state *fs = disk->private_data; + return fs->ejected; +} + +static int floppy_revalidate(struct gendisk *disk) +{ + struct floppy_state *fs = disk->private_data; + struct swim3 __iomem *sw; + int ret, n; + +#ifdef CONFIG_PMAC_MEDIABAY + if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD)) + return -ENXIO; +#endif + + sw = fs->swim3; + grab_drive(fs, revalidating, 0); + out_8(&sw->intr_enable, 0); + out_8(&sw->control_bis, DRIVE_ENABLE); + swim3_action(fs, MOTOR_ON); /* necessary? 
*/ + fs->write_prot = -1; + fs->cur_cyl = -1; + mdelay(1); + for (n = HZ; n > 0; --n) { + if (swim3_readbit(fs, SEEK_COMPLETE)) + break; + if (signal_pending(current)) + break; + swim3_select(fs, RELAX); + schedule_timeout_interruptible(1); + } + ret = swim3_readbit(fs, SEEK_COMPLETE) == 0 + || swim3_readbit(fs, DISK_IN) == 0; + if (ret) + swim3_action(fs, MOTOR_OFF); + else { + fs->ejected = 0; + swim3_action(fs, SETMFM); + } + swim3_select(fs, RELAX); + + release_drive(fs); + return ret; +} + +static struct block_device_operations floppy_fops = { + .open = floppy_open, + .release = floppy_release, + .locked_ioctl = floppy_ioctl, + .media_changed = floppy_check_change, + .revalidate_disk= floppy_revalidate, +}; + +static int swim3_add_device(struct macio_dev *mdev, int index) +{ + struct device_node *swim = mdev->ofdev.node; + struct device_node *mediabay; + struct floppy_state *fs = &floppy_states[index]; + int rc = -EBUSY; + + /* Check & Request resources */ + if (macio_resource_count(mdev) < 2) { + printk(KERN_WARNING "fd%d: no address for %s\n", + index, swim->full_name); + return -ENXIO; + } + if (macio_irq_count(mdev) < 2) { + printk(KERN_WARNING "fd%d: no intrs for device %s\n", + index, swim->full_name); + } + if (macio_request_resource(mdev, 0, "swim3 (mmio)")) { + printk(KERN_ERR "fd%d: can't request mmio resource for %s\n", + index, swim->full_name); + return -EBUSY; + } + if (macio_request_resource(mdev, 1, "swim3 (dma)")) { + printk(KERN_ERR "fd%d: can't request dma resource for %s\n", + index, swim->full_name); + macio_release_resource(mdev, 0); + return -EBUSY; + } + dev_set_drvdata(&mdev->ofdev.dev, fs); + + mediabay = (strcasecmp(swim->parent->type, "media-bay") == 0) ? + swim->parent : NULL; + if (mediabay == NULL) + pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1); + + memset(fs, 0, sizeof(*fs)); + spin_lock_init(&fs->lock); + fs->state = idle; + fs->swim3 = (struct swim3 __iomem *) + ioremap(macio_resource_start(mdev, 0), 0x200); + if (fs->swim3 == NULL) { + printk(KERN_ERR "fd%d: couldn't map registers for %s\n", + index, swim->full_name); + rc = -ENOMEM; + goto out_release; + } + fs->dma = (struct dbdma_regs __iomem *) + ioremap(macio_resource_start(mdev, 1), 0x200); + if (fs->dma == NULL) { + printk(KERN_ERR "fd%d: couldn't map DMA for %s\n", + index, swim->full_name); + iounmap(fs->swim3); + rc = -ENOMEM; + goto out_release; + } + fs->swim3_intr = macio_irq(mdev, 0); + fs->dma_intr = macio_irq(mdev, 1); + fs->cur_cyl = -1; + fs->cur_sector = -1; + fs->secpercyl = 36; + fs->secpertrack = 18; + fs->total_secs = 2880; + fs->media_bay = mediabay; + init_waitqueue_head(&fs->wait); + + fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space); + memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd)); + st_le16(&fs->dma_cmd[1].command, DBDMA_STOP); + + if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) { + printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n", + index, fs->swim3_intr, swim->full_name); + pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0); + goto out_unmap; + } +/* + if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) { + printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA", + fs->dma_intr); + return -EBUSY; + } +*/ + + init_timer(&fs->timeout); + + printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count, + mediabay ? 
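+/*
+ * Note on dbdma_cmd_space: a write needs four descriptors (preamble,
+ * data, postamble, DBDMA_STOP) and the controller requires the command
+ * list to be aligned, so a fifth slot absorbs the slack consumed by
+ * DBDMA_ALIGN above.  The usual alignment idiom looks like this
+ * (assumption -- the real macro lives in the dbdma header):
+ *
+ *	#define DBDMA_ALIGN(x) (((unsigned long)(x) + \
+ *		sizeof(struct dbdma_cmd) - 1) & -sizeof(struct dbdma_cmd))
+ */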
"in media bay" : ""); + + return 0; + + out_unmap: + iounmap(fs->dma); + iounmap(fs->swim3); + + out_release: + macio_release_resource(mdev, 0); + macio_release_resource(mdev, 1); + + return rc; +} + +static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match) +{ + int i, rc; + struct gendisk *disk; + + /* Add the drive */ + rc = swim3_add_device(mdev, floppy_count); + if (rc) + return rc; + + /* Now create the queue if not there yet */ + if (swim3_queue == NULL) { + /* If we failed, there isn't much we can do as the driver is still + * too dumb to remove the device, just bail out + */ + if (register_blkdev(FLOPPY_MAJOR, "fd")) + return 0; + swim3_queue = blk_init_queue(do_fd_request, &swim3_lock); + if (swim3_queue == NULL) { + unregister_blkdev(FLOPPY_MAJOR, "fd"); + return 0; + } + } + + /* Now register that disk. Same comment about failure handling */ + i = floppy_count++; + disk = disks[i] = alloc_disk(1); + if (disk == NULL) + return 0; + + disk->major = FLOPPY_MAJOR; + disk->first_minor = i; + disk->fops = &floppy_fops; + disk->private_data = &floppy_states[i]; + disk->queue = swim3_queue; + disk->flags |= GENHD_FL_REMOVABLE; + sprintf(disk->disk_name, "fd%d", i); + set_capacity(disk, 2880); + add_disk(disk); + + return 0; +} + +static struct of_device_id swim3_match[] = +{ + { + .name = "swim3", + }, + { + .compatible = "ohare-swim3" + }, + { + .compatible = "swim3" + }, +}; + +static struct macio_driver swim3_driver = +{ + .name = "swim3", + .match_table = swim3_match, + .probe = swim3_attach, +#if 0 + .suspend = swim3_suspend, + .resume = swim3_resume, +#endif +}; + + +int swim3_init(void) +{ + macio_register_driver(&swim3_driver); + return 0; +} + +module_init(swim3_init) + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Paul Mackerras"); +MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR); diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c new file mode 100644 index 0000000..a18e1ca --- /dev/null +++ b/drivers/block/sx8.c @@ -0,0 +1,1765 @@ +/* + * sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware + * + * Copyright 2004-2005 Red Hat, Inc. + * + * Author/maintainer: Jeff Garzik <jgarzik@pobox.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/blkdev.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/compiler.h> +#include <linux/workqueue.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/time.h> +#include <linux/hdreg.h> +#include <linux/dma-mapping.h> +#include <linux/completion.h> +#include <linux/scatterlist.h> +#include <asm/io.h> +#include <asm/uaccess.h> + +#if 0 +#define CARM_DEBUG +#define CARM_VERBOSE_DEBUG +#else +#undef CARM_DEBUG +#undef CARM_VERBOSE_DEBUG +#endif +#undef CARM_NDEBUG + +#define DRV_NAME "sx8" +#define DRV_VERSION "1.0" +#define PFX DRV_NAME ": " + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Promise SATA SX8 block driver"); +MODULE_VERSION(DRV_VERSION); + +/* + * SX8 hardware has a single message queue for all ATA ports. + * When this driver was written, the hardware (firmware?) would + * corrupt data eventually, if more than one request was outstanding. 
+ * As one can imagine, having 8 ports bottlenecking on a single + * command hurts performance. + * + * Based on user reports, later versions of the hardware (firmware?) + * seem to be able to survive with more than one command queued. + * + * Therefore, we default to the safe option -- 1 command -- but + * allow the user to increase this. + * + * SX8 should be able to support up to ~60 queued commands (CARM_MAX_REQ), + * but problems seem to occur when you exceed ~30, even on newer hardware. + */ +static int max_queue = 1; +module_param(max_queue, int, 0444); +MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. (min==1, max==30, safe==1)"); + + +#define NEXT_RESP(idx) ((idx + 1) % RMSG_Q_LEN) + +/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */ +#define TAG_ENCODE(tag) (((tag) << 16) | 0xf) +#define TAG_DECODE(tag) (((tag) >> 16) & 0x1f) +#define TAG_VALID(tag) ((((tag) & 0xf) == 0xf) && (TAG_DECODE(tag) < 32)) + +/* note: prints function name for you */ +#ifdef CARM_DEBUG +#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) +#ifdef CARM_VERBOSE_DEBUG +#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) +#else +#define VPRINTK(fmt, args...) +#endif /* CARM_VERBOSE_DEBUG */ +#else +#define DPRINTK(fmt, args...) +#define VPRINTK(fmt, args...) +#endif /* CARM_DEBUG */ + +#ifdef CARM_NDEBUG +#define assert(expr) +#else +#define assert(expr) \ + if(unlikely(!(expr))) { \ + printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ + #expr, __FILE__, __func__, __LINE__); \ + } +#endif + +/* defines only for the constants which don't work well as enums */ +struct carm_host; + +enum { + /* adapter-wide limits */ + CARM_MAX_PORTS = 8, + CARM_SHM_SIZE = (4096 << 7), + CARM_MINORS_PER_MAJOR = 256 / CARM_MAX_PORTS, + CARM_MAX_WAIT_Q = CARM_MAX_PORTS + 1, + + /* command message queue limits */ + CARM_MAX_REQ = 64, /* max command msgs per host */ + CARM_MSG_LOW_WATER = (CARM_MAX_REQ / 4), /* refill mark */ + + /* S/G limits, host-wide and per-request */ + CARM_MAX_REQ_SG = 32, /* max s/g entries per request */ + CARM_MAX_HOST_SG = 600, /* max s/g entries per host */ + CARM_SG_LOW_WATER = (CARM_MAX_HOST_SG / 4), /* re-fill mark */ + + /* hardware registers */ + CARM_IHQP = 0x1c, + CARM_INT_STAT = 0x10, /* interrupt status */ + CARM_INT_MASK = 0x14, /* interrupt mask */ + CARM_HMUC = 0x18, /* host message unit control */ + RBUF_ADDR_LO = 0x20, /* response msg DMA buf low 32 bits */ + RBUF_ADDR_HI = 0x24, /* response msg DMA buf high 32 bits */ + RBUF_BYTE_SZ = 0x28, + CARM_RESP_IDX = 0x2c, + CARM_CMS0 = 0x30, /* command message size reg 0 */ + CARM_LMUC = 0x48, + CARM_HMPHA = 0x6c, + CARM_INITC = 0xb5, + + /* bits in CARM_INT_{STAT,MASK} */ + INT_RESERVED = 0xfffffff0, + INT_WATCHDOG = (1 << 3), /* watchdog timer */ + INT_Q_OVERFLOW = (1 << 2), /* cmd msg q overflow */ + INT_Q_AVAILABLE = (1 << 1), /* cmd msg q has free space */ + INT_RESPONSE = (1 << 0), /* response msg available */ + INT_ACK_MASK = INT_WATCHDOG | INT_Q_OVERFLOW, + INT_DEF_MASK = INT_RESERVED | INT_Q_OVERFLOW | + INT_RESPONSE, + + /* command messages, and related register bits */ + CARM_HAVE_RESP = 0x01, + CARM_MSG_READ = 1, + CARM_MSG_WRITE = 2, + CARM_MSG_VERIFY = 3, + CARM_MSG_GET_CAPACITY = 4, + CARM_MSG_FLUSH = 5, + CARM_MSG_IOCTL = 6, + CARM_MSG_ARRAY = 8, + CARM_MSG_MISC = 9, + CARM_CME = (1 << 2), + CARM_RME = (1 << 1), + CARM_WZBC = (1 << 0), + CARM_RMI = (1 << 0), + CARM_Q_FULL = (1 << 3), + CARM_MSG_SIZE = 288, + CARM_Q_LEN = 48, + + /* 
CARM_MSG_IOCTL messages */ + CARM_IOC_SCAN_CHAN = 5, /* scan channels for devices */ + CARM_IOC_GET_TCQ = 13, /* get tcq/ncq depth */ + CARM_IOC_SET_TCQ = 14, /* set tcq/ncq depth */ + + IOC_SCAN_CHAN_NODEV = 0x1f, + IOC_SCAN_CHAN_OFFSET = 0x40, + + /* CARM_MSG_ARRAY messages */ + CARM_ARRAY_INFO = 0, + + ARRAY_NO_EXIST = (1 << 31), + + /* response messages */ + RMSG_SZ = 8, /* sizeof(struct carm_response) */ + RMSG_Q_LEN = 48, /* resp. msg list length */ + RMSG_OK = 1, /* bit indicating msg was successful */ + /* length of entire resp. msg buffer */ + RBUF_LEN = RMSG_SZ * RMSG_Q_LEN, + + PDC_SHM_SIZE = (4096 << 7), /* length of entire h/w buffer */ + + /* CARM_MSG_MISC messages */ + MISC_GET_FW_VER = 2, + MISC_ALLOC_MEM = 3, + MISC_SET_TIME = 5, + + /* MISC_GET_FW_VER feature bits */ + FW_VER_4PORT = (1 << 2), /* 1=4 ports, 0=8 ports */ + FW_VER_NON_RAID = (1 << 1), /* 1=non-RAID firmware, 0=RAID */ + FW_VER_ZCR = (1 << 0), /* zero channel RAID (whatever that is) */ + + /* carm_host flags */ + FL_NON_RAID = FW_VER_NON_RAID, + FL_4PORT = FW_VER_4PORT, + FL_FW_VER_MASK = (FW_VER_NON_RAID | FW_VER_4PORT), + FL_DAC = (1 << 16), + FL_DYN_MAJOR = (1 << 17), +}; + +enum { + CARM_SG_BOUNDARY = 0xffffUL, /* s/g segment boundary */ +}; + +enum scatter_gather_types { + SGT_32BIT = 0, + SGT_64BIT = 1, +}; + +enum host_states { + HST_INVALID, /* invalid state; never used */ + HST_ALLOC_BUF, /* setting up master SHM area */ + HST_ERROR, /* we never leave here */ + HST_PORT_SCAN, /* start dev scan */ + HST_DEV_SCAN_START, /* start per-device probe */ + HST_DEV_SCAN, /* continue per-device probe */ + HST_DEV_ACTIVATE, /* activate devices we found */ + HST_PROBE_FINISHED, /* probe is complete */ + HST_PROBE_START, /* initiate probe */ + HST_SYNC_TIME, /* tell firmware what time it is */ + HST_GET_FW_VER, /* get firmware version, adapter port cnt */ +}; + +#ifdef CARM_DEBUG +static const char *state_name[] = { + "HST_INVALID", + "HST_ALLOC_BUF", + "HST_ERROR", + "HST_PORT_SCAN", + "HST_DEV_SCAN_START", + "HST_DEV_SCAN", + "HST_DEV_ACTIVATE", + "HST_PROBE_FINISHED", + "HST_PROBE_START", + "HST_SYNC_TIME", + "HST_GET_FW_VER", +}; +#endif + +struct carm_port { + unsigned int port_no; + struct gendisk *disk; + struct carm_host *host; + + /* attached device characteristics */ + u64 capacity; + char name[41]; + u16 dev_geom_head; + u16 dev_geom_sect; + u16 dev_geom_cyl; +}; + +struct carm_request { + unsigned int tag; + int n_elem; + unsigned int msg_type; + unsigned int msg_subtype; + unsigned int msg_bucket; + struct request *rq; + struct carm_port *port; + struct scatterlist sg[CARM_MAX_REQ_SG]; +}; + +struct carm_host { + unsigned long flags; + void __iomem *mmio; + void *shm; + dma_addr_t shm_dma; + + int major; + int id; + char name[32]; + + spinlock_t lock; + struct pci_dev *pdev; + unsigned int state; + u32 fw_ver; + + struct request_queue *oob_q; + unsigned int n_oob; + + unsigned int hw_sg_used; + + unsigned int resp_idx; + + unsigned int wait_q_prod; + unsigned int wait_q_cons; + struct request_queue *wait_q[CARM_MAX_WAIT_Q]; + + unsigned int n_msgs; + u64 msg_alloc; + struct carm_request req[CARM_MAX_REQ]; + void *msg_base; + dma_addr_t msg_dma; + + int cur_scan_dev; + unsigned long dev_active; + unsigned long dev_present; + struct carm_port port[CARM_MAX_PORTS]; + + struct work_struct fsm_task; + + struct completion probe_comp; +}; + +struct carm_response { + __le32 ret_handle; + __le32 status; +} __attribute__((packed)); + +struct carm_msg_sg { + __le32 start; + __le32 len; +} 
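+/*
+ * Size sketch for the read/write message defined below: the fixed
+ * header of struct carm_msg_rw is 16 bytes and each carm_msg_sg entry
+ * adds 8, so a request using all CARM_MAX_REQ_SG == 32 entries needs
+ * 16 + 32 * 8 = 272 bytes, which still fits the largest hardware
+ * bucket (CARM_MSG_SIZE == 288).  carm_lookup_bucket() further down
+ * rounds any message up to one of the bucket sizes 32/64/128/288.
+ */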
__attribute__((packed)); + +struct carm_msg_rw { + u8 type; + u8 id; + u8 sg_count; + u8 sg_type; + __le32 handle; + __le32 lba; + __le16 lba_count; + __le16 lba_high; + struct carm_msg_sg sg[32]; +} __attribute__((packed)); + +struct carm_msg_allocbuf { + u8 type; + u8 subtype; + u8 n_sg; + u8 sg_type; + __le32 handle; + __le32 addr; + __le32 len; + __le32 evt_pool; + __le32 n_evt; + __le32 rbuf_pool; + __le32 n_rbuf; + __le32 msg_pool; + __le32 n_msg; + struct carm_msg_sg sg[8]; +} __attribute__((packed)); + +struct carm_msg_ioctl { + u8 type; + u8 subtype; + u8 array_id; + u8 reserved1; + __le32 handle; + __le32 data_addr; + u32 reserved2; +} __attribute__((packed)); + +struct carm_msg_sync_time { + u8 type; + u8 subtype; + u16 reserved1; + __le32 handle; + u32 reserved2; + __le32 timestamp; +} __attribute__((packed)); + +struct carm_msg_get_fw_ver { + u8 type; + u8 subtype; + u16 reserved1; + __le32 handle; + __le32 data_addr; + u32 reserved2; +} __attribute__((packed)); + +struct carm_fw_ver { + __le32 version; + u8 features; + u8 reserved1; + u16 reserved2; +} __attribute__((packed)); + +struct carm_array_info { + __le32 size; + + __le16 size_hi; + __le16 stripe_size; + + __le32 mode; + + __le16 stripe_blk_sz; + __le16 reserved1; + + __le16 cyl; + __le16 head; + + __le16 sect; + u8 array_id; + u8 reserved2; + + char name[40]; + + __le32 array_status; + + /* device list continues beyond this point? */ +} __attribute__((packed)); + +static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); +static void carm_remove_one (struct pci_dev *pdev); +static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo); + +static struct pci_device_id carm_pci_tbl[] = { + { PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, + { PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, carm_pci_tbl); + +static struct pci_driver carm_driver = { + .name = DRV_NAME, + .id_table = carm_pci_tbl, + .probe = carm_init_one, + .remove = carm_remove_one, +}; + +static struct block_device_operations carm_bd_ops = { + .owner = THIS_MODULE, + .getgeo = carm_bdev_getgeo, +}; + +static unsigned int carm_host_id; +static unsigned long carm_major_alloc; + + + +static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + struct carm_port *port = bdev->bd_disk->private_data; + + geo->heads = (u8) port->dev_geom_head; + geo->sectors = (u8) port->dev_geom_sect; + geo->cylinders = port->dev_geom_cyl; + return 0; +} + +static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE }; + +static inline int carm_lookup_bucket(u32 msg_size) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(msg_sizes); i++) + if (msg_size <= msg_sizes[i]) + return i; + + return -ENOENT; +} + +static void carm_init_buckets(void __iomem *mmio) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(msg_sizes); i++) + writel(msg_sizes[i], mmio + CARM_CMS0 + (4 * i)); +} + +static inline void *carm_ref_msg(struct carm_host *host, + unsigned int msg_idx) +{ + return host->msg_base + (msg_idx * CARM_MSG_SIZE); +} + +static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host, + unsigned int msg_idx) +{ + return host->msg_dma + (msg_idx * CARM_MSG_SIZE); +} + +static int carm_send_msg(struct carm_host *host, + struct carm_request *crq) +{ + void __iomem *mmio = host->mmio; + u32 msg = (u32) carm_ref_msg_dma(host, crq->tag); + u32 cm_bucket = crq->msg_bucket; + u32 tmp; + int rc = 0; + + VPRINTK("ENTER\n"); + + tmp = 
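+/*
+ * Slot addressing above is deliberately trivial: tag N lives at byte
+ * offset N * CARM_MSG_SIZE in the message area, for both the kernel
+ * virtual view (carm_ref_msg) and the bus view (carm_ref_msg_dma).
+ * Submitting tag N is then one doorbell write of the message's DMA
+ * address with the size bucket folded into the low bits:
+ *
+ *	writel(msg_dma | (bucket << 1), mmio + CARM_IHQP);
+ */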
readl(mmio + CARM_HMUC); + if (tmp & CARM_Q_FULL) { +#if 0 + tmp = readl(mmio + CARM_INT_MASK); + tmp |= INT_Q_AVAILABLE; + writel(tmp, mmio + CARM_INT_MASK); + readl(mmio + CARM_INT_MASK); /* flush */ +#endif + DPRINTK("host msg queue full\n"); + rc = -EBUSY; + } else { + writel(msg | (cm_bucket << 1), mmio + CARM_IHQP); + readl(mmio + CARM_IHQP); /* flush */ + } + + return rc; +} + +static struct carm_request *carm_get_request(struct carm_host *host) +{ + unsigned int i; + + /* obey global hardware limit on S/G entries */ + if (host->hw_sg_used >= (CARM_MAX_HOST_SG - CARM_MAX_REQ_SG)) + return NULL; + + for (i = 0; i < max_queue; i++) + if ((host->msg_alloc & (1ULL << i)) == 0) { + struct carm_request *crq = &host->req[i]; + crq->port = NULL; + crq->n_elem = 0; + + host->msg_alloc |= (1ULL << i); + host->n_msgs++; + + assert(host->n_msgs <= CARM_MAX_REQ); + sg_init_table(crq->sg, CARM_MAX_REQ_SG); + return crq; + } + + DPRINTK("no request available, returning NULL\n"); + return NULL; +} + +static int carm_put_request(struct carm_host *host, struct carm_request *crq) +{ + assert(crq->tag < max_queue); + + if (unlikely((host->msg_alloc & (1ULL << crq->tag)) == 0)) + return -EINVAL; /* tried to clear a tag that was not active */ + + assert(host->hw_sg_used >= crq->n_elem); + + host->msg_alloc &= ~(1ULL << crq->tag); + host->hw_sg_used -= crq->n_elem; + host->n_msgs--; + + return 0; +} + +static struct carm_request *carm_get_special(struct carm_host *host) +{ + unsigned long flags; + struct carm_request *crq = NULL; + struct request *rq; + int tries = 5000; + + while (tries-- > 0) { + spin_lock_irqsave(&host->lock, flags); + crq = carm_get_request(host); + spin_unlock_irqrestore(&host->lock, flags); + + if (crq) + break; + msleep(10); + } + + if (!crq) + return NULL; + + rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL); + if (!rq) { + spin_lock_irqsave(&host->lock, flags); + carm_put_request(host, crq); + spin_unlock_irqrestore(&host->lock, flags); + return NULL; + } + + crq->rq = rq; + return crq; +} + +static int carm_array_info (struct carm_host *host, unsigned int array_idx) +{ + struct carm_msg_ioctl *ioc; + unsigned int idx; + u32 msg_data; + dma_addr_t msg_dma; + struct carm_request *crq; + int rc; + + crq = carm_get_special(host); + if (!crq) { + rc = -ENOMEM; + goto err_out; + } + + idx = crq->tag; + + ioc = carm_ref_msg(host, idx); + msg_dma = carm_ref_msg_dma(host, idx); + msg_data = (u32) (msg_dma + sizeof(struct carm_array_info)); + + crq->msg_type = CARM_MSG_ARRAY; + crq->msg_subtype = CARM_ARRAY_INFO; + rc = carm_lookup_bucket(sizeof(struct carm_msg_ioctl) + + sizeof(struct carm_array_info)); + BUG_ON(rc < 0); + crq->msg_bucket = (u32) rc; + + memset(ioc, 0, sizeof(*ioc)); + ioc->type = CARM_MSG_ARRAY; + ioc->subtype = CARM_ARRAY_INFO; + ioc->array_id = (u8) array_idx; + ioc->handle = cpu_to_le32(TAG_ENCODE(idx)); + ioc->data_addr = cpu_to_le32(msg_data); + + spin_lock_irq(&host->lock); + assert(host->state == HST_DEV_SCAN_START || + host->state == HST_DEV_SCAN); + spin_unlock_irq(&host->lock); + + DPRINTK("blk_insert_request, tag == %u\n", idx); + blk_insert_request(host->oob_q, crq->rq, 1, crq); + + return 0; + +err_out: + spin_lock_irq(&host->lock); + host->state = HST_ERROR; + spin_unlock_irq(&host->lock); + return rc; +} + +typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *); + +static int carm_send_special (struct carm_host *host, carm_sspc_t func) +{ + struct carm_request *crq; + struct carm_msg_ioctl *ioc; + void *mem; + 
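+/*
+ * Tag bookkeeping (for orientation): carm_get_request() above treats
+ * host->msg_alloc as a bitmap with one bit per tag, scanning for the
+ * first clear bit below max_queue; carm_put_request() clears it again.
+ * Both run under host->lock, so the invariant is simply:
+ *
+ *	allocate: host->msg_alloc |=  (1ULL << tag);
+ *	release:  host->msg_alloc &= ~(1ULL << tag);
+ */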
unsigned int idx, msg_size; + int rc; + + crq = carm_get_special(host); + if (!crq) + return -ENOMEM; + + idx = crq->tag; + + mem = carm_ref_msg(host, idx); + + msg_size = func(host, idx, mem); + + ioc = mem; + crq->msg_type = ioc->type; + crq->msg_subtype = ioc->subtype; + rc = carm_lookup_bucket(msg_size); + BUG_ON(rc < 0); + crq->msg_bucket = (u32) rc; + + DPRINTK("blk_insert_request, tag == %u\n", idx); + blk_insert_request(host->oob_q, crq->rq, 1, crq); + + return 0; +} + +static unsigned int carm_fill_sync_time(struct carm_host *host, + unsigned int idx, void *mem) +{ + struct timeval tv; + struct carm_msg_sync_time *st = mem; + + do_gettimeofday(&tv); + + memset(st, 0, sizeof(*st)); + st->type = CARM_MSG_MISC; + st->subtype = MISC_SET_TIME; + st->handle = cpu_to_le32(TAG_ENCODE(idx)); + st->timestamp = cpu_to_le32(tv.tv_sec); + + return sizeof(struct carm_msg_sync_time); +} + +static unsigned int carm_fill_alloc_buf(struct carm_host *host, + unsigned int idx, void *mem) +{ + struct carm_msg_allocbuf *ab = mem; + + memset(ab, 0, sizeof(*ab)); + ab->type = CARM_MSG_MISC; + ab->subtype = MISC_ALLOC_MEM; + ab->handle = cpu_to_le32(TAG_ENCODE(idx)); + ab->n_sg = 1; + ab->sg_type = SGT_32BIT; + ab->addr = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1)); + ab->len = cpu_to_le32(PDC_SHM_SIZE >> 1); + ab->evt_pool = cpu_to_le32(host->shm_dma + (16 * 1024)); + ab->n_evt = cpu_to_le32(1024); + ab->rbuf_pool = cpu_to_le32(host->shm_dma); + ab->n_rbuf = cpu_to_le32(RMSG_Q_LEN); + ab->msg_pool = cpu_to_le32(host->shm_dma + RBUF_LEN); + ab->n_msg = cpu_to_le32(CARM_Q_LEN); + ab->sg[0].start = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1)); + ab->sg[0].len = cpu_to_le32(65536); + + return sizeof(struct carm_msg_allocbuf); +} + +static unsigned int carm_fill_scan_channels(struct carm_host *host, + unsigned int idx, void *mem) +{ + struct carm_msg_ioctl *ioc = mem; + u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + + IOC_SCAN_CHAN_OFFSET); + + memset(ioc, 0, sizeof(*ioc)); + ioc->type = CARM_MSG_IOCTL; + ioc->subtype = CARM_IOC_SCAN_CHAN; + ioc->handle = cpu_to_le32(TAG_ENCODE(idx)); + ioc->data_addr = cpu_to_le32(msg_data); + + /* fill output data area with "no device" default values */ + mem += IOC_SCAN_CHAN_OFFSET; + memset(mem, IOC_SCAN_CHAN_NODEV, CARM_MAX_PORTS); + + return IOC_SCAN_CHAN_OFFSET + CARM_MAX_PORTS; +} + +static unsigned int carm_fill_get_fw_ver(struct carm_host *host, + unsigned int idx, void *mem) +{ + struct carm_msg_get_fw_ver *ioc = mem; + u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc)); + + memset(ioc, 0, sizeof(*ioc)); + ioc->type = CARM_MSG_MISC; + ioc->subtype = MISC_GET_FW_VER; + ioc->handle = cpu_to_le32(TAG_ENCODE(idx)); + ioc->data_addr = cpu_to_le32(msg_data); + + return sizeof(struct carm_msg_get_fw_ver) + + sizeof(struct carm_fw_ver); +} + +static inline void carm_end_request_queued(struct carm_host *host, + struct carm_request *crq, + int error) +{ + struct request *req = crq->rq; + int rc; + + rc = __blk_end_request(req, error, blk_rq_bytes(req)); + assert(rc == 0); + + rc = carm_put_request(host, crq); + assert(rc == 0); +} + +static inline void carm_push_q (struct carm_host *host, struct request_queue *q) +{ + unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q; + + blk_stop_queue(q); + VPRINTK("STOPPED QUEUE %p\n", q); + + host->wait_q[idx] = q; + host->wait_q_prod++; + BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */ +} + +static inline struct request_queue *carm_pop_q(struct carm_host *host) +{ + unsigned int idx; + + 
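+/*
+ * wait_q is a small ring of stopped request queues: carm_push_q()
+ * stops a queue and parks it at wait_q_prod, and carm_pop_q() hands
+ * back the oldest one so carm_round_robin() can restart it once tags
+ * or S/G entries free up.  CARM_MAX_WAIT_Q == CARM_MAX_PORTS + 1, so
+ * every port queue plus the out-of-band queue can be parked at the
+ * same time without the prod == cons overrun check ever firing.
+ */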
if (host->wait_q_prod == host->wait_q_cons) + return NULL; + + idx = host->wait_q_cons % CARM_MAX_WAIT_Q; + host->wait_q_cons++; + + return host->wait_q[idx]; +} + +static inline void carm_round_robin(struct carm_host *host) +{ + struct request_queue *q = carm_pop_q(host); + if (q) { + blk_start_queue(q); + VPRINTK("STARTED QUEUE %p\n", q); + } +} + +static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq, + int error) +{ + carm_end_request_queued(host, crq, error); + if (max_queue == 1) + carm_round_robin(host); + else if ((host->n_msgs <= CARM_MSG_LOW_WATER) && + (host->hw_sg_used <= CARM_SG_LOW_WATER)) { + carm_round_robin(host); + } +} + +static void carm_oob_rq_fn(struct request_queue *q) +{ + struct carm_host *host = q->queuedata; + struct carm_request *crq; + struct request *rq; + int rc; + + while (1) { + DPRINTK("get req\n"); + rq = elv_next_request(q); + if (!rq) + break; + + blkdev_dequeue_request(rq); + + crq = rq->special; + assert(crq != NULL); + assert(crq->rq == rq); + + crq->n_elem = 0; + + DPRINTK("send req\n"); + rc = carm_send_msg(host, crq); + if (rc) { + blk_requeue_request(q, rq); + carm_push_q(host, q); + return; /* call us again later, eventually */ + } + } +} + +static void carm_rq_fn(struct request_queue *q) +{ + struct carm_port *port = q->queuedata; + struct carm_host *host = port->host; + struct carm_msg_rw *msg; + struct carm_request *crq; + struct request *rq; + struct scatterlist *sg; + int writing = 0, pci_dir, i, n_elem, rc; + u32 tmp; + unsigned int msg_size; + +queue_one_request: + VPRINTK("get req\n"); + rq = elv_next_request(q); + if (!rq) + return; + + crq = carm_get_request(host); + if (!crq) { + carm_push_q(host, q); + return; /* call us again later, eventually */ + } + crq->rq = rq; + + blkdev_dequeue_request(rq); + + if (rq_data_dir(rq) == WRITE) { + writing = 1; + pci_dir = PCI_DMA_TODEVICE; + } else { + pci_dir = PCI_DMA_FROMDEVICE; + } + + /* get scatterlist from block layer */ + sg = &crq->sg[0]; + n_elem = blk_rq_map_sg(q, rq, sg); + if (n_elem <= 0) { + carm_end_rq(host, crq, -EIO); + return; /* request with no s/g entries? */ + } + + /* map scatterlist to PCI bus addresses */ + n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir); + if (n_elem <= 0) { + carm_end_rq(host, crq, -EIO); + return; /* request with no s/g entries? 
*/ + } + crq->n_elem = n_elem; + crq->port = port; + host->hw_sg_used += n_elem; + + /* + * build read/write message + */ + + VPRINTK("build msg\n"); + msg = (struct carm_msg_rw *) carm_ref_msg(host, crq->tag); + + if (writing) { + msg->type = CARM_MSG_WRITE; + crq->msg_type = CARM_MSG_WRITE; + } else { + msg->type = CARM_MSG_READ; + crq->msg_type = CARM_MSG_READ; + } + + msg->id = port->port_no; + msg->sg_count = n_elem; + msg->sg_type = SGT_32BIT; + msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag)); + msg->lba = cpu_to_le32(rq->sector & 0xffffffff); + tmp = (rq->sector >> 16) >> 16; + msg->lba_high = cpu_to_le16( (u16) tmp ); + msg->lba_count = cpu_to_le16(rq->nr_sectors); + + msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg); + for (i = 0; i < n_elem; i++) { + struct carm_msg_sg *carm_sg = &msg->sg[i]; + carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i])); + carm_sg->len = cpu_to_le32(sg_dma_len(&crq->sg[i])); + msg_size += sizeof(struct carm_msg_sg); + } + + rc = carm_lookup_bucket(msg_size); + BUG_ON(rc < 0); + crq->msg_bucket = (u32) rc; + + /* + * queue read/write message to hardware + */ + + VPRINTK("send msg, tag == %u\n", crq->tag); + rc = carm_send_msg(host, crq); + if (rc) { + carm_put_request(host, crq); + blk_requeue_request(q, rq); + carm_push_q(host, q); + return; /* call us again later, eventually */ + } + + goto queue_one_request; +} + +static void carm_handle_array_info(struct carm_host *host, + struct carm_request *crq, u8 *mem, + int error) +{ + struct carm_port *port; + u8 *msg_data = mem + sizeof(struct carm_array_info); + struct carm_array_info *desc = (struct carm_array_info *) msg_data; + u64 lo, hi; + int cur_port; + size_t slen; + + DPRINTK("ENTER\n"); + + carm_end_rq(host, crq, error); + + if (error) + goto out; + if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST) + goto out; + + cur_port = host->cur_scan_dev; + + /* should never occur */ + if ((cur_port < 0) || (cur_port >= CARM_MAX_PORTS)) { + printk(KERN_ERR PFX "BUG: cur_scan_dev==%d, array_id==%d\n", + cur_port, (int) desc->array_id); + goto out; + } + + port = &host->port[cur_port]; + + lo = (u64) le32_to_cpu(desc->size); + hi = (u64) le16_to_cpu(desc->size_hi); + + port->capacity = lo | (hi << 32); + port->dev_geom_head = le16_to_cpu(desc->head); + port->dev_geom_sect = le16_to_cpu(desc->sect); + port->dev_geom_cyl = le16_to_cpu(desc->cyl); + + host->dev_active |= (1 << cur_port); + + strncpy(port->name, desc->name, sizeof(port->name)); + port->name[sizeof(port->name) - 1] = 0; + slen = strlen(port->name); + while (slen && (port->name[slen - 1] == ' ')) { + port->name[slen - 1] = 0; + slen--; + } + + printk(KERN_INFO DRV_NAME "(%s): port %u device %Lu sectors\n", + pci_name(host->pdev), port->port_no, + (unsigned long long) port->capacity); + printk(KERN_INFO DRV_NAME "(%s): port %u device \"%s\"\n", + pci_name(host->pdev), port->port_no, port->name); + +out: + assert(host->state == HST_DEV_SCAN); + schedule_work(&host->fsm_task); +} + +static void carm_handle_scan_chan(struct carm_host *host, + struct carm_request *crq, u8 *mem, + int error) +{ + u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET; + unsigned int i, dev_count = 0; + int new_state = HST_DEV_SCAN_START; + + DPRINTK("ENTER\n"); + + carm_end_rq(host, crq, error); + + if (error) { + new_state = HST_ERROR; + goto out; + } + + /* TODO: scan and support non-disk devices */ + for (i = 0; i < 8; i++) + if (msg_data[i] == 0) { /* direct-access device (disk) */ + host->dev_present |= (1 << i); + dev_count++; + } + + printk(KERN_INFO DRV_NAME 
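+/*
+ * Aside on the LBA split in carm_rq_fn() above: the message carries a
+ * 32-bit lba plus a 16-bit lba_high, and the high half is computed as
+ * (sector >> 16) >> 16 instead of a single >> 32 so the expression is
+ * still well-defined when sector_t is a 32-bit type (shifting a 32-bit
+ * value by 32 is undefined behaviour in C):
+ *
+ *	msg->lba      = cpu_to_le32(sector & 0xffffffff);
+ *	msg->lba_high = cpu_to_le16((u16)((sector >> 16) >> 16));
+ */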
"(%s): found %u interesting devices\n", + pci_name(host->pdev), dev_count); + +out: + assert(host->state == HST_PORT_SCAN); + host->state = new_state; + schedule_work(&host->fsm_task); +} + +static void carm_handle_generic(struct carm_host *host, + struct carm_request *crq, int error, + int cur_state, int next_state) +{ + DPRINTK("ENTER\n"); + + carm_end_rq(host, crq, error); + + assert(host->state == cur_state); + if (error) + host->state = HST_ERROR; + else + host->state = next_state; + schedule_work(&host->fsm_task); +} + +static inline void carm_handle_rw(struct carm_host *host, + struct carm_request *crq, int error) +{ + int pci_dir; + + VPRINTK("ENTER\n"); + + if (rq_data_dir(crq->rq) == WRITE) + pci_dir = PCI_DMA_TODEVICE; + else + pci_dir = PCI_DMA_FROMDEVICE; + + pci_unmap_sg(host->pdev, &crq->sg[0], crq->n_elem, pci_dir); + + carm_end_rq(host, crq, error); +} + +static inline void carm_handle_resp(struct carm_host *host, + __le32 ret_handle_le, u32 status) +{ + u32 handle = le32_to_cpu(ret_handle_le); + unsigned int msg_idx; + struct carm_request *crq; + int error = (status == RMSG_OK) ? 0 : -EIO; + u8 *mem; + + VPRINTK("ENTER, handle == 0x%x\n", handle); + + if (unlikely(!TAG_VALID(handle))) { + printk(KERN_ERR DRV_NAME "(%s): BUG: invalid tag 0x%x\n", + pci_name(host->pdev), handle); + return; + } + + msg_idx = TAG_DECODE(handle); + VPRINTK("tag == %u\n", msg_idx); + + crq = &host->req[msg_idx]; + + /* fast path */ + if (likely(crq->msg_type == CARM_MSG_READ || + crq->msg_type == CARM_MSG_WRITE)) { + carm_handle_rw(host, crq, error); + return; + } + + mem = carm_ref_msg(host, msg_idx); + + switch (crq->msg_type) { + case CARM_MSG_IOCTL: { + switch (crq->msg_subtype) { + case CARM_IOC_SCAN_CHAN: + carm_handle_scan_chan(host, crq, mem, error); + break; + default: + /* unknown / invalid response */ + goto err_out; + } + break; + } + + case CARM_MSG_MISC: { + switch (crq->msg_subtype) { + case MISC_ALLOC_MEM: + carm_handle_generic(host, crq, error, + HST_ALLOC_BUF, HST_SYNC_TIME); + break; + case MISC_SET_TIME: + carm_handle_generic(host, crq, error, + HST_SYNC_TIME, HST_GET_FW_VER); + break; + case MISC_GET_FW_VER: { + struct carm_fw_ver *ver = (struct carm_fw_ver *) + mem + sizeof(struct carm_msg_get_fw_ver); + if (!error) { + host->fw_ver = le32_to_cpu(ver->version); + host->flags |= (ver->features & FL_FW_VER_MASK); + } + carm_handle_generic(host, crq, error, + HST_GET_FW_VER, HST_PORT_SCAN); + break; + } + default: + /* unknown / invalid response */ + goto err_out; + } + break; + } + + case CARM_MSG_ARRAY: { + switch (crq->msg_subtype) { + case CARM_ARRAY_INFO: + carm_handle_array_info(host, crq, mem, error); + break; + default: + /* unknown / invalid response */ + goto err_out; + } + break; + } + + default: + /* unknown / invalid response */ + goto err_out; + } + + return; + +err_out: + printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n", + pci_name(host->pdev), crq->msg_type, crq->msg_subtype); + carm_end_rq(host, crq, -EIO); +} + +static inline void carm_handle_responses(struct carm_host *host) +{ + void __iomem *mmio = host->mmio; + struct carm_response *resp = (struct carm_response *) host->shm; + unsigned int work = 0; + unsigned int idx = host->resp_idx % RMSG_Q_LEN; + + while (1) { + u32 status = le32_to_cpu(resp[idx].status); + + if (status == 0xffffffff) { + VPRINTK("ending response on index %u\n", idx); + writel(idx << 3, mmio + CARM_RESP_IDX); + break; + } + + /* response to a message we sent */ + else if ((status & (1 << 31)) == 0) { + 
VPRINTK("handling msg response on index %u\n", idx); + carm_handle_resp(host, resp[idx].ret_handle, status); + resp[idx].status = cpu_to_le32(0xffffffff); + } + + /* asynchronous events the hardware throws our way */ + else if ((status & 0xff000000) == (1 << 31)) { + u8 *evt_type_ptr = (u8 *) &resp[idx]; + u8 evt_type = *evt_type_ptr; + printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n", + pci_name(host->pdev), (int) evt_type); + resp[idx].status = cpu_to_le32(0xffffffff); + } + + idx = NEXT_RESP(idx); + work++; + } + + VPRINTK("EXIT, work==%u\n", work); + host->resp_idx += work; +} + +static irqreturn_t carm_interrupt(int irq, void *__host) +{ + struct carm_host *host = __host; + void __iomem *mmio; + u32 mask; + int handled = 0; + unsigned long flags; + + if (!host) { + VPRINTK("no host\n"); + return IRQ_NONE; + } + + spin_lock_irqsave(&host->lock, flags); + + mmio = host->mmio; + + /* reading should also clear interrupts */ + mask = readl(mmio + CARM_INT_STAT); + + if (mask == 0 || mask == 0xffffffff) { + VPRINTK("no work, mask == 0x%x\n", mask); + goto out; + } + + if (mask & INT_ACK_MASK) + writel(mask, mmio + CARM_INT_STAT); + + if (unlikely(host->state == HST_INVALID)) { + VPRINTK("not initialized yet, mask = 0x%x\n", mask); + goto out; + } + + if (mask & CARM_HAVE_RESP) { + handled = 1; + carm_handle_responses(host); + } + +out: + spin_unlock_irqrestore(&host->lock, flags); + VPRINTK("EXIT\n"); + return IRQ_RETVAL(handled); +} + +static void carm_fsm_task (struct work_struct *work) +{ + struct carm_host *host = + container_of(work, struct carm_host, fsm_task); + unsigned long flags; + unsigned int state; + int rc, i, next_dev; + int reschedule = 0; + int new_state = HST_INVALID; + + spin_lock_irqsave(&host->lock, flags); + state = host->state; + spin_unlock_irqrestore(&host->lock, flags); + + DPRINTK("ENTER, state == %s\n", state_name[state]); + + switch (state) { + case HST_PROBE_START: + new_state = HST_ALLOC_BUF; + reschedule = 1; + break; + + case HST_ALLOC_BUF: + rc = carm_send_special(host, carm_fill_alloc_buf); + if (rc) { + new_state = HST_ERROR; + reschedule = 1; + } + break; + + case HST_SYNC_TIME: + rc = carm_send_special(host, carm_fill_sync_time); + if (rc) { + new_state = HST_ERROR; + reschedule = 1; + } + break; + + case HST_GET_FW_VER: + rc = carm_send_special(host, carm_fill_get_fw_ver); + if (rc) { + new_state = HST_ERROR; + reschedule = 1; + } + break; + + case HST_PORT_SCAN: + rc = carm_send_special(host, carm_fill_scan_channels); + if (rc) { + new_state = HST_ERROR; + reschedule = 1; + } + break; + + case HST_DEV_SCAN_START: + host->cur_scan_dev = -1; + new_state = HST_DEV_SCAN; + reschedule = 1; + break; + + case HST_DEV_SCAN: + next_dev = -1; + for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++) + if (host->dev_present & (1 << i)) { + next_dev = i; + break; + } + + if (next_dev >= 0) { + host->cur_scan_dev = next_dev; + rc = carm_array_info(host, next_dev); + if (rc) { + new_state = HST_ERROR; + reschedule = 1; + } + } else { + new_state = HST_DEV_ACTIVATE; + reschedule = 1; + } + break; + + case HST_DEV_ACTIVATE: { + int activated = 0; + for (i = 0; i < CARM_MAX_PORTS; i++) + if (host->dev_active & (1 << i)) { + struct carm_port *port = &host->port[i]; + struct gendisk *disk = port->disk; + + set_capacity(disk, port->capacity); + add_disk(disk); + activated++; + } + + printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n", + pci_name(host->pdev), activated); + + new_state = HST_PROBE_FINISHED; + reschedule = 1; + break; + } + + case 
HST_PROBE_FINISHED: + complete(&host->probe_comp); + break; + + case HST_ERROR: + /* FIXME: TODO */ + break; + + default: + /* should never occur */ + printk(KERN_ERR PFX "BUG: unknown state %d\n", state); + assert(0); + break; + } + + if (new_state != HST_INVALID) { + spin_lock_irqsave(&host->lock, flags); + host->state = new_state; + spin_unlock_irqrestore(&host->lock, flags); + } + if (reschedule) + schedule_work(&host->fsm_task); +} + +static int carm_init_wait(void __iomem *mmio, u32 bits, unsigned int test_bit) +{ + unsigned int i; + + for (i = 0; i < 50000; i++) { + u32 tmp = readl(mmio + CARM_LMUC); + udelay(100); + + if (test_bit) { + if ((tmp & bits) == bits) + return 0; + } else { + if ((tmp & bits) == 0) + return 0; + } + + cond_resched(); + } + + printk(KERN_ERR PFX "carm_init_wait timeout, bits == 0x%x, test_bit == %s\n", + bits, test_bit ? "yes" : "no"); + return -EBUSY; +} + +static void carm_init_responses(struct carm_host *host) +{ + void __iomem *mmio = host->mmio; + unsigned int i; + struct carm_response *resp = (struct carm_response *) host->shm; + + for (i = 0; i < RMSG_Q_LEN; i++) + resp[i].status = cpu_to_le32(0xffffffff); + + writel(0, mmio + CARM_RESP_IDX); +} + +static int carm_init_host(struct carm_host *host) +{ + void __iomem *mmio = host->mmio; + u32 tmp; + u8 tmp8; + int rc; + + DPRINTK("ENTER\n"); + + writel(0, mmio + CARM_INT_MASK); + + tmp8 = readb(mmio + CARM_INITC); + if (tmp8 & 0x01) { + tmp8 &= ~0x01; + writeb(tmp8, mmio + CARM_INITC); + readb(mmio + CARM_INITC); /* flush */ + + DPRINTK("snooze...\n"); + msleep(5000); + } + + tmp = readl(mmio + CARM_HMUC); + if (tmp & CARM_CME) { + DPRINTK("CME bit present, waiting\n"); + rc = carm_init_wait(mmio, CARM_CME, 1); + if (rc) { + DPRINTK("EXIT, carm_init_wait 1 failed\n"); + return rc; + } + } + if (tmp & CARM_RME) { + DPRINTK("RME bit present, waiting\n"); + rc = carm_init_wait(mmio, CARM_RME, 1); + if (rc) { + DPRINTK("EXIT, carm_init_wait 2 failed\n"); + return rc; + } + } + + tmp &= ~(CARM_RME | CARM_CME); + writel(tmp, mmio + CARM_HMUC); + readl(mmio + CARM_HMUC); /* flush */ + + rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0); + if (rc) { + DPRINTK("EXIT, carm_init_wait 3 failed\n"); + return rc; + } + + carm_init_buckets(mmio); + + writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO); + writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI); + writel(RBUF_LEN, mmio + RBUF_BYTE_SZ); + + tmp = readl(mmio + CARM_HMUC); + tmp |= (CARM_RME | CARM_CME | CARM_WZBC); + writel(tmp, mmio + CARM_HMUC); + readl(mmio + CARM_HMUC); /* flush */ + + rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 1); + if (rc) { + DPRINTK("EXIT, carm_init_wait 4 failed\n"); + return rc; + } + + writel(0, mmio + CARM_HMPHA); + writel(INT_DEF_MASK, mmio + CARM_INT_MASK); + + carm_init_responses(host); + + /* start initialization, probing state machine */ + spin_lock_irq(&host->lock); + assert(host->state == HST_INVALID); + host->state = HST_PROBE_START; + spin_unlock_irq(&host->lock); + schedule_work(&host->fsm_task); + + DPRINTK("EXIT\n"); + return 0; +} + +static int carm_init_disks(struct carm_host *host) +{ + unsigned int i; + int rc = 0; + + for (i = 0; i < CARM_MAX_PORTS; i++) { + struct gendisk *disk; + struct request_queue *q; + struct carm_port *port; + + port = &host->port[i]; + port->host = host; + port->port_no = i; + + disk = alloc_disk(CARM_MINORS_PER_MAJOR); + if (!disk) { + rc = -ENOMEM; + break; + } + + port->disk = disk; + sprintf(disk->disk_name, DRV_NAME "/%u", + (unsigned int) (host->id * CARM_MAX_PORTS) + 
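+/*
+ * Summary of the carm_init_host() handshake above: wait for any CME/RME
+ * bits the firmware left set in CARM_HMUC, clear them and wait for the
+ * clear to stick, program the response ring address, size and message
+ * buckets, then set CME | RME | WZBC and wait for the set -- each wait
+ * being carm_init_wait()'s poll of CARM_LMUC at 100us intervals, up to
+ * 50000 iterations.
+ */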
i); + disk->major = host->major; + disk->first_minor = i * CARM_MINORS_PER_MAJOR; + disk->fops = &carm_bd_ops; + disk->private_data = port; + + q = blk_init_queue(carm_rq_fn, &host->lock); + if (!q) { + rc = -ENOMEM; + break; + } + disk->queue = q; + blk_queue_max_hw_segments(q, CARM_MAX_REQ_SG); + blk_queue_max_phys_segments(q, CARM_MAX_REQ_SG); + blk_queue_segment_boundary(q, CARM_SG_BOUNDARY); + + q->queuedata = port; + } + + return rc; +} + +static void carm_free_disks(struct carm_host *host) +{ + unsigned int i; + + for (i = 0; i < CARM_MAX_PORTS; i++) { + struct gendisk *disk = host->port[i].disk; + if (disk) { + struct request_queue *q = disk->queue; + + if (disk->flags & GENHD_FL_UP) + del_gendisk(disk); + if (q) + blk_cleanup_queue(q); + put_disk(disk); + } + } +} + +static int carm_init_shm(struct carm_host *host) +{ + host->shm = pci_alloc_consistent(host->pdev, CARM_SHM_SIZE, + &host->shm_dma); + if (!host->shm) + return -ENOMEM; + + host->msg_base = host->shm + RBUF_LEN; + host->msg_dma = host->shm_dma + RBUF_LEN; + + memset(host->shm, 0xff, RBUF_LEN); + memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN); + + return 0; +} + +static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static unsigned int printed_version; + struct carm_host *host; + unsigned int pci_dac; + int rc; + struct request_queue *q; + unsigned int i; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + +#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */ + rc = pci_set_dma_mask(pdev, DMA_64BIT_MASK); + if (!rc) { + rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); + if (rc) { + printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n", + pci_name(pdev)); + goto err_out_regions; + } + pci_dac = 1; + } else { +#endif + rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (rc) { + printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n", + pci_name(pdev)); + goto err_out_regions; + } + pci_dac = 0; +#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */ + } +#endif + + host = kzalloc(sizeof(*host), GFP_KERNEL); + if (!host) { + printk(KERN_ERR DRV_NAME "(%s): memory alloc failure\n", + pci_name(pdev)); + rc = -ENOMEM; + goto err_out_regions; + } + + host->pdev = pdev; + host->flags = pci_dac ? 
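+/*
+ * Shared-memory layout established by carm_init_shm() above (offsets
+ * derived from the constants, shown for illustration): the response
+ * ring takes the first RBUF_LEN = 48 * 8 = 384 bytes, the command
+ * message slots follow (CARM_MAX_REQ * CARM_MSG_SIZE = 64 * 288
+ * bytes), and carm_fill_alloc_buf() later donates the upper half of
+ * the 512 KiB region (CARM_SHM_SIZE = 4096 << 7) to the firmware via
+ * MISC_ALLOC_MEM.
+ */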
FL_DAC : 0; + spin_lock_init(&host->lock); + INIT_WORK(&host->fsm_task, carm_fsm_task); + init_completion(&host->probe_comp); + + for (i = 0; i < ARRAY_SIZE(host->req); i++) + host->req[i].tag = i; + + host->mmio = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!host->mmio) { + printk(KERN_ERR DRV_NAME "(%s): MMIO alloc failure\n", + pci_name(pdev)); + rc = -ENOMEM; + goto err_out_kfree; + } + + rc = carm_init_shm(host); + if (rc) { + printk(KERN_ERR DRV_NAME "(%s): DMA SHM alloc failure\n", + pci_name(pdev)); + goto err_out_iounmap; + } + + q = blk_init_queue(carm_oob_rq_fn, &host->lock); + if (!q) { + printk(KERN_ERR DRV_NAME "(%s): OOB queue alloc failure\n", + pci_name(pdev)); + rc = -ENOMEM; + goto err_out_pci_free; + } + host->oob_q = q; + q->queuedata = host; + + /* + * Figure out which major to use: 160, 161, or dynamic + */ + if (!test_and_set_bit(0, &carm_major_alloc)) + host->major = 160; + else if (!test_and_set_bit(1, &carm_major_alloc)) + host->major = 161; + else + host->flags |= FL_DYN_MAJOR; + + host->id = carm_host_id; + sprintf(host->name, DRV_NAME "%d", carm_host_id); + + rc = register_blkdev(host->major, host->name); + if (rc < 0) + goto err_out_free_majors; + if (host->flags & FL_DYN_MAJOR) + host->major = rc; + + rc = carm_init_disks(host); + if (rc) + goto err_out_blkdev_disks; + + pci_set_master(pdev); + + rc = request_irq(pdev->irq, carm_interrupt, IRQF_SHARED, DRV_NAME, host); + if (rc) { + printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n", + pci_name(pdev)); + goto err_out_blkdev_disks; + } + + rc = carm_init_host(host); + if (rc) + goto err_out_free_irq; + + DPRINTK("waiting for probe_comp\n"); + wait_for_completion(&host->probe_comp); + + printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n", + host->name, pci_name(pdev), (int) CARM_MAX_PORTS, + (unsigned long long)pci_resource_start(pdev, 0), + pdev->irq, host->major); + + carm_host_id++; + pci_set_drvdata(pdev, host); + return 0; + +err_out_free_irq: + free_irq(pdev->irq, host); +err_out_blkdev_disks: + carm_free_disks(host); + unregister_blkdev(host->major, host->name); +err_out_free_majors: + if (host->major == 160) + clear_bit(0, &carm_major_alloc); + else if (host->major == 161) + clear_bit(1, &carm_major_alloc); + blk_cleanup_queue(host->oob_q); +err_out_pci_free: + pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma); +err_out_iounmap: + iounmap(host->mmio); +err_out_kfree: + kfree(host); +err_out_regions: + pci_release_regions(pdev); +err_out: + pci_disable_device(pdev); + return rc; +} + +static void carm_remove_one (struct pci_dev *pdev) +{ + struct carm_host *host = pci_get_drvdata(pdev); + + if (!host) { + printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n", + pci_name(pdev)); + return; + } + + free_irq(pdev->irq, host); + carm_free_disks(host); + unregister_blkdev(host->major, host->name); + if (host->major == 160) + clear_bit(0, &carm_major_alloc); + else if (host->major == 161) + clear_bit(1, &carm_major_alloc); + blk_cleanup_queue(host->oob_q); + pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma); + iounmap(host->mmio); + kfree(host); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static int __init carm_init(void) +{ + return pci_register_driver(&carm_driver); +} + +static void __exit carm_exit(void) +{ + pci_unregister_driver(&carm_driver); +} + +module_init(carm_init); +module_exit(carm_exit); + + diff --git a/drivers/block/ub.c b/drivers/block/ub.c new file mode 100644 
index 0000000..048d71d --- /dev/null +++ b/drivers/block/ub.c @@ -0,0 +1,2501 @@ +/* + * The low performance USB storage driver (ub). + * + * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net) + * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com) + * + * This work is a part of Linux kernel, is derived from it, + * and is not licensed separately. See file COPYING for details. + * + * TODO (sorted by decreasing priority) + * -- Return sense now that rq allows it (we always auto-sense anyway). + * -- set readonly flag for CDs, set removable flag for CF readers + * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch) + * -- verify the 13 conditions and do bulk resets + * -- highmem + * -- move top_sense and work_bcs into separate allocations (if they survive) + * for cache purists and esoteric architectures. + * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ? + * -- prune comments, they are too volumnous + * -- Resove XXX's + * -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring. + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/usb.h> +#include <linux/usb_usual.h> +#include <linux/blkdev.h> +#include <linux/timer.h> +#include <linux/scatterlist.h> +#include <scsi/scsi.h> + +#define DRV_NAME "ub" + +#define UB_MAJOR 180 + +/* + * The command state machine is the key model for understanding of this driver. + * + * The general rule is that all transitions are done towards the bottom + * of the diagram, thus preventing any loops. + * + * An exception to that is how the STAT state is handled. A counter allows it + * to be re-entered along the path marked with [C]. + * + * +--------+ + * ! INIT ! + * +--------+ + * ! + * ub_scsi_cmd_start fails ->--------------------------------------\ + * ! ! + * V ! + * +--------+ ! + * ! CMD ! ! + * +--------+ ! + * ! +--------+ ! + * was -EPIPE -->-------------------------------->! CLEAR ! ! + * ! +--------+ ! + * ! ! ! + * was error -->------------------------------------- ! --------->\ + * ! ! ! + * /--<-- cmd->dir == NONE ? ! ! + * ! ! ! ! + * ! V ! ! + * ! +--------+ ! ! + * ! ! DATA ! ! ! + * ! +--------+ ! ! + * ! ! +---------+ ! ! + * ! was -EPIPE -->--------------->! CLR2STS ! ! ! + * ! ! +---------+ ! ! + * ! ! ! ! ! + * ! ! was error -->---- ! --------->\ + * ! was error -->--------------------- ! ------------- ! --------->\ + * ! ! ! ! ! + * ! V ! ! ! + * \--->+--------+ ! ! ! + * ! STAT !<--------------------------/ ! ! + * /--->+--------+ ! ! + * ! ! ! ! + * [C] was -EPIPE -->-----------\ ! ! + * ! ! ! ! ! + * +<---- len == 0 ! ! ! + * ! ! ! ! ! + * ! was error -->--------------------------------------!---------->\ + * ! ! ! ! ! + * +<---- bad CSW ! ! ! + * +<---- bad tag ! ! ! + * ! ! V ! ! + * ! ! +--------+ ! ! + * ! ! ! CLRRS ! ! ! + * ! ! +--------+ ! ! + * ! ! ! ! ! + * \------- ! --------------------[C]--------\ ! ! + * ! ! ! ! + * cmd->error---\ +--------+ ! ! + * ! +--------------->! SENSE !<----------/ ! + * STAT_FAIL----/ +--------+ ! + * ! ! V + * ! V +--------+ + * \--------------------------------\--------------------->! DONE ! + * +--------+ + */ + +/* + * This many LUNs per USB device. + * Every one of them takes a host, see UB_MAX_HOSTS. 
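+ * (So a single 9-LUN device, e.g. a multi-slot card reader, eats 9 of
+ * the 26 host slots by itself.)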
+ */
+#define UB_MAX_LUNS 9
+
+/*
+ */
+
+#define UB_PARTS_PER_LUN 8
+
+#define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */
+
+#define UB_SENSE_SIZE 18
+
+/*
+ */
+
+/* command block wrapper */
+struct bulk_cb_wrap {
+ __le32 Signature; /* contains 'USBC' */
+ u32 Tag; /* unique per command id */
+ __le32 DataTransferLength; /* size of data */
+ u8 Flags; /* direction in bit 0 */
+ u8 Lun; /* LUN */
+ u8 Length; /* length of the CDB */
+ u8 CDB[UB_MAX_CDB_SIZE]; /* max command */
+};
+
+#define US_BULK_CB_WRAP_LEN 31
+#define US_BULK_CB_SIGN 0x43425355 /* spells out 'USBC' */
+#define US_BULK_FLAG_IN 1
+#define US_BULK_FLAG_OUT 0
+
+/* command status wrapper */
+struct bulk_cs_wrap {
+ __le32 Signature; /* should = 'USBS' */
+ u32 Tag; /* same as original command */
+ __le32 Residue; /* amount not transferred */
+ u8 Status; /* see below */
+};
+
+#define US_BULK_CS_WRAP_LEN 13
+#define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */
+#define US_BULK_STAT_OK 0
+#define US_BULK_STAT_FAIL 1
+#define US_BULK_STAT_PHASE 2
+
+/* bulk-only class specific requests */
+#define US_BULK_RESET_REQUEST 0xff
+#define US_BULK_GET_MAX_LUN 0xfe
+
+/*
+ */
+struct ub_dev;
+
+#define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */
+#define UB_MAX_SECTORS 64
+
+/*
+ * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
+ * even if a webcam hogs the bus, but some devices need time to spin up.
+ */
+#define UB_URB_TIMEOUT (HZ*2)
+#define UB_DATA_TIMEOUT (HZ*5) /* ZIP does spin-ups in the data phase */
+#define UB_STAT_TIMEOUT (HZ*5) /* Same spinups and eject for a dataless cmd. */
+#define UB_CTRL_TIMEOUT (HZ/2) /* 500ms ought to be enough to clear a stall */
+
+/*
+ * An instance of a SCSI command in transit.
+ */
+#define UB_DIR_NONE 0
+#define UB_DIR_READ 1
+#define UB_DIR_ILLEGAL2 2
+#define UB_DIR_WRITE 3
+
+#define UB_DIR_CHAR(c) (((c)==UB_DIR_WRITE)? 'w': \
+ (((c)==UB_DIR_READ)? 'r': 'n'))
+
+enum ub_scsi_cmd_state {
+ UB_CMDST_INIT, /* Initial state */
+ UB_CMDST_CMD, /* Command submitted */
+ UB_CMDST_DATA, /* Data phase */
+ UB_CMDST_CLR2STS, /* Clearing before requesting status */
+ UB_CMDST_STAT, /* Status phase */
+ UB_CMDST_CLEAR, /* Clearing a stall (halt, actually) */
+ UB_CMDST_CLRRS, /* Clearing before retrying status */
+ UB_CMDST_SENSE, /* Sending Request Sense */
+ UB_CMDST_DONE /* Final state */
+};
+
+struct ub_scsi_cmd {
+ unsigned char cdb[UB_MAX_CDB_SIZE];
+ unsigned char cdb_len;
+
+ unsigned char dir; /* 0 - none, 1 - read, 3 - write. */
+ enum ub_scsi_cmd_state state;
+ unsigned int tag;
+ struct ub_scsi_cmd *next;
+
+ int error; /* Return code - valid upon done */
+ unsigned int act_len; /* Return size */
+ unsigned char key, asc, ascq; /* May be valid if error==-EIO */
+
+ int stat_count; /* Retries getting status.
*/ + unsigned int timeo; /* jiffies until rq->timeout changes */ + + unsigned int len; /* Requested length */ + unsigned int current_sg; + unsigned int nsg; /* sgv[nsg] */ + struct scatterlist sgv[UB_MAX_REQ_SG]; + + struct ub_lun *lun; + void (*done)(struct ub_dev *, struct ub_scsi_cmd *); + void *back; +}; + +struct ub_request { + struct request *rq; + unsigned int current_try; + unsigned int nsg; /* sgv[nsg] */ + struct scatterlist sgv[UB_MAX_REQ_SG]; +}; + +/* + */ +struct ub_capacity { + unsigned long nsec; /* Linux size - 512 byte sectors */ + unsigned int bsize; /* Linux hardsect_size */ + unsigned int bshift; /* Shift between 512 and hard sects */ +}; + +/* + * This is a direct take-off from linux/include/completion.h + * The difference is that I do not wait on this thing, just poll. + * When I want to wait (ub_probe), I just use the stock completion. + * + * Note that INIT_COMPLETION takes no lock. It is correct. But why + * in the bloody hell that thing takes struct instead of pointer to struct + * is quite beyond me. I just copied it from the stock completion. + */ +struct ub_completion { + unsigned int done; + spinlock_t lock; +}; + +static inline void ub_init_completion(struct ub_completion *x) +{ + x->done = 0; + spin_lock_init(&x->lock); +} + +#define UB_INIT_COMPLETION(x) ((x).done = 0) + +static void ub_complete(struct ub_completion *x) +{ + unsigned long flags; + + spin_lock_irqsave(&x->lock, flags); + x->done++; + spin_unlock_irqrestore(&x->lock, flags); +} + +static int ub_is_completed(struct ub_completion *x) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&x->lock, flags); + ret = x->done; + spin_unlock_irqrestore(&x->lock, flags); + return ret; +} + +/* + */ +struct ub_scsi_cmd_queue { + int qlen, qmax; + struct ub_scsi_cmd *head, *tail; +}; + +/* + * The block device instance (one per LUN). + */ +struct ub_lun { + struct ub_dev *udev; + struct list_head link; + struct gendisk *disk; + int id; /* Host index */ + int num; /* LUN number */ + char name[16]; + + int changed; /* Media was changed */ + int removable; + int readonly; + + struct ub_request urq; + + /* Use Ingo's mempool if or when we have more than one command. */ + /* + * Currently we never need more than one command for the whole device. + * However, giving every LUN a command is a cheap and automatic way + * to enforce fairness between them. + */ + int cmda[1]; + struct ub_scsi_cmd cmdv[1]; + + struct ub_capacity capacity; +}; + +/* + * The USB device instance. + */ +struct ub_dev { + spinlock_t *lock; + atomic_t poison; /* The USB device is disconnected */ + int openc; /* protected by ub_lock! 
*/ + /* kref is too implicit for our taste */ + int reset; /* Reset is running */ + int bad_resid; + unsigned int tagcnt; + char name[12]; + struct usb_device *dev; + struct usb_interface *intf; + + struct list_head luns; + + unsigned int send_bulk_pipe; /* cached pipe values */ + unsigned int recv_bulk_pipe; + unsigned int send_ctrl_pipe; + unsigned int recv_ctrl_pipe; + + struct tasklet_struct tasklet; + + struct ub_scsi_cmd_queue cmd_queue; + struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */ + unsigned char top_sense[UB_SENSE_SIZE]; + + struct ub_completion work_done; + struct urb work_urb; + struct timer_list work_timer; + int last_pipe; /* What might need clearing */ + __le32 signature; /* Learned signature */ + struct bulk_cb_wrap work_bcb; + struct bulk_cs_wrap work_bcs; + struct usb_ctrlrequest work_cr; + + struct work_struct reset_work; + wait_queue_head_t reset_wait; +}; + +/* + */ +static void ub_cleanup(struct ub_dev *sc); +static int ub_request_fn_1(struct ub_lun *lun, struct request *rq); +static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, + struct ub_scsi_cmd *cmd, struct ub_request *urq); +static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, + struct ub_scsi_cmd *cmd, struct ub_request *urq); +static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); +static void ub_end_rq(struct request *rq, unsigned int status, + unsigned int cmd_len); +static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, + struct ub_request *urq, struct ub_scsi_cmd *cmd); +static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd); +static void ub_urb_complete(struct urb *urb); +static void ub_scsi_action(unsigned long _dev); +static void ub_scsi_dispatch(struct ub_dev *sc); +static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd); +static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd); +static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc); +static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); +static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); +static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd); +static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd); +static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, + int stalled_pipe); +static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); +static void ub_reset_enter(struct ub_dev *sc, int try); +static void ub_reset_task(struct work_struct *work); +static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); +static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, + struct ub_capacity *ret); +static int ub_sync_reset(struct ub_dev *sc); +static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe); +static int ub_probe_lun(struct ub_dev *sc, int lnum); + +/* + */ +#ifdef CONFIG_USB_LIBUSUAL + +#define ub_usb_ids storage_usb_ids +#else + +static struct usb_device_id ub_usb_ids[] = { + { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) }, + { } +}; + +MODULE_DEVICE_TABLE(usb, ub_usb_ids); +#endif /* CONFIG_USB_LIBUSUAL */ + +/* + * Find me a way to identify "next free minor" for add_disk(), + * and the array disappears the next day. However, the number of + * hosts has something to do with the naming and /proc/partitions. + * This has to be thought out in detail before changing. + * If UB_MAX_HOST was 1000, we'd use a bitmap. Or a better data structure. 
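+ *
+ * For the record, the bitmap variant would be a few lines (sketch only,
+ * names hypothetical, locking as in ub_id_get below):
+ *
+ *     static DECLARE_BITMAP(ub_hostmap, UB_MAX_HOSTS);
+ *
+ *     id = find_first_zero_bit(ub_hostmap, UB_MAX_HOSTS);
+ *     if (id >= UB_MAX_HOSTS)
+ *             return -1;
+ *     set_bit(id, ub_hostmap);
+ *
+ * With 26 hosts the linear scan over a char array is just as good.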
+ */
+#define UB_MAX_HOSTS 26
+static char ub_hostv[UB_MAX_HOSTS];
+
+#define UB_QLOCK_NUM 5
+static spinlock_t ub_qlockv[UB_QLOCK_NUM];
+static int ub_qlock_next = 0;
+
+static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */
+
+/*
+ * The id allocator.
+ *
+ * This also stores the host for indexing by minor, which is somewhat dirty.
+ */
+static int ub_id_get(void)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ub_lock, flags);
+ for (i = 0; i < UB_MAX_HOSTS; i++) {
+ if (ub_hostv[i] == 0) {
+ ub_hostv[i] = 1;
+ spin_unlock_irqrestore(&ub_lock, flags);
+ return i;
+ }
+ }
+ spin_unlock_irqrestore(&ub_lock, flags);
+ return -1;
+}
+
+static void ub_id_put(int id)
+{
+ unsigned long flags;
+
+ if (id < 0 || id >= UB_MAX_HOSTS) {
+ printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
+ return;
+ }
+
+ spin_lock_irqsave(&ub_lock, flags);
+ if (ub_hostv[id] == 0) {
+ spin_unlock_irqrestore(&ub_lock, flags);
+ printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
+ return;
+ }
+ ub_hostv[id] = 0;
+ spin_unlock_irqrestore(&ub_lock, flags);
+}
+
+/*
+ * This is necessitated by the fact that blk_cleanup_queue does not
+ * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
+ * Since our blk_init_queue() passes a spinlock common with ub_dev,
+ * we have lifetime issues when ub_cleanup frees ub_dev.
+ */
+static spinlock_t *ub_next_lock(void)
+{
+ unsigned long flags;
+ spinlock_t *ret;
+
+ spin_lock_irqsave(&ub_lock, flags);
+ ret = &ub_qlockv[ub_qlock_next];
+ ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
+ spin_unlock_irqrestore(&ub_lock, flags);
+ return ret;
+}
+
+/*
+ * Downcount for deallocation. This rides on two assumptions:
+ * - once something is poisoned, its refcount cannot grow
+ * - opens cannot happen at this time (del_gendisk was done)
+ * If the above is true, we can drop the lock, which we need for
+ * blk_cleanup_queue(): the silly thing may attempt to sleep.
+ * [Actually, it never needs to sleep for us, but it calls might_sleep()]
+ */
+static void ub_put(struct ub_dev *sc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ub_lock, flags);
+ --sc->openc;
+ if (sc->openc == 0 && atomic_read(&sc->poison)) {
+ spin_unlock_irqrestore(&ub_lock, flags);
+ ub_cleanup(sc);
+ } else {
+ spin_unlock_irqrestore(&ub_lock, flags);
+ }
+}
+
+/*
+ * Final cleanup and deallocation.
+ */
+static void ub_cleanup(struct ub_dev *sc)
+{
+ struct list_head *p;
+ struct ub_lun *lun;
+ struct request_queue *q;
+
+ while (!list_empty(&sc->luns)) {
+ p = sc->luns.next;
+ lun = list_entry(p, struct ub_lun, link);
+ list_del(p);
+
+ /* I don't think queue can be NULL. But... Stolen from sx8.c */
+ if ((q = lun->disk->queue) != NULL)
+ blk_cleanup_queue(q);
+ /*
+ * If we zero disk->private_data BEFORE put_disk, we have
+ * to check for NULL all over the place in open, release,
+ * check_media and revalidate, because the block level
+ * semaphore is well inside the put_disk.
+ * But we cannot zero after the call, because *disk is gone.
+ * The sd.c is blatantly racy in this area.
+ */
+ /* disk->private_data = NULL; */
+ put_disk(lun->disk);
+ lun->disk = NULL;
+
+ ub_id_put(lun->id);
+ kfree(lun);
+ }
+
+ usb_set_intfdata(sc->intf, NULL);
+ usb_put_intf(sc->intf);
+ usb_put_dev(sc->dev);
+ kfree(sc);
+}
+
+/*
+ * The "command allocator".
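+ *
+ * Usage is strictly paired under sc->lock (a sketch of the pattern
+ * used by ub_request_fn_1 below):
+ *
+ *     if ((cmd = ub_get_cmd(lun)) == NULL)
+ *             return -1;      /* the one per-LUN command is busy */
+ *     ...
+ *     ub_put_cmd(lun, cmd);
+ *
+ * With cmdv[] sized at one, "allocating" is just marking cmda[0] busy.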
+ */ +static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun) +{ + struct ub_scsi_cmd *ret; + + if (lun->cmda[0]) + return NULL; + ret = &lun->cmdv[0]; + lun->cmda[0] = 1; + return ret; +} + +static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd) +{ + if (cmd != &lun->cmdv[0]) { + printk(KERN_WARNING "%s: releasing a foreign cmd %p\n", + lun->name, cmd); + return; + } + if (!lun->cmda[0]) { + printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name); + return; + } + lun->cmda[0] = 0; +} + +/* + * The command queue. + */ +static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +{ + struct ub_scsi_cmd_queue *t = &sc->cmd_queue; + + if (t->qlen++ == 0) { + t->head = cmd; + t->tail = cmd; + } else { + t->tail->next = cmd; + t->tail = cmd; + } + + if (t->qlen > t->qmax) + t->qmax = t->qlen; +} + +static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +{ + struct ub_scsi_cmd_queue *t = &sc->cmd_queue; + + if (t->qlen++ == 0) { + t->head = cmd; + t->tail = cmd; + } else { + cmd->next = t->head; + t->head = cmd; + } + + if (t->qlen > t->qmax) + t->qmax = t->qlen; +} + +static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc) +{ + struct ub_scsi_cmd_queue *t = &sc->cmd_queue; + struct ub_scsi_cmd *cmd; + + if (t->qlen == 0) + return NULL; + if (--t->qlen == 0) + t->tail = NULL; + cmd = t->head; + t->head = cmd->next; + cmd->next = NULL; + return cmd; +} + +#define ub_cmdq_peek(sc) ((sc)->cmd_queue.head) + +/* + * The request function is our main entry point + */ + +static void ub_request_fn(struct request_queue *q) +{ + struct ub_lun *lun = q->queuedata; + struct request *rq; + + while ((rq = elv_next_request(q)) != NULL) { + if (ub_request_fn_1(lun, rq) != 0) { + blk_stop_queue(q); + break; + } + } +} + +static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) +{ + struct ub_dev *sc = lun->udev; + struct ub_scsi_cmd *cmd; + struct ub_request *urq; + int n_elem; + + if (atomic_read(&sc->poison)) { + blkdev_dequeue_request(rq); + ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq)); + return 0; + } + + if (lun->changed && !blk_pc_request(rq)) { + blkdev_dequeue_request(rq); + ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq)); + return 0; + } + + if (lun->urq.rq != NULL) + return -1; + if ((cmd = ub_get_cmd(lun)) == NULL) + return -1; + memset(cmd, 0, sizeof(struct ub_scsi_cmd)); + + blkdev_dequeue_request(rq); + + urq = &lun->urq; + memset(urq, 0, sizeof(struct ub_request)); + urq->rq = rq; + + /* + * get scatterlist from block layer + */ + sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG); + n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]); + if (n_elem < 0) { + /* Impossible, because blk_rq_map_sg should not hit ENOMEM. 
*/ + printk(KERN_INFO "%s: failed request map (%d)\n", + lun->name, n_elem); + goto drop; + } + if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */ + printk(KERN_WARNING "%s: request with %d segments\n", + lun->name, n_elem); + goto drop; + } + urq->nsg = n_elem; + + if (blk_pc_request(rq)) { + ub_cmd_build_packet(sc, lun, cmd, urq); + } else { + ub_cmd_build_block(sc, lun, cmd, urq); + } + cmd->state = UB_CMDST_INIT; + cmd->lun = lun; + cmd->done = ub_rw_cmd_done; + cmd->back = urq; + + cmd->tag = sc->tagcnt++; + if (ub_submit_scsi(sc, cmd) != 0) + goto drop; + + return 0; + +drop: + ub_put_cmd(lun, cmd); + ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq)); + return 0; +} + +static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, + struct ub_scsi_cmd *cmd, struct ub_request *urq) +{ + struct request *rq = urq->rq; + unsigned int block, nblks; + + if (rq_data_dir(rq) == WRITE) + cmd->dir = UB_DIR_WRITE; + else + cmd->dir = UB_DIR_READ; + + cmd->nsg = urq->nsg; + memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg); + + /* + * build the command + * + * The call to blk_queue_hardsect_size() guarantees that request + * is aligned, but it is given in terms of 512 byte units, always. + */ + block = rq->sector >> lun->capacity.bshift; + nblks = rq->nr_sectors >> lun->capacity.bshift; + + cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10; + /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ + cmd->cdb[2] = block >> 24; + cmd->cdb[3] = block >> 16; + cmd->cdb[4] = block >> 8; + cmd->cdb[5] = block; + cmd->cdb[7] = nblks >> 8; + cmd->cdb[8] = nblks; + cmd->cdb_len = 10; + + cmd->len = rq->nr_sectors * 512; +} + +static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, + struct ub_scsi_cmd *cmd, struct ub_request *urq) +{ + struct request *rq = urq->rq; + + if (rq->data_len == 0) { + cmd->dir = UB_DIR_NONE; + } else { + if (rq_data_dir(rq) == WRITE) + cmd->dir = UB_DIR_WRITE; + else + cmd->dir = UB_DIR_READ; + } + + cmd->nsg = urq->nsg; + memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg); + + memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); + cmd->cdb_len = rq->cmd_len; + + cmd->len = rq->data_len; + + /* + * To reapply this to every URB is not as incorrect as it looks. + * In return, we avoid any complicated tracking calculations. 
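+ * (Every URB of this command simply gets the full rq->timeout budget,
+ * rather than a computed remainder.)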
+ */ + cmd->timeo = rq->timeout; +} + +static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +{ + struct ub_lun *lun = cmd->lun; + struct ub_request *urq = cmd->back; + struct request *rq; + unsigned int scsi_status; + unsigned int cmd_len; + + rq = urq->rq; + + if (cmd->error == 0) { + if (blk_pc_request(rq)) { + if (cmd->act_len >= rq->data_len) + rq->data_len = 0; + else + rq->data_len -= cmd->act_len; + scsi_status = 0; + } else { + if (cmd->act_len != cmd->len) { + scsi_status = SAM_STAT_CHECK_CONDITION; + } else { + scsi_status = 0; + } + } + } else { + if (blk_pc_request(rq)) { + /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ + memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); + rq->sense_len = UB_SENSE_SIZE; + if (sc->top_sense[0] != 0) + scsi_status = SAM_STAT_CHECK_CONDITION; + else + scsi_status = DID_ERROR << 16; + } else { + if (cmd->error == -EIO && + (cmd->key == 0 || + cmd->key == MEDIUM_ERROR || + cmd->key == UNIT_ATTENTION)) { + if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0) + return; + } + scsi_status = SAM_STAT_CHECK_CONDITION; + } + } + + urq->rq = NULL; + + cmd_len = cmd->len; + ub_put_cmd(lun, cmd); + ub_end_rq(rq, scsi_status, cmd_len); + blk_start_queue(lun->disk->queue); +} + +static void ub_end_rq(struct request *rq, unsigned int scsi_status, + unsigned int cmd_len) +{ + int error; + long rqlen; + + if (scsi_status == 0) { + error = 0; + } else { + error = -EIO; + rq->errors = scsi_status; + } + rqlen = blk_rq_bytes(rq); /* Oddly enough, this is the residue. */ + if (__blk_end_request(rq, error, cmd_len)) { + printk(KERN_WARNING DRV_NAME + ": __blk_end_request blew, %s-cmd total %u rqlen %ld\n", + blk_pc_request(rq)? "pc": "fs", cmd_len, rqlen); + } +} + +static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, + struct ub_request *urq, struct ub_scsi_cmd *cmd) +{ + + if (atomic_read(&sc->poison)) + return -ENXIO; + + ub_reset_enter(sc, urq->current_try); + + if (urq->current_try >= 3) + return -EIO; + urq->current_try++; + + /* Remove this if anyone complains of flooding. */ + printk(KERN_DEBUG "%s: dir %c len/act %d/%d " + "[sense %x %02x %02x] retry %d\n", + sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len, + cmd->key, cmd->asc, cmd->ascq, urq->current_try); + + memset(cmd, 0, sizeof(struct ub_scsi_cmd)); + ub_cmd_build_block(sc, lun, cmd, urq); + + cmd->state = UB_CMDST_INIT; + cmd->lun = lun; + cmd->done = ub_rw_cmd_done; + cmd->back = urq; + + cmd->tag = sc->tagcnt++; + +#if 0 /* Wasteful */ + return ub_submit_scsi(sc, cmd); +#else + ub_cmdq_add(sc, cmd); + return 0; +#endif +} + +/* + * Submit a regular SCSI operation (not an auto-sense). + * + * The Iron Law of Good Submit Routine is: + * Zero return - callback is done, Nonzero return - callback is not done. + * No exceptions. + * + * Host is assumed locked. + */ +static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +{ + + if (cmd->state != UB_CMDST_INIT || + (cmd->dir != UB_DIR_NONE && cmd->len == 0)) { + return -EINVAL; + } + + ub_cmdq_add(sc, cmd); + /* + * We can call ub_scsi_dispatch(sc) right away here, but it's a little + * safer to jump to a tasklet, in case upper layers do something silly. + */ + tasklet_schedule(&sc->tasklet); + return 0; +} + +/* + * Submit the first URB for the queued command. + * This function does not deal with queueing in any way. 
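+ *
+ * For reference, the 31-byte CBW filled in below is laid out like this
+ * (byte offsets per the Bulk-Only Transport spec):
+ *
+ *      0  dCBWSignature           'USBC' (US_BULK_CB_SIGN)
+ *      4  dCBWTag                 echoed back in the CSW
+ *      8  dCBWDataTransferLength  cmd->len
+ *     12  bmCBWFlags              bit 7 set for device-to-host data
+ *     13  bCBWLUN
+ *     14  bCBWCBLength            cmd->cdb_len
+ *     15  CBWCB                   the CDB, padded to 16 bytes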
+ */
+static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+ struct bulk_cb_wrap *bcb;
+ int rc;
+
+ bcb = &sc->work_bcb;
+
+ /*
+ * ``If the allocation length is eighteen or greater, and a device
+ * server returns less than eighteen bytes of data, the application
+ * client should assume that the bytes not transferred would have been
+ * zeroes had the device server returned those bytes.''
+ *
+ * We zero sense for all commands so that when a packet request
+ * fails it does not return a stale sense.
+ */
+ memset(&sc->top_sense, 0, UB_SENSE_SIZE);
+
+ /* set up the command wrapper */
+ bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ bcb->Tag = cmd->tag; /* Endianness is not important */
+ bcb->DataTransferLength = cpu_to_le32(cmd->len);
+ bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
+ bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
+ bcb->Length = cmd->cdb_len;
+
+ /* copy the command payload */
+ memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);
+
+ UB_INIT_COMPLETION(sc->work_done);
+
+ sc->last_pipe = sc->send_bulk_pipe;
+ usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
+ bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
+
+ if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
+ /* XXX Clear stalls */
+ ub_complete(&sc->work_done);
+ return rc;
+ }
+
+ sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
+ add_timer(&sc->work_timer);
+
+ cmd->state = UB_CMDST_CMD;
+ return 0;
+}
+
+/*
+ * Timeout handler.
+ */
+static void ub_urb_timeout(unsigned long arg)
+{
+ struct ub_dev *sc = (struct ub_dev *) arg;
+ unsigned long flags;
+
+ spin_lock_irqsave(sc->lock, flags);
+ if (!ub_is_completed(&sc->work_done))
+ usb_unlink_urb(&sc->work_urb);
+ spin_unlock_irqrestore(sc->lock, flags);
+}
+
+/*
+ * Completion routine for the work URB.
+ *
+ * This can be called directly from usb_submit_urb (while we have
+ * the sc->lock taken) and from an interrupt (while we do NOT have
+ * the sc->lock taken). Therefore, bounce this off to a tasklet.
+ */
+static void ub_urb_complete(struct urb *urb)
+{
+ struct ub_dev *sc = urb->context;
+
+ ub_complete(&sc->work_done);
+ tasklet_schedule(&sc->tasklet);
+}
+
+static void ub_scsi_action(unsigned long _dev)
+{
+ struct ub_dev *sc = (struct ub_dev *) _dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(sc->lock, flags);
+ ub_scsi_dispatch(sc);
+ spin_unlock_irqrestore(sc->lock, flags);
+}
+
+static void ub_scsi_dispatch(struct ub_dev *sc)
+{
+ struct ub_scsi_cmd *cmd;
+ int rc;
+
+ while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
+ if (cmd->state == UB_CMDST_DONE) {
+ ub_cmdq_pop(sc);
+ (*cmd->done)(sc, cmd);
+ } else if (cmd->state == UB_CMDST_INIT) {
+ if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
+ break;
+ cmd->error = rc;
+ cmd->state = UB_CMDST_DONE;
+ } else {
+ if (!ub_is_completed(&sc->work_done))
+ break;
+ del_timer(&sc->work_timer);
+ ub_scsi_urb_compl(sc, cmd);
+ }
+ }
+}
+
+static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+ struct urb *urb = &sc->work_urb;
+ struct bulk_cs_wrap *bcs;
+ int len;
+ int rc;
+
+ if (atomic_read(&sc->poison)) {
+ ub_state_done(sc, cmd, -ENODEV);
+ return;
+ }
+
+ if (cmd->state == UB_CMDST_CLEAR) {
+ if (urb->status == -EPIPE) {
+ /*
+ * STALL while clearing STALL.
+ * The control pipe clears itself - nothing to do.
+ */
+ printk(KERN_NOTICE "%s: stall on control pipe\n",
+ sc->name);
+ goto Bad_End;
+ }
+
+ /*
+ * We ignore the result for the halt clear.
+ */ + + /* reset the endpoint toggle */ + usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), + usb_pipeout(sc->last_pipe), 0); + + ub_state_sense(sc, cmd); + + } else if (cmd->state == UB_CMDST_CLR2STS) { + if (urb->status == -EPIPE) { + printk(KERN_NOTICE "%s: stall on control pipe\n", + sc->name); + goto Bad_End; + } + + /* + * We ignore the result for the halt clear. + */ + + /* reset the endpoint toggle */ + usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), + usb_pipeout(sc->last_pipe), 0); + + ub_state_stat(sc, cmd); + + } else if (cmd->state == UB_CMDST_CLRRS) { + if (urb->status == -EPIPE) { + printk(KERN_NOTICE "%s: stall on control pipe\n", + sc->name); + goto Bad_End; + } + + /* + * We ignore the result for the halt clear. + */ + + /* reset the endpoint toggle */ + usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe), + usb_pipeout(sc->last_pipe), 0); + + ub_state_stat_counted(sc, cmd); + + } else if (cmd->state == UB_CMDST_CMD) { + switch (urb->status) { + case 0: + break; + case -EOVERFLOW: + goto Bad_End; + case -EPIPE: + rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); + if (rc != 0) { + printk(KERN_NOTICE "%s: " + "unable to submit clear (%d)\n", + sc->name, rc); + /* + * This is typically ENOMEM or some other such shit. + * Retrying is pointless. Just do Bad End on it... + */ + ub_state_done(sc, cmd, rc); + return; + } + cmd->state = UB_CMDST_CLEAR; + return; + case -ESHUTDOWN: /* unplug */ + case -EILSEQ: /* unplug timeout on uhci */ + ub_state_done(sc, cmd, -ENODEV); + return; + default: + goto Bad_End; + } + if (urb->actual_length != US_BULK_CB_WRAP_LEN) { + goto Bad_End; + } + + if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) { + ub_state_stat(sc, cmd); + return; + } + + // udelay(125); // usb-storage has this + ub_data_start(sc, cmd); + + } else if (cmd->state == UB_CMDST_DATA) { + if (urb->status == -EPIPE) { + rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe); + if (rc != 0) { + printk(KERN_NOTICE "%s: " + "unable to submit clear (%d)\n", + sc->name, rc); + ub_state_done(sc, cmd, rc); + return; + } + cmd->state = UB_CMDST_CLR2STS; + return; + } + if (urb->status == -EOVERFLOW) { + /* + * A babble? Failure, but we must transfer CSW now. + */ + cmd->error = -EOVERFLOW; /* A cheap trick... */ + ub_state_stat(sc, cmd); + return; + } + + if (cmd->dir == UB_DIR_WRITE) { + /* + * Do not continue writes in case of a failure. + * Doing so would cause sectors to be mixed up, + * which is worse than sectors lost. + * + * We must try to read the CSW, or many devices + * get confused. + */ + len = urb->actual_length; + if (urb->status != 0 || + len != cmd->sgv[cmd->current_sg].length) { + cmd->act_len += len; + + cmd->error = -EIO; + ub_state_stat(sc, cmd); + return; + } + + } else { + /* + * If an error occurs on read, we record it, and + * continue to fetch data in order to avoid bubble. + * + * As a small shortcut, we stop if we detect that + * a CSW mixed into data. 
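+ * (The heuristic below: a short chunk whose length modulo 512 equals
+ * US_BULK_CS_WRAP_LEN, i.e. 13 bytes, is likely a misplaced CSW.)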
+ */
+ if (urb->status != 0)
+ cmd->error = -EIO;
+
+ len = urb->actual_length;
+ if (urb->status != 0 ||
+ len != cmd->sgv[cmd->current_sg].length) {
+ if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
+ goto Bad_End;
+ }
+ }
+
+ cmd->act_len += urb->actual_length;
+
+ if (++cmd->current_sg < cmd->nsg) {
+ ub_data_start(sc, cmd);
+ return;
+ }
+ ub_state_stat(sc, cmd);
+
+ } else if (cmd->state == UB_CMDST_STAT) {
+ if (urb->status == -EPIPE) {
+ rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
+ if (rc != 0) {
+ printk(KERN_NOTICE "%s: "
+ "unable to submit clear (%d)\n",
+ sc->name, rc);
+ ub_state_done(sc, cmd, rc);
+ return;
+ }
+
+ /*
+ * Having a stall when getting CSW is an error, so
+ * make sure upper levels are not oblivious to it.
+ */
+ cmd->error = -EIO; /* A cheap trick... */
+
+ cmd->state = UB_CMDST_CLRRS;
+ return;
+ }
+
+ /* Catch everything, including -EOVERFLOW and other nasties. */
+ if (urb->status != 0)
+ goto Bad_End;
+
+ if (urb->actual_length == 0) {
+ ub_state_stat_counted(sc, cmd);
+ return;
+ }
+
+ /*
+ * Check the returned Bulk protocol status.
+ * The status block has to be validated first.
+ */
+
+ bcs = &sc->work_bcs;
+
+ if (sc->signature == cpu_to_le32(0)) {
+ /*
+ * This is the first reply, so do not perform the check.
+ * Instead, remember the signature the device uses
+ * for future checks. But do not allow a nul.
+ */
+ sc->signature = bcs->Signature;
+ if (sc->signature == cpu_to_le32(0)) {
+ ub_state_stat_counted(sc, cmd);
+ return;
+ }
+ } else {
+ if (bcs->Signature != sc->signature) {
+ ub_state_stat_counted(sc, cmd);
+ return;
+ }
+ }
+
+ if (bcs->Tag != cmd->tag) {
+ /*
+ * This usually happens when we disagree with the
+ * device's microcode about something. For instance,
+ * a few of them throw this after timeouts. They buffer
+ * commands and reply at commands we timed out before.
+ * Without flushing these replies we loop forever.
+ */
+ ub_state_stat_counted(sc, cmd);
+ return;
+ }
+
+ if (!sc->bad_resid) {
+ len = le32_to_cpu(bcs->Residue);
+ if (len != cmd->len - cmd->act_len) {
+ /*
+ * Only start ignoring if this cmd ended well.
+ */
+ if (cmd->len == cmd->act_len) {
+ printk(KERN_NOTICE "%s: "
+ "bad residual %d of %d, ignoring\n",
+ sc->name, len, cmd->len);
+ sc->bad_resid = 1;
+ }
+ }
+ }
+
+ switch (bcs->Status) {
+ case US_BULK_STAT_OK:
+ break;
+ case US_BULK_STAT_FAIL:
+ ub_state_sense(sc, cmd);
+ return;
+ case US_BULK_STAT_PHASE:
+ goto Bad_End;
+ default:
+ printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
+ sc->name, bcs->Status);
+ ub_state_done(sc, cmd, -EINVAL);
+ return;
+ }
+
+ /* Not zeroing error to preserve a babble indicator */
+ if (cmd->error != 0) {
+ ub_state_sense(sc, cmd);
+ return;
+ }
+ cmd->state = UB_CMDST_DONE;
+ ub_cmdq_pop(sc);
+ (*cmd->done)(sc, cmd);
+
+ } else if (cmd->state == UB_CMDST_SENSE) {
+ ub_state_done(sc, cmd, -EIO);
+
+ } else {
+ printk(KERN_WARNING "%s: wrong command state %d\n",
+ sc->name, cmd->state);
+ ub_state_done(sc, cmd, -EINVAL);
+ return;
+ }
+ return;
+
+Bad_End: /* Little Excel is dead */
+ ub_state_done(sc, cmd, -EIO);
+}
+
+/*
+ * Factorization helper for the command state machine:
+ * Initiate a data segment transfer.
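+ * (One bulk URB per scatterlist element: ub_scsi_urb_compl advances
+ * cmd->current_sg and re-enters here until all nsg segments are done.)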
+ */ +static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +{ + struct scatterlist *sg = &cmd->sgv[cmd->current_sg]; + int pipe; + int rc; + + UB_INIT_COMPLETION(sc->work_done); + + if (cmd->dir == UB_DIR_READ) + pipe = sc->recv_bulk_pipe; + else + pipe = sc->send_bulk_pipe; + sc->last_pipe = pipe; + usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg), + sg->length, ub_urb_complete, sc); + + if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { + /* XXX Clear stalls */ + ub_complete(&sc->work_done); + ub_state_done(sc, cmd, rc); + return; + } + + if (cmd->timeo) + sc->work_timer.expires = jiffies + cmd->timeo; + else + sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT; + add_timer(&sc->work_timer); + + cmd->state = UB_CMDST_DATA; +} + +/* + * Factorization helper for the command state machine: + * Finish the command. + */ +static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc) +{ + + cmd->error = rc; + cmd->state = UB_CMDST_DONE; + ub_cmdq_pop(sc); + (*cmd->done)(sc, cmd); +} + +/* + * Factorization helper for the command state machine: + * Submit a CSW read. + */ +static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +{ + int rc; + + UB_INIT_COMPLETION(sc->work_done); + + sc->last_pipe = sc->recv_bulk_pipe; + usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe, + &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc); + + if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { + /* XXX Clear stalls */ + ub_complete(&sc->work_done); + ub_state_done(sc, cmd, rc); + return -1; + } + + if (cmd->timeo) + sc->work_timer.expires = jiffies + cmd->timeo; + else + sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT; + add_timer(&sc->work_timer); + return 0; +} + +/* + * Factorization helper for the command state machine: + * Submit a CSW read and go to STAT state. + */ +static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +{ + + if (__ub_state_stat(sc, cmd) != 0) + return; + + cmd->stat_count = 0; + cmd->state = UB_CMDST_STAT; +} + +/* + * Factorization helper for the command state machine: + * Submit a CSW read and go to STAT state with counter (along [C] path). + */ +static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +{ + + if (++cmd->stat_count >= 4) { + ub_state_sense(sc, cmd); + return; + } + + if (__ub_state_stat(sc, cmd) != 0) + return; + + cmd->state = UB_CMDST_STAT; +} + +/* + * Factorization helper for the command state machine: + * Submit a REQUEST SENSE and go to SENSE state. + */ +static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +{ + struct ub_scsi_cmd *scmd; + struct scatterlist *sg; + int rc; + + if (cmd->cdb[0] == REQUEST_SENSE) { + rc = -EPIPE; + goto error; + } + + scmd = &sc->top_rqs_cmd; + memset(scmd, 0, sizeof(struct ub_scsi_cmd)); + scmd->cdb[0] = REQUEST_SENSE; + scmd->cdb[4] = UB_SENSE_SIZE; + scmd->cdb_len = 6; + scmd->dir = UB_DIR_READ; + scmd->state = UB_CMDST_INIT; + scmd->nsg = 1; + sg = &scmd->sgv[0]; + sg_init_table(sg, UB_MAX_REQ_SG); + sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE, + (unsigned long)sc->top_sense & (PAGE_SIZE-1)); + scmd->len = UB_SENSE_SIZE; + scmd->lun = cmd->lun; + scmd->done = ub_top_sense_done; + scmd->back = cmd; + + scmd->tag = sc->tagcnt++; + + cmd->state = UB_CMDST_SENSE; + + ub_cmdq_insert(sc, scmd); + return; + +error: + ub_state_done(sc, cmd, rc); +} + +/* + * A helper for the command's state machine: + * Submit a stall clear. 
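+ * (A CLEAR_FEATURE(ENDPOINT_HALT) control request aimed at the stalled
+ * endpoint, timed like any other work URB.)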
+ */
+static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
+ int stalled_pipe)
+{
+ int endp;
+ struct usb_ctrlrequest *cr;
+ int rc;
+
+ endp = usb_pipeendpoint(stalled_pipe);
+ if (usb_pipein (stalled_pipe))
+ endp |= USB_DIR_IN;
+
+ cr = &sc->work_cr;
+ cr->bRequestType = USB_RECIP_ENDPOINT;
+ cr->bRequest = USB_REQ_CLEAR_FEATURE;
+ cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
+ cr->wIndex = cpu_to_le16(endp);
+ cr->wLength = cpu_to_le16(0);
+
+ UB_INIT_COMPLETION(sc->work_done);
+
+ usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
+ (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
+
+ if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
+ ub_complete(&sc->work_done);
+ return rc;
+ }
+
+ sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
+ add_timer(&sc->work_timer);
+ return 0;
+}
+
+/*
+ */
+static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
+{
+ unsigned char *sense = sc->top_sense;
+ struct ub_scsi_cmd *cmd;
+
+ /*
+ * Find the command which triggered the unit attention or a check,
+ * save the sense into it, and advance its state machine.
+ */
+ if ((cmd = ub_cmdq_peek(sc)) == NULL) {
+ printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
+ return;
+ }
+ if (cmd != scmd->back) {
+ printk(KERN_WARNING "%s: "
+ "sense done for wrong command 0x%x\n",
+ sc->name, cmd->tag);
+ return;
+ }
+ if (cmd->state != UB_CMDST_SENSE) {
+ printk(KERN_WARNING "%s: sense done with bad cmd state %d\n",
+ sc->name, cmd->state);
+ return;
+ }
+
+ /*
+ * Ignoring scmd->act_len, because the buffer was pre-zeroed.
+ */
+ cmd->key = sense[2] & 0x0F;
+ cmd->asc = sense[12];
+ cmd->ascq = sense[13];
+
+ ub_scsi_urb_compl(sc, cmd);
+}
+
+/*
+ * Reset management
+ */
+
+static void ub_reset_enter(struct ub_dev *sc, int try)
+{
+
+ if (sc->reset) {
+ /* This happens often on multi-LUN devices. */
+ return;
+ }
+ sc->reset = try + 1;
+
+#if 0 /* Not needed because the disconnect waits for us. */
+ unsigned long flags;
+ spin_lock_irqsave(&ub_lock, flags);
+ sc->openc++;
+ spin_unlock_irqrestore(&ub_lock, flags);
+#endif
+
+#if 0 /* We let them stop themselves. */
+ struct ub_lun *lun;
+ list_for_each_entry(lun, &sc->luns, link) {
+ blk_stop_queue(lun->disk->queue);
+ }
+#endif
+
+ schedule_work(&sc->reset_work);
+}
+
+static void ub_reset_task(struct work_struct *work)
+{
+ struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
+ unsigned long flags;
+ struct ub_lun *lun;
+ int lkr, rc;
+
+ if (!sc->reset) {
+ printk(KERN_WARNING "%s: Running reset unrequested\n",
+ sc->name);
+ return;
+ }
+
+ if (atomic_read(&sc->poison)) {
+ ;
+ } else if ((sc->reset & 1) == 0) {
+ ub_sync_reset(sc);
+ msleep(700); /* usb-storage sleeps 6s (!) */
+ ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
+ ub_probe_clear_stall(sc, sc->send_bulk_pipe);
+ } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
+ ;
+ } else {
+ if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) {
+ printk(KERN_NOTICE
+ "%s: usb_lock_device_for_reset failed (%d)\n",
+ sc->name, lkr);
+ } else {
+ rc = usb_reset_device(sc->dev);
+ if (rc < 0) {
+ printk(KERN_NOTICE "%s: "
+ "usb_reset_device failed (%d)\n",
+ sc->name, rc);
+ }
+
+ if (lkr)
+ usb_unlock_device(sc->dev);
+ }
+ }
+
+ /*
+ * In theory, no commands can be running while reset is active,
+ * so nobody can ask for another reset, and so we do not need any
+ * queues of resets or anything. We do need a spinlock though,
+ * to interact with block layer.
+ */
+ spin_lock_irqsave(sc->lock, flags);
+ sc->reset = 0;
+ tasklet_schedule(&sc->tasklet);
+ list_for_each_entry(lun, &sc->luns, link) {
+ blk_start_queue(lun->disk->queue);
+ }
+ wake_up(&sc->reset_wait);
+ spin_unlock_irqrestore(sc->lock, flags);
+}
+
+/*
+ * XXX Reset brackets are too much hassle to implement, so just stub them
+ * in order to prevent forced unbinding (which deadlocks solid when our
+ * ->disconnect method waits for the reset to complete and this kills keventd).
+ *
+ * XXX Tell Alan to move usb_unlock_device inside of usb_reset_device,
+ * or else the post_reset is invoked, and restarts I/O on a locked device.
+ */
+static int ub_pre_reset(struct usb_interface *iface) {
+ return 0;
+}
+
+static int ub_post_reset(struct usb_interface *iface) {
+ return 0;
+}
+
+/*
+ * This is called from a process context.
+ */
+static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
+{
+
+ lun->readonly = 0; /* XXX Query this from the device */
+
+ lun->capacity.nsec = 0;
+ lun->capacity.bsize = 512;
+ lun->capacity.bshift = 0;
+
+ if (ub_sync_tur(sc, lun) != 0)
+ return; /* Not ready */
+ lun->changed = 0;
+
+ if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
+ /*
+ * The retry here means something is wrong, either with the
+ * device, with the transport, or with our code.
+ * We keep this because sd.c has retries for capacity.
+ */
+ if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
+ lun->capacity.nsec = 0;
+ lun->capacity.bsize = 512;
+ lun->capacity.bshift = 0;
+ }
+ }
+}
+
+/*
+ * The open function.
+ * This is mostly needed to keep refcounting, but also to support
+ * media checks on removable media drives.
+ */
+static int ub_bd_open(struct block_device *bdev, fmode_t mode)
+{
+ struct ub_lun *lun = bdev->bd_disk->private_data;
+ struct ub_dev *sc = lun->udev;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&ub_lock, flags);
+ if (atomic_read(&sc->poison)) {
+ spin_unlock_irqrestore(&ub_lock, flags);
+ return -ENXIO;
+ }
+ sc->openc++;
+ spin_unlock_irqrestore(&ub_lock, flags);
+
+ if (lun->removable || lun->readonly)
+ check_disk_change(bdev);
+
+ /*
+ * The sd.c considers ->media_present and ->changed not equivalent,
+ * under some pretty murky conditions (a failure of READ CAPACITY).
+ * We may need it one day.
+ */
+ if (lun->removable && lun->changed && !(mode & FMODE_NDELAY)) {
+ rc = -ENOMEDIUM;
+ goto err_open;
+ }
+
+ if (lun->readonly && (mode & FMODE_WRITE)) {
+ rc = -EROFS;
+ goto err_open;
+ }
+
+ return 0;
+
+err_open:
+ ub_put(sc);
+ return rc;
+}
+
+/*
+ */
+static int ub_bd_release(struct gendisk *disk, fmode_t mode)
+{
+ struct ub_lun *lun = disk->private_data;
+ struct ub_dev *sc = lun->udev;
+
+ ub_put(sc);
+ return 0;
+}
+
+/*
+ * The ioctl interface.
+ */
+static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ void __user *usermem = (void __user *) arg;
+
+ return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
+}
+
+/*
+ * This is called by check_disk_change if we reported a media change.
+ * The main objective here is to discover the features of the media such as
+ * the capacity, read-only status, etc. USB storage generally does not
+ * need to be spun up, but if we needed it, this would be the place.
+ *
+ * This call can sleep.
+ *
+ * The return code is not used.
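+ *
+ * For example, a 2048-byte-sector device reporting N blocks ends up
+ * with capacity.bsize = 2048, capacity.bshift = 2 and
+ * capacity.nsec = N << 2, since nsec counts 512-byte sectors.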
+ */ +static int ub_bd_revalidate(struct gendisk *disk) +{ + struct ub_lun *lun = disk->private_data; + + ub_revalidate(lun->udev, lun); + + /* XXX Support sector size switching like in sr.c */ + blk_queue_hardsect_size(disk->queue, lun->capacity.bsize); + set_capacity(disk, lun->capacity.nsec); + // set_disk_ro(sdkp->disk, lun->readonly); + + return 0; +} + +/* + * The check is called by the block layer to verify if the media + * is still available. It is supposed to be harmless, lightweight and + * non-intrusive in case the media was not changed. + * + * This call can sleep. + * + * The return code is bool! + */ +static int ub_bd_media_changed(struct gendisk *disk) +{ + struct ub_lun *lun = disk->private_data; + + if (!lun->removable) + return 0; + + /* + * We clean checks always after every command, so this is not + * as dangerous as it looks. If the TEST_UNIT_READY fails here, + * the device is actually not ready with operator or software + * intervention required. One dangerous item might be a drive which + * spins itself down, and come the time to write dirty pages, this + * will fail, then block layer discards the data. Since we never + * spin drives up, such devices simply cannot be used with ub anyway. + */ + if (ub_sync_tur(lun->udev, lun) != 0) { + lun->changed = 1; + return 1; + } + + return lun->changed; +} + +static struct block_device_operations ub_bd_fops = { + .owner = THIS_MODULE, + .open = ub_bd_open, + .release = ub_bd_release, + .locked_ioctl = ub_bd_ioctl, + .media_changed = ub_bd_media_changed, + .revalidate_disk = ub_bd_revalidate, +}; + +/* + * Common ->done routine for commands executed synchronously. + */ +static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) +{ + struct completion *cop = cmd->back; + complete(cop); +} + +/* + * Test if the device has a check condition on it, synchronously. + */ +static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun) +{ + struct ub_scsi_cmd *cmd; + enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) }; + unsigned long flags; + struct completion compl; + int rc; + + init_completion(&compl); + + rc = -ENOMEM; + if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) + goto err_alloc; + + cmd->cdb[0] = TEST_UNIT_READY; + cmd->cdb_len = 6; + cmd->dir = UB_DIR_NONE; + cmd->state = UB_CMDST_INIT; + cmd->lun = lun; /* This may be NULL, but that's ok */ + cmd->done = ub_probe_done; + cmd->back = &compl; + + spin_lock_irqsave(sc->lock, flags); + cmd->tag = sc->tagcnt++; + + rc = ub_submit_scsi(sc, cmd); + spin_unlock_irqrestore(sc->lock, flags); + + if (rc != 0) + goto err_submit; + + wait_for_completion(&compl); + + rc = cmd->error; + + if (rc == -EIO && cmd->key != 0) /* Retries for benh's key */ + rc = cmd->key; + +err_submit: + kfree(cmd); +err_alloc: + return rc; +} + +/* + * Read the SCSI capacity synchronously (for probing). 
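+ *
+ * READ CAPACITY(10) (opcode 0x25) returns 8 bytes: the big-endian LBA
+ * of the last block, then the big-endian block length. The math below
+ * turns that into 512-byte sectors.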
+ */ +static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, + struct ub_capacity *ret) +{ + struct ub_scsi_cmd *cmd; + struct scatterlist *sg; + char *p; + enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 }; + unsigned long flags; + unsigned int bsize, shift; + unsigned long nsec; + struct completion compl; + int rc; + + init_completion(&compl); + + rc = -ENOMEM; + if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) + goto err_alloc; + p = (char *)cmd + sizeof(struct ub_scsi_cmd); + + cmd->cdb[0] = 0x25; + cmd->cdb_len = 10; + cmd->dir = UB_DIR_READ; + cmd->state = UB_CMDST_INIT; + cmd->nsg = 1; + sg = &cmd->sgv[0]; + sg_init_table(sg, UB_MAX_REQ_SG); + sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1)); + cmd->len = 8; + cmd->lun = lun; + cmd->done = ub_probe_done; + cmd->back = &compl; + + spin_lock_irqsave(sc->lock, flags); + cmd->tag = sc->tagcnt++; + + rc = ub_submit_scsi(sc, cmd); + spin_unlock_irqrestore(sc->lock, flags); + + if (rc != 0) + goto err_submit; + + wait_for_completion(&compl); + + if (cmd->error != 0) { + rc = -EIO; + goto err_read; + } + if (cmd->act_len != 8) { + rc = -EIO; + goto err_read; + } + + /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */ + nsec = be32_to_cpu(*(__be32 *)p) + 1; + bsize = be32_to_cpu(*(__be32 *)(p + 4)); + switch (bsize) { + case 512: shift = 0; break; + case 1024: shift = 1; break; + case 2048: shift = 2; break; + case 4096: shift = 3; break; + default: + rc = -EDOM; + goto err_inv_bsize; + } + + ret->bsize = bsize; + ret->bshift = shift; + ret->nsec = nsec << shift; + rc = 0; + +err_inv_bsize: +err_read: +err_submit: + kfree(cmd); +err_alloc: + return rc; +} + +/* + */ +static void ub_probe_urb_complete(struct urb *urb) +{ + struct completion *cop = urb->context; + complete(cop); +} + +static void ub_probe_timeout(unsigned long arg) +{ + struct completion *cop = (struct completion *) arg; + complete(cop); +} + +/* + * Reset with a Bulk reset. + */ +static int ub_sync_reset(struct ub_dev *sc) +{ + int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber; + struct usb_ctrlrequest *cr; + struct completion compl; + struct timer_list timer; + int rc; + + init_completion(&compl); + + cr = &sc->work_cr; + cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE; + cr->bRequest = US_BULK_RESET_REQUEST; + cr->wValue = cpu_to_le16(0); + cr->wIndex = cpu_to_le16(ifnum); + cr->wLength = cpu_to_le16(0); + + usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, + (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl); + + if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { + printk(KERN_WARNING + "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc); + return rc; + } + + init_timer(&timer); + timer.function = ub_probe_timeout; + timer.data = (unsigned long) &compl; + timer.expires = jiffies + UB_CTRL_TIMEOUT; + add_timer(&timer); + + wait_for_completion(&compl); + + del_timer_sync(&timer); + usb_kill_urb(&sc->work_urb); + + return sc->work_urb.status; +} + +/* + * Get number of LUNs by the way of Bulk GetMaxLUN command. 
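+ *
+ * Get Max LUN is a class-specific control read (bRequest 0xfe) whose
+ * single data byte is the highest LUN number; a two-LUN device
+ * answers 1, and we report 2.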
+ */ +static int ub_sync_getmaxlun(struct ub_dev *sc) +{ + int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber; + unsigned char *p; + enum { ALLOC_SIZE = 1 }; + struct usb_ctrlrequest *cr; + struct completion compl; + struct timer_list timer; + int nluns; + int rc; + + init_completion(&compl); + + rc = -ENOMEM; + if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) + goto err_alloc; + *p = 55; + + cr = &sc->work_cr; + cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; + cr->bRequest = US_BULK_GET_MAX_LUN; + cr->wValue = cpu_to_le16(0); + cr->wIndex = cpu_to_le16(ifnum); + cr->wLength = cpu_to_le16(1); + + usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe, + (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl); + + if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) + goto err_submit; + + init_timer(&timer); + timer.function = ub_probe_timeout; + timer.data = (unsigned long) &compl; + timer.expires = jiffies + UB_CTRL_TIMEOUT; + add_timer(&timer); + + wait_for_completion(&compl); + + del_timer_sync(&timer); + usb_kill_urb(&sc->work_urb); + + if ((rc = sc->work_urb.status) < 0) + goto err_io; + + if (sc->work_urb.actual_length != 1) { + nluns = 0; + } else { + if ((nluns = *p) == 55) { + nluns = 0; + } else { + /* GetMaxLUN returns the maximum LUN number */ + nluns += 1; + if (nluns > UB_MAX_LUNS) + nluns = UB_MAX_LUNS; + } + } + + kfree(p); + return nluns; + +err_io: +err_submit: + kfree(p); +err_alloc: + return rc; +} + +/* + * Clear initial stalls. + */ +static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe) +{ + int endp; + struct usb_ctrlrequest *cr; + struct completion compl; + struct timer_list timer; + int rc; + + init_completion(&compl); + + endp = usb_pipeendpoint(stalled_pipe); + if (usb_pipein (stalled_pipe)) + endp |= USB_DIR_IN; + + cr = &sc->work_cr; + cr->bRequestType = USB_RECIP_ENDPOINT; + cr->bRequest = USB_REQ_CLEAR_FEATURE; + cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT); + cr->wIndex = cpu_to_le16(endp); + cr->wLength = cpu_to_le16(0); + + usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, + (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl); + + if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { + printk(KERN_WARNING + "%s: Unable to submit a probe clear (%d)\n", sc->name, rc); + return rc; + } + + init_timer(&timer); + timer.function = ub_probe_timeout; + timer.data = (unsigned long) &compl; + timer.expires = jiffies + UB_CTRL_TIMEOUT; + add_timer(&timer); + + wait_for_completion(&compl); + + del_timer_sync(&timer); + usb_kill_urb(&sc->work_urb); + + /* reset the endpoint toggle */ + usb_settoggle(sc->dev, endp, usb_pipeout(sc->last_pipe), 0); + + return 0; +} + +/* + * Get the pipe settings. + */ +static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev, + struct usb_interface *intf) +{ + struct usb_host_interface *altsetting = intf->cur_altsetting; + struct usb_endpoint_descriptor *ep_in = NULL; + struct usb_endpoint_descriptor *ep_out = NULL; + struct usb_endpoint_descriptor *ep; + int i; + + /* + * Find the endpoints we need. + * We are expecting a minimum of 2 endpoints - in and out (bulk). + * We will ignore any others. + */ + for (i = 0; i < altsetting->desc.bNumEndpoints; i++) { + ep = &altsetting->endpoint[i].desc; + + /* Is it a BULK endpoint? */ + if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) + == USB_ENDPOINT_XFER_BULK) { + /* BULK in or out? 
 */
+ if (ep->bEndpointAddress & USB_DIR_IN) {
+ if (ep_in == NULL)
+ ep_in = ep;
+ } else {
+ if (ep_out == NULL)
+ ep_out = ep;
+ }
+ }
+ }
+
+ if (ep_in == NULL || ep_out == NULL) {
+ printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name);
+ return -ENODEV;
+ }
+
+ /* Calculate and store the pipe values */
+ sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
+ sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
+ sc->send_bulk_pipe = usb_sndbulkpipe(dev,
+ ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+ sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
+ ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+
+ return 0;
+}
+
+/*
+ * Probing is done in the process context, which allows us to cheat
+ * and not build a state machine for the discovery.
+ */
+static int ub_probe(struct usb_interface *intf,
+ const struct usb_device_id *dev_id)
+{
+ struct ub_dev *sc;
+ int nluns;
+ int rc;
+ int i;
+
+ if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
+ return -ENXIO;
+
+ rc = -ENOMEM;
+ if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
+ goto err_core;
+ sc->lock = ub_next_lock();
+ INIT_LIST_HEAD(&sc->luns);
+ usb_init_urb(&sc->work_urb);
+ tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
+ atomic_set(&sc->poison, 0);
+ INIT_WORK(&sc->reset_work, ub_reset_task);
+ init_waitqueue_head(&sc->reset_wait);
+
+ init_timer(&sc->work_timer);
+ sc->work_timer.data = (unsigned long) sc;
+ sc->work_timer.function = ub_urb_timeout;
+
+ ub_init_completion(&sc->work_done);
+ sc->work_done.done = 1; /* A little yuk, but oh well... */
+
+ sc->dev = interface_to_usbdev(intf);
+ sc->intf = intf;
+ // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+ usb_set_intfdata(intf, sc);
+ usb_get_dev(sc->dev);
+ /*
+ * Since we give the interface struct to the block level through
+ * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
+ * oopses on close after a disconnect (kernels 2.6.16 and up).
+ */
+ usb_get_intf(sc->intf);
+
+ snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
+ sc->dev->bus->busnum, sc->dev->devnum);
+
+ /* XXX Verify that we can handle the device (from descriptors) */
+
+ if (ub_get_pipes(sc, sc->dev, intf) != 0)
+ goto err_dev_desc;
+
+ /*
+ * At this point, all USB initialization is done, do upper layer.
+ * We really hate halfway initialized structures, so from the
+ * invariants perspective, this ub_dev is fully constructed at
+ * this point.
+ */
+
+ /*
+ * This is needed to clear toggles. It is a problem only if we do
+ * `rmmod ub && modprobe ub` without disconnects, but we like that.
+ */
+#if 0 /* iPod Mini fails if we do this (big white iPod works) */
+ ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
+ ub_probe_clear_stall(sc, sc->send_bulk_pipe);
+#endif
+
+ /*
+ * The way this is used by the startup code is a little specific.
+ * A SCSI check causes a USB stall. Our common case code sees it
+ * and clears the check, after which the device is ready for use.
+ * But if a check was not present, any command other than
+ * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
+ *
+ * If we neglect to clear the SCSI check, the first real command fails
+ * (which is the capacity readout). We clear that and retry, but why
+ * cause spurious retries for no reason?
+ *
+ * Revalidation may start with its own TEST_UNIT_READY, but that one
+ * has to succeed, so we clear checks with an additional one here.
+ * In any case it's not our business how revalidation is implemented.
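+ *
+ * For reference, TEST UNIT READY is the simplest CDB there is; a
+ * sketch of what a synchronous TUR helper such as ub_sync_tur
+ * presumably builds (shown for illustration only, using the field
+ * names this file uses elsewhere):
+ *
+ *	cmd->cdb[0] = 0x00;		TEST UNIT READY, six-byte CDB
+ *	cmd->cdb_len = 6;		remaining CDB bytes stay zero
+ *	cmd->dir = UB_DIR_NONE;		no data phase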
+ */ + for (i = 0; i < 3; i++) { /* Retries for the schwag key from KS'04 */ + if ((rc = ub_sync_tur(sc, NULL)) <= 0) break; + if (rc != 0x6) break; + msleep(10); + } + + nluns = 1; + for (i = 0; i < 3; i++) { + if ((rc = ub_sync_getmaxlun(sc)) < 0) + break; + if (rc != 0) { + nluns = rc; + break; + } + msleep(100); + } + + for (i = 0; i < nluns; i++) { + ub_probe_lun(sc, i); + } + return 0; + +err_dev_desc: + usb_set_intfdata(intf, NULL); + usb_put_intf(sc->intf); + usb_put_dev(sc->dev); + kfree(sc); +err_core: + return rc; +} + +static int ub_probe_lun(struct ub_dev *sc, int lnum) +{ + struct ub_lun *lun; + struct request_queue *q; + struct gendisk *disk; + int rc; + + rc = -ENOMEM; + if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL) + goto err_alloc; + lun->num = lnum; + + rc = -ENOSR; + if ((lun->id = ub_id_get()) == -1) + goto err_id; + + lun->udev = sc; + + snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)", + lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num); + + lun->removable = 1; /* XXX Query this from the device */ + lun->changed = 1; /* ub_revalidate clears only */ + ub_revalidate(sc, lun); + + rc = -ENOMEM; + if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL) + goto err_diskalloc; + + sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a'); + disk->major = UB_MAJOR; + disk->first_minor = lun->id * UB_PARTS_PER_LUN; + disk->fops = &ub_bd_fops; + disk->private_data = lun; + disk->driverfs_dev = &sc->intf->dev; + + rc = -ENOMEM; + if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL) + goto err_blkqinit; + + disk->queue = q; + + blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); + blk_queue_max_hw_segments(q, UB_MAX_REQ_SG); + blk_queue_max_phys_segments(q, UB_MAX_REQ_SG); + blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */ + blk_queue_max_sectors(q, UB_MAX_SECTORS); + blk_queue_hardsect_size(q, lun->capacity.bsize); + + lun->disk = disk; + q->queuedata = lun; + list_add(&lun->link, &sc->luns); + + set_capacity(disk, lun->capacity.nsec); + if (lun->removable) + disk->flags |= GENHD_FL_REMOVABLE; + + add_disk(disk); + + return 0; + +err_blkqinit: + put_disk(disk); +err_diskalloc: + ub_id_put(lun->id); +err_id: + kfree(lun); +err_alloc: + return rc; +} + +static void ub_disconnect(struct usb_interface *intf) +{ + struct ub_dev *sc = usb_get_intfdata(intf); + struct ub_lun *lun; + unsigned long flags; + + /* + * Prevent ub_bd_release from pulling the rug from under us. + * XXX This is starting to look like a kref. + * XXX Why not to take this ref at probe time? + */ + spin_lock_irqsave(&ub_lock, flags); + sc->openc++; + spin_unlock_irqrestore(&ub_lock, flags); + + /* + * Fence stall clearings, operations triggered by unlinkings and so on. + * We do not attempt to unlink any URBs, because we do not trust the + * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway. + */ + atomic_set(&sc->poison, 1); + + /* + * Wait for reset to end, if any. + */ + wait_event(sc->reset_wait, !sc->reset); + + /* + * Blow away queued commands. + * + * Actually, this never works, because before we get here + * the HCD terminates outstanding URB(s). It causes our + * SCSI command queue to advance, commands fail to submit, + * and the whole queue drains. So, we just use this code to + * print warnings. 
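+ *
+ * (Each leftover command is completed by hand below: flag it with
+ * -ENOTCONN, pop it off the queue and invoke its ->done callback,
+ * the same path a normal completion would have taken.)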
+ */
+ spin_lock_irqsave(sc->lock, flags);
+ {
+ struct ub_scsi_cmd *cmd;
+ int cnt = 0;
+ while ((cmd = ub_cmdq_peek(sc)) != NULL) {
+ cmd->error = -ENOTCONN;
+ cmd->state = UB_CMDST_DONE;
+ ub_cmdq_pop(sc);
+ (*cmd->done)(sc, cmd);
+ cnt++;
+ }
+ if (cnt != 0) {
+ printk(KERN_WARNING "%s: "
+ "%d commands were queued after shutdown\n", sc->name, cnt);
+ }
+ }
+ spin_unlock_irqrestore(sc->lock, flags);
+
+ /*
+ * Unregister the upper layer.
+ */
+ list_for_each_entry(lun, &sc->luns, link) {
+ del_gendisk(lun->disk);
+ /*
+ * I wish I could do:
+ * queue_flag_set(QUEUE_FLAG_DEAD, q);
+ * As it is, we rely on our internal poisoning and let
+ * the upper levels spin furiously, failing all the I/O.
+ */
+ }
+
+ /*
+ * Testing for -EINPROGRESS is always a bug, so we are bending
+ * the rules a little.
+ */
+ spin_lock_irqsave(sc->lock, flags);
+ if (sc->work_urb.status == -EINPROGRESS) { /* janitors: ignore */
+ printk(KERN_WARNING "%s: "
+ "URB is active after disconnect\n", sc->name);
+ }
+ spin_unlock_irqrestore(sc->lock, flags);
+
+ /*
+ * There is virtually no chance that another CPU runs a timeout so long
+ * after ub_urb_complete should have called del_timer, but only if HCD
+ * didn't forget to deliver a callback on unlink.
+ */
+ del_timer_sync(&sc->work_timer);
+
+ /*
+ * At this point there must be no commands coming from anyone
+ * and no URBs left in transit.
+ */
+
+ ub_put(sc);
+}
+
+static struct usb_driver ub_driver = {
+ .name = "ub",
+ .probe = ub_probe,
+ .disconnect = ub_disconnect,
+ .id_table = ub_usb_ids,
+ .pre_reset = ub_pre_reset,
+ .post_reset = ub_post_reset,
+};
+
+static int __init ub_init(void)
+{
+ int rc;
+ int i;
+
+ for (i = 0; i < UB_QLOCK_NUM; i++)
+ spin_lock_init(&ub_qlockv[i]);
+
+ if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
+ goto err_regblkdev;
+
+ if ((rc = usb_register(&ub_driver)) != 0)
+ goto err_register;
+
+ usb_usual_set_present(USB_US_TYPE_UB);
+ return 0;
+
+err_register:
+ unregister_blkdev(UB_MAJOR, DRV_NAME);
+err_regblkdev:
+ return rc;
+}
+
+static void __exit ub_exit(void)
+{
+ usb_deregister(&ub_driver);
+
+ unregister_blkdev(UB_MAJOR, DRV_NAME);
+ usb_usual_clear_present(USB_US_TYPE_UB);
+}
+
+module_init(ub_init);
+module_exit(ub_exit);
+
+MODULE_LICENSE("GPL"); diff --git a/drivers/block/umem.c b/drivers/block/umem.c new file mode 100644 index 0000000..c24e1bd --- /dev/null +++ b/drivers/block/umem.c @@ -0,0 +1,1147 @@ +/*
+ * mm.c - Micro Memory(tm) PCI memory board block device driver - v2.3
+ *
+ * (C) 2001 San Mehat <nettwerk@valinux.com>
+ * (C) 2001 Johannes Erdfelt <jerdfelt@valinux.com>
+ * (C) 2001 NeilBrown <neilb@cse.unsw.edu.au>
+ *
+ * This driver for the Micro Memory PCI Memory Module with Battery Backup
+ * is Copyright Micro Memory Inc 2001-2002. All rights reserved.
+ *
+ * This driver is released to the public under the terms of the
+ * GNU GENERAL PUBLIC LICENSE version 2
+ * See the file COPYING for details.
+ *
+ * This driver provides a standard block device interface for Micro Memory(tm)
+ * PCI based RAM boards.
+ * 10/05/01: Phap Nguyen - Rebuilt the driver
+ * 10/22/01: Phap Nguyen - v2.1 Added disk partitioning
+ * 29oct2001:NeilBrown - Use make_request_fn instead of request_fn
+ * - use standard disk partitioning (so fdisk works).
+ * 08nov2001:NeilBrown - change driver name from "mm" to "umem"
+ * - incorporate into main kernel
+ * 08apr2002:NeilBrown - Move some of interrupt handling to tasklet
+ * - use spin_lock_bh instead of _irq
+ * - Never block on make_request. queue
+ * bh's instead.
+ * - unregister umem from devfs at mod unload + * - Change version to 2.3 + * 07Nov2001:Phap Nguyen - Select pci read command: 06, 12, 15 (Decimal) + * 07Jan2002: P. Nguyen - Used PCI Memory Write & Invalidate for DMA + * 15May2002:NeilBrown - convert to bio for 2.5 + * 17May2002:NeilBrown - remove init_mem initialisation. Instead detect + * - a sequence of writes that cover the card, and + * - set initialised bit then. + */ + +#undef DEBUG /* #define DEBUG if you want debugging info (pr_debug) */ +#include <linux/fs.h> +#include <linux/bio.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/ioctl.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> + +#include <linux/fcntl.h> /* O_ACCMODE */ +#include <linux/hdreg.h> /* HDIO_GETGEO */ + +#include "umem.h" + +#include <asm/uaccess.h> +#include <asm/io.h> + +#define MM_MAXCARDS 4 +#define MM_RAHEAD 2 /* two sectors */ +#define MM_BLKSIZE 1024 /* 1k blocks */ +#define MM_HARDSECT 512 /* 512-byte hardware sectors */ +#define MM_SHIFT 6 /* max 64 partitions on 4 cards */ + +/* + * Version Information + */ + +#define DRIVER_NAME "umem" +#define DRIVER_VERSION "v2.3" +#define DRIVER_AUTHOR "San Mehat, Johannes Erdfelt, NeilBrown" +#define DRIVER_DESC "Micro Memory(tm) PCI memory board block driver" + +static int debug; +/* #define HW_TRACE(x) writeb(x,cards[0].csr_remap + MEMCTRLSTATUS_MAGIC) */ +#define HW_TRACE(x) + +#define DEBUG_LED_ON_TRANSFER 0x01 +#define DEBUG_BATTERY_POLLING 0x02 + +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "Debug bitmask"); + +static int pci_read_cmd = 0x0C; /* Read Multiple */ +module_param(pci_read_cmd, int, 0); +MODULE_PARM_DESC(pci_read_cmd, "PCI read command"); + +static int pci_write_cmd = 0x0F; /* Write and Invalidate */ +module_param(pci_write_cmd, int, 0); +MODULE_PARM_DESC(pci_write_cmd, "PCI write command"); + +static int pci_cmds; + +static int major_nr; + +#include <linux/blkdev.h> +#include <linux/blkpg.h> + +struct cardinfo { + struct pci_dev *dev; + + unsigned char __iomem *csr_remap; + unsigned int mm_size; /* size in kbytes */ + + unsigned int init_size; /* initial segment, in sectors, + * that we know to + * have been written + */ + struct bio *bio, *currentbio, **biotail; + int current_idx; + sector_t current_sector; + + struct request_queue *queue; + + struct mm_page { + dma_addr_t page_dma; + struct mm_dma_desc *desc; + int cnt, headcnt; + struct bio *bio, **biotail; + int idx; + } mm_pages[2]; +#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc)) + + int Active, Ready; + + struct tasklet_struct tasklet; + unsigned int dma_status; + + struct { + int good; + int warned; + unsigned long last_change; + } battery[2]; + + spinlock_t lock; + int check_batteries; + + int flags; +}; + +static struct cardinfo cards[MM_MAXCARDS]; +static struct block_device_operations mm_fops; +static struct timer_list battery_timer; + +static int num_cards; + +static struct gendisk *mm_gendisk[MM_MAXCARDS]; + +static void check_batteries(struct cardinfo *card); + +static int get_userbit(struct cardinfo *card, int bit) +{ + unsigned char led; + + led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL); + return led & bit; +} + +static int set_userbit(struct cardinfo *card, int bit, unsigned char state) +{ + unsigned char led; + + led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL); + if (state) + led |= bit; + else + led &= 
~bit;
+ writeb(led, card->csr_remap + MEMCTRLCMD_LEDCTRL);
+
+ return 0;
+}
+
+/*
+ * NOTE: For the power LED, use the LED_POWER_* macros since they differ
+ */
+static void set_led(struct cardinfo *card, int shift, unsigned char state)
+{
+ unsigned char led;
+
+ led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
+ if (state == LED_FLIP)
+ led ^= (1<<shift);
+ else {
+ led &= ~(0x03 << shift);
+ led |= (state << shift);
+ }
+ writeb(led, card->csr_remap + MEMCTRLCMD_LEDCTRL);
+
+}
+
+#ifdef MM_DIAG
+static void dump_regs(struct cardinfo *card)
+{
+ unsigned char *p;
+ int i, i1;
+
+ p = card->csr_remap;
+ for (i = 0; i < 8; i++) {
+ printk(KERN_DEBUG "%p ", p);
+
+ for (i1 = 0; i1 < 16; i1++)
+ printk("%02x ", *p++);
+
+ printk("\n");
+ }
+}
+#endif
+
+static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
+{
+ dev_printk(KERN_DEBUG, &card->dev->dev, "DMAstat - ");
+ if (dmastat & DMASCR_ANY_ERR)
+ printk(KERN_CONT "ANY_ERR ");
+ if (dmastat & DMASCR_MBE_ERR)
+ printk(KERN_CONT "MBE_ERR ");
+ if (dmastat & DMASCR_PARITY_ERR_REP)
+ printk(KERN_CONT "PARITY_ERR_REP ");
+ if (dmastat & DMASCR_PARITY_ERR_DET)
+ printk(KERN_CONT "PARITY_ERR_DET ");
+ if (dmastat & DMASCR_SYSTEM_ERR_SIG)
+ printk(KERN_CONT "SYSTEM_ERR_SIG ");
+ if (dmastat & DMASCR_TARGET_ABT)
+ printk(KERN_CONT "TARGET_ABT ");
+ if (dmastat & DMASCR_MASTER_ABT)
+ printk(KERN_CONT "MASTER_ABT ");
+ if (dmastat & DMASCR_CHAIN_COMPLETE)
+ printk(KERN_CONT "CHAIN_COMPLETE ");
+ if (dmastat & DMASCR_DMA_COMPLETE)
+ printk(KERN_CONT "DMA_COMPLETE ");
+ printk("\n");
+}
+
+/*
+ * Theory of request handling
+ *
+ * Each bio is assigned to one mm_dma_desc - which may not be enough FIXME
+ * We have two pages of mm_dma_desc, holding about 64 descriptors
+ * each. These are allocated at init time.
+ * One page is "Ready" and is either full, or can have requests added.
+ * The other page might be "Active", meaning DMA is happening on it.
+ *
+ * Whenever IO on the active page completes, the Ready page is activated
+ * and the ex-Active page is cleaned out and made Ready.
+ * Otherwise the Ready page is only activated when it becomes full, or
+ * when mm_unplug_device is called via the unplug_io_fn.
+ *
+ * If a request arrives while both pages are full, it is queued, and b_rdev is
+ * overloaded to record whether it was a read or a write.
+ *
+ * The interrupt handler only polls the device to clear the interrupt.
+ * The processing of the result is done in a tasklet.
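+ *
+ * A worked example of the ping-pong (values for illustration only):
+ * with Ready=0 and Active=-1, incoming bios are packed into
+ * mm_pages[0] by add_bio(). activate() then sees no Active page and
+ * a non-empty Ready page, so it flips Active=0, Ready=1 and calls
+ * mm_start_io(). While page 0 is in flight, new bios accumulate in
+ * page 1; when the DMA chain completes, process_page() retires
+ * page 0, sets Active back to -1, and activate() promotes page 1.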
+ */ + +static void mm_start_io(struct cardinfo *card) +{ + /* we have the lock, we know there is + * no IO active, and we know that card->Active + * is set + */ + struct mm_dma_desc *desc; + struct mm_page *page; + int offset; + + /* make the last descriptor end the chain */ + page = &card->mm_pages[card->Active]; + pr_debug("start_io: %d %d->%d\n", + card->Active, page->headcnt, page->cnt - 1); + desc = &page->desc[page->cnt-1]; + + desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN); + desc->control_bits &= ~cpu_to_le32(DMASCR_CHAIN_EN); + desc->sem_control_bits = desc->control_bits; + + + if (debug & DEBUG_LED_ON_TRANSFER) + set_led(card, LED_REMOVE, LED_ON); + + desc = &page->desc[page->headcnt]; + writel(0, card->csr_remap + DMA_PCI_ADDR); + writel(0, card->csr_remap + DMA_PCI_ADDR + 4); + + writel(0, card->csr_remap + DMA_LOCAL_ADDR); + writel(0, card->csr_remap + DMA_LOCAL_ADDR + 4); + + writel(0, card->csr_remap + DMA_TRANSFER_SIZE); + writel(0, card->csr_remap + DMA_TRANSFER_SIZE + 4); + + writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR); + writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR + 4); + + offset = ((char *)desc) - ((char *)page->desc); + writel(cpu_to_le32((page->page_dma+offset) & 0xffffffff), + card->csr_remap + DMA_DESCRIPTOR_ADDR); + /* Force the value to u64 before shifting otherwise >> 32 is undefined C + * and on some ports will do nothing ! */ + writel(cpu_to_le32(((u64)page->page_dma)>>32), + card->csr_remap + DMA_DESCRIPTOR_ADDR + 4); + + /* Go, go, go */ + writel(cpu_to_le32(DMASCR_GO | DMASCR_CHAIN_EN | pci_cmds), + card->csr_remap + DMA_STATUS_CTRL); +} + +static int add_bio(struct cardinfo *card); + +static void activate(struct cardinfo *card) +{ + /* if No page is Active, and Ready is + * not empty, then switch Ready page + * to active and start IO. + * Then add any bh's that are available to Ready + */ + + do { + while (add_bio(card)) + ; + + if (card->Active == -1 && + card->mm_pages[card->Ready].cnt > 0) { + card->Active = card->Ready; + card->Ready = 1-card->Ready; + mm_start_io(card); + } + + } while (card->Active == -1 && add_bio(card)); +} + +static inline void reset_page(struct mm_page *page) +{ + page->cnt = 0; + page->headcnt = 0; + page->bio = NULL; + page->biotail = &page->bio; +} + +static void mm_unplug_device(struct request_queue *q) +{ + struct cardinfo *card = q->queuedata; + unsigned long flags; + + spin_lock_irqsave(&card->lock, flags); + if (blk_remove_plug(q)) + activate(card); + spin_unlock_irqrestore(&card->lock, flags); +} + +/* + * If there is room on Ready page, take + * one bh off list and add it. + * return 1 if there was room, else 0. + */ +static int add_bio(struct cardinfo *card) +{ + struct mm_page *p; + struct mm_dma_desc *desc; + dma_addr_t dma_handle; + int offset; + struct bio *bio; + struct bio_vec *vec; + int idx; + int rw; + int len; + + bio = card->currentbio; + if (!bio && card->bio) { + card->currentbio = card->bio; + card->current_idx = card->bio->bi_idx; + card->current_sector = card->bio->bi_sector; + card->bio = card->bio->bi_next; + if (card->bio == NULL) + card->biotail = &card->bio; + card->currentbio->bi_next = NULL; + return 1; + } + if (!bio) + return 0; + idx = card->current_idx; + + rw = bio_rw(bio); + if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE) + return 0; + + vec = bio_iovec_idx(bio, idx); + len = vec->bv_len; + dma_handle = pci_map_page(card->dev, + vec->bv_page, + vec->bv_offset, + len, + (rw == READ) ? 
+ PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); + + p = &card->mm_pages[card->Ready]; + desc = &p->desc[p->cnt]; + p->cnt++; + if (p->bio == NULL) + p->idx = idx; + if ((p->biotail) != &bio->bi_next) { + *(p->biotail) = bio; + p->biotail = &(bio->bi_next); + bio->bi_next = NULL; + } + + desc->data_dma_handle = dma_handle; + + desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle); + desc->local_addr = cpu_to_le64(card->current_sector << 9); + desc->transfer_size = cpu_to_le32(len); + offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc)); + desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset)); + desc->zero1 = desc->zero2 = 0; + offset = (((char *)(desc+1)) - ((char *)p->desc)); + desc->next_desc_addr = cpu_to_le64(p->page_dma+offset); + desc->control_bits = cpu_to_le32(DMASCR_GO|DMASCR_ERR_INT_EN| + DMASCR_PARITY_INT_EN| + DMASCR_CHAIN_EN | + DMASCR_SEM_EN | + pci_cmds); + if (rw == WRITE) + desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ); + desc->sem_control_bits = desc->control_bits; + + card->current_sector += (len >> 9); + idx++; + card->current_idx = idx; + if (idx >= bio->bi_vcnt) + card->currentbio = NULL; + + return 1; +} + +static void process_page(unsigned long data) +{ + /* check if any of the requests in the page are DMA_COMPLETE, + * and deal with them appropriately. + * If we find a descriptor without DMA_COMPLETE in the semaphore, then + * dma must have hit an error on that descriptor, so use dma_status + * instead and assume that all following descriptors must be re-tried. + */ + struct mm_page *page; + struct bio *return_bio = NULL; + struct cardinfo *card = (struct cardinfo *)data; + unsigned int dma_status = card->dma_status; + + spin_lock_bh(&card->lock); + if (card->Active < 0) + goto out_unlock; + page = &card->mm_pages[card->Active]; + + while (page->headcnt < page->cnt) { + struct bio *bio = page->bio; + struct mm_dma_desc *desc = &page->desc[page->headcnt]; + int control = le32_to_cpu(desc->sem_control_bits); + int last = 0; + int idx; + + if (!(control & DMASCR_DMA_COMPLETE)) { + control = dma_status; + last = 1; + } + page->headcnt++; + idx = page->idx; + page->idx++; + if (page->idx >= bio->bi_vcnt) { + page->bio = bio->bi_next; + if (page->bio) + page->idx = page->bio->bi_idx; + } + + pci_unmap_page(card->dev, desc->data_dma_handle, + bio_iovec_idx(bio, idx)->bv_len, + (control & DMASCR_TRANSFER_READ) ? 
+ PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); + if (control & DMASCR_HARD_ERROR) { + /* error */ + clear_bit(BIO_UPTODATE, &bio->bi_flags); + dev_printk(KERN_WARNING, &card->dev->dev, + "I/O error on sector %d/%d\n", + le32_to_cpu(desc->local_addr)>>9, + le32_to_cpu(desc->transfer_size)); + dump_dmastat(card, control); + } else if (test_bit(BIO_RW, &bio->bi_rw) && + le32_to_cpu(desc->local_addr) >> 9 == + card->init_size) { + card->init_size += le32_to_cpu(desc->transfer_size) >> 9; + if (card->init_size >> 1 >= card->mm_size) { + dev_printk(KERN_INFO, &card->dev->dev, + "memory now initialised\n"); + set_userbit(card, MEMORY_INITIALIZED, 1); + } + } + if (bio != page->bio) { + bio->bi_next = return_bio; + return_bio = bio; + } + + if (last) + break; + } + + if (debug & DEBUG_LED_ON_TRANSFER) + set_led(card, LED_REMOVE, LED_OFF); + + if (card->check_batteries) { + card->check_batteries = 0; + check_batteries(card); + } + if (page->headcnt >= page->cnt) { + reset_page(page); + card->Active = -1; + activate(card); + } else { + /* haven't finished with this one yet */ + pr_debug("do some more\n"); + mm_start_io(card); + } + out_unlock: + spin_unlock_bh(&card->lock); + + while (return_bio) { + struct bio *bio = return_bio; + + return_bio = bio->bi_next; + bio->bi_next = NULL; + bio_endio(bio, 0); + } +} + +static int mm_make_request(struct request_queue *q, struct bio *bio) +{ + struct cardinfo *card = q->queuedata; + pr_debug("mm_make_request %llu %u\n", + (unsigned long long)bio->bi_sector, bio->bi_size); + + spin_lock_irq(&card->lock); + *card->biotail = bio; + bio->bi_next = NULL; + card->biotail = &bio->bi_next; + blk_plug_device(q); + spin_unlock_irq(&card->lock); + + return 0; +} + +static irqreturn_t mm_interrupt(int irq, void *__card) +{ + struct cardinfo *card = (struct cardinfo *) __card; + unsigned int dma_status; + unsigned short cfg_status; + +HW_TRACE(0x30); + + dma_status = le32_to_cpu(readl(card->csr_remap + DMA_STATUS_CTRL)); + + if (!(dma_status & (DMASCR_ERROR_MASK | DMASCR_CHAIN_COMPLETE))) { + /* interrupt wasn't for me ... 
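+ * the IRQ line is shared (the handler is registered with
+ * IRQF_SHARED), so returning IRQ_NONE lets the kernel offer
+ * the interrupt to the other handlers on the line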
*/ + return IRQ_NONE; + } + + /* clear COMPLETION interrupts */ + if (card->flags & UM_FLAG_NO_BYTE_STATUS) + writel(cpu_to_le32(DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE), + card->csr_remap + DMA_STATUS_CTRL); + else + writeb((DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE) >> 16, + card->csr_remap + DMA_STATUS_CTRL + 2); + + /* log errors and clear interrupt status */ + if (dma_status & DMASCR_ANY_ERR) { + unsigned int data_log1, data_log2; + unsigned int addr_log1, addr_log2; + unsigned char stat, count, syndrome, check; + + stat = readb(card->csr_remap + MEMCTRLCMD_ERRSTATUS); + + data_log1 = le32_to_cpu(readl(card->csr_remap + + ERROR_DATA_LOG)); + data_log2 = le32_to_cpu(readl(card->csr_remap + + ERROR_DATA_LOG + 4)); + addr_log1 = le32_to_cpu(readl(card->csr_remap + + ERROR_ADDR_LOG)); + addr_log2 = readb(card->csr_remap + ERROR_ADDR_LOG + 4); + + count = readb(card->csr_remap + ERROR_COUNT); + syndrome = readb(card->csr_remap + ERROR_SYNDROME); + check = readb(card->csr_remap + ERROR_CHECK); + + dump_dmastat(card, dma_status); + + if (stat & 0x01) + dev_printk(KERN_ERR, &card->dev->dev, + "Memory access error detected (err count %d)\n", + count); + if (stat & 0x02) + dev_printk(KERN_ERR, &card->dev->dev, + "Multi-bit EDC error\n"); + + dev_printk(KERN_ERR, &card->dev->dev, + "Fault Address 0x%02x%08x, Fault Data 0x%08x%08x\n", + addr_log2, addr_log1, data_log2, data_log1); + dev_printk(KERN_ERR, &card->dev->dev, + "Fault Check 0x%02x, Fault Syndrome 0x%02x\n", + check, syndrome); + + writeb(0, card->csr_remap + ERROR_COUNT); + } + + if (dma_status & DMASCR_PARITY_ERR_REP) { + dev_printk(KERN_ERR, &card->dev->dev, + "PARITY ERROR REPORTED\n"); + pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); + pci_write_config_word(card->dev, PCI_STATUS, cfg_status); + } + + if (dma_status & DMASCR_PARITY_ERR_DET) { + dev_printk(KERN_ERR, &card->dev->dev, + "PARITY ERROR DETECTED\n"); + pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); + pci_write_config_word(card->dev, PCI_STATUS, cfg_status); + } + + if (dma_status & DMASCR_SYSTEM_ERR_SIG) { + dev_printk(KERN_ERR, &card->dev->dev, "SYSTEM ERROR\n"); + pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); + pci_write_config_word(card->dev, PCI_STATUS, cfg_status); + } + + if (dma_status & DMASCR_TARGET_ABT) { + dev_printk(KERN_ERR, &card->dev->dev, "TARGET ABORT\n"); + pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); + pci_write_config_word(card->dev, PCI_STATUS, cfg_status); + } + + if (dma_status & DMASCR_MASTER_ABT) { + dev_printk(KERN_ERR, &card->dev->dev, "MASTER ABORT\n"); + pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); + pci_write_config_word(card->dev, PCI_STATUS, cfg_status); + } + + /* and process the DMA descriptors */ + card->dma_status = dma_status; + tasklet_schedule(&card->tasklet); + +HW_TRACE(0x36); + + return IRQ_HANDLED; +} + +/* + * If both batteries are good, no LED + * If either battery has been warned, solid LED + * If both batteries are bad, flash the LED quickly + * If either battery is bad, flash the LED semi quickly + */ +static void set_fault_to_battery_status(struct cardinfo *card) +{ + if (card->battery[0].good && card->battery[1].good) + set_led(card, LED_FAULT, LED_OFF); + else if (card->battery[0].warned || card->battery[1].warned) + set_led(card, LED_FAULT, LED_ON); + else if (!card->battery[0].good && !card->battery[1].good) + set_led(card, LED_FAULT, LED_FLASH_7_0); + else + set_led(card, LED_FAULT, LED_FLASH_3_5); +} + +static void init_battery_timer(void); + +static int 
check_battery(struct cardinfo *card, int battery, int status)
+{
+ if (status != card->battery[battery].good) {
+ card->battery[battery].good = !card->battery[battery].good;
+ card->battery[battery].last_change = jiffies;
+
+ if (card->battery[battery].good) {
+ dev_printk(KERN_ERR, &card->dev->dev,
+ "Battery %d now good\n", battery + 1);
+ card->battery[battery].warned = 0;
+ } else
+ dev_printk(KERN_ERR, &card->dev->dev,
+ "Battery %d now FAILED\n", battery + 1);
+
+ return 1;
+ } else if (!card->battery[battery].good &&
+ !card->battery[battery].warned &&
+ time_after_eq(jiffies, card->battery[battery].last_change +
+ (HZ * 60 * 60 * 5))) {
+ dev_printk(KERN_ERR, &card->dev->dev,
+ "Battery %d still FAILED after 5 hours\n", battery + 1);
+ card->battery[battery].warned = 1;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static void check_batteries(struct cardinfo *card)
+{
+ /* NOTE: this must *never* be called while the card
+ * is doing (bus-to-card) DMA, or you will need the
+ * reset switch
+ */
+ unsigned char status;
+ int ret1, ret2;
+
+ status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY);
+ if (debug & DEBUG_BATTERY_POLLING)
+ dev_printk(KERN_DEBUG, &card->dev->dev,
+ "checking battery status, 1 = %s, 2 = %s\n",
+ (status & BATTERY_1_FAILURE) ? "FAILURE" : "OK",
+ (status & BATTERY_2_FAILURE) ? "FAILURE" : "OK");
+
+ ret1 = check_battery(card, 0, !(status & BATTERY_1_FAILURE));
+ ret2 = check_battery(card, 1, !(status & BATTERY_2_FAILURE));
+
+ if (ret1 || ret2)
+ set_fault_to_battery_status(card);
+}
+
+static void check_all_batteries(unsigned long ptr)
+{
+ int i;
+
+ for (i = 0; i < num_cards; i++)
+ if (!(cards[i].flags & UM_FLAG_NO_BATT)) {
+ struct cardinfo *card = &cards[i];
+ spin_lock_bh(&card->lock);
+ if (card->Active >= 0)
+ card->check_batteries = 1;
+ else
+ check_batteries(card);
+ spin_unlock_bh(&card->lock);
+ }
+
+ init_battery_timer();
+}
+
+static void init_battery_timer(void)
+{
+ init_timer(&battery_timer);
+ battery_timer.function = check_all_batteries;
+ battery_timer.expires = jiffies + (HZ * 60);
+ add_timer(&battery_timer);
+}
+
+static void del_battery_timer(void)
+{
+ del_timer(&battery_timer);
+}
+
+/*
+ * Note no locks taken out here. In a worst case scenario, we could drop
+ * a chunk of system memory. But that should never happen, since validation
+ * happens at open or mount time, when locks are held.
+ *
+ * That's crap, since doing that while some partitions are opened
+ * or mounted will give you really nasty results.
+ */
+static int mm_revalidate(struct gendisk *disk)
+{
+ struct cardinfo *card = disk->private_data;
+ set_capacity(disk, card->mm_size << 1);
+ return 0;
+}
+
+static int mm_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct cardinfo *card = bdev->bd_disk->private_data;
+ int size = card->mm_size * (1024 / MM_HARDSECT);
+
+ /*
+ * get geometry: we have to fake one... trim the size to a
+ * multiple of 2048 (1M): say we have 32 sectors, 64 heads,
+ * whatever cylinders.
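+ *
+ * Worked example: a 512 MB card has mm_size = 524288 (KB), so
+ * size = 524288 * 2 = 1048576 sectors; with 64 heads * 32 sectors
+ * = 2048 sectors per cylinder that reports as 512 cylinders, and
+ * any remainder short of 1 MB is simply truncated away.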
+ */ + geo->heads = 64; + geo->sectors = 32; + geo->cylinders = size / (geo->heads * geo->sectors); + return 0; +} + +/* + * Future support for removable devices + */ +static int mm_check_change(struct gendisk *disk) +{ +/* struct cardinfo *dev = disk->private_data; */ + return 0; +} + +static struct block_device_operations mm_fops = { + .owner = THIS_MODULE, + .getgeo = mm_getgeo, + .revalidate_disk = mm_revalidate, + .media_changed = mm_check_change, +}; + +static int __devinit mm_pci_probe(struct pci_dev *dev, + const struct pci_device_id *id) +{ + int ret = -ENODEV; + struct cardinfo *card = &cards[num_cards]; + unsigned char mem_present; + unsigned char batt_status; + unsigned int saved_bar, data; + unsigned long csr_base; + unsigned long csr_len; + int magic_number; + static int printed_version; + + if (!printed_version++) + printk(KERN_INFO DRIVER_VERSION " : " DRIVER_DESC "\n"); + + ret = pci_enable_device(dev); + if (ret) + return ret; + + pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF8); + pci_set_master(dev); + + card->dev = dev; + + csr_base = pci_resource_start(dev, 0); + csr_len = pci_resource_len(dev, 0); + if (!csr_base || !csr_len) + return -ENODEV; + + dev_printk(KERN_INFO, &dev->dev, + "Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n"); + + if (pci_set_dma_mask(dev, DMA_64BIT_MASK) && + pci_set_dma_mask(dev, DMA_32BIT_MASK)) { + dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n"); + return -ENOMEM; + } + + ret = pci_request_regions(dev, DRIVER_NAME); + if (ret) { + dev_printk(KERN_ERR, &card->dev->dev, + "Unable to request memory region\n"); + goto failed_req_csr; + } + + card->csr_remap = ioremap_nocache(csr_base, csr_len); + if (!card->csr_remap) { + dev_printk(KERN_ERR, &card->dev->dev, + "Unable to remap memory region\n"); + ret = -ENOMEM; + + goto failed_remap_csr; + } + + dev_printk(KERN_INFO, &card->dev->dev, + "CSR 0x%08lx -> 0x%p (0x%lx)\n", + csr_base, card->csr_remap, csr_len); + + switch (card->dev->device) { + case 0x5415: + card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG; + magic_number = 0x59; + break; + + case 0x5425: + card->flags |= UM_FLAG_NO_BYTE_STATUS; + magic_number = 0x5C; + break; + + case 0x6155: + card->flags |= UM_FLAG_NO_BYTE_STATUS | + UM_FLAG_NO_BATTREG | UM_FLAG_NO_BATT; + magic_number = 0x99; + break; + + default: + magic_number = 0x100; + break; + } + + if (readb(card->csr_remap + MEMCTRLSTATUS_MAGIC) != magic_number) { + dev_printk(KERN_ERR, &card->dev->dev, "Magic number invalid\n"); + ret = -ENOMEM; + goto failed_magic; + } + + card->mm_pages[0].desc = pci_alloc_consistent(card->dev, + PAGE_SIZE * 2, + &card->mm_pages[0].page_dma); + card->mm_pages[1].desc = pci_alloc_consistent(card->dev, + PAGE_SIZE * 2, + &card->mm_pages[1].page_dma); + if (card->mm_pages[0].desc == NULL || + card->mm_pages[1].desc == NULL) { + dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n"); + goto failed_alloc; + } + reset_page(&card->mm_pages[0]); + reset_page(&card->mm_pages[1]); + card->Ready = 0; /* page 0 is ready */ + card->Active = -1; /* no page is active */ + card->bio = NULL; + card->biotail = &card->bio; + + card->queue = blk_alloc_queue(GFP_KERNEL); + if (!card->queue) + goto failed_alloc; + + blk_queue_make_request(card->queue, mm_make_request); + card->queue->queuedata = card; + card->queue->unplug_fn = mm_unplug_device; + + tasklet_init(&card->tasklet, process_page, (unsigned long)card); + + card->check_batteries = 0; + + mem_present = readb(card->csr_remap + MEMCTRLSTATUS_MEMORY); + switch 
(mem_present) { + case MEM_128_MB: + card->mm_size = 1024 * 128; + break; + case MEM_256_MB: + card->mm_size = 1024 * 256; + break; + case MEM_512_MB: + card->mm_size = 1024 * 512; + break; + case MEM_1_GB: + card->mm_size = 1024 * 1024; + break; + case MEM_2_GB: + card->mm_size = 1024 * 2048; + break; + default: + card->mm_size = 0; + break; + } + + /* Clear the LED's we control */ + set_led(card, LED_REMOVE, LED_OFF); + set_led(card, LED_FAULT, LED_OFF); + + batt_status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY); + + card->battery[0].good = !(batt_status & BATTERY_1_FAILURE); + card->battery[1].good = !(batt_status & BATTERY_2_FAILURE); + card->battery[0].last_change = card->battery[1].last_change = jiffies; + + if (card->flags & UM_FLAG_NO_BATT) + dev_printk(KERN_INFO, &card->dev->dev, + "Size %d KB\n", card->mm_size); + else { + dev_printk(KERN_INFO, &card->dev->dev, + "Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n", + card->mm_size, + batt_status & BATTERY_1_DISABLED ? "Disabled" : "Enabled", + card->battery[0].good ? "OK" : "FAILURE", + batt_status & BATTERY_2_DISABLED ? "Disabled" : "Enabled", + card->battery[1].good ? "OK" : "FAILURE"); + + set_fault_to_battery_status(card); + } + + pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &saved_bar); + data = 0xffffffff; + pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, data); + pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &data); + pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, saved_bar); + data &= 0xfffffff0; + data = ~data; + data += 1; + + if (request_irq(dev->irq, mm_interrupt, IRQF_SHARED, DRIVER_NAME, + card)) { + dev_printk(KERN_ERR, &card->dev->dev, + "Unable to allocate IRQ\n"); + ret = -ENODEV; + goto failed_req_irq; + } + + dev_printk(KERN_INFO, &card->dev->dev, + "Window size %d bytes, IRQ %d\n", data, dev->irq); + + spin_lock_init(&card->lock); + + pci_set_drvdata(dev, card); + + if (pci_write_cmd != 0x0F) /* If not Memory Write & Invalidate */ + pci_write_cmd = 0x07; /* then Memory Write command */ + + if (pci_write_cmd & 0x08) { /* use Memory Write and Invalidate */ + unsigned short cfg_command; + pci_read_config_word(dev, PCI_COMMAND, &cfg_command); + cfg_command |= 0x10; /* Memory Write & Invalidate Enable */ + pci_write_config_word(dev, PCI_COMMAND, cfg_command); + } + pci_cmds = (pci_read_cmd << 28) | (pci_write_cmd << 24); + + num_cards++; + + if (!get_userbit(card, MEMORY_INITIALIZED)) { + dev_printk(KERN_INFO, &card->dev->dev, + "memory NOT initialized. 
Consider over-writing whole device.\n"); + card->init_size = 0; + } else { + dev_printk(KERN_INFO, &card->dev->dev, + "memory already initialized\n"); + card->init_size = card->mm_size; + } + + /* Enable ECC */ + writeb(EDC_STORE_CORRECT, card->csr_remap + MEMCTRLCMD_ERRCTRL); + + return 0; + + failed_req_irq: + failed_alloc: + if (card->mm_pages[0].desc) + pci_free_consistent(card->dev, PAGE_SIZE*2, + card->mm_pages[0].desc, + card->mm_pages[0].page_dma); + if (card->mm_pages[1].desc) + pci_free_consistent(card->dev, PAGE_SIZE*2, + card->mm_pages[1].desc, + card->mm_pages[1].page_dma); + failed_magic: + iounmap(card->csr_remap); + failed_remap_csr: + pci_release_regions(dev); + failed_req_csr: + + return ret; +} + +static void mm_pci_remove(struct pci_dev *dev) +{ + struct cardinfo *card = pci_get_drvdata(dev); + + tasklet_kill(&card->tasklet); + free_irq(dev->irq, card); + iounmap(card->csr_remap); + + if (card->mm_pages[0].desc) + pci_free_consistent(card->dev, PAGE_SIZE*2, + card->mm_pages[0].desc, + card->mm_pages[0].page_dma); + if (card->mm_pages[1].desc) + pci_free_consistent(card->dev, PAGE_SIZE*2, + card->mm_pages[1].desc, + card->mm_pages[1].page_dma); + blk_cleanup_queue(card->queue); + + pci_release_regions(dev); + pci_disable_device(dev); +} + +static const struct pci_device_id mm_pci_ids[] = { + {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_5415CN)}, + {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_5425CN)}, + {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_6155)}, + { + .vendor = 0x8086, + .device = 0xB555, + .subvendor = 0x1332, + .subdevice = 0x5460, + .class = 0x050000, + .class_mask = 0, + }, { /* end: all zeroes */ } +}; + +MODULE_DEVICE_TABLE(pci, mm_pci_ids); + +static struct pci_driver mm_pci_driver = { + .name = DRIVER_NAME, + .id_table = mm_pci_ids, + .probe = mm_pci_probe, + .remove = mm_pci_remove, +}; + +static int __init mm_init(void) +{ + int retval, i; + int err; + + retval = pci_register_driver(&mm_pci_driver); + if (retval) + return -ENOMEM; + + err = major_nr = register_blkdev(0, DRIVER_NAME); + if (err < 0) { + pci_unregister_driver(&mm_pci_driver); + return -EIO; + } + + for (i = 0; i < num_cards; i++) { + mm_gendisk[i] = alloc_disk(1 << MM_SHIFT); + if (!mm_gendisk[i]) + goto out; + } + + for (i = 0; i < num_cards; i++) { + struct gendisk *disk = mm_gendisk[i]; + sprintf(disk->disk_name, "umem%c", 'a'+i); + spin_lock_init(&cards[i].lock); + disk->major = major_nr; + disk->first_minor = i << MM_SHIFT; + disk->fops = &mm_fops; + disk->private_data = &cards[i]; + disk->queue = cards[i].queue; + set_capacity(disk, cards[i].mm_size << 1); + add_disk(disk); + } + + init_battery_timer(); + printk(KERN_INFO "MM: desc_per_page = %ld\n", DESC_PER_PAGE); +/* printk("mm_init: Done. 
10-19-01 9:00\n"); */ + return 0; + +out: + pci_unregister_driver(&mm_pci_driver); + unregister_blkdev(major_nr, DRIVER_NAME); + while (i--) + put_disk(mm_gendisk[i]); + return -ENOMEM; +} + +static void __exit mm_cleanup(void) +{ + int i; + + del_battery_timer(); + + for (i = 0; i < num_cards ; i++) { + del_gendisk(mm_gendisk[i]); + put_disk(mm_gendisk[i]); + } + + pci_unregister_driver(&mm_pci_driver); + + unregister_blkdev(major_nr, DRIVER_NAME); +} + +module_init(mm_init); +module_exit(mm_cleanup); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/block/umem.h b/drivers/block/umem.h new file mode 100644 index 0000000..375c689 --- /dev/null +++ b/drivers/block/umem.h @@ -0,0 +1,133 @@ + +/* + * This file contains defines for the + * Micro Memory MM5415 + * family PCI Memory Module with Battery Backup. + * + * Copyright Micro Memory INC 2001. All rights reserved. + * Release under the terms of the GNU GENERAL PUBLIC LICENSE version 2. + * See the file COPYING. + */ + +#ifndef _DRIVERS_BLOCK_MM_H +#define _DRIVERS_BLOCK_MM_H + + +#define IRQ_TIMEOUT (1 * HZ) + +/* CSR register definition */ +#define MEMCTRLSTATUS_MAGIC 0x00 +#define MM_MAGIC_VALUE (unsigned char)0x59 + +#define MEMCTRLSTATUS_BATTERY 0x04 +#define BATTERY_1_DISABLED 0x01 +#define BATTERY_1_FAILURE 0x02 +#define BATTERY_2_DISABLED 0x04 +#define BATTERY_2_FAILURE 0x08 + +#define MEMCTRLSTATUS_MEMORY 0x07 +#define MEM_128_MB 0xfe +#define MEM_256_MB 0xfc +#define MEM_512_MB 0xf8 +#define MEM_1_GB 0xf0 +#define MEM_2_GB 0xe0 + +#define MEMCTRLCMD_LEDCTRL 0x08 +#define LED_REMOVE 2 +#define LED_FAULT 4 +#define LED_POWER 6 +#define LED_FLIP 255 +#define LED_OFF 0x00 +#define LED_ON 0x01 +#define LED_FLASH_3_5 0x02 +#define LED_FLASH_7_0 0x03 +#define LED_POWER_ON 0x00 +#define LED_POWER_OFF 0x01 +#define USER_BIT1 0x01 +#define USER_BIT2 0x02 + +#define MEMORY_INITIALIZED USER_BIT1 + +#define MEMCTRLCMD_ERRCTRL 0x0C +#define EDC_NONE_DEFAULT 0x00 +#define EDC_NONE 0x01 +#define EDC_STORE_READ 0x02 +#define EDC_STORE_CORRECT 0x03 + +#define MEMCTRLCMD_ERRCNT 0x0D +#define MEMCTRLCMD_ERRSTATUS 0x0E + +#define ERROR_DATA_LOG 0x20 +#define ERROR_ADDR_LOG 0x28 +#define ERROR_COUNT 0x3D +#define ERROR_SYNDROME 0x3E +#define ERROR_CHECK 0x3F + +#define DMA_PCI_ADDR 0x40 +#define DMA_LOCAL_ADDR 0x48 +#define DMA_TRANSFER_SIZE 0x50 +#define DMA_DESCRIPTOR_ADDR 0x58 +#define DMA_SEMAPHORE_ADDR 0x60 +#define DMA_STATUS_CTRL 0x68 +#define DMASCR_GO 0x00001 +#define DMASCR_TRANSFER_READ 0x00002 +#define DMASCR_CHAIN_EN 0x00004 +#define DMASCR_SEM_EN 0x00010 +#define DMASCR_DMA_COMP_EN 0x00020 +#define DMASCR_CHAIN_COMP_EN 0x00040 +#define DMASCR_ERR_INT_EN 0x00080 +#define DMASCR_PARITY_INT_EN 0x00100 +#define DMASCR_ANY_ERR 0x00800 +#define DMASCR_MBE_ERR 0x01000 +#define DMASCR_PARITY_ERR_REP 0x02000 +#define DMASCR_PARITY_ERR_DET 0x04000 +#define DMASCR_SYSTEM_ERR_SIG 0x08000 +#define DMASCR_TARGET_ABT 0x10000 +#define DMASCR_MASTER_ABT 0x20000 +#define DMASCR_DMA_COMPLETE 0x40000 +#define DMASCR_CHAIN_COMPLETE 0x80000 + +/* +3.SOME PCs HAVE HOST BRIDGES WHICH APPARENTLY DO NOT CORRECTLY HANDLE +READ-LINE (0xE) OR READ-MULTIPLE (0xC) PCI COMMAND CODES DURING DMA +TRANSFERS. IN OTHER SYSTEMS THESE COMMAND CODES WILL CAUSE THE HOST BRIDGE +TO ALLOW LONGER BURSTS DURING DMA READ OPERATIONS. 
THE UPPER FOUR BITS +(31..28) OF THE DMA CSR HAVE BEEN MADE PROGRAMMABLE, SO THAT EITHER A 0x6, +AN 0xE OR A 0xC CAN BE WRITTEN TO THEM TO SET THE COMMAND CODE USED DURING +DMA READ OPERATIONS. +*/ +#define DMASCR_READ 0x60000000 +#define DMASCR_READLINE 0xE0000000 +#define DMASCR_READMULTI 0xC0000000 + + +#define DMASCR_ERROR_MASK (DMASCR_MASTER_ABT | DMASCR_TARGET_ABT | DMASCR_SYSTEM_ERR_SIG | DMASCR_PARITY_ERR_DET | DMASCR_MBE_ERR | DMASCR_ANY_ERR) +#define DMASCR_HARD_ERROR (DMASCR_MASTER_ABT | DMASCR_TARGET_ABT | DMASCR_SYSTEM_ERR_SIG | DMASCR_PARITY_ERR_DET | DMASCR_MBE_ERR) + +#define WINDOWMAP_WINNUM 0x7B + +#define DMA_READ_FROM_HOST 0 +#define DMA_WRITE_TO_HOST 1 + +struct mm_dma_desc { + __le64 pci_addr; + __le64 local_addr; + __le32 transfer_size; + u32 zero1; + __le64 next_desc_addr; + __le64 sem_addr; + __le32 control_bits; + u32 zero2; + + dma_addr_t data_dma_handle; + + /* Copy of the bits */ + __le64 sem_control_bits; +} __attribute__((aligned(8))); + +/* bits for card->flags */ +#define UM_FLAG_DMA_IN_REGS 1 +#define UM_FLAG_NO_BYTE_STATUS 2 +#define UM_FLAG_NO_BATTREG 4 +#define UM_FLAG_NO_BATT 8 +#endif diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c new file mode 100644 index 0000000..ecccf65 --- /dev/null +++ b/drivers/block/viodasd.c @@ -0,0 +1,814 @@ +/* -*- linux-c -*- + * viodasd.c + * Authors: Dave Boutcher <boutcher@us.ibm.com> + * Ryan Arnold <ryanarn@us.ibm.com> + * Colin Devilbiss <devilbis@us.ibm.com> + * Stephen Rothwell + * + * (C) Copyright 2000-2004 IBM Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * This routine provides access to disk space (termed "DASD" in historical + * IBM terms) owned and managed by an OS/400 partition running on the + * same box as this Linux partition. + * + * All disk operations are performed by sending messages back and forth to + * the OS/400 partition. + */ +#include <linux/major.h> +#include <linux/fs.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/blkdev.h> +#include <linux/genhd.h> +#include <linux/hdreg.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/string.h> +#include <linux/dma-mapping.h> +#include <linux/completion.h> +#include <linux/device.h> +#include <linux/scatterlist.h> + +#include <asm/uaccess.h> +#include <asm/vio.h> +#include <asm/iseries/hv_types.h> +#include <asm/iseries/hv_lp_event.h> +#include <asm/iseries/hv_lp_config.h> +#include <asm/iseries/vio.h> +#include <asm/firmware.h> + +MODULE_DESCRIPTION("iSeries Virtual DASD"); +MODULE_AUTHOR("Dave Boutcher"); +MODULE_LICENSE("GPL"); + +/* + * We only support 7 partitions per physical disk....so with minor + * numbers 0-255 we get a maximum of 32 disks. 
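+ *
+ * The arithmetic: PARTITION_SHIFT is 3, so each disk consumes
+ * 1 << 3 = 8 minors (the whole-disk node plus 7 partitions), and
+ * 256 / 8 = 32 disks.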
+ */ +#define VIOD_GENHD_NAME "iseries/vd" + +#define VIOD_VERS "1.64" + +#define VIOD_KERN_WARNING KERN_WARNING "viod: " +#define VIOD_KERN_INFO KERN_INFO "viod: " + +enum { + PARTITION_SHIFT = 3, + MAX_DISKNO = HVMAXARCHITECTEDVIRTUALDISKS, + MAX_DISK_NAME = FIELD_SIZEOF(struct gendisk, disk_name) +}; + +static DEFINE_SPINLOCK(viodasd_spinlock); + +#define VIOMAXREQ 16 + +#define DEVICE_NO(cell) ((struct viodasd_device *)(cell) - &viodasd_devices[0]) + +struct viodasd_waitevent { + struct completion com; + int rc; + u16 sub_result; + int max_disk; /* open */ +}; + +static const struct vio_error_entry viodasd_err_table[] = { + { 0x0201, EINVAL, "Invalid Range" }, + { 0x0202, EINVAL, "Invalid Token" }, + { 0x0203, EIO, "DMA Error" }, + { 0x0204, EIO, "Use Error" }, + { 0x0205, EIO, "Release Error" }, + { 0x0206, EINVAL, "Invalid Disk" }, + { 0x0207, EBUSY, "Cant Lock" }, + { 0x0208, EIO, "Already Locked" }, + { 0x0209, EIO, "Already Unlocked" }, + { 0x020A, EIO, "Invalid Arg" }, + { 0x020B, EIO, "Bad IFS File" }, + { 0x020C, EROFS, "Read Only Device" }, + { 0x02FF, EIO, "Internal Error" }, + { 0x0000, 0, NULL }, +}; + +/* + * Figure out the biggest I/O request (in sectors) we can accept + */ +#define VIODASD_MAXSECTORS (4096 / 512 * VIOMAXBLOCKDMA) + +/* + * Number of disk I/O requests we've sent to OS/400 + */ +static int num_req_outstanding; + +/* + * This is our internal structure for keeping track of disk devices + */ +struct viodasd_device { + u16 cylinders; + u16 tracks; + u16 sectors; + u16 bytes_per_sector; + u64 size; + int read_only; + spinlock_t q_lock; + struct gendisk *disk; + struct device *dev; +} viodasd_devices[MAX_DISKNO]; + +/* + * External open entry point. + */ +static int viodasd_open(struct block_device *bdev, fmode_t mode) +{ + struct viodasd_device *d = bdev->bd_disk->private_data; + HvLpEvent_Rc hvrc; + struct viodasd_waitevent we; + u16 flags = 0; + + if (d->read_only) { + if (mode & FMODE_WRITE) + return -EROFS; + flags = vioblockflags_ro; + } + + init_completion(&we.com); + + /* Send the open event to OS/400 */ + hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, + HvLpEvent_Type_VirtualIo, + viomajorsubtype_blockio | vioblockopen, + HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, + viopath_sourceinst(viopath_hostLp), + viopath_targetinst(viopath_hostLp), + (u64)(unsigned long)&we, VIOVERSION << 16, + ((u64)DEVICE_NO(d) << 48) | ((u64)flags << 32), + 0, 0, 0); + if (hvrc != 0) { + printk(VIOD_KERN_WARNING "HV open failed %d\n", (int)hvrc); + return -EIO; + } + + wait_for_completion(&we.com); + + /* Check the return code */ + if (we.rc != 0) { + const struct vio_error_entry *err = + vio_lookup_rc(viodasd_err_table, we.sub_result); + + printk(VIOD_KERN_WARNING + "bad rc opening disk: %d:0x%04x (%s)\n", + (int)we.rc, we.sub_result, err->msg); + return -EIO; + } + + return 0; +} + +/* + * External release entry point. + */ +static int viodasd_release(struct gendisk *disk, fmode_t mode) +{ + struct viodasd_device *d = disk->private_data; + HvLpEvent_Rc hvrc; + + /* Send the event to OS/400. 
We DON'T expect a response */ + hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, + HvLpEvent_Type_VirtualIo, + viomajorsubtype_blockio | vioblockclose, + HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck, + viopath_sourceinst(viopath_hostLp), + viopath_targetinst(viopath_hostLp), + 0, VIOVERSION << 16, + ((u64)DEVICE_NO(d) << 48) /* | ((u64)flags << 32) */, + 0, 0, 0); + if (hvrc != 0) + printk(VIOD_KERN_WARNING "HV close call failed %d\n", + (int)hvrc); + return 0; +} + + +/* External ioctl entry point. + */ +static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + struct gendisk *disk = bdev->bd_disk; + struct viodasd_device *d = disk->private_data; + + geo->sectors = d->sectors ? d->sectors : 32; + geo->heads = d->tracks ? d->tracks : 64; + geo->cylinders = d->cylinders ? d->cylinders : + get_capacity(disk) / (geo->sectors * geo->heads); + + return 0; +} + +/* + * Our file operations table + */ +static struct block_device_operations viodasd_fops = { + .owner = THIS_MODULE, + .open = viodasd_open, + .release = viodasd_release, + .getgeo = viodasd_getgeo, +}; + +/* + * End a request + */ +static void viodasd_end_request(struct request *req, int error, + int num_sectors) +{ + __blk_end_request(req, error, num_sectors << 9); +} + +/* + * Send an actual I/O request to OS/400 + */ +static int send_request(struct request *req) +{ + u64 start; + int direction; + int nsg; + u16 viocmd; + HvLpEvent_Rc hvrc; + struct vioblocklpevent *bevent; + struct HvLpEvent *hev; + struct scatterlist sg[VIOMAXBLOCKDMA]; + int sgindex; + struct viodasd_device *d; + unsigned long flags; + + start = (u64)req->sector << 9; + + if (rq_data_dir(req) == READ) { + direction = DMA_FROM_DEVICE; + viocmd = viomajorsubtype_blockio | vioblockread; + } else { + direction = DMA_TO_DEVICE; + viocmd = viomajorsubtype_blockio | vioblockwrite; + } + + d = req->rq_disk->private_data; + + /* Now build the scatter-gather list */ + sg_init_table(sg, VIOMAXBLOCKDMA); + nsg = blk_rq_map_sg(req->q, req, sg); + nsg = dma_map_sg(d->dev, sg, nsg, direction); + + spin_lock_irqsave(&viodasd_spinlock, flags); + num_req_outstanding++; + + /* This optimization handles a single DMA block */ + if (nsg == 1) + hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, + HvLpEvent_Type_VirtualIo, viocmd, + HvLpEvent_AckInd_DoAck, + HvLpEvent_AckType_ImmediateAck, + viopath_sourceinst(viopath_hostLp), + viopath_targetinst(viopath_hostLp), + (u64)(unsigned long)req, VIOVERSION << 16, + ((u64)DEVICE_NO(d) << 48), start, + ((u64)sg_dma_address(&sg[0])) << 32, + sg_dma_len(&sg[0])); + else { + bevent = (struct vioblocklpevent *) + vio_get_event_buffer(viomajorsubtype_blockio); + if (bevent == NULL) { + printk(VIOD_KERN_WARNING + "error allocating disk event buffer\n"); + goto error_ret; + } + + /* + * Now build up the actual request. 
Note that we store + * the pointer to the request in the correlation + * token so we can match the response up later + */ + memset(bevent, 0, sizeof(struct vioblocklpevent)); + hev = &bevent->event; + hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK | + HV_LP_EVENT_INT; + hev->xType = HvLpEvent_Type_VirtualIo; + hev->xSubtype = viocmd; + hev->xSourceLp = HvLpConfig_getLpIndex(); + hev->xTargetLp = viopath_hostLp; + hev->xSizeMinus1 = + offsetof(struct vioblocklpevent, u.rw_data.dma_info) + + (sizeof(bevent->u.rw_data.dma_info[0]) * nsg) - 1; + hev->xSourceInstanceId = viopath_sourceinst(viopath_hostLp); + hev->xTargetInstanceId = viopath_targetinst(viopath_hostLp); + hev->xCorrelationToken = (u64)req; + bevent->version = VIOVERSION; + bevent->disk = DEVICE_NO(d); + bevent->u.rw_data.offset = start; + + /* + * Copy just the dma information from the sg list + * into the request + */ + for (sgindex = 0; sgindex < nsg; sgindex++) { + bevent->u.rw_data.dma_info[sgindex].token = + sg_dma_address(&sg[sgindex]); + bevent->u.rw_data.dma_info[sgindex].len = + sg_dma_len(&sg[sgindex]); + } + + /* Send the request */ + hvrc = HvCallEvent_signalLpEvent(&bevent->event); + vio_free_event_buffer(viomajorsubtype_blockio, bevent); + } + + if (hvrc != HvLpEvent_Rc_Good) { + printk(VIOD_KERN_WARNING + "error sending disk event to OS/400 (rc %d)\n", + (int)hvrc); + goto error_ret; + } + spin_unlock_irqrestore(&viodasd_spinlock, flags); + return 0; + +error_ret: + num_req_outstanding--; + spin_unlock_irqrestore(&viodasd_spinlock, flags); + dma_unmap_sg(d->dev, sg, nsg, direction); + return -1; +} + +/* + * This is the external request processing routine + */ +static void do_viodasd_request(struct request_queue *q) +{ + struct request *req; + + /* + * If we already have the maximum number of requests + * outstanding to OS/400 just bail out. We'll come + * back later. + */ + while (num_req_outstanding < VIOMAXREQ) { + req = elv_next_request(q); + if (req == NULL) + return; + /* dequeue the current request from the queue */ + blkdev_dequeue_request(req); + /* check that request contains a valid command */ + if (!blk_fs_request(req)) { + viodasd_end_request(req, -EIO, req->hard_nr_sectors); + continue; + } + /* Try sending the request */ + if (send_request(req) != 0) + viodasd_end_request(req, -EIO, req->hard_nr_sectors); + } +} + +/* + * Probe a single disk and fill in the viodasd_device structure + * for it. 
+ */ +static int probe_disk(struct viodasd_device *d) +{ + HvLpEvent_Rc hvrc; + struct viodasd_waitevent we; + int dev_no = DEVICE_NO(d); + struct gendisk *g; + struct request_queue *q; + u16 flags = 0; + +retry: + init_completion(&we.com); + + /* Send the open event to OS/400 */ + hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, + HvLpEvent_Type_VirtualIo, + viomajorsubtype_blockio | vioblockopen, + HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck, + viopath_sourceinst(viopath_hostLp), + viopath_targetinst(viopath_hostLp), + (u64)(unsigned long)&we, VIOVERSION << 16, + ((u64)dev_no << 48) | ((u64)flags<< 32), + 0, 0, 0); + if (hvrc != 0) { + printk(VIOD_KERN_WARNING "bad rc on HV open %d\n", (int)hvrc); + return 0; + } + + wait_for_completion(&we.com); + + if (we.rc != 0) { + if (flags != 0) + return 0; + /* try again with read only flag set */ + flags = vioblockflags_ro; + goto retry; + } + if (we.max_disk > (MAX_DISKNO - 1)) { + static int warned; + + if (warned == 0) { + warned++; + printk(VIOD_KERN_INFO + "Only examining the first %d " + "of %d disks connected\n", + MAX_DISKNO, we.max_disk + 1); + } + } + + /* Send the close event to OS/400. We DON'T expect a response */ + hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, + HvLpEvent_Type_VirtualIo, + viomajorsubtype_blockio | vioblockclose, + HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck, + viopath_sourceinst(viopath_hostLp), + viopath_targetinst(viopath_hostLp), + 0, VIOVERSION << 16, + ((u64)dev_no << 48) | ((u64)flags << 32), + 0, 0, 0); + if (hvrc != 0) { + printk(VIOD_KERN_WARNING + "bad rc sending event to OS/400 %d\n", (int)hvrc); + return 0; + } + + if (d->dev == NULL) { + /* this is when we reprobe for new disks */ + if (vio_create_viodasd(dev_no) == NULL) { + printk(VIOD_KERN_WARNING + "cannot allocate virtual device for disk %d\n", + dev_no); + return 0; + } + /* + * The vio_create_viodasd will have recursed into this + * routine with d->dev set to the new vio device and + * will finish the setup of the disk below. + */ + return 1; + } + + /* create the request queue for the disk */ + spin_lock_init(&d->q_lock); + q = blk_init_queue(do_viodasd_request, &d->q_lock); + if (q == NULL) { + printk(VIOD_KERN_WARNING "cannot allocate queue for disk %d\n", + dev_no); + return 0; + } + g = alloc_disk(1 << PARTITION_SHIFT); + if (g == NULL) { + printk(VIOD_KERN_WARNING + "cannot allocate disk structure for disk %d\n", + dev_no); + blk_cleanup_queue(q); + return 0; + } + + d->disk = g; + blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA); + blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA); + blk_queue_max_sectors(q, VIODASD_MAXSECTORS); + g->major = VIODASD_MAJOR; + g->first_minor = dev_no << PARTITION_SHIFT; + if (dev_no >= 26) + snprintf(g->disk_name, sizeof(g->disk_name), + VIOD_GENHD_NAME "%c%c", + 'a' + (dev_no / 26) - 1, 'a' + (dev_no % 26)); + else + snprintf(g->disk_name, sizeof(g->disk_name), + VIOD_GENHD_NAME "%c", 'a' + (dev_no % 26)); + g->fops = &viodasd_fops; + g->queue = q; + g->private_data = d; + g->driverfs_dev = d->dev; + set_capacity(g, d->size >> 9); + + printk(VIOD_KERN_INFO "disk %d: %lu sectors (%lu MB) " + "CHS=%d/%d/%d sector size %d%s\n", + dev_no, (unsigned long)(d->size >> 9), + (unsigned long)(d->size >> 20), + (int)d->cylinders, (int)d->tracks, + (int)d->sectors, (int)d->bytes_per_sector, + d->read_only ? 
" (RO)" : ""); + + /* register us in the global list */ + add_disk(g); + return 1; +} + +/* returns the total number of scatterlist elements converted */ +static int block_event_to_scatterlist(const struct vioblocklpevent *bevent, + struct scatterlist *sg, int *total_len) +{ + int i, numsg; + const struct rw_data *rw_data = &bevent->u.rw_data; + static const int offset = + offsetof(struct vioblocklpevent, u.rw_data.dma_info); + static const int element_size = sizeof(rw_data->dma_info[0]); + + numsg = ((bevent->event.xSizeMinus1 + 1) - offset) / element_size; + if (numsg > VIOMAXBLOCKDMA) + numsg = VIOMAXBLOCKDMA; + + *total_len = 0; + sg_init_table(sg, VIOMAXBLOCKDMA); + for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) { + sg_dma_address(&sg[i]) = rw_data->dma_info[i].token; + sg_dma_len(&sg[i]) = rw_data->dma_info[i].len; + *total_len += rw_data->dma_info[i].len; + } + return i; +} + +/* + * Restart all queues, starting with the one _after_ the disk given, + * thus reducing the chance of starvation of higher numbered disks. + */ +static void viodasd_restart_all_queues_starting_from(int first_index) +{ + int i; + + for (i = first_index + 1; i < MAX_DISKNO; ++i) + if (viodasd_devices[i].disk) + blk_run_queue(viodasd_devices[i].disk->queue); + for (i = 0; i <= first_index; ++i) + if (viodasd_devices[i].disk) + blk_run_queue(viodasd_devices[i].disk->queue); +} + +/* + * For read and write requests, decrement the number of outstanding requests, + * Free the DMA buffers we allocated. + */ +static int viodasd_handle_read_write(struct vioblocklpevent *bevent) +{ + int num_sg, num_sect, pci_direction, total_len; + struct request *req; + struct scatterlist sg[VIOMAXBLOCKDMA]; + struct HvLpEvent *event = &bevent->event; + unsigned long irq_flags; + struct viodasd_device *d; + int error; + spinlock_t *qlock; + + num_sg = block_event_to_scatterlist(bevent, sg, &total_len); + num_sect = total_len >> 9; + if (event->xSubtype == (viomajorsubtype_blockio | vioblockread)) + pci_direction = DMA_FROM_DEVICE; + else + pci_direction = DMA_TO_DEVICE; + req = (struct request *)bevent->event.xCorrelationToken; + d = req->rq_disk->private_data; + + dma_unmap_sg(d->dev, sg, num_sg, pci_direction); + + /* + * Since this is running in interrupt mode, we need to make sure + * we're not stepping on any global I/O operations + */ + spin_lock_irqsave(&viodasd_spinlock, irq_flags); + num_req_outstanding--; + spin_unlock_irqrestore(&viodasd_spinlock, irq_flags); + + error = (event->xRc == HvLpEvent_Rc_Good) ? 0 : -EIO; + if (error) { + const struct vio_error_entry *err; + err = vio_lookup_rc(viodasd_err_table, bevent->sub_result); + printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n", + event->xRc, bevent->sub_result, err->msg); + num_sect = req->hard_nr_sectors; + } + qlock = req->q->queue_lock; + spin_lock_irqsave(qlock, irq_flags); + viodasd_end_request(req, error, num_sect); + spin_unlock_irqrestore(qlock, irq_flags); + + /* Finally, try to get more requests off of this device's queue */ + viodasd_restart_all_queues_starting_from(DEVICE_NO(d)); + + return 0; +} + +/* This routine handles incoming block LP events */ +static void handle_block_event(struct HvLpEvent *event) +{ + struct vioblocklpevent *bevent = (struct vioblocklpevent *)event; + struct viodasd_waitevent *pwe; + + if (event == NULL) + /* Notification that a partition went away! */ + return; + /* First, we should NEVER get an int here...only acks */ + if (hvlpevent_is_int(event)) { + printk(VIOD_KERN_WARNING + "Yikes! 
got an int in viodasd event handler!\n"); + if (hvlpevent_need_ack(event)) { + event->xRc = HvLpEvent_Rc_InvalidSubtype; + HvCallEvent_ackLpEvent(event); + } + } + + switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) { + case vioblockopen: + /* + * Handle a response to an open request. We get all the + * disk information in the response, so update it. The + * correlation token contains a pointer to a waitevent + * structure that has a completion in it. update the + * return code in the waitevent structure and post the + * completion to wake up the guy who sent the request + */ + pwe = (struct viodasd_waitevent *)event->xCorrelationToken; + pwe->rc = event->xRc; + pwe->sub_result = bevent->sub_result; + if (event->xRc == HvLpEvent_Rc_Good) { + const struct open_data *data = &bevent->u.open_data; + struct viodasd_device *device = + &viodasd_devices[bevent->disk]; + device->read_only = + bevent->flags & vioblockflags_ro; + device->size = data->disk_size; + device->cylinders = data->cylinders; + device->tracks = data->tracks; + device->sectors = data->sectors; + device->bytes_per_sector = data->bytes_per_sector; + pwe->max_disk = data->max_disk; + } + complete(&pwe->com); + break; + case vioblockclose: + break; + case vioblockread: + case vioblockwrite: + viodasd_handle_read_write(bevent); + break; + + default: + printk(VIOD_KERN_WARNING "invalid subtype!"); + if (hvlpevent_need_ack(event)) { + event->xRc = HvLpEvent_Rc_InvalidSubtype; + HvCallEvent_ackLpEvent(event); + } + } +} + +/* + * Get the driver to reprobe for more disks. + */ +static ssize_t probe_disks(struct device_driver *drv, const char *buf, + size_t count) +{ + struct viodasd_device *d; + + for (d = viodasd_devices; d < &viodasd_devices[MAX_DISKNO]; d++) { + if (d->disk == NULL) + probe_disk(d); + } + return count; +} +static DRIVER_ATTR(probe, S_IWUSR, NULL, probe_disks); + +static int viodasd_probe(struct vio_dev *vdev, const struct vio_device_id *id) +{ + struct viodasd_device *d = &viodasd_devices[vdev->unit_address]; + + d->dev = &vdev->dev; + if (!probe_disk(d)) + return -ENODEV; + return 0; +} + +static int viodasd_remove(struct vio_dev *vdev) +{ + struct viodasd_device *d; + + d = &viodasd_devices[vdev->unit_address]; + if (d->disk) { + del_gendisk(d->disk); + blk_cleanup_queue(d->disk->queue); + put_disk(d->disk); + d->disk = NULL; + } + d->dev = NULL; + return 0; +} + +/** + * viodasd_device_table: Used by vio.c to match devices that we + * support. + */ +static struct vio_device_id viodasd_device_table[] __devinitdata = { + { "block", "IBM,iSeries-viodasd" }, + { "", "" } +}; +MODULE_DEVICE_TABLE(vio, viodasd_device_table); + +static struct vio_driver viodasd_driver = { + .id_table = viodasd_device_table, + .probe = viodasd_probe, + .remove = viodasd_remove, + .driver = { + .name = "viodasd", + .owner = THIS_MODULE, + } +}; + +static int need_delete_probe; + +/* + * Initialize the whole device driver. 
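*/
+
+/*
+ * Editor's note: the DRIVER_ATTR(probe, ...) declaration above creates a
+ * write-only sysfs attribute on the driver, typically visible as
+ * /sys/bus/vio/drivers/viodasd/probe (path assumed, not taken from this
+ * file).  Writing anything to it, for example
+ *
+ *	echo 1 > /sys/bus/vio/drivers/viodasd/probe
+ *
+ * re-runs probe_disk() for every slot that has no gendisk yet, which is
+ * how disks added to the hosting partition at runtime get picked up.
+ */
+
+/*
+ * viodasd_init and viodasd_exit: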
Handle module and non-module + * versions + */ +static int __init viodasd_init(void) +{ + int rc; + + if (!firmware_has_feature(FW_FEATURE_ISERIES)) { + rc = -ENODEV; + goto early_fail; + } + + /* Try to open to our host lp */ + if (viopath_hostLp == HvLpIndexInvalid) + vio_set_hostlp(); + + if (viopath_hostLp == HvLpIndexInvalid) { + printk(VIOD_KERN_WARNING "invalid hosting partition\n"); + rc = -EIO; + goto early_fail; + } + + printk(VIOD_KERN_INFO "vers " VIOD_VERS ", hosting partition %d\n", + viopath_hostLp); + + /* register the block device */ + rc = register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME); + if (rc) { + printk(VIOD_KERN_WARNING + "Unable to get major number %d for %s\n", + VIODASD_MAJOR, VIOD_GENHD_NAME); + goto early_fail; + } + /* Actually open the path to the hosting partition */ + rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio, + VIOMAXREQ + 2); + if (rc) { + printk(VIOD_KERN_WARNING + "error opening path to host partition %d\n", + viopath_hostLp); + goto unregister_blk; + } + + /* Initialize our request handler */ + vio_setHandler(viomajorsubtype_blockio, handle_block_event); + + rc = vio_register_driver(&viodasd_driver); + if (rc) { + printk(VIOD_KERN_WARNING "vio_register_driver failed\n"); + goto unset_handler; + } + + /* + * If this call fails, it just means that we cannot dynamically + * add virtual disks, but the driver will still work fine for + * all existing disk, so ignore the failure. + */ + if (!driver_create_file(&viodasd_driver.driver, &driver_attr_probe)) + need_delete_probe = 1; + + return 0; + +unset_handler: + vio_clearHandler(viomajorsubtype_blockio); + viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2); +unregister_blk: + unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME); +early_fail: + return rc; +} +module_init(viodasd_init); + +void __exit viodasd_exit(void) +{ + if (need_delete_probe) + driver_remove_file(&viodasd_driver.driver, &driver_attr_probe); + vio_unregister_driver(&viodasd_driver); + vio_clearHandler(viomajorsubtype_blockio); + viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2); + unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME); +} +module_exit(viodasd_exit); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c new file mode 100644 index 0000000..85d79a0 --- /dev/null +++ b/drivers/block/virtio_blk.c @@ -0,0 +1,372 @@ +//#define DEBUG +#include <linux/spinlock.h> +#include <linux/blkdev.h> +#include <linux/hdreg.h> +#include <linux/virtio.h> +#include <linux/virtio_blk.h> +#include <linux/scatterlist.h> + +#define VIRTIO_MAX_SG (3+MAX_PHYS_SEGMENTS) +#define PART_BITS 4 + +static int major, index; + +struct virtio_blk +{ + spinlock_t lock; + + struct virtio_device *vdev; + struct virtqueue *vq; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Request tracking. */ + struct list_head reqs; + + mempool_t *pool; + + /* Scatterlist: can be too big for stack. 
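+	   It must also hold the out_hdr and status entries that do_req()
+	   wraps around the data, in addition to the data segments
+	   themselves -- hence the "3 +" headroom in VIRTIO_MAX_SG.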
*/ + struct scatterlist sg[VIRTIO_MAX_SG]; +}; + +struct virtblk_req +{ + struct list_head list; + struct request *req; + struct virtio_blk_outhdr out_hdr; + u8 status; +}; + +static void blk_done(struct virtqueue *vq) +{ + struct virtio_blk *vblk = vq->vdev->priv; + struct virtblk_req *vbr; + unsigned int len; + unsigned long flags; + + spin_lock_irqsave(&vblk->lock, flags); + while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) { + int error; + switch (vbr->status) { + case VIRTIO_BLK_S_OK: + error = 0; + break; + case VIRTIO_BLK_S_UNSUPP: + error = -ENOTTY; + break; + default: + error = -EIO; + break; + } + + __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req)); + list_del(&vbr->list); + mempool_free(vbr, vblk->pool); + } + /* In case queue is stopped waiting for more buffers. */ + blk_start_queue(vblk->disk->queue); + spin_unlock_irqrestore(&vblk->lock, flags); +} + +static bool do_req(struct request_queue *q, struct virtio_blk *vblk, + struct request *req) +{ + unsigned long num, out, in; + struct virtblk_req *vbr; + + vbr = mempool_alloc(vblk->pool, GFP_ATOMIC); + if (!vbr) + /* When another request finishes we'll try again. */ + return false; + + vbr->req = req; + if (blk_fs_request(vbr->req)) { + vbr->out_hdr.type = 0; + vbr->out_hdr.sector = vbr->req->sector; + vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); + } else if (blk_pc_request(vbr->req)) { + vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; + vbr->out_hdr.sector = 0; + vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); + } else { + /* We don't put anything else in the queue. */ + BUG(); + } + + if (blk_barrier_rq(vbr->req)) + vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; + + /* This init could be done at vblk creation time */ + sg_init_table(vblk->sg, VIRTIO_MAX_SG); + sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr)); + num = blk_rq_map_sg(q, vbr->req, vblk->sg+1); + sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status)); + + if (rq_data_dir(vbr->req) == WRITE) { + vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; + out = 1 + num; + in = 1; + } else { + vbr->out_hdr.type |= VIRTIO_BLK_T_IN; + out = 1; + in = 1 + num; + } + + if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) { + mempool_free(vbr, vblk->pool); + return false; + } + + list_add_tail(&vbr->list, &vblk->reqs); + return true; +} + +static void do_virtblk_request(struct request_queue *q) +{ + struct virtio_blk *vblk = NULL; + struct request *req; + unsigned int issued = 0; + + while ((req = elv_next_request(q)) != NULL) { + vblk = req->rq_disk->private_data; + BUG_ON(req->nr_phys_segments > ARRAY_SIZE(vblk->sg)); + + /* If this request fails, stop queue and wait for something to + finish to restart it. 
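+		   do_req() fails either because the mempool has no free
+		   request tracker or because add_buf() found the virtqueue
+		   full; either way, blk_done() calls blk_start_queue() when
+		   a request completes, so the stopped queue is re-entered.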
*/ + if (!do_req(q, vblk, req)) { + blk_stop_queue(q); + break; + } + blkdev_dequeue_request(req); + issued++; + } + + if (issued) + vblk->vq->vq_ops->kick(vblk->vq); +} + +static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, + unsigned cmd, unsigned long data) +{ + return scsi_cmd_ioctl(bdev->bd_disk->queue, + bdev->bd_disk, mode, cmd, + (void __user *)data); +} + +/* We provide getgeo only to please some old bootloader/partitioning tools */ +static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) +{ + struct virtio_blk *vblk = bd->bd_disk->private_data; + struct virtio_blk_geometry vgeo; + int err; + + /* see if the host passed in geometry config */ + err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY, + offsetof(struct virtio_blk_config, geometry), + &vgeo); + + if (!err) { + geo->heads = vgeo.heads; + geo->sectors = vgeo.sectors; + geo->cylinders = vgeo.cylinders; + } else { + /* some standard values, similar to sd */ + geo->heads = 1 << 6; + geo->sectors = 1 << 5; + geo->cylinders = get_capacity(bd->bd_disk) >> 11; + } + return 0; +} + +static struct block_device_operations virtblk_fops = { + .locked_ioctl = virtblk_ioctl, + .owner = THIS_MODULE, + .getgeo = virtblk_getgeo, +}; + +static int index_to_minor(int index) +{ + return index << PART_BITS; +} + +static int virtblk_probe(struct virtio_device *vdev) +{ + struct virtio_blk *vblk; + int err; + u64 cap; + u32 v; + u32 blk_size; + + if (index_to_minor(index) >= 1 << MINORBITS) + return -ENOSPC; + + vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); + if (!vblk) { + err = -ENOMEM; + goto out; + } + + INIT_LIST_HEAD(&vblk->reqs); + spin_lock_init(&vblk->lock); + vblk->vdev = vdev; + + /* We expect one virtqueue, for output. */ + vblk->vq = vdev->config->find_vq(vdev, 0, blk_done); + if (IS_ERR(vblk->vq)) { + err = PTR_ERR(vblk->vq); + goto out_free_vblk; + } + + vblk->pool = mempool_create_kmalloc_pool(1,sizeof(struct virtblk_req)); + if (!vblk->pool) { + err = -ENOMEM; + goto out_free_vq; + } + + /* FIXME: How many partitions? How long is a piece of string? */ + vblk->disk = alloc_disk(1 << PART_BITS); + if (!vblk->disk) { + err = -ENOMEM; + goto out_mempool; + } + + vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock); + if (!vblk->disk->queue) { + err = -ENOMEM; + goto out_put_disk; + } + + if (index < 26) { + sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26); + } else if (index < (26 + 1) * 26) { + sprintf(vblk->disk->disk_name, "vd%c%c", + 'a' + index / 26 - 1, 'a' + index % 26); + } else { + const unsigned int m1 = (index / 26 - 1) / 26 - 1; + const unsigned int m2 = (index / 26 - 1) % 26; + const unsigned int m3 = index % 26; + sprintf(vblk->disk->disk_name, "vd%c%c%c", + 'a' + m1, 'a' + m2, 'a' + m3); + } + + vblk->disk->major = major; + vblk->disk->first_minor = index_to_minor(index); + vblk->disk->private_data = vblk; + vblk->disk->fops = &virtblk_fops; + vblk->disk->driverfs_dev = &vdev->dev; + index++; + + /* If barriers are supported, tell block layer that queue is ordered */ + if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) + blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL); + + /* If disk is read-only in the host, the guest should obey */ + if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) + set_disk_ro(vblk->disk, 1); + + /* Host must always specify the capacity. */ + vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity), + &cap, sizeof(cap)); + + /* If capacity is too big, truncate with warning. 
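+	   This can only happen when sector_t is 32 bits wide (a 32-bit
+	   build without large-block-device support): the host reports the
+	   capacity as a u64 count of 512-byte sectors, so 2TB and up do
+	   not fit; clamp to the largest representable value.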
*/ + if ((sector_t)cap != cap) { + dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n", + (unsigned long long)cap); + cap = (sector_t)-1; + } + set_capacity(vblk->disk, cap); + + /* Host can optionally specify maximum segment size and number of + * segments. */ + err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX, + offsetof(struct virtio_blk_config, size_max), + &v); + if (!err) + blk_queue_max_segment_size(vblk->disk->queue, v); + + err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX, + offsetof(struct virtio_blk_config, seg_max), + &v); + if (!err) + blk_queue_max_hw_segments(vblk->disk->queue, v); + + /* Host can optionally specify the block size of the device */ + err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE, + offsetof(struct virtio_blk_config, blk_size), + &blk_size); + if (!err) + blk_queue_hardsect_size(vblk->disk->queue, blk_size); + + add_disk(vblk->disk); + return 0; + +out_put_disk: + put_disk(vblk->disk); +out_mempool: + mempool_destroy(vblk->pool); +out_free_vq: + vdev->config->del_vq(vblk->vq); +out_free_vblk: + kfree(vblk); +out: + return err; +} + +static void virtblk_remove(struct virtio_device *vdev) +{ + struct virtio_blk *vblk = vdev->priv; + + /* Nothing should be pending. */ + BUG_ON(!list_empty(&vblk->reqs)); + + /* Stop all the virtqueues. */ + vdev->config->reset(vdev); + + del_gendisk(vblk->disk); + blk_cleanup_queue(vblk->disk->queue); + put_disk(vblk->disk); + mempool_destroy(vblk->pool); + vdev->config->del_vq(vblk->vq); + kfree(vblk); +} + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static unsigned int features[] = { + VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, + VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, +}; + +static struct virtio_driver virtio_blk = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtblk_probe, + .remove = __devexit_p(virtblk_remove), +}; + +static int __init init(void) +{ + major = register_blkdev(0, "virtblk"); + if (major < 0) + return major; + return register_virtio_driver(&virtio_blk); +} + +static void __exit fini(void) +{ + unregister_blkdev(major, "virtblk"); + unregister_virtio_driver(&virtio_blk); +} +module_init(init); +module_exit(fini); + +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Virtio block driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/block/xd.c b/drivers/block/xd.c new file mode 100644 index 0000000..64b496f --- /dev/null +++ b/drivers/block/xd.c @@ -0,0 +1,1106 @@ +/* + * This file contains the driver for an XT hard disk controller + * (at least the DTC 5150X) for Linux. + * + * Author: Pat Mackinlay, pat@it.com.au + * Date: 29/09/92 + * + * Revised: 01/01/93, ... + * + * Ref: DTC 5150X Controller Specification (thanks to Kevin Fowler, + * kevinf@agora.rain.com) + * Also thanks to: Salvador Abreu, Dave Thaler, Risto Kankkunen and + * Wim Van Dorst. + * + * Revised: 04/04/94 by Risto Kankkunen + * Moved the detection code from xd_init() to xd_geninit() as it needed + * interrupts enabled and Linus didn't want to enable them in that first + * phase. xd_geninit() is the place to do these kinds of things anyway, + * he says. + * + * Modularized: 04/10/96 by Todd Fries, tfries@umr.edu + * + * Revised: 13/12/97 by Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl + * Fixed some problems with disk initialization and module initiation. 
+ * Added support for manual geometry setting (except Seagate controllers)
+ * in form:
+ * xd_geo=<cyl_xda>,<head_xda>,<sec_xda>[,<cyl_xdb>,<head_xdb>,<sec_xdb>]
+ * Recovered DMA access. Abridged messages. Added support for DTC5051CX,
+ * WD1002-27X & XEBEC controllers. The driver now uses some jumper settings.
+ * Extended ioctl() support.
+ *
+ * Bugfix: 15/02/01, Paul G. - inform queue layer of tiny xd_maxsect.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/genhd.h>
+#include <linux/hdreg.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/dma.h>
+
+#include "xd.h"
+
+static void __init do_xd_setup (int *integers);
+#ifdef MODULE
+static int xd[5] = { -1,-1,-1,-1, };
+#endif
+
+#define XD_DONT_USE_DMA 0 /* Initial value. May be overridden using
+ the "nodma" module option */
+#define XD_INIT_DISK_DELAY (30) /* 30 ms delay during disk initialization */
+
+/* The above may need to be increased if problems appear with 2nd drive
+ detection (ST11M controller) or with resetting a controller (WD) */
+
+static XD_INFO xd_info[XD_MAXDRIVES];
+
+/* If you try this driver and find that your card is not detected by the driver at bootup, you need to add your BIOS
+ signature and details to the following list of signatures. A BIOS signature is a string embedded into the first
+ few bytes of your controller's on-board ROM BIOS. To find out what yours is, use something like MS-DOS's DEBUG
+ command. Run DEBUG, and then you can examine your BIOS signature with:
+
+ d xxxx:0000
+
+ where xxxx is the segment of your controller (like C800 or D000 or something). On the ASCII dump at the right, you should
+ be able to see a string mentioning the manufacturer's copyright etc. Add this string into the table below. The parameters
+ in the table are, in order:
+
+ offset ; this is the offset (in bytes) from the start of your ROM where the signature starts
+ signature ; this is the actual text of the signature
+ xd_?_init_controller ; this is the controller init routine used by your controller
+ xd_?_init_drive ; this is the drive init routine used by your controller
+
+ The controllers directly supported at the moment are: DTC 5150x, WD 1004A27X, ST11M/R and override. If your controller is
+ made by the same manufacturer as one of these, try using the same init routines as they do. If that doesn't work, your
+ best bet is to use the "override" routines. These routines use a "portable" method of getting the disk's geometry, and
+ may work with your card. If none of these seem to work, try sending me some email and I'll see what I can do <grin>.
+
+ NOTE: You can now specify your XT controller's parameters from the command line in the form xd=TYPE,IRQ,IO,DMA. The driver
+ should be able to detect your drive's geometry from this info. (eg: xd=0,5,0x320,3 is the "standard").
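+
+ Editor's note: purely to illustrate the format (the offset, signature and
+ name here are invented, not a real controller), a new xd_sigs[] entry
+ would look like:
+
+	{ 0x0008, "(C) 1985 Example Disk Corp.", NULL,
+	  xd_override_init_drive, "n Example" },
+
+ A NULL init_controller routine is allowed (the override entry at the top
+ of the table uses one), and the name string starts with whatever text
+ completes the "Detected a%s controller" message -- which is why the real
+ entries begin with a space or with "n ".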
*/ + +#include <asm/page.h> +#define xd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,get_order(size)) +#define xd_dma_mem_free(addr, size) free_pages(addr, get_order(size)) +static char *xd_dma_buffer; + +static XD_SIGNATURE xd_sigs[] __initdata = { + { 0x0000,"Override geometry handler",NULL,xd_override_init_drive,"n unknown" }, /* Pat Mackinlay, pat@it.com.au */ + { 0x0008,"[BXD06 (C) DTC 17-MAY-1985]",xd_dtc_init_controller,xd_dtc5150cx_init_drive," DTC 5150CX" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */ + { 0x000B,"CRD18A Not an IBM rom. (C) Copyright Data Technology Corp. 05/31/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Todd Fries, tfries@umr.edu */ + { 0x000B,"CXD23A Not an IBM ROM (C)Copyright Data Technology Corp 12/03/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Pat Mackinlay, pat@it.com.au */ + { 0x0008,"07/15/86(C) Copyright 1986 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. 1002-27X" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */ + { 0x0008,"06/24/88(C) Copyright 1988 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. WDXT-GEN2" }, /* Dan Newcombe, newcombe@aa.csc.peachnet.edu */ + { 0x0015,"SEAGATE ST11 BIOS REVISION",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Salvador Abreu, spa@fct.unl.pt */ + { 0x0010,"ST11R BIOS",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Risto Kankkunen, risto.kankkunen@cs.helsinki.fi */ + { 0x0010,"ST11 BIOS v1.7",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11R" }, /* Alan Hourihane, alanh@fairlite.demon.co.uk */ + { 0x1000,"(c)Copyright 1987 SMS",xd_omti_init_controller,xd_omti_init_drive,"n OMTI 5520" }, /* Dirk Melchers, dirk@merlin.nbg.sub.org */ + { 0x0006,"COPYRIGHT XEBEC (C) 1984",xd_xebec_init_controller,xd_xebec_init_drive," XEBEC" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */ + { 0x0008,"(C) Copyright 1984 Western Digital Corp", xd_wd_init_controller, xd_wd_init_drive," Western Dig. 
1002s-wx2" }, + { 0x0008,"(C) Copyright 1986 Western Digital Corporation", xd_wd_init_controller, xd_wd_init_drive," 1986 Western Digital" }, /* jfree@sovereign.org */ +}; + +static unsigned int xd_bases[] __initdata = +{ + 0xC8000, 0xCA000, 0xCC000, + 0xCE000, 0xD0000, 0xD2000, + 0xD4000, 0xD6000, 0xD8000, + 0xDA000, 0xDC000, 0xDE000, + 0xE0000 +}; + +static DEFINE_SPINLOCK(xd_lock); + +static struct gendisk *xd_gendisk[2]; + +static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo); + +static struct block_device_operations xd_fops = { + .owner = THIS_MODULE, + .locked_ioctl = xd_ioctl, + .getgeo = xd_getgeo, +}; +static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int); +static u_char xd_drives, xd_irq = 5, xd_dma = 3, xd_maxsectors; +static u_char xd_override __initdata = 0, xd_type __initdata = 0; +static u_short xd_iobase = 0x320; +static int xd_geo[XD_MAXDRIVES*3] __initdata = { 0, }; + +static volatile int xdc_busy; +static struct timer_list xd_watchdog_int; + +static volatile u_char xd_error; +static int nodma = XD_DONT_USE_DMA; + +static struct request_queue *xd_queue; + +/* xd_init: register the block device number and set up pointer tables */ +static int __init xd_init(void) +{ + u_char i,controller; + unsigned int address; + int err; + +#ifdef MODULE + { + u_char count = 0; + for (i = 4; i > 0; i--) + if (((xd[i] = xd[i-1]) >= 0) && !count) + count = i; + if ((xd[0] = count)) + do_xd_setup(xd); + } +#endif + + init_timer (&xd_watchdog_int); xd_watchdog_int.function = xd_watchdog; + + if (!xd_dma_buffer) + xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200); + if (!xd_dma_buffer) { + printk(KERN_ERR "xd: Out of memory.\n"); + return -ENOMEM; + } + + err = -EBUSY; + if (register_blkdev(XT_DISK_MAJOR, "xd")) + goto out1; + + err = -ENOMEM; + xd_queue = blk_init_queue(do_xd_request, &xd_lock); + if (!xd_queue) + goto out1a; + + if (xd_detect(&controller,&address)) { + + printk("Detected a%s controller (type %d) at address %06x\n", + xd_sigs[controller].name,controller,address); + if (!request_region(xd_iobase,4,"xd")) { + printk("xd: Ports at 0x%x are not available\n", + xd_iobase); + goto out2; + } + if (controller) + xd_sigs[controller].init_controller(address); + xd_drives = xd_initdrives(xd_sigs[controller].init_drive); + + printk("Detected %d hard drive%s (using IRQ%d & DMA%d)\n", + xd_drives,xd_drives == 1 ? 
"" : "s",xd_irq,xd_dma); + } + + err = -ENODEV; + if (!xd_drives) + goto out3; + + for (i = 0; i < xd_drives; i++) { + XD_INFO *p = &xd_info[i]; + struct gendisk *disk = alloc_disk(64); + if (!disk) + goto Enomem; + p->unit = i; + disk->major = XT_DISK_MAJOR; + disk->first_minor = i<<6; + sprintf(disk->disk_name, "xd%c", i+'a'); + disk->fops = &xd_fops; + disk->private_data = p; + disk->queue = xd_queue; + set_capacity(disk, p->heads * p->cylinders * p->sectors); + printk(" %s: CHS=%d/%d/%d\n", disk->disk_name, + p->cylinders, p->heads, p->sectors); + xd_gendisk[i] = disk; + } + + err = -EBUSY; + if (request_irq(xd_irq,xd_interrupt_handler, 0, "XT hard disk", NULL)) { + printk("xd: unable to get IRQ%d\n",xd_irq); + goto out4; + } + + if (request_dma(xd_dma,"xd")) { + printk("xd: unable to get DMA%d\n",xd_dma); + goto out5; + } + + /* xd_maxsectors depends on controller - so set after detection */ + blk_queue_max_sectors(xd_queue, xd_maxsectors); + + for (i = 0; i < xd_drives; i++) + add_disk(xd_gendisk[i]); + + return 0; + +out5: + free_irq(xd_irq, NULL); +out4: + for (i = 0; i < xd_drives; i++) + put_disk(xd_gendisk[i]); +out3: + release_region(xd_iobase,4); +out2: + blk_cleanup_queue(xd_queue); +out1a: + unregister_blkdev(XT_DISK_MAJOR, "xd"); +out1: + if (xd_dma_buffer) + xd_dma_mem_free((unsigned long)xd_dma_buffer, + xd_maxsectors * 0x200); + return err; +Enomem: + err = -ENOMEM; + while (i--) + put_disk(xd_gendisk[i]); + goto out3; +} + +/* xd_detect: scan the possible BIOS ROM locations for the signature strings */ +static u_char __init xd_detect (u_char *controller, unsigned int *address) +{ + int i, j; + + if (xd_override) + { + *controller = xd_type; + *address = 0; + return(1); + } + + for (i = 0; i < ARRAY_SIZE(xd_bases); i++) { + void __iomem *p = ioremap(xd_bases[i], 0x2000); + if (!p) + continue; + for (j = 1; j < ARRAY_SIZE(xd_sigs); j++) { + const char *s = xd_sigs[j].string; + if (check_signature(p + xd_sigs[j].offset, s, strlen(s))) { + *controller = j; + xd_type = j; + *address = xd_bases[i]; + iounmap(p); + return 1; + } + } + iounmap(p); + } + return 0; +} + +/* do_xd_request: handle an incoming request */ +static void do_xd_request (struct request_queue * q) +{ + struct request *req; + + if (xdc_busy) + return; + + while ((req = elv_next_request(q)) != NULL) { + unsigned block = req->sector; + unsigned count = req->nr_sectors; + int rw = rq_data_dir(req); + XD_INFO *disk = req->rq_disk->private_data; + int res = 0; + int retry; + + if (!blk_fs_request(req)) { + end_request(req, 0); + continue; + } + if (block + count > get_capacity(req->rq_disk)) { + end_request(req, 0); + continue; + } + if (rw != READ && rw != WRITE) { + printk("do_xd_request: unknown request\n"); + end_request(req, 0); + continue; + } + for (retry = 0; (retry < XD_RETRIES) && !res; retry++) + res = xd_readwrite(rw, disk, req->buffer, block, count); + end_request(req, res); /* wrap up, 0 = fail, 1 = success */ + } +} + +static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo) +{ + XD_INFO *p = bdev->bd_disk->private_data; + + geo->heads = p->heads; + geo->sectors = p->sectors; + geo->cylinders = p->cylinders; + return 0; +} + +/* xd_ioctl: handle device ioctl's */ +static int xd_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg) +{ + switch (cmd) { + case HDIO_SET_DMA: + if (!capable(CAP_SYS_ADMIN)) return -EACCES; + if (xdc_busy) return -EBUSY; + nodma = !arg; + if (nodma && xd_dma_buffer) { + xd_dma_mem_free((unsigned long)xd_dma_buffer, + xd_maxsectors * 
0x200); + xd_dma_buffer = NULL; + } else if (!nodma && !xd_dma_buffer) { + xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200); + if (!xd_dma_buffer) { + nodma = XD_DONT_USE_DMA; + return -ENOMEM; + } + } + return 0; + case HDIO_GET_DMA: + return put_user(!nodma, (long __user *) arg); + case HDIO_GET_MULTCOUNT: + return put_user(xd_maxsectors, (long __user *) arg); + default: + return -EINVAL; + } +} + +/* xd_readwrite: handle a read/write request */ +static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_int count) +{ + int drive = p->unit; + u_char cmdblk[6],sense[4]; + u_short track,cylinder; + u_char head,sector,control,mode = PIO_MODE,temp; + char **real_buffer; + register int i; + +#ifdef DEBUG_READWRITE + printk("xd_readwrite: operation = %s, drive = %d, buffer = 0x%X, block = %d, count = %d\n",operation == READ ? "read" : "write",drive,buffer,block,count); +#endif /* DEBUG_READWRITE */ + + spin_unlock_irq(&xd_lock); + + control = p->control; + if (!xd_dma_buffer) + xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200); + while (count) { + temp = count < xd_maxsectors ? count : xd_maxsectors; + + track = block / p->sectors; + head = track % p->heads; + cylinder = track / p->heads; + sector = block % p->sectors; + +#ifdef DEBUG_READWRITE + printk("xd_readwrite: drive = %d, head = %d, cylinder = %d, sector = %d, count = %d\n",drive,head,cylinder,sector,temp); +#endif /* DEBUG_READWRITE */ + + if (xd_dma_buffer) { + mode = xd_setup_dma(operation == READ ? DMA_MODE_READ : DMA_MODE_WRITE,(u_char *)(xd_dma_buffer),temp * 0x200); + real_buffer = &xd_dma_buffer; + for (i=0; i < (temp * 0x200); i++) + xd_dma_buffer[i] = buffer[i]; + } + else + real_buffer = &buffer; + + xd_build(cmdblk,operation == READ ? CMD_READ : CMD_WRITE,drive,head,cylinder,sector,temp & 0xFF,control); + + switch (xd_command(cmdblk,mode,(u_char *)(*real_buffer),(u_char *)(*real_buffer),sense,XD_TIMEOUT)) { + case 1: + printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write")); + xd_recalibrate(drive); + spin_lock_irq(&xd_lock); + return (0); + case 2: + if (sense[0] & 0x30) { + printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing")); + switch ((sense[0] & 0x30) >> 4) { + case 0: printk("drive error, code = 0x%X",sense[0] & 0x0F); + break; + case 1: printk("controller error, code = 0x%X",sense[0] & 0x0F); + break; + case 2: printk("command error, code = 0x%X",sense[0] & 0x0F); + break; + case 3: printk("miscellaneous error, code = 0x%X",sense[0] & 0x0F); + break; + } + } + if (sense[0] & 0x80) + printk(" - CHS = %d/%d/%d\n",((sense[2] & 0xC0) << 2) | sense[3],sense[1] & 0x1F,sense[2] & 0x3F); + /* reported drive number = (sense[1] & 0xE0) >> 5 */ + else + printk(" - no valid disk address\n"); + spin_lock_irq(&xd_lock); + return (0); + } + if (xd_dma_buffer) + for (i=0; i < (temp * 0x200); i++) + buffer[i] = xd_dma_buffer[i]; + + count -= temp, buffer += temp * 0x200, block += temp; + } + spin_lock_irq(&xd_lock); + return (1); +} + +/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */ +static void xd_recalibrate (u_char drive) +{ + u_char cmdblk[6]; + + xd_build(cmdblk,CMD_RECALIBRATE,drive,0,0,0,0,0); + if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 8)) + printk("xd%c: warning! 
error recalibrating, controller may be unstable\n", 'a'+drive); +} + +/* xd_interrupt_handler: interrupt service routine */ +static irqreturn_t xd_interrupt_handler(int irq, void *dev_id) +{ + if (inb(XD_STATUS) & STAT_INTERRUPT) { /* check if it was our device */ +#ifdef DEBUG_OTHER + printk("xd_interrupt_handler: interrupt detected\n"); +#endif /* DEBUG_OTHER */ + outb(0,XD_CONTROL); /* acknowledge interrupt */ + wake_up(&xd_wait_int); /* and wake up sleeping processes */ + return IRQ_HANDLED; + } + else + printk("xd: unexpected interrupt\n"); + return IRQ_NONE; +} + +/* xd_setup_dma: set up the DMA controller for a data transfer */ +static u_char xd_setup_dma (u_char mode,u_char *buffer,u_int count) +{ + unsigned long f; + + if (nodma) + return (PIO_MODE); + if (((unsigned long) buffer & 0xFFFF0000) != (((unsigned long) buffer + count) & 0xFFFF0000)) { +#ifdef DEBUG_OTHER + printk("xd_setup_dma: using PIO, transfer overlaps 64k boundary\n"); +#endif /* DEBUG_OTHER */ + return (PIO_MODE); + } + + f=claim_dma_lock(); + disable_dma(xd_dma); + clear_dma_ff(xd_dma); + set_dma_mode(xd_dma,mode); + set_dma_addr(xd_dma, (unsigned long) buffer); + set_dma_count(xd_dma,count); + + release_dma_lock(f); + + return (DMA_MODE); /* use DMA and INT */ +} + +/* xd_build: put stuff into an array in a format suitable for the controller */ +static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control) +{ + cmdblk[0] = command; + cmdblk[1] = ((drive & 0x07) << 5) | (head & 0x1F); + cmdblk[2] = ((cylinder & 0x300) >> 2) | (sector & 0x3F); + cmdblk[3] = cylinder & 0xFF; + cmdblk[4] = count; + cmdblk[5] = control; + + return (cmdblk); +} + +static void xd_watchdog (unsigned long unused) +{ + xd_error = 1; + wake_up(&xd_wait_int); +} + +/* xd_waitport: waits until port & mask == flags or a timeout occurs. return 1 for a timeout */ +static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout) +{ + u_long expiry = jiffies + timeout; + int success; + + xdc_busy = 1; + while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry)) + schedule_timeout_uninterruptible(1); + xdc_busy = 0; + return (success); +} + +static inline u_int xd_wait_for_IRQ (void) +{ + unsigned long flags; + xd_watchdog_int.expires = jiffies + 8 * HZ; + add_timer(&xd_watchdog_int); + + flags=claim_dma_lock(); + enable_dma(xd_dma); + release_dma_lock(flags); + + sleep_on(&xd_wait_int); + del_timer(&xd_watchdog_int); + xdc_busy = 0; + + flags=claim_dma_lock(); + disable_dma(xd_dma); + release_dma_lock(flags); + + if (xd_error) { + printk("xd: missed IRQ - command aborted\n"); + xd_error = 0; + return (1); + } + return (0); +} + +/* xd_command: handle all data transfers necessary for a single command */ +static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout) +{ + u_char cmdblk[6],csb,complete = 0; + +#ifdef DEBUG_COMMAND + printk("xd_command: command = 0x%X, mode = 0x%X, indata = 0x%X, outdata = 0x%X, sense = 0x%X\n",command,mode,indata,outdata,sense); +#endif /* DEBUG_COMMAND */ + + outb(0,XD_SELECT); + outb(mode,XD_CONTROL); + + if (xd_waitport(XD_STATUS,STAT_SELECT,STAT_SELECT,timeout)) + return (1); + + while (!complete) { + if (xd_waitport(XD_STATUS,STAT_READY,STAT_READY,timeout)) + return (1); + + switch (inb(XD_STATUS) & (STAT_COMMAND | STAT_INPUT)) { + case 0: + if (mode == DMA_MODE) { + if (xd_wait_for_IRQ()) + return (1); + } else + outb(outdata ? 
*outdata++ : 0,XD_DATA); + break; + case STAT_INPUT: + if (mode == DMA_MODE) { + if (xd_wait_for_IRQ()) + return (1); + } else + if (indata) + *indata++ = inb(XD_DATA); + else + inb(XD_DATA); + break; + case STAT_COMMAND: + outb(command ? *command++ : 0,XD_DATA); + break; + case STAT_COMMAND | STAT_INPUT: + complete = 1; + break; + } + } + csb = inb(XD_DATA); + + if (xd_waitport(XD_STATUS,0,STAT_SELECT,timeout)) /* wait until deselected */ + return (1); + + if (csb & CSB_ERROR) { /* read sense data if error */ + xd_build(cmdblk,CMD_SENSE,(csb & CSB_LUN) >> 5,0,0,0,0,0); + if (xd_command(cmdblk,0,sense,NULL,NULL,XD_TIMEOUT)) + printk("xd: warning! sense command failed!\n"); + } + +#ifdef DEBUG_COMMAND + printk("xd_command: completed with csb = 0x%X\n",csb); +#endif /* DEBUG_COMMAND */ + + return (csb & CSB_ERROR); +} + +static u_char __init xd_initdrives (void (*init_drive)(u_char drive)) +{ + u_char cmdblk[6],i,count = 0; + + for (i = 0; i < XD_MAXDRIVES; i++) { + xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0); + if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT*8)) { + msleep_interruptible(XD_INIT_DISK_DELAY); + + init_drive(count); + count++; + + msleep_interruptible(XD_INIT_DISK_DELAY); + } + } + return (count); +} + +static void __init xd_manual_geo_set (u_char drive) +{ + xd_info[drive].heads = (u_char)(xd_geo[3 * drive + 1]); + xd_info[drive].cylinders = (u_short)(xd_geo[3 * drive]); + xd_info[drive].sectors = (u_char)(xd_geo[3 * drive + 2]); +} + +static void __init xd_dtc_init_controller (unsigned int address) +{ + switch (address) { + case 0x00000: + case 0xC8000: break; /*initial: 0x320 */ + case 0xCA000: xd_iobase = 0x324; + case 0xD0000: /*5150CX*/ + case 0xD8000: break; /*5150CX & 5150XL*/ + default: printk("xd_dtc_init_controller: unsupported BIOS address %06x\n",address); + break; + } + xd_maxsectors = 0x01; /* my card seems to have trouble doing multi-block transfers? */ + + outb(0,XD_RESET); /* reset the controller */ +} + + +static void __init xd_dtc5150cx_init_drive (u_char drive) +{ + /* values from controller's BIOS - BIOS chip may be removed */ + static u_short geometry_table[][4] = { + {0x200,8,0x200,0x100}, + {0x267,2,0x267,0x267}, + {0x264,4,0x264,0x80}, + {0x132,4,0x132,0x0}, + {0x132,2,0x80, 0x132}, + {0x177,8,0x177,0x0}, + {0x132,8,0x84, 0x0}, + {}, /* not used */ + {0x132,6,0x80, 0x100}, + {0x200,6,0x100,0x100}, + {0x264,2,0x264,0x80}, + {0x280,4,0x280,0x100}, + {0x2B9,3,0x2B9,0x2B9}, + {0x2B9,5,0x2B9,0x2B9}, + {0x280,6,0x280,0x100}, + {0x132,4,0x132,0x0}}; + u_char n; + + n = inb(XD_JUMPER); + n = (drive ? 
n : (n >> 2)) & 0x33; + n = (n | (n >> 2)) & 0x0F; + if (xd_geo[3*drive]) + xd_manual_geo_set(drive); + else + if (n != 7) { + xd_info[drive].heads = (u_char)(geometry_table[n][1]); /* heads */ + xd_info[drive].cylinders = geometry_table[n][0]; /* cylinders */ + xd_info[drive].sectors = 17; /* sectors */ +#if 0 + xd_info[drive].rwrite = geometry_table[n][2]; /* reduced write */ + xd_info[drive].precomp = geometry_table[n][3] /* write precomp */ + xd_info[drive].ecc = 0x0B; /* ecc length */ +#endif /* 0 */ + } + else { + printk("xd%c: undetermined drive geometry\n",'a'+drive); + return; + } + xd_info[drive].control = 5; /* control byte */ + xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B); + xd_recalibrate(drive); +} + +static void __init xd_dtc_init_drive (u_char drive) +{ + u_char cmdblk[6],buf[64]; + + xd_build(cmdblk,CMD_DTCGETGEOM,drive,0,0,0,0,0); + if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) { + xd_info[drive].heads = buf[0x0A]; /* heads */ + xd_info[drive].cylinders = ((u_short *) (buf))[0x04]; /* cylinders */ + xd_info[drive].sectors = 17; /* sectors */ + if (xd_geo[3*drive]) + xd_manual_geo_set(drive); +#if 0 + xd_info[drive].rwrite = ((u_short *) (buf + 1))[0x05]; /* reduced write */ + xd_info[drive].precomp = ((u_short *) (buf + 1))[0x06]; /* write precomp */ + xd_info[drive].ecc = buf[0x0F]; /* ecc length */ +#endif /* 0 */ + xd_info[drive].control = 0; /* control byte */ + + xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,((u_short *) (buf + 1))[0x05],((u_short *) (buf + 1))[0x06],buf[0x0F]); + xd_build(cmdblk,CMD_DTCSETSTEP,drive,0,0,0,0,7); + if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2)) + printk("xd_dtc_init_drive: error setting step rate for xd%c\n", 'a'+drive); + } + else + printk("xd_dtc_init_drive: error reading geometry for xd%c\n", 'a'+drive); +} + +static void __init xd_wd_init_controller (unsigned int address) +{ + switch (address) { + case 0x00000: + case 0xC8000: break; /*initial: 0x320 */ + case 0xCA000: xd_iobase = 0x324; break; + case 0xCC000: xd_iobase = 0x328; break; + case 0xCE000: xd_iobase = 0x32C; break; + case 0xD0000: xd_iobase = 0x328; break; /* ? */ + case 0xD8000: xd_iobase = 0x32C; break; /* ? */ + default: printk("xd_wd_init_controller: unsupported BIOS address %06x\n",address); + break; + } + xd_maxsectors = 0x01; /* this one doesn't wrap properly either... */ + + outb(0,XD_RESET); /* reset the controller */ + + msleep(XD_INIT_DISK_DELAY); +} + +static void __init xd_wd_init_drive (u_char drive) +{ + /* values from controller's BIOS - BIOS may be disabled */ + static u_short geometry_table[][4] = { + {0x264,4,0x1C2,0x1C2}, /* common part */ + {0x132,4,0x099,0x0}, + {0x267,2,0x1C2,0x1C2}, + {0x267,4,0x1C2,0x1C2}, + + {0x334,6,0x335,0x335}, /* 1004 series RLL */ + {0x30E,4,0x30F,0x3DC}, + {0x30E,2,0x30F,0x30F}, + {0x267,4,0x268,0x268}, + + {0x3D5,5,0x3D6,0x3D6}, /* 1002 series RLL */ + {0x3DB,7,0x3DC,0x3DC}, + {0x264,4,0x265,0x265}, + {0x267,4,0x268,0x268}}; + + u_char cmdblk[6],buf[0x200]; + u_char n = 0,rll,jumper_state,use_jumper_geo; + u_char wd_1002 = (xd_sigs[xd_type].string[7] == '6'); + + jumper_state = ~(inb(0x322)); + if (jumper_state & 0x40) + xd_irq = 9; + rll = (jumper_state & 0x30) ? 
(0x04 << wd_1002) : 0; + xd_build(cmdblk,CMD_READ,drive,0,0,0,1,0); + if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) { + xd_info[drive].heads = buf[0x1AF]; /* heads */ + xd_info[drive].cylinders = ((u_short *) (buf + 1))[0xD6]; /* cylinders */ + xd_info[drive].sectors = 17; /* sectors */ + if (xd_geo[3*drive]) + xd_manual_geo_set(drive); +#if 0 + xd_info[drive].rwrite = ((u_short *) (buf))[0xD8]; /* reduced write */ + xd_info[drive].wprecomp = ((u_short *) (buf))[0xDA]; /* write precomp */ + xd_info[drive].ecc = buf[0x1B4]; /* ecc length */ +#endif /* 0 */ + xd_info[drive].control = buf[0x1B5]; /* control byte */ + use_jumper_geo = !(xd_info[drive].heads) || !(xd_info[drive].cylinders); + if (xd_geo[3*drive]) { + xd_manual_geo_set(drive); + xd_info[drive].control = rll ? 7 : 5; + } + else if (use_jumper_geo) { + n = (((jumper_state & 0x0F) >> (drive << 1)) & 0x03) | rll; + xd_info[drive].cylinders = geometry_table[n][0]; + xd_info[drive].heads = (u_char)(geometry_table[n][1]); + xd_info[drive].control = rll ? 7 : 5; +#if 0 + xd_info[drive].rwrite = geometry_table[n][2]; + xd_info[drive].wprecomp = geometry_table[n][3]; + xd_info[drive].ecc = 0x0B; +#endif /* 0 */ + } + if (!wd_1002) { + if (use_jumper_geo) + xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders, + geometry_table[n][2],geometry_table[n][3],0x0B); + else + xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders, + ((u_short *) (buf))[0xD8],((u_short *) (buf))[0xDA],buf[0x1B4]); + } + /* 1002 based RLL controller requests converted addressing, but reports physical + (physical 26 sec., logical 17 sec.) + 1004 based ???? */ + if (rll & wd_1002) { + if ((xd_info[drive].cylinders *= 26, + xd_info[drive].cylinders /= 17) > 1023) + xd_info[drive].cylinders = 1023; /* 1024 ? 
*/ +#if 0 + xd_info[drive].rwrite *= 26; + xd_info[drive].rwrite /= 17; + xd_info[drive].wprecomp *= 26 + xd_info[drive].wprecomp /= 17; +#endif /* 0 */ + } + } + else + printk("xd_wd_init_drive: error reading geometry for xd%c\n",'a'+drive); + +} + +static void __init xd_seagate_init_controller (unsigned int address) +{ + switch (address) { + case 0x00000: + case 0xC8000: break; /*initial: 0x320 */ + case 0xD0000: xd_iobase = 0x324; break; + case 0xD8000: xd_iobase = 0x328; break; + case 0xE0000: xd_iobase = 0x32C; break; + default: printk("xd_seagate_init_controller: unsupported BIOS address %06x\n",address); + break; + } + xd_maxsectors = 0x40; + + outb(0,XD_RESET); /* reset the controller */ +} + +static void __init xd_seagate_init_drive (u_char drive) +{ + u_char cmdblk[6],buf[0x200]; + + xd_build(cmdblk,CMD_ST11GETGEOM,drive,0,0,0,1,0); + if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) { + xd_info[drive].heads = buf[0x04]; /* heads */ + xd_info[drive].cylinders = (buf[0x02] << 8) | buf[0x03]; /* cylinders */ + xd_info[drive].sectors = buf[0x05]; /* sectors */ + xd_info[drive].control = 0; /* control byte */ + } + else + printk("xd_seagate_init_drive: error reading geometry from xd%c\n", 'a'+drive); +} + +/* Omti support courtesy Dirk Melchers */ +static void __init xd_omti_init_controller (unsigned int address) +{ + switch (address) { + case 0x00000: + case 0xC8000: break; /*initial: 0x320 */ + case 0xD0000: xd_iobase = 0x324; break; + case 0xD8000: xd_iobase = 0x328; break; + case 0xE0000: xd_iobase = 0x32C; break; + default: printk("xd_omti_init_controller: unsupported BIOS address %06x\n",address); + break; + } + + xd_maxsectors = 0x40; + + outb(0,XD_RESET); /* reset the controller */ +} + +static void __init xd_omti_init_drive (u_char drive) +{ + /* gets infos from drive */ + xd_override_init_drive(drive); + + /* set other parameters, Hardcoded, not that nice :-) */ + xd_info[drive].control = 2; +} + +/* Xebec support (AK) */ +static void __init xd_xebec_init_controller (unsigned int address) +{ +/* iobase may be set manually in range 0x300 - 0x33C + irq may be set manually to 2(9),3,4,5,6,7 + dma may be set manually to 1,2,3 + (How to detect them ???) +BIOS address may be set manually in range 0x0 - 0xF8000 +If you need non-standard settings use the xd=... 
command */ + + switch (address) { + case 0x00000: + case 0xC8000: /* initially: xd_iobase==0x320 */ + case 0xD0000: + case 0xD2000: + case 0xD4000: + case 0xD6000: + case 0xD8000: + case 0xDA000: + case 0xDC000: + case 0xDE000: + case 0xE0000: break; + default: printk("xd_xebec_init_controller: unsupported BIOS address %06x\n",address); + break; + } + + xd_maxsectors = 0x01; + outb(0,XD_RESET); /* reset the controller */ + + msleep(XD_INIT_DISK_DELAY); +} + +static void __init xd_xebec_init_drive (u_char drive) +{ + /* values from controller's BIOS - BIOS chip may be removed */ + static u_short geometry_table[][5] = { + {0x132,4,0x080,0x080,0x7}, + {0x132,4,0x080,0x080,0x17}, + {0x264,2,0x100,0x100,0x7}, + {0x264,2,0x100,0x100,0x17}, + {0x132,8,0x080,0x080,0x7}, + {0x132,8,0x080,0x080,0x17}, + {0x264,4,0x100,0x100,0x6}, + {0x264,4,0x100,0x100,0x17}, + {0x2BC,5,0x2BC,0x12C,0x6}, + {0x3A5,4,0x3A5,0x3A5,0x7}, + {0x26C,6,0x26C,0x26C,0x7}, + {0x200,8,0x200,0x100,0x17}, + {0x400,5,0x400,0x400,0x7}, + {0x400,6,0x400,0x400,0x7}, + {0x264,8,0x264,0x200,0x17}, + {0x33E,7,0x33E,0x200,0x7}}; + u_char n; + + n = inb(XD_JUMPER) & 0x0F; /* BIOS's drive number: same geometry + is assumed for BOTH drives */ + if (xd_geo[3*drive]) + xd_manual_geo_set(drive); + else { + xd_info[drive].heads = (u_char)(geometry_table[n][1]); /* heads */ + xd_info[drive].cylinders = geometry_table[n][0]; /* cylinders */ + xd_info[drive].sectors = 17; /* sectors */ +#if 0 + xd_info[drive].rwrite = geometry_table[n][2]; /* reduced write */ + xd_info[drive].precomp = geometry_table[n][3] /* write precomp */ + xd_info[drive].ecc = 0x0B; /* ecc length */ +#endif /* 0 */ + } + xd_info[drive].control = geometry_table[n][4]; /* control byte */ + xd_setparam(CMD_XBSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B); + xd_recalibrate(drive); +} + +/* xd_override_init_drive: this finds disk geometry in a "binary search" style, narrowing in on the "correct" number of heads + etc. by trying values until it gets the highest successful value. Idea courtesy Salvador Abreu (spa@fct.unl.pt). 
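+
+   Editor's note: concretely, for each of heads, cylinders and sectors the
+   loop keeps min as the highest value assumed or known to seek
+   successfully and max as the lowest value known to fail, probes the
+   midpoint with CMD_SEEK, and narrows whichever bound applies until
+   max == min + 1.  min is then the largest addressable value, and the +1
+   converts it to a count.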
*/ +static void __init xd_override_init_drive (u_char drive) +{ + u_short min[] = { 0,0,0 },max[] = { 16,1024,64 },test[] = { 0,0,0 }; + u_char cmdblk[6],i; + + if (xd_geo[3*drive]) + xd_manual_geo_set(drive); + else { + for (i = 0; i < 3; i++) { + while (min[i] != max[i] - 1) { + test[i] = (min[i] + max[i]) / 2; + xd_build(cmdblk,CMD_SEEK,drive,(u_char) test[0],(u_short) test[1],(u_char) test[2],0,0); + if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2)) + min[i] = test[i]; + else + max[i] = test[i]; + } + test[i] = min[i]; + } + xd_info[drive].heads = (u_char) min[0] + 1; + xd_info[drive].cylinders = (u_short) min[1] + 1; + xd_info[drive].sectors = (u_char) min[2] + 1; + } + xd_info[drive].control = 0; +} + +/* xd_setup: initialise controller from command line parameters */ +static void __init do_xd_setup (int *integers) +{ + switch (integers[0]) { + case 4: if (integers[4] < 0) + nodma = 1; + else if (integers[4] < 8) + xd_dma = integers[4]; + case 3: if ((integers[3] > 0) && (integers[3] <= 0x3FC)) + xd_iobase = integers[3]; + case 2: if ((integers[2] > 0) && (integers[2] < 16)) + xd_irq = integers[2]; + case 1: xd_override = 1; + if ((integers[1] >= 0) && (integers[1] < ARRAY_SIZE(xd_sigs))) + xd_type = integers[1]; + case 0: break; + default:printk("xd: too many parameters for xd\n"); + } + xd_maxsectors = 0x01; +} + +/* xd_setparam: set the drive characteristics */ +static void __init xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc) +{ + u_char cmdblk[14]; + + xd_build(cmdblk,command,drive,0,0,0,0,0); + cmdblk[6] = (u_char) (cylinders >> 8) & 0x03; + cmdblk[7] = (u_char) (cylinders & 0xFF); + cmdblk[8] = heads & 0x1F; + cmdblk[9] = (u_char) (rwrite >> 8) & 0x03; + cmdblk[10] = (u_char) (rwrite & 0xFF); + cmdblk[11] = (u_char) (wprecomp >> 8) & 0x03; + cmdblk[12] = (u_char) (wprecomp & 0xFF); + cmdblk[13] = ecc; + + /* Some controllers require geometry info as data, not command */ + + if (xd_command(cmdblk,PIO_MODE,NULL,&cmdblk[6],NULL,XD_TIMEOUT * 2)) + printk("xd: error setting characteristics for xd%c\n", 'a'+drive); +} + + +#ifdef MODULE + +module_param_array(xd, int, NULL, 0); +module_param_array(xd_geo, int, NULL, 0); +module_param(nodma, bool, 0); + +MODULE_LICENSE("GPL"); + +void cleanup_module(void) +{ + int i; + unregister_blkdev(XT_DISK_MAJOR, "xd"); + for (i = 0; i < xd_drives; i++) { + del_gendisk(xd_gendisk[i]); + put_disk(xd_gendisk[i]); + } + blk_cleanup_queue(xd_queue); + release_region(xd_iobase,4); + if (xd_drives) { + free_irq(xd_irq, NULL); + free_dma(xd_dma); + if (xd_dma_buffer) + xd_dma_mem_free((unsigned long)xd_dma_buffer, xd_maxsectors * 0x200); + } +} +#else + +static int __init xd_setup (char *str) +{ + int ints[5]; + get_options (str, ARRAY_SIZE (ints), ints); + do_xd_setup (ints); + return 1; +} + +/* xd_manual_geo_init: initialise drive geometry from command line parameters + (used only for WD drives) */ +static int __init xd_manual_geo_init (char *str) +{ + int i, integers[1 + 3*XD_MAXDRIVES]; + + get_options (str, ARRAY_SIZE (integers), integers); + if (integers[0]%3 != 0) { + printk("xd: incorrect number of parameters for xd_geo\n"); + return 1; + } + for (i = 0; (i < integers[0]) && (i < 3*XD_MAXDRIVES); i++) + xd_geo[i] = integers[i+1]; + return 1; +} + +__setup ("xd=", xd_setup); +__setup ("xd_geo=", xd_manual_geo_init); + +#endif /* MODULE */ + +module_init(xd_init); +MODULE_ALIAS_BLOCKDEV_MAJOR(XT_DISK_MAJOR); diff --git a/drivers/block/xd.h b/drivers/block/xd.h new 
file mode 100644 index 0000000..37cacef --- /dev/null +++ b/drivers/block/xd.h @@ -0,0 +1,134 @@ +#ifndef _LINUX_XD_H +#define _LINUX_XD_H + +/* + * This file contains the definitions for the IO ports and errors etc. for XT hard disk controllers (at least the DTC 5150X). + * + * Author: Pat Mackinlay, pat@it.com.au + * Date: 29/09/92 + * + * Revised: 01/01/93, ... + * + * Ref: DTC 5150X Controller Specification (thanks to Kevin Fowler, kevinf@agora.rain.com) + * Also thanks to: Salvador Abreu, Dave Thaler, Risto Kankkunen and Wim Van Dorst. + */ + +#include <linux/interrupt.h> + +/* XT hard disk controller registers */ +#define XD_DATA (xd_iobase + 0x00) /* data RW register */ +#define XD_RESET (xd_iobase + 0x01) /* reset WO register */ +#define XD_STATUS (xd_iobase + 0x01) /* status RO register */ +#define XD_SELECT (xd_iobase + 0x02) /* select WO register */ +#define XD_JUMPER (xd_iobase + 0x02) /* jumper RO register */ +#define XD_CONTROL (xd_iobase + 0x03) /* DMAE/INTE WO register */ +#define XD_RESERVED (xd_iobase + 0x03) /* reserved */ + +/* XT hard disk controller commands (incomplete list) */ +#define CMD_TESTREADY 0x00 /* test drive ready */ +#define CMD_RECALIBRATE 0x01 /* recalibrate drive */ +#define CMD_SENSE 0x03 /* request sense */ +#define CMD_FORMATDRV 0x04 /* format drive */ +#define CMD_VERIFY 0x05 /* read verify */ +#define CMD_FORMATTRK 0x06 /* format track */ +#define CMD_FORMATBAD 0x07 /* format bad track */ +#define CMD_READ 0x08 /* read */ +#define CMD_WRITE 0x0A /* write */ +#define CMD_SEEK 0x0B /* seek */ + +/* Controller specific commands */ +#define CMD_DTCSETPARAM 0x0C /* set drive parameters (DTC 5150X & CX only?) */ +#define CMD_DTCGETECC 0x0D /* get ecc error length (DTC 5150X only?) */ +#define CMD_DTCREADBUF 0x0E /* read sector buffer (DTC 5150X only?) */ +#define CMD_DTCWRITEBUF 0x0F /* write sector buffer (DTC 5150X only?) */ +#define CMD_DTCREMAPTRK 0x11 /* assign alternate track (DTC 5150X only?) */ +#define CMD_DTCGETPARAM 0xFB /* get drive parameters (DTC 5150X only?) */ +#define CMD_DTCSETSTEP 0xFC /* set step rate (DTC 5150X only?) */ +#define CMD_DTCSETGEOM 0xFE /* set geometry data (DTC 5150X only?) */ +#define CMD_DTCGETGEOM 0xFF /* get geometry data (DTC 5150X only?) */ +#define CMD_ST11GETGEOM 0xF8 /* get geometry data (Seagate ST11R/M only?) */ +#define CMD_WDSETPARAM 0x0C /* set drive parameters (WD 1004A27X only?) */ +#define CMD_XBSETPARAM 0x0C /* set drive parameters (XEBEC only?) 
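*/
+
+/*
+ * Editor's note: every command above is issued as a 6-byte block, packed
+ * by xd_build() in xd.c:
+ *
+ *	byte 0	command opcode
+ *	byte 1	drive number (bits 5-7) and head (bits 0-4)
+ *	byte 2	cylinder bits 8-9 (in bits 6-7) and sector (bits 0-5)
+ *	byte 3	cylinder bits 0-7
+ *	byte 4	sector count
+ *	byte 5	control byte
+ *
+ * The set-parameter commands append a further 8 bytes of geometry data;
+ * see xd_setparam() in xd.c.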
*/ + +/* Bits for command status byte */ +#define CSB_ERROR 0x02 /* error */ +#define CSB_LUN 0x20 /* logical Unit Number */ + +/* XT hard disk controller status bits */ +#define STAT_READY 0x01 /* controller is ready */ +#define STAT_INPUT 0x02 /* data flowing from controller to host */ +#define STAT_COMMAND 0x04 /* controller in command phase */ +#define STAT_SELECT 0x08 /* controller is selected */ +#define STAT_REQUEST 0x10 /* controller requesting data */ +#define STAT_INTERRUPT 0x20 /* controller requesting interrupt */ + +/* XT hard disk controller control bits */ +#define PIO_MODE 0x00 /* control bits to set for PIO */ +#define DMA_MODE 0x03 /* control bits to set for DMA & interrupt */ + +#define XD_MAXDRIVES 2 /* maximum 2 drives */ +#define XD_TIMEOUT HZ /* 1 second timeout */ +#define XD_RETRIES 4 /* maximum 4 retries */ + +#undef DEBUG /* define for debugging output */ + +#ifdef DEBUG + #define DEBUG_STARTUP /* debug driver initialisation */ + #define DEBUG_OVERRIDE /* debug override geometry detection */ + #define DEBUG_READWRITE /* debug each read/write command */ + #define DEBUG_OTHER /* debug misc. interrupt/DMA stuff */ + #define DEBUG_COMMAND /* debug each controller command */ +#endif /* DEBUG */ + +/* this structure defines the XT drives and their types */ +typedef struct { + u_char heads; + u_short cylinders; + u_char sectors; + u_char control; + int unit; +} XD_INFO; + +/* this structure defines a ROM BIOS signature */ +typedef struct { + unsigned int offset; + const char *string; + void (*init_controller)(unsigned int address); + void (*init_drive)(u_char drive); + const char *name; +} XD_SIGNATURE; + +#ifndef MODULE +static int xd_manual_geo_init (char *command); +#endif /* MODULE */ +static u_char xd_detect (u_char *controller, unsigned int *address); +static u_char xd_initdrives (void (*init_drive)(u_char drive)); + +static void do_xd_request (struct request_queue * q); +static int xd_ioctl (struct block_device *bdev,fmode_t mode,unsigned int cmd,unsigned long arg); +static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count); +static void xd_recalibrate (u_char drive); + +static irqreturn_t xd_interrupt_handler(int irq, void *dev_id); +static u_char xd_setup_dma (u_char opcode,u_char *buffer,u_int count); +static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control); +static void xd_watchdog (unsigned long unused); +static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout); +static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout); + +/* card specific setup and geometry gathering code */ +static void xd_dtc_init_controller (unsigned int address); +static void xd_dtc5150cx_init_drive (u_char drive); +static void xd_dtc_init_drive (u_char drive); +static void xd_wd_init_controller (unsigned int address); +static void xd_wd_init_drive (u_char drive); +static void xd_seagate_init_controller (unsigned int address); +static void xd_seagate_init_drive (u_char drive); +static void xd_omti_init_controller (unsigned int address); +static void xd_omti_init_drive (u_char drive); +static void xd_xebec_init_controller (unsigned int address); +static void xd_xebec_init_drive (u_char drive); +static void xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc); +static void xd_override_init_drive (u_char drive); + +#endif 
/* _LINUX_XD_H */ diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c new file mode 100644 index 0000000..676278e --- /dev/null +++ b/drivers/block/xen-blkfront.c @@ -0,0 +1,1098 @@ +/* + * blkfront.c + * + * XenLinux virtual block device driver. + * + * Copyright (c) 2003-2004, Keir Fraser & Steve Hand + * Modifications by Mark A. Williamson are (c) Intel Research Cambridge + * Copyright (c) 2004, Christian Limpach + * Copyright (c) 2004, Andrew Warfield + * Copyright (c) 2005, Christopher Clark + * Copyright (c) 2005, XenSource Ltd + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include <linux/interrupt.h> +#include <linux/blkdev.h> +#include <linux/hdreg.h> +#include <linux/cdrom.h> +#include <linux/module.h> +#include <linux/scatterlist.h> + +#include <xen/xenbus.h> +#include <xen/grant_table.h> +#include <xen/events.h> +#include <xen/page.h> + +#include <xen/interface/grant_table.h> +#include <xen/interface/io/blkif.h> +#include <xen/interface/io/protocols.h> + +#include <asm/xen/hypervisor.h> + +enum blkif_state { + BLKIF_STATE_DISCONNECTED, + BLKIF_STATE_CONNECTED, + BLKIF_STATE_SUSPENDED, +}; + +struct blk_shadow { + struct blkif_request req; + unsigned long request; + unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; +}; + +static struct block_device_operations xlvbd_block_fops; + +#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE) + +/* + * We have one of these per vbd, whether ide, scsi or 'other'. They + * hang in private_data off the gendisk structure. We may end up + * putting all kinds of interesting stuff here :-) + */ +struct blkfront_info +{ + struct xenbus_device *xbdev; + struct gendisk *gd; + int vdevice; + blkif_vdev_t handle; + enum blkif_state connected; + int ring_ref; + struct blkif_front_ring ring; + struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + unsigned int evtchn, irq; + struct request_queue *rq; + struct work_struct work; + struct gnttab_free_callback callback; + struct blk_shadow shadow[BLK_RING_SIZE]; + unsigned long shadow_free; + int feature_barrier; + int is_ready; + + /** + * The number of people holding this device open. 
We won't allow a + * hot-unplug unless this is 0. + */ + int users; +}; + +static DEFINE_SPINLOCK(blkif_io_lock); + +#define MAXIMUM_OUTSTANDING_BLOCK_REQS \ + (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) +#define GRANT_INVALID_REF 0 + +#define PARTS_PER_DISK 16 +#define PARTS_PER_EXT_DISK 256 + +#define BLKIF_MAJOR(dev) ((dev)>>8) +#define BLKIF_MINOR(dev) ((dev) & 0xff) + +#define EXT_SHIFT 28 +#define EXTENDED (1<<EXT_SHIFT) +#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED)) +#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED)) + +#define DEV_NAME "xvd" /* name in /dev */ + +static int get_id_from_freelist(struct blkfront_info *info) +{ + unsigned long free = info->shadow_free; + BUG_ON(free > BLK_RING_SIZE); + info->shadow_free = info->shadow[free].req.id; + info->shadow[free].req.id = 0x0fffffee; /* debug */ + return free; +} + +static void add_id_to_freelist(struct blkfront_info *info, + unsigned long id) +{ + info->shadow[id].req.id = info->shadow_free; + info->shadow[id].request = 0; + info->shadow_free = id; +} + +static void blkif_restart_queue_callback(void *arg) +{ + struct blkfront_info *info = (struct blkfront_info *)arg; + schedule_work(&info->work); +} + +static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) +{ + /* We don't have real geometry info, but let's at least return + values consistent with the size of the device */ + sector_t nsect = get_capacity(bd->bd_disk); + sector_t cylinders = nsect; + + hg->heads = 0xff; + hg->sectors = 0x3f; + sector_div(cylinders, hg->heads * hg->sectors); + hg->cylinders = cylinders; + if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect) + hg->cylinders = 0xffff; + return 0; +} + +static int blkif_ioctl(struct block_device *bdev, fmode_t mode, + unsigned command, unsigned long argument) +{ + struct blkfront_info *info = bdev->bd_disk->private_data; + int i; + + dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n", + command, (long)argument); + + switch (command) { + case CDROMMULTISESSION: + dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n"); + for (i = 0; i < sizeof(struct cdrom_multisession); i++) + if (put_user(0, (char __user *)(argument + i))) + return -EFAULT; + return 0; + + case CDROM_GET_CAPABILITY: { + struct gendisk *gd = info->gd; + if (gd->flags & GENHD_FL_CD) + return 0; + return -EINVAL; + } + + default: + /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", + command);*/ + return -EINVAL; /* same return as native Linux */ + } + + return 0; +} + +/* + * blkif_queue_request + * + * request block io + * + * id: for guest use only. + * operation: BLKIF_OP_{READ,WRITE,PROBE} + * buffer: buffer to read/write into. this should be a + * virtual address in the guest os. + */ +static int blkif_queue_request(struct request *req) +{ + struct blkfront_info *info = req->rq_disk->private_data; + unsigned long buffer_mfn; + struct blkif_request *ring_req; + unsigned long id; + unsigned int fsect, lsect; + int i, ref; + grant_ref_t gref_head; + struct scatterlist *sg; + + if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) + return 1; + + if (gnttab_alloc_grant_references( + BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) { + gnttab_request_free_callback( + &info->callback, + blkif_restart_queue_callback, + info, + BLKIF_MAX_SEGMENTS_PER_REQUEST); + return 1; + } + + /* Fill out a communications ring structure. 
*/ + ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); + id = get_id_from_freelist(info); + info->shadow[id].request = (unsigned long)req; + + ring_req->id = id; + ring_req->sector_number = (blkif_sector_t)req->sector; + ring_req->handle = info->handle; + + ring_req->operation = rq_data_dir(req) ? + BLKIF_OP_WRITE : BLKIF_OP_READ; + if (blk_barrier_rq(req)) + ring_req->operation = BLKIF_OP_WRITE_BARRIER; + + ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); + BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); + + for_each_sg(info->sg, sg, ring_req->nr_segments, i) { + buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg))); + fsect = sg->offset >> 9; + lsect = fsect + (sg->length >> 9) - 1; + /* install a grant reference. */ + ref = gnttab_claim_grant_reference(&gref_head); + BUG_ON(ref == -ENOSPC); + + gnttab_grant_foreign_access_ref( + ref, + info->xbdev->otherend_id, + buffer_mfn, + rq_data_dir(req) ); + + info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); + ring_req->seg[i] = + (struct blkif_request_segment) { + .gref = ref, + .first_sect = fsect, + .last_sect = lsect }; + } + + info->ring.req_prod_pvt++; + + /* Keep a private copy so we can reissue requests when recovering. */ + info->shadow[id].req = *ring_req; + + gnttab_free_grant_references(gref_head); + + return 0; +} + + +static inline void flush_requests(struct blkfront_info *info) +{ + int notify; + + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); + + if (notify) + notify_remote_via_irq(info->irq); +} + +/* + * do_blkif_request + * read a block; request is in a request queue + */ +static void do_blkif_request(struct request_queue *rq) +{ + struct blkfront_info *info = NULL; + struct request *req; + int queued; + + pr_debug("Entered do_blkif_request\n"); + + queued = 0; + + while ((req = elv_next_request(rq)) != NULL) { + info = req->rq_disk->private_data; + if (!blk_fs_request(req)) { + end_request(req, 0); + continue; + } + + if (RING_FULL(&info->ring)) + goto wait; + + pr_debug("do_blk_req %p: cmd %p, sec %lx, " + "(%u/%li) buffer:%p [%s]\n", + req, req->cmd, (unsigned long)req->sector, + req->current_nr_sectors, + req->nr_sectors, req->buffer, + rq_data_dir(req) ? "write" : "read"); + + + blkdev_dequeue_request(req); + if (blkif_queue_request(req)) { + blk_requeue_request(rq, req); +wait: + /* Avoid pointless unplugs. */ + blk_stop_queue(rq); + break; + } + + queued++; + } + + if (queued != 0) + flush_requests(info); +} + +static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) +{ + struct request_queue *rq; + elevator_t *old_e; + + rq = blk_init_queue(do_blkif_request, &blkif_io_lock); + if (rq == NULL) + return -1; + + old_e = rq->elevator; + if (IS_ERR_VALUE(elevator_init(rq, "noop"))) + printk(KERN_WARNING + "blkfront: Switch elevator failed, use default\n"); + else + elevator_exit(old_e); + + /* Hard sector size and max sectors impersonate the equiv. hardware. */ + blk_queue_hardsect_size(rq, sector_size); + blk_queue_max_sectors(rq, 512); + + /* Each segment in a request is up to an aligned page in size. */ + blk_queue_segment_boundary(rq, PAGE_SIZE - 1); + blk_queue_max_segment_size(rq, PAGE_SIZE); + + /* Ensure a merged request will fit in a single I/O ring slot. */ + blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); + blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); + + /* Make sure buffer addresses are sector-aligned. */ + blk_queue_dma_alignment(rq, 511); + + /* Make sure we don't use bounce buffers. 
*/ + blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY); + + gd->queue = rq; + + return 0; +} + + +static int xlvbd_barrier(struct blkfront_info *info) +{ + int err; + + err = blk_queue_ordered(info->rq, + info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, + NULL); + + if (err) + return err; + + printk(KERN_INFO "blkfront: %s: barriers %s\n", + info->gd->disk_name, + info->feature_barrier ? "enabled" : "disabled"); + return 0; +} + + +static int xlvbd_alloc_gendisk(blkif_sector_t capacity, + struct blkfront_info *info, + u16 vdisk_info, u16 sector_size) +{ + struct gendisk *gd; + int nr_minors = 1; + int err = -ENODEV; + unsigned int offset; + int minor; + int nr_parts; + + BUG_ON(info->gd != NULL); + BUG_ON(info->rq != NULL); + + if ((info->vdevice>>EXT_SHIFT) > 1) { + /* this is above the extended range; something is wrong */ + printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice); + return -ENODEV; + } + + if (!VDEV_IS_EXTENDED(info->vdevice)) { + minor = BLKIF_MINOR(info->vdevice); + nr_parts = PARTS_PER_DISK; + } else { + minor = BLKIF_MINOR_EXT(info->vdevice); + nr_parts = PARTS_PER_EXT_DISK; + } + + if ((minor % nr_parts) == 0) + nr_minors = nr_parts; + + gd = alloc_disk(nr_minors); + if (gd == NULL) + goto out; + + offset = minor / nr_parts; + + if (nr_minors > 1) { + if (offset < 26) + sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset); + else + sprintf(gd->disk_name, "%s%c%c", DEV_NAME, + 'a' + ((offset / 26)-1), 'a' + (offset % 26)); + } else { + if (offset < 26) + sprintf(gd->disk_name, "%s%c%d", DEV_NAME, + 'a' + offset, + minor & (nr_parts - 1)); + else + sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME, + 'a' + ((offset / 26) - 1), + 'a' + (offset % 26), + minor & (nr_parts - 1)); + } + + gd->major = XENVBD_MAJOR; + gd->first_minor = minor; + gd->fops = &xlvbd_block_fops; + gd->private_data = info; + gd->driverfs_dev = &(info->xbdev->dev); + set_capacity(gd, capacity); + + if (xlvbd_init_blk_queue(gd, sector_size)) { + del_gendisk(gd); + goto out; + } + + info->rq = gd->queue; + info->gd = gd; + + if (info->feature_barrier) + xlvbd_barrier(info); + + if (vdisk_info & VDISK_READONLY) + set_disk_ro(gd, 1); + + if (vdisk_info & VDISK_REMOVABLE) + gd->flags |= GENHD_FL_REMOVABLE; + + if (vdisk_info & VDISK_CDROM) + gd->flags |= GENHD_FL_CD; + + return 0; + + out: + return err; +} + +static void kick_pending_request_queues(struct blkfront_info *info) +{ + if (!RING_FULL(&info->ring)) { + /* Re-enable calldowns. */ + blk_start_queue(info->rq); + /* Kick things off immediately. */ + do_blkif_request(info->rq); + } +} + +static void blkif_restart_queue(struct work_struct *work) +{ + struct blkfront_info *info = container_of(work, struct blkfront_info, work); + + spin_lock_irq(&blkif_io_lock); + if (info->connected == BLKIF_STATE_CONNECTED) + kick_pending_request_queues(info); + spin_unlock_irq(&blkif_io_lock); +} + +static void blkif_free(struct blkfront_info *info, int suspend) +{ + /* Prevent new requests being issued until we fix things up. */ + spin_lock_irq(&blkif_io_lock); + info->connected = suspend ? + BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; + /* No more blkif_request(). */ + if (info->rq) + blk_stop_queue(info->rq); + /* No more gnttab callback work. */ + gnttab_cancel_free_callback(&info->callback); + spin_unlock_irq(&blkif_io_lock); + + /* Flush gnttab callback work. Must be done with no locks held. */ + flush_scheduled_work(); + + /* Free resources associated with old device channel. 
*/ + if (info->ring_ref != GRANT_INVALID_REF) { + gnttab_end_foreign_access(info->ring_ref, 0, + (unsigned long)info->ring.sring); + info->ring_ref = GRANT_INVALID_REF; + info->ring.sring = NULL; + } + if (info->irq) + unbind_from_irqhandler(info->irq, info); + info->evtchn = info->irq = 0; + +} + +static void blkif_completion(struct blk_shadow *s) +{ + int i; + for (i = 0; i < s->req.nr_segments; i++) + gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL); +} + +static irqreturn_t blkif_interrupt(int irq, void *dev_id) +{ + struct request *req; + struct blkif_response *bret; + RING_IDX i, rp; + unsigned long flags; + struct blkfront_info *info = (struct blkfront_info *)dev_id; + int error; + + spin_lock_irqsave(&blkif_io_lock, flags); + + if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { + spin_unlock_irqrestore(&blkif_io_lock, flags); + return IRQ_HANDLED; + } + + again: + rp = info->ring.sring->rsp_prod; + rmb(); /* Ensure we see queued responses up to 'rp'. */ + + for (i = info->ring.rsp_cons; i != rp; i++) { + unsigned long id; + int ret; + + bret = RING_GET_RESPONSE(&info->ring, i); + id = bret->id; + req = (struct request *)info->shadow[id].request; + + blkif_completion(&info->shadow[id]); + + add_id_to_freelist(info, id); + + error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; + switch (bret->operation) { + case BLKIF_OP_WRITE_BARRIER: + if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { + printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", + info->gd->disk_name); + error = -EOPNOTSUPP; + info->feature_barrier = 0; + xlvbd_barrier(info); + } + /* fall through */ + case BLKIF_OP_READ: + case BLKIF_OP_WRITE: + if (unlikely(bret->status != BLKIF_RSP_OKAY)) + dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " + "request: %x\n", bret->status); + + ret = __blk_end_request(req, error, blk_rq_bytes(req)); + BUG_ON(ret); + break; + default: + BUG(); + } + } + + info->ring.rsp_cons = i; + + if (i != info->ring.req_prod_pvt) { + int more_to_do; + RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); + if (more_to_do) + goto again; + } else + info->ring.sring->rsp_event = i + 1; + + kick_pending_request_queues(info); + + spin_unlock_irqrestore(&blkif_io_lock, flags); + + return IRQ_HANDLED; +} + + +static int setup_blkring(struct xenbus_device *dev, + struct blkfront_info *info) +{ + struct blkif_sring *sring; + int err; + + info->ring_ref = GRANT_INVALID_REF; + + sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH); + if (!sring) { + xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); + return -ENOMEM; + } + SHARED_RING_INIT(sring); + FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); + + sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); + + err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); + if (err < 0) { + free_page((unsigned long)sring); + info->ring.sring = NULL; + goto fail; + } + info->ring_ref = err; + + err = xenbus_alloc_evtchn(dev, &info->evtchn); + if (err) + goto fail; + + err = bind_evtchn_to_irqhandler(info->evtchn, + blkif_interrupt, + IRQF_SAMPLE_RANDOM, "blkif", info); + if (err <= 0) { + xenbus_dev_fatal(dev, err, + "bind_evtchn_to_irqhandler failed"); + goto fail; + } + info->irq = err; + + return 0; +fail: + blkif_free(info, 0); + return err; +} + + +/* Common code used when first setting up, and when resuming. 
*/ +static int talk_to_backend(struct xenbus_device *dev, + struct blkfront_info *info) +{ + const char *message = NULL; + struct xenbus_transaction xbt; + int err; + + /* Create shared ring, alloc event channel. */ + err = setup_blkring(dev, info); + if (err) + goto out; + +again: + err = xenbus_transaction_start(&xbt); + if (err) { + xenbus_dev_fatal(dev, err, "starting transaction"); + goto destroy_blkring; + } + + err = xenbus_printf(xbt, dev->nodename, + "ring-ref", "%u", info->ring_ref); + if (err) { + message = "writing ring-ref"; + goto abort_transaction; + } + err = xenbus_printf(xbt, dev->nodename, + "event-channel", "%u", info->evtchn); + if (err) { + message = "writing event-channel"; + goto abort_transaction; + } + err = xenbus_printf(xbt, dev->nodename, "protocol", "%s", + XEN_IO_PROTO_ABI_NATIVE); + if (err) { + message = "writing protocol"; + goto abort_transaction; + } + + err = xenbus_transaction_end(xbt, 0); + if (err) { + if (err == -EAGAIN) + goto again; + xenbus_dev_fatal(dev, err, "completing transaction"); + goto destroy_blkring; + } + + xenbus_switch_state(dev, XenbusStateInitialised); + + return 0; + + abort_transaction: + xenbus_transaction_end(xbt, 1); + if (message) + xenbus_dev_fatal(dev, err, "%s", message); + destroy_blkring: + blkif_free(info, 0); + out: + return err; +} + + +/** + * Entry point to this code when a new device is created. Allocate the basic + * structures and the ring buffer for communication with the backend, and + * inform the backend of the appropriate details for those. Switch to + * Initialised state. + */ +static int blkfront_probe(struct xenbus_device *dev, + const struct xenbus_device_id *id) +{ + int err, vdevice, i; + struct blkfront_info *info; + + /* FIXME: Use dynamic device id if this is not set. */ + err = xenbus_scanf(XBT_NIL, dev->nodename, + "virtual-device", "%i", &vdevice); + if (err != 1) { + /* go looking in the extended area instead */ + err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext", + "%i", &vdevice); + if (err != 1) { + xenbus_dev_fatal(dev, err, "reading virtual-device"); + return err; + } + } + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); + return -ENOMEM; + } + + info->xbdev = dev; + info->vdevice = vdevice; + info->connected = BLKIF_STATE_DISCONNECTED; + INIT_WORK(&info->work, blkif_restart_queue); + + for (i = 0; i < BLK_RING_SIZE; i++) + info->shadow[i].req.id = i+1; + info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; + + /* Front end dir is a number, which is used as the id. */ + info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); + dev->dev.driver_data = info; + + err = talk_to_backend(dev, info); + if (err) { + kfree(info); + dev->dev.driver_data = NULL; + return err; + } + + return 0; +} + + +static int blkif_recover(struct blkfront_info *info) +{ + int i; + struct blkif_request *req; + struct blk_shadow *copy; + int j; + + /* Stage 1: Make a safe copy of the shadow state. */ + copy = kmalloc(sizeof(info->shadow), + GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); + if (!copy) + return -ENOMEM; + memcpy(copy, info->shadow, sizeof(info->shadow)); + + /* Stage 2: Set up free list. */ + memset(&info->shadow, 0, sizeof(info->shadow)); + for (i = 0; i < BLK_RING_SIZE; i++) + info->shadow[i].req.id = i+1; + info->shadow_free = info->ring.req_prod_pvt; + info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; + + /* Stage 3: Find pending requests and requeue them. 
*/
+	for (i = 0; i < BLK_RING_SIZE; i++) {
+		/* Not in use? */
+		if (copy[i].request == 0)
+			continue;
+
+		/* Grab a request slot and copy shadow state into it. */
+		req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
+		*req = copy[i].req;
+
+		/* We get a new request id, and must reset the shadow state. */
+		req->id = get_id_from_freelist(info);
+		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
+
+		/* Rewrite any grant references invalidated by susp/resume. */
+		for (j = 0; j < req->nr_segments; j++)
+			gnttab_grant_foreign_access_ref(
+				req->seg[j].gref,
+				info->xbdev->otherend_id,
+				pfn_to_mfn(info->shadow[req->id].frame[j]),
+				rq_data_dir(
+					(struct request *)
+					info->shadow[req->id].request));
+		info->shadow[req->id].req = *req;
+
+		info->ring.req_prod_pvt++;
+	}
+
+	kfree(copy);
+
+	xenbus_switch_state(info->xbdev, XenbusStateConnected);
+
+	spin_lock_irq(&blkif_io_lock);
+
+	/* Now safe for us to use the shared ring */
+	info->connected = BLKIF_STATE_CONNECTED;
+
+	/* Send off requeued requests */
+	flush_requests(info);
+
+	/* Kick any other new requests queued since we resumed */
+	kick_pending_request_queues(info);
+
+	spin_unlock_irq(&blkif_io_lock);
+
+	return 0;
+}
+
+/**
+ * We are reconnecting to the backend, due to a suspend/resume, or a backend
+ * driver restart.  We tear down our blkif structure and recreate it, but
+ * leave the device-layer structures intact so that this is transparent to
+ * the rest of the kernel.
+ */
+static int blkfront_resume(struct xenbus_device *dev)
+{
+	struct blkfront_info *info = dev->dev.driver_data;
+	int err;
+
+	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
+
+	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
+
+	err = talk_to_backend(dev, info);
+	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
+		err = blkif_recover(info);
+
+	return err;
+}
+
+
+/*
+ * Invoked when the backend is finally 'ready' (and has produced the
+ * details about the physical device - #sectors, size, etc).
+ */
+static void blkfront_connect(struct blkfront_info *info)
+{
+	unsigned long long sectors;
+	unsigned long sector_size;
+	unsigned int binfo;
+	int err;
+
+	if ((info->connected == BLKIF_STATE_CONNECTED) ||
+	    (info->connected == BLKIF_STATE_SUSPENDED))
+		return;
+
+	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
+		__func__, info->xbdev->otherend);
+
+	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+			    "sectors", "%llu", &sectors,
+			    "info", "%u", &binfo,
+			    "sector-size", "%lu", &sector_size,
+			    NULL);
+	if (err) {
+		xenbus_dev_fatal(info->xbdev, err,
+				 "reading backend fields at %s",
+				 info->xbdev->otherend);
+		return;
+	}
+
+	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+			    "feature-barrier", "%lu", &info->feature_barrier,
+			    NULL);
+	if (err)
+		info->feature_barrier = 0;
+
+	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
+	if (err) {
+		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
+				 info->xbdev->otherend);
+		return;
+	}
+
+	xenbus_switch_state(info->xbdev, XenbusStateConnected);
+
+	/* Kick pending requests. */
+	spin_lock_irq(&blkif_io_lock);
+	info->connected = BLKIF_STATE_CONNECTED;
+	kick_pending_request_queues(info);
+	spin_unlock_irq(&blkif_io_lock);
+
+	add_disk(info->gd);
+
+	info->is_ready = 1;
+}
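+
+/*
+ * For illustration, the nodes gathered above live in the backend's
+ * xenstore directory.  A hypothetical 10 GiB xvda (vdevice 51712, i.e.
+ * major 202, minor 0) might export the following; paths and values here
+ * are examples only:
+ *
+ *	backend/vbd/<frontend-domid>/51712/sectors         = "20971520"
+ *	backend/vbd/<frontend-domid>/51712/info            = "0"
+ *	backend/vbd/<frontend-domid>/51712/sector-size     = "512"
+ *	backend/vbd/<frontend-domid>/51712/feature-barrier = "1"
+ */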
+
+/**
+ * Handle the change of state of the backend to Closing.  We must delete
+ * our device-layer structures now, to ensure that writes are flushed
+ * through to the backend.  Once this is done, we can switch to Closed in
+ * acknowledgement.
+ */
+static void blkfront_closing(struct xenbus_device *dev)
+{
+	struct blkfront_info *info = dev->dev.driver_data;
+	unsigned long flags;
+
+	dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);
+
+	if (info->rq == NULL)
+		goto out;
+
+	spin_lock_irqsave(&blkif_io_lock, flags);
+
+	del_gendisk(info->gd);
+
+	/* No more blkif_request(). */
+	blk_stop_queue(info->rq);
+
+	/* No more gnttab callback work. */
+	gnttab_cancel_free_callback(&info->callback);
+	spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+	/* Flush gnttab callback work. Must be done with no locks held. */
+	flush_scheduled_work();
+
+	blk_cleanup_queue(info->rq);
+	info->rq = NULL;
+
+ out:
+	xenbus_frontend_closed(dev);
+}
+
+/**
+ * Callback received when the backend's state changes.
+ */
+static void backend_changed(struct xenbus_device *dev,
+			    enum xenbus_state backend_state)
+{
+	struct blkfront_info *info = dev->dev.driver_data;
+	struct block_device *bd;
+
+	dev_dbg(&dev->dev, "blkfront:backend_changed.\n");
+
+	switch (backend_state) {
+	case XenbusStateInitialising:
+	case XenbusStateInitWait:
+	case XenbusStateInitialised:
+	case XenbusStateUnknown:
+	case XenbusStateClosed:
+		break;
+
+	case XenbusStateConnected:
+		blkfront_connect(info);
+		break;
+
+	case XenbusStateClosing:
+		bd = bdget_disk(info->gd, 0);
+		if (bd == NULL) {
+			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
+			break;	/* don't dereference a NULL bd below */
+		}
+
+		mutex_lock(&bd->bd_mutex);
+		if (info->users > 0)
+			xenbus_dev_error(dev, -EBUSY,
+					 "Device in use; refusing to close");
+		else
+			blkfront_closing(dev);
+		mutex_unlock(&bd->bd_mutex);
+		bdput(bd);
+		break;
+	}
+}
+
+static int blkfront_remove(struct xenbus_device *dev)
+{
+	struct blkfront_info *info = dev->dev.driver_data;
+
+	dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);
+
+	blkif_free(info, 0);
+
+	kfree(info);
+
+	return 0;
+}
+
+static int blkfront_is_ready(struct xenbus_device *dev)
+{
+	struct blkfront_info *info = dev->dev.driver_data;
+
+	return info->is_ready;
+}
+
+static int blkif_open(struct block_device *bdev, fmode_t mode)
+{
+	struct blkfront_info *info = bdev->bd_disk->private_data;
+	info->users++;
+	return 0;
+}
+
+static int blkif_release(struct gendisk *disk, fmode_t mode)
+{
+	struct blkfront_info *info = disk->private_data;
+	info->users--;
+	if (info->users == 0) {
+		/* Check whether we have been instructed to close.  We will
+		   have ignored this request initially, as the device was
+		   still mounted. */
+		struct xenbus_device *dev = info->xbdev;
+		enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
+
+		if (state == XenbusStateClosing && info->is_ready)
+			blkfront_closing(dev);
+	}
+	return 0;
+}
+
+static struct block_device_operations xlvbd_block_fops =
+{
+	.owner = THIS_MODULE,
+	.open = blkif_open,
+	.release = blkif_release,
+	.getgeo = blkif_getgeo,
+	.locked_ioctl = blkif_ioctl,
+};
+
+
+static struct xenbus_device_id blkfront_ids[] = {
+	{ "vbd" },
+	{ "" }
+};
+
+static struct xenbus_driver blkfront = {
+	.name = "vbd",
+	.owner = THIS_MODULE,
+	.ids = blkfront_ids,
+	.probe = blkfront_probe,
+	.remove = blkfront_remove,
+	.resume = blkfront_resume,
+	.otherend_changed = backend_changed,
+	.is_ready = blkfront_is_ready,
+};
+
+static int __init xlblk_init(void)
+{
+	if (!xen_domain())
+		return -ENODEV;
+
+	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
+		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
+		       XENVBD_MAJOR, DEV_NAME);
+		return -ENODEV;
+	}
+
+	return xenbus_register_frontend(&blkfront);
+}
+module_init(xlblk_init);
+
+
+static void __exit xlblk_exit(void)
+{
+	return xenbus_unregister_driver(&blkfront);
+}
+module_exit(xlblk_exit);
+
+MODULE_DESCRIPTION("Xen virtual block device frontend");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
+MODULE_ALIAS("xen:vbd");
+MODULE_ALIAS("xenblk");
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
new file mode 100644
index 0000000..29e1dfa
--- /dev/null
+++ b/drivers/block/xsysace.c
@@ -0,0 +1,1284 @@
+/*
+ * Xilinx SystemACE device driver
+ *
+ * Copyright 2007 Secret Lab Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+/*
+ * The SystemACE chip is designed to configure FPGAs by loading an FPGA
+ * bitstream from a file on a CF card and squirting it into FPGAs connected
+ * to the SystemACE JTAG chain.  It also has the advantage of providing an
+ * MPU interface which can be used to control the FPGA configuration process
+ * and to use the attached CF card for general purpose storage.
+ *
+ * This driver is a block device driver for the SystemACE.
+ *
+ * Initialization:
+ *    The driver registers itself as a platform_device driver at module
+ *    load time.  The platform bus will take care of calling the
+ *    ace_probe() method for all SystemACE instances in the system.  Any
+ *    number of SystemACE instances are supported.  ace_probe() calls
+ *    ace_setup() which initializes all data structures, reads the CF
+ *    id structure and registers the device.
+ *
+ * Processing:
+ *    Just about all of the heavy lifting in this driver is performed by
+ *    a Finite State Machine (FSM).  The driver needs to wait on a number
+ *    of events; some raised by interrupts, some which need to be polled
+ *    for.  Describing all of the behaviour in an FSM seems to be the
+ *    easiest way to keep the complexity low and make it easy to
+ *    understand what the driver is doing.  If the block ops or the
+ *    request function need to interact with the hardware, then they
+ *    simply need to flag the request and kick off FSM processing.
+ *
+ *    The FSM itself is atomic-safe code which can be run from any
+ *    context.  The general process flow is:
+ *    1. obtain the ace->lock spinlock.
+ *    2. loop on ace_fsm_dostate() until the ace->fsm_continue flag is
+ *       cleared.
+ *    3. release the lock.
+ *
+ *    Individual states do not sleep in any way.  If a condition needs to
+ *    be waited for then the state must clear the fsm_continue flag and
+ *    either schedule the FSM to be run again at a later time, or expect
+ *    an interrupt to call the FSM when the desired condition is met.
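+ *
+ *    That driving loop, which ace_fsm_tasklet() below implements
+ *    verbatim, looks like this in sketch form:
+ *
+ *	spin_lock_irqsave(&ace->lock, flags);
+ *	ace->fsm_continue_flag = 1;
+ *	while (ace->fsm_continue_flag)
+ *		ace_fsm_dostate(ace);
+ *	spin_unlock_irqrestore(&ace->lock, flags);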
+ *
+ *    In normal operation, the FSM is processed at interrupt context
+ *    either when the driver's tasklet is scheduled, or when an irq is
+ *    raised by the hardware.  The tasklet can be scheduled at any time.
+ *    The request method in particular schedules the tasklet when a new
+ *    request has been indicated by the block layer.  Once started, the
+ *    FSM proceeds as far as it can processing the request until it
+ *    needs to wait on a hardware event.  At this point, it must yield
+ *    execution.
+ *
+ *    A state has two options when yielding execution:
+ *    1. ace_fsm_yield()
+ *       - Call if the state needs to poll for an event.
+ *       - clears the fsm_continue flag to exit the processing loop
+ *       - reschedules the tasklet to run again as soon as possible
+ *    2. ace_fsm_yieldirq()
+ *       - Call if an irq is expected from the HW
+ *       - clears the fsm_continue flag to exit the processing loop
+ *       - does not reschedule the tasklet so the FSM will not be processed
+ *         again until an irq is received.
+ *    After calling a yield function, the state must return control back
+ *    to the FSM main loop.
+ *
+ *    Additionally, the driver maintains a kernel timer which can process
+ *    the FSM.  If the FSM gets stalled, typically due to a missed
+ *    interrupt, then the kernel timer will expire and the driver can
+ *    continue where it left off.
+ *
+ * To Do:
+ *    - Add FPGA configuration control interface.
+ *    - Request major number from lanana
+ */
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/platform_device.h>
+#if defined(CONFIG_OF)
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#endif
+
+MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
+MODULE_DESCRIPTION("Xilinx SystemACE device driver");
+MODULE_LICENSE("GPL");
+
+/* SystemACE register definitions */
+#define ACE_BUSMODE (0x00)
+
+#define ACE_STATUS (0x04)
+#define ACE_STATUS_CFGLOCK (0x00000001)
+#define ACE_STATUS_MPULOCK (0x00000002)
+#define ACE_STATUS_CFGERROR (0x00000004)	/* config controller error */
+#define ACE_STATUS_CFCERROR (0x00000008)	/* CF controller error */
+#define ACE_STATUS_CFDETECT (0x00000010)
+#define ACE_STATUS_DATABUFRDY (0x00000020)
+#define ACE_STATUS_DATABUFMODE (0x00000040)
+#define ACE_STATUS_CFGDONE (0x00000080)
+#define ACE_STATUS_RDYFORCFCMD (0x00000100)
+#define ACE_STATUS_CFGMODEPIN (0x00000200)
+#define ACE_STATUS_CFGADDR_MASK (0x0000e000)
+#define ACE_STATUS_CFBSY (0x00020000)
+#define ACE_STATUS_CFRDY (0x00040000)
+#define ACE_STATUS_CFDWF (0x00080000)
+#define ACE_STATUS_CFDSC (0x00100000)
+#define ACE_STATUS_CFDRQ (0x00200000)
+#define ACE_STATUS_CFCORR (0x00400000)
+#define ACE_STATUS_CFERR (0x00800000)
+
+#define ACE_ERROR (0x08)
+#define ACE_CFGLBA (0x0c)
+#define ACE_MPULBA (0x10)
+
+#define ACE_SECCNTCMD (0x14)
+#define ACE_SECCNTCMD_RESET (0x0100)
+#define ACE_SECCNTCMD_IDENTIFY (0x0200)
+#define ACE_SECCNTCMD_READ_DATA (0x0300)
+#define ACE_SECCNTCMD_WRITE_DATA (0x0400)
+#define ACE_SECCNTCMD_ABORT (0x0600)
+
+#define ACE_VERSION (0x16)
+#define ACE_VERSION_REVISION_MASK (0x00FF)
+#define ACE_VERSION_MINOR_MASK (0x0F00)
+#define
ACE_VERSION_MAJOR_MASK (0xF000) + +#define ACE_CTRL (0x18) +#define ACE_CTRL_FORCELOCKREQ (0x0001) +#define ACE_CTRL_LOCKREQ (0x0002) +#define ACE_CTRL_FORCECFGADDR (0x0004) +#define ACE_CTRL_FORCECFGMODE (0x0008) +#define ACE_CTRL_CFGMODE (0x0010) +#define ACE_CTRL_CFGSTART (0x0020) +#define ACE_CTRL_CFGSEL (0x0040) +#define ACE_CTRL_CFGRESET (0x0080) +#define ACE_CTRL_DATABUFRDYIRQ (0x0100) +#define ACE_CTRL_ERRORIRQ (0x0200) +#define ACE_CTRL_CFGDONEIRQ (0x0400) +#define ACE_CTRL_RESETIRQ (0x0800) +#define ACE_CTRL_CFGPROG (0x1000) +#define ACE_CTRL_CFGADDR_MASK (0xe000) + +#define ACE_FATSTAT (0x1c) + +#define ACE_NUM_MINORS 16 +#define ACE_SECTOR_SIZE (512) +#define ACE_FIFO_SIZE (32) +#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE) + +#define ACE_BUS_WIDTH_8 0 +#define ACE_BUS_WIDTH_16 1 + +struct ace_reg_ops; + +struct ace_device { + /* driver state data */ + int id; + int media_change; + int users; + struct list_head list; + + /* finite state machine data */ + struct tasklet_struct fsm_tasklet; + uint fsm_task; /* Current activity (ACE_TASK_*) */ + uint fsm_state; /* Current state (ACE_FSM_STATE_*) */ + uint fsm_continue_flag; /* cleared to exit FSM mainloop */ + uint fsm_iter_num; + struct timer_list stall_timer; + + /* Transfer state/result, use for both id and block request */ + struct request *req; /* request being processed */ + void *data_ptr; /* pointer to I/O buffer */ + int data_count; /* number of buffers remaining */ + int data_result; /* Result of transfer; 0 := success */ + + int id_req_count; /* count of id requests */ + int id_result; + struct completion id_completion; /* used when id req finishes */ + int in_irq; + + /* Details of hardware device */ + resource_size_t physaddr; + void __iomem *baseaddr; + int irq; + int bus_width; /* 0 := 8 bit; 1 := 16 bit */ + struct ace_reg_ops *reg_ops; + int lock_count; + + /* Block device data structures */ + spinlock_t lock; + struct device *dev; + struct request_queue *queue; + struct gendisk *gd; + + /* Inserted CF card parameters */ + struct hd_driveid cf_id; +}; + +static int ace_major; + +/* --------------------------------------------------------------------- + * Low level register access + */ + +struct ace_reg_ops { + u16(*in) (struct ace_device * ace, int reg); + void (*out) (struct ace_device * ace, int reg, u16 val); + void (*datain) (struct ace_device * ace); + void (*dataout) (struct ace_device * ace); +}; + +/* 8 Bit bus width */ +static u16 ace_in_8(struct ace_device *ace, int reg) +{ + void __iomem *r = ace->baseaddr + reg; + return in_8(r) | (in_8(r + 1) << 8); +} + +static void ace_out_8(struct ace_device *ace, int reg, u16 val) +{ + void __iomem *r = ace->baseaddr + reg; + out_8(r, val); + out_8(r + 1, val >> 8); +} + +static void ace_datain_8(struct ace_device *ace) +{ + void __iomem *r = ace->baseaddr + 0x40; + u8 *dst = ace->data_ptr; + int i = ACE_FIFO_SIZE; + while (i--) + *dst++ = in_8(r++); + ace->data_ptr = dst; +} + +static void ace_dataout_8(struct ace_device *ace) +{ + void __iomem *r = ace->baseaddr + 0x40; + u8 *src = ace->data_ptr; + int i = ACE_FIFO_SIZE; + while (i--) + out_8(r++, *src++); + ace->data_ptr = src; +} + +static struct ace_reg_ops ace_reg_8_ops = { + .in = ace_in_8, + .out = ace_out_8, + .datain = ace_datain_8, + .dataout = ace_dataout_8, +}; + +/* 16 bit big endian bus attachment */ +static u16 ace_in_be16(struct ace_device *ace, int reg) +{ + return in_be16(ace->baseaddr + reg); +} + +static void ace_out_be16(struct ace_device *ace, int reg, u16 val) +{ + 
out_be16(ace->baseaddr + reg, val); +} + +static void ace_datain_be16(struct ace_device *ace) +{ + int i = ACE_FIFO_SIZE / 2; + u16 *dst = ace->data_ptr; + while (i--) + *dst++ = in_le16(ace->baseaddr + 0x40); + ace->data_ptr = dst; +} + +static void ace_dataout_be16(struct ace_device *ace) +{ + int i = ACE_FIFO_SIZE / 2; + u16 *src = ace->data_ptr; + while (i--) + out_le16(ace->baseaddr + 0x40, *src++); + ace->data_ptr = src; +} + +/* 16 bit little endian bus attachment */ +static u16 ace_in_le16(struct ace_device *ace, int reg) +{ + return in_le16(ace->baseaddr + reg); +} + +static void ace_out_le16(struct ace_device *ace, int reg, u16 val) +{ + out_le16(ace->baseaddr + reg, val); +} + +static void ace_datain_le16(struct ace_device *ace) +{ + int i = ACE_FIFO_SIZE / 2; + u16 *dst = ace->data_ptr; + while (i--) + *dst++ = in_be16(ace->baseaddr + 0x40); + ace->data_ptr = dst; +} + +static void ace_dataout_le16(struct ace_device *ace) +{ + int i = ACE_FIFO_SIZE / 2; + u16 *src = ace->data_ptr; + while (i--) + out_be16(ace->baseaddr + 0x40, *src++); + ace->data_ptr = src; +} + +static struct ace_reg_ops ace_reg_be16_ops = { + .in = ace_in_be16, + .out = ace_out_be16, + .datain = ace_datain_be16, + .dataout = ace_dataout_be16, +}; + +static struct ace_reg_ops ace_reg_le16_ops = { + .in = ace_in_le16, + .out = ace_out_le16, + .datain = ace_datain_le16, + .dataout = ace_dataout_le16, +}; + +static inline u16 ace_in(struct ace_device *ace, int reg) +{ + return ace->reg_ops->in(ace, reg); +} + +static inline u32 ace_in32(struct ace_device *ace, int reg) +{ + return ace_in(ace, reg) | (ace_in(ace, reg + 2) << 16); +} + +static inline void ace_out(struct ace_device *ace, int reg, u16 val) +{ + ace->reg_ops->out(ace, reg, val); +} + +static inline void ace_out32(struct ace_device *ace, int reg, u32 val) +{ + ace_out(ace, reg, val); + ace_out(ace, reg + 2, val >> 16); +} + +/* --------------------------------------------------------------------- + * Debug support functions + */ + +#if defined(DEBUG) +static void ace_dump_mem(void *base, int len) +{ + const char *ptr = base; + int i, j; + + for (i = 0; i < len; i += 16) { + printk(KERN_INFO "%.8x:", i); + for (j = 0; j < 16; j++) { + if (!(j % 4)) + printk(" "); + printk("%.2x", ptr[i + j]); + } + printk(" "); + for (j = 0; j < 16; j++) + printk("%c", isprint(ptr[i + j]) ? 
ptr[i + j] : '.'); + printk("\n"); + } +} +#else +static inline void ace_dump_mem(void *base, int len) +{ +} +#endif + +static void ace_dump_regs(struct ace_device *ace) +{ + dev_info(ace->dev, " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n" + KERN_INFO " status:%.8x mpu_lba:%.8x busmode:%4x\n" + KERN_INFO " error: %.8x cfg_lba:%.8x fatstat:%.4x\n", + ace_in32(ace, ACE_CTRL), + ace_in(ace, ACE_SECCNTCMD), + ace_in(ace, ACE_VERSION), + ace_in32(ace, ACE_STATUS), + ace_in32(ace, ACE_MPULBA), + ace_in(ace, ACE_BUSMODE), + ace_in32(ace, ACE_ERROR), + ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT)); +} + +void ace_fix_driveid(struct hd_driveid *id) +{ +#if defined(__BIG_ENDIAN) + u16 *buf = (void *)id; + int i; + + /* All half words have wrong byte order; swap the bytes */ + for (i = 0; i < sizeof(struct hd_driveid); i += 2, buf++) + *buf = le16_to_cpu(*buf); + + /* Some of the data values are 32bit; swap the half words */ + id->lba_capacity = ((id->lba_capacity >> 16) & 0x0000FFFF) | + ((id->lba_capacity << 16) & 0xFFFF0000); + id->spg = ((id->spg >> 16) & 0x0000FFFF) | + ((id->spg << 16) & 0xFFFF0000); +#endif +} + +/* --------------------------------------------------------------------- + * Finite State Machine (FSM) implementation + */ + +/* FSM tasks; used to direct state transitions */ +#define ACE_TASK_IDLE 0 +#define ACE_TASK_IDENTIFY 1 +#define ACE_TASK_READ 2 +#define ACE_TASK_WRITE 3 +#define ACE_FSM_NUM_TASKS 4 + +/* FSM state definitions */ +#define ACE_FSM_STATE_IDLE 0 +#define ACE_FSM_STATE_REQ_LOCK 1 +#define ACE_FSM_STATE_WAIT_LOCK 2 +#define ACE_FSM_STATE_WAIT_CFREADY 3 +#define ACE_FSM_STATE_IDENTIFY_PREPARE 4 +#define ACE_FSM_STATE_IDENTIFY_TRANSFER 5 +#define ACE_FSM_STATE_IDENTIFY_COMPLETE 6 +#define ACE_FSM_STATE_REQ_PREPARE 7 +#define ACE_FSM_STATE_REQ_TRANSFER 8 +#define ACE_FSM_STATE_REQ_COMPLETE 9 +#define ACE_FSM_STATE_ERROR 10 +#define ACE_FSM_NUM_STATES 11 + +/* Set flag to exit FSM loop and reschedule tasklet */ +static inline void ace_fsm_yield(struct ace_device *ace) +{ + dev_dbg(ace->dev, "ace_fsm_yield()\n"); + tasklet_schedule(&ace->fsm_tasklet); + ace->fsm_continue_flag = 0; +} + +/* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */ +static inline void ace_fsm_yieldirq(struct ace_device *ace) +{ + dev_dbg(ace->dev, "ace_fsm_yieldirq()\n"); + + if (ace->irq == NO_IRQ) + /* No IRQ assigned, so need to poll */ + tasklet_schedule(&ace->fsm_tasklet); + ace->fsm_continue_flag = 0; +} + +/* Get the next read/write request; ending requests that we don't handle */ +struct request *ace_get_next_request(struct request_queue * q) +{ + struct request *req; + + while ((req = elv_next_request(q)) != NULL) { + if (blk_fs_request(req)) + break; + end_request(req, 0); + } + return req; +} + +static void ace_fsm_dostate(struct ace_device *ace) +{ + struct request *req; + u32 status; + u16 val; + int count; + +#if defined(DEBUG) + dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n", + ace->fsm_state, ace->id_req_count); +#endif + + switch (ace->fsm_state) { + case ACE_FSM_STATE_IDLE: + /* See if there is anything to do */ + if (ace->id_req_count || ace_get_next_request(ace->queue)) { + ace->fsm_iter_num++; + ace->fsm_state = ACE_FSM_STATE_REQ_LOCK; + mod_timer(&ace->stall_timer, jiffies + HZ); + if (!timer_pending(&ace->stall_timer)) + add_timer(&ace->stall_timer); + break; + } + del_timer(&ace->stall_timer); + ace->fsm_continue_flag = 0; + break; + + case ACE_FSM_STATE_REQ_LOCK: + if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) { + /* Already have the lock, 
jump to next state */
+			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
+			break;
+		}
+
+		/* Request the lock */
+		val = ace_in(ace, ACE_CTRL);
+		ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
+		ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
+		break;
+
+	case ACE_FSM_STATE_WAIT_LOCK:
+		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
+			/* got the lock; move to next state */
+			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
+			break;
+		}
+
+		/* wait a bit for the lock */
+		ace_fsm_yield(ace);
+		break;
+
+	case ACE_FSM_STATE_WAIT_CFREADY:
+		status = ace_in32(ace, ACE_STATUS);
+		if (!(status & ACE_STATUS_RDYFORCFCMD) ||
+		    (status & ACE_STATUS_CFBSY)) {
+			/* CF card isn't ready; it needs to be polled */
+			ace_fsm_yield(ace);
+			break;
+		}
+
+		/* Device is ready for command; determine what to do next */
+		if (ace->id_req_count)
+			ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
+		else
+			ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
+		break;
+
+	case ACE_FSM_STATE_IDENTIFY_PREPARE:
+		/* Send identify command */
+		ace->fsm_task = ACE_TASK_IDENTIFY;
+		ace->data_ptr = &ace->cf_id;
+		ace->data_count = ACE_BUF_PER_SECTOR;
+		ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);
+
+		/* As per datasheet, put config controller in reset */
+		val = ace_in(ace, ACE_CTRL);
+		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
+
+		/* irq handler takes over from this point; wait for the
+		 * transfer to complete */
+		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
+		ace_fsm_yieldirq(ace);
+		break;
+
+	case ACE_FSM_STATE_IDENTIFY_TRANSFER:
+		/* Check that the sysace is ready to receive data */
+		status = ace_in32(ace, ACE_STATUS);
+		if (status & ACE_STATUS_CFBSY) {
+			dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
+				ace->fsm_task, ace->fsm_iter_num,
+				ace->data_count);
+			ace_fsm_yield(ace);
+			break;
+		}
+		if (!(status & ACE_STATUS_DATABUFRDY)) {
+			ace_fsm_yield(ace);
+			break;
+		}
+
+		/* Transfer the next buffer */
+		ace->reg_ops->datain(ace);
+		ace->data_count--;
+
+		/* If there are still buffers to be transferred, jump out here */
+		if (ace->data_count != 0) {
+			ace_fsm_yieldirq(ace);
+			break;
+		}
+
+		/* transfer finished; kick state machine */
+		dev_dbg(ace->dev, "identify finished\n");
+		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
+		break;
+
+	case ACE_FSM_STATE_IDENTIFY_COMPLETE:
+		ace_fix_driveid(&ace->cf_id);
+		ace_dump_mem(&ace->cf_id, 512);	/* Debug: Dump out disk ID */
+
+		if (ace->data_result) {
+			/* Error occurred, disable the disk */
+			ace->media_change = 1;
+			set_capacity(ace->gd, 0);
+			dev_err(ace->dev, "error fetching CF id (%i)\n",
+				ace->data_result);
+		} else {
+			ace->media_change = 0;
+
+			/* Record disk parameters */
+			set_capacity(ace->gd, ace->cf_id.lba_capacity);
+			dev_info(ace->dev, "capacity: %i sectors\n",
+				 ace->cf_id.lba_capacity);
+		}
+
+		/* We're done, drop to IDLE state and notify waiters */
+		ace->fsm_state = ACE_FSM_STATE_IDLE;
+		ace->id_result = ace->data_result;
+		while (ace->id_req_count) {
+			complete(&ace->id_completion);
+			ace->id_req_count--;
+		}
+		break;
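+
+	/*
+	 * Transfers below are counted in CF data-buffer units rather than
+	 * sectors: with ACE_SECTOR_SIZE = 512 and ACE_FIFO_SIZE = 32, each
+	 * sector takes ACE_BUF_PER_SECTOR = 16 datain()/dataout() calls of
+	 * 32 bytes each.  A 4-sector request, for example, therefore starts
+	 * out with data_count = 64.
+	 */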
+	case ACE_FSM_STATE_REQ_PREPARE:
+		req = ace_get_next_request(ace->queue);
+		if (!req) {
+			ace->fsm_state = ACE_FSM_STATE_IDLE;
+			break;
+		}
+
+		/* Okay, it's a data request, set it up for transfer */
+		dev_dbg(ace->dev,
+			"request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n",
+			(unsigned long long) req->sector, req->hard_nr_sectors,
+			req->current_nr_sectors, rq_data_dir(req));
+
+		ace->req = req;
+		ace->data_ptr = req->buffer;
+		ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
+		ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
+
+		count = req->hard_nr_sectors;
+		if (rq_data_dir(req)) {
+			/* Kick off write request */
+			dev_dbg(ace->dev, "write data\n");
+			ace->fsm_task = ACE_TASK_WRITE;
+			ace_out(ace, ACE_SECCNTCMD,
+				count | ACE_SECCNTCMD_WRITE_DATA);
+		} else {
+			/* Kick off read request */
+			dev_dbg(ace->dev, "read data\n");
+			ace->fsm_task = ACE_TASK_READ;
+			ace_out(ace, ACE_SECCNTCMD,
+				count | ACE_SECCNTCMD_READ_DATA);
+		}
+
+		/* As per datasheet, put config controller in reset */
+		val = ace_in(ace, ACE_CTRL);
+		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
+
+		/* Move to the transfer state. The systemace will raise
+		 * an interrupt once there is something to do
+		 */
+		ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
+		if (ace->fsm_task == ACE_TASK_READ)
+			ace_fsm_yieldirq(ace);	/* wait for data ready */
+		break;
+
+	case ACE_FSM_STATE_REQ_TRANSFER:
+		/* Check that the sysace is ready to receive data */
+		status = ace_in32(ace, ACE_STATUS);
+		if (status & ACE_STATUS_CFBSY) {
+			dev_dbg(ace->dev,
+				"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
+				ace->fsm_task, ace->fsm_iter_num,
+				ace->req->current_nr_sectors * 16,
+				ace->data_count, ace->in_irq);
+			ace_fsm_yield(ace);	/* need to poll CFBSY bit */
+			break;
+		}
+		if (!(status & ACE_STATUS_DATABUFRDY)) {
+			dev_dbg(ace->dev,
+				"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
+				ace->fsm_task, ace->fsm_iter_num,
+				ace->req->current_nr_sectors * 16,
+				ace->data_count, ace->in_irq);
+			ace_fsm_yieldirq(ace);
+			break;
+		}
+
+		/* Transfer the next buffer */
+		if (ace->fsm_task == ACE_TASK_WRITE)
+			ace->reg_ops->dataout(ace);
+		else
+			ace->reg_ops->datain(ace);
+		ace->data_count--;
+
+		/* If there are still buffers to be transferred, jump out here */
+		if (ace->data_count != 0) {
+			ace_fsm_yieldirq(ace);
+			break;
+		}
+
+		/* bio finished; is there another one? */
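+		/*
+		 * In this kernel, __blk_end_request() returns nonzero while
+		 * the request still has bytes outstanding, so a multi-bio
+		 * request passes through this arm once per bio before the
+		 * FSM drops into REQ_COMPLETE.
+		 */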
+		if (__blk_end_request(ace->req, 0,
+				      blk_rq_cur_bytes(ace->req))) {
+			/* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
+			 *      ace->req->hard_nr_sectors,
+			 *      ace->req->current_nr_sectors);
+			 */
+			ace->data_ptr = ace->req->buffer;
+			ace->data_count = ace->req->current_nr_sectors * 16;
+			ace_fsm_yieldirq(ace);
+			break;
+		}
+
+		ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
+		break;
+
+	case ACE_FSM_STATE_REQ_COMPLETE:
+		ace->req = NULL;
+
+		/* Finished request; go to idle state */
+		ace->fsm_state = ACE_FSM_STATE_IDLE;
+		break;
+
+	default:
+		ace->fsm_state = ACE_FSM_STATE_IDLE;
+		break;
+	}
+}
+
+static void ace_fsm_tasklet(unsigned long data)
+{
+	struct ace_device *ace = (void *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ace->lock, flags);
+
+	/* Loop over state machine until told to stop */
+	ace->fsm_continue_flag = 1;
+	while (ace->fsm_continue_flag)
+		ace_fsm_dostate(ace);
+
+	spin_unlock_irqrestore(&ace->lock, flags);
+}
+
+static void ace_stall_timer(unsigned long data)
+{
+	struct ace_device *ace = (void *)data;
+	unsigned long flags;
+
+	dev_warn(ace->dev,
+		 "kicking stalled fsm; state=%i task=%i iter=%i dc=%i\n",
+		 ace->fsm_state, ace->fsm_task, ace->fsm_iter_num,
+		 ace->data_count);
+	spin_lock_irqsave(&ace->lock, flags);
+
+	/* Rearm the stall timer *before* entering FSM (which may then
+	 * delete the timer) */
+	mod_timer(&ace->stall_timer, jiffies + HZ);
+
+	/* Loop over state machine until told to stop */
+	ace->fsm_continue_flag = 1;
+	while (ace->fsm_continue_flag)
+		ace_fsm_dostate(ace);
+
+	spin_unlock_irqrestore(&ace->lock, flags);
+}
+
+/* ---------------------------------------------------------------------
+ * Interrupt handling routines
+ */
+static int ace_interrupt_checkstate(struct ace_device *ace)
+{
+	u32 sreg = ace_in32(ace, ACE_STATUS);
+	u16 creg = ace_in(ace, ACE_CTRL);
+
+	/* Check for error occurrence */
+	if ((sreg & (ACE_STATUS_CFGERROR | ACE_STATUS_CFCERROR)) &&
+	    (creg & ACE_CTRL_ERRORIRQ)) {
+		dev_err(ace->dev, "transfer failure\n");
+		ace_dump_regs(ace);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static irqreturn_t ace_interrupt(int irq, void *dev_id)
+{
+	u16 creg;
+	struct ace_device *ace = dev_id;
+
+	/* be safe and get the lock */
+	spin_lock(&ace->lock);
+	ace->in_irq = 1;
+
+	/* clear the interrupt */
+	creg = ace_in(ace, ACE_CTRL);
+	ace_out(ace, ACE_CTRL, creg | ACE_CTRL_RESETIRQ);
+	ace_out(ace, ACE_CTRL, creg);
+
+	/* check for IO failures */
+	if (ace_interrupt_checkstate(ace))
+		ace->data_result = -EIO;
+
+	if (ace->fsm_task == 0) {
+		dev_err(ace->dev,
+			"spurious irq; stat=%.8x ctrl=%.8x cmd=%.4x\n",
+			ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_CTRL),
+			ace_in(ace, ACE_SECCNTCMD));
+		dev_err(ace->dev, "fsm_task=%i fsm_state=%i data_count=%i\n",
+			ace->fsm_task, ace->fsm_state, ace->data_count);
+	}
+
+	/* Loop over state machine until told to stop */
+	ace->fsm_continue_flag = 1;
+	while (ace->fsm_continue_flag)
+		ace_fsm_dostate(ace);
+
+	/* done with interrupt; drop the lock */
+	ace->in_irq = 0;
+	spin_unlock(&ace->lock);
+
+	return IRQ_HANDLED;
+}
+
+/* ---------------------------------------------------------------------
+ * Block ops
+ */
+static void ace_request(struct request_queue * q)
+{
+	struct request *req;
+	struct ace_device *ace;
+
+	req = ace_get_next_request(q);
+
+	if (req) {
+		ace = req->rq_disk->private_data;
+		tasklet_schedule(&ace->fsm_tasklet);
+	}
+}
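+
+/*
+ * Media-change flow, for orientation: ace_open() calls
+ * check_disk_change(), which has the block layer call ace_media_changed()
+ * and, when that returns true, ace_revalidate_disk().  The latter queues
+ * an IDENTIFY through the FSM and sleeps on id_completion, so a fresh
+ * capacity is known before the open completes.
+ */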
+
+static int ace_media_changed(struct gendisk *gd)
+{
+	struct ace_device *ace = gd->private_data;
+	dev_dbg(ace->dev, "ace_media_changed(): %i\n", ace->media_change);
+
+	return ace->media_change;
+}
+
+static int ace_revalidate_disk(struct gendisk *gd)
+{
+	struct ace_device *ace = gd->private_data;
+	unsigned long flags;
+
+	dev_dbg(ace->dev, "ace_revalidate_disk()\n");
+
+	if (ace->media_change) {
+		dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");
+
+		spin_lock_irqsave(&ace->lock, flags);
+		ace->id_req_count++;
+		spin_unlock_irqrestore(&ace->lock, flags);
+
+		tasklet_schedule(&ace->fsm_tasklet);
+		wait_for_completion(&ace->id_completion);
+	}
+
+	dev_dbg(ace->dev, "revalidate complete\n");
+	return ace->id_result;
+}
+
+static int ace_open(struct block_device *bdev, fmode_t mode)
+{
+	struct ace_device *ace = bdev->bd_disk->private_data;
+	unsigned long flags;
+
+	dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);
+
+	spin_lock_irqsave(&ace->lock, flags);
+	ace->users++;
+	spin_unlock_irqrestore(&ace->lock, flags);
+
+	check_disk_change(bdev);
+	return 0;
+}
+
+static int ace_release(struct gendisk *disk, fmode_t mode)
+{
+	struct ace_device *ace = disk->private_data;
+	unsigned long flags;
+	u16 val;
+
+	dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1);
+
+	spin_lock_irqsave(&ace->lock, flags);
+	ace->users--;
+	if (ace->users == 0) {
+		val = ace_in(ace, ACE_CTRL);
+		ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ);
+	}
+	spin_unlock_irqrestore(&ace->lock, flags);
+	return 0;
+}
+
+static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+	struct ace_device *ace = bdev->bd_disk->private_data;
+
+	dev_dbg(ace->dev, "ace_getgeo()\n");
+
+	geo->heads = ace->cf_id.heads;
+	geo->sectors = ace->cf_id.sectors;
+	geo->cylinders = ace->cf_id.cyls;
+
+	return 0;
+}
+
+static struct block_device_operations ace_fops = {
+	.owner = THIS_MODULE,
+	.open = ace_open,
+	.release = ace_release,
+	.media_changed = ace_media_changed,
+	.revalidate_disk = ace_revalidate_disk,
+	.getgeo = ace_getgeo,
+};
+
+/* --------------------------------------------------------------------
+ * SystemACE device setup/teardown code
+ */
+static int __devinit ace_setup(struct ace_device *ace)
+{
+	u16 version;
+	u16 val;
+	int rc;
+
+	dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace);
+	dev_dbg(ace->dev, "physaddr=0x%llx irq=%i\n",
+		(unsigned long long)ace->physaddr, ace->irq);
+
+	spin_lock_init(&ace->lock);
+	init_completion(&ace->id_completion);
+
+	/*
+	 * Map the device
+	 */
+	ace->baseaddr = ioremap(ace->physaddr, 0x80);
+	if (!ace->baseaddr)
+		goto err_ioremap;
+
+	/*
+	 * Initialize the state machine tasklet and stall timer
+	 */
+	tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
+	setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);
+
+	/*
+	 * Initialize the request queue
+	 */
+	ace->queue = blk_init_queue(ace_request, &ace->lock);
+	if (ace->queue == NULL)
+		goto err_blk_initq;
+	blk_queue_hardsect_size(ace->queue, 512);
+
+	/*
+	 * Allocate and initialize GD structure
+	 */
+	ace->gd = alloc_disk(ACE_NUM_MINORS);
+	if (!ace->gd)
+		goto err_alloc_disk;
+
+	ace->gd->major = ace_major;
+	ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
+	ace->gd->fops = &ace_fops;
+	ace->gd->queue = ace->queue;
+	ace->gd->private_data = ace;
+	snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
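+
+	/*
+	 * Bus-width probe, for orientation: writing 0x0101 puts the same
+	 * byte on both lanes, so the write below is correct whichever way
+	 * the bus is wired.  Reading BUSMODE back through the little-endian
+	 * accessor then tells the wirings apart: 0x0001 means the
+	 * little-endian register ops match the attachment, anything else
+	 * means the byte-swapped ops are needed.
+	 */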
+	/* set bus width */
+	if (ace->bus_width == ACE_BUS_WIDTH_16) {
+		/* 0x0101 should work regardless of endianness */
+		ace_out_le16(ace, ACE_BUSMODE, 0x0101);
+
+		/* read it back to determine endianness */
+		if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
+			ace->reg_ops = &ace_reg_le16_ops;
+		else
+			ace->reg_ops = &ace_reg_be16_ops;
+	} else {
+		ace_out_8(ace, ACE_BUSMODE, 0x00);
+		ace->reg_ops = &ace_reg_8_ops;
+	}
+
+	/* Make sure version register is sane */
+	version = ace_in(ace, ACE_VERSION);
+	if ((version == 0) || (version == 0xFFFF))
+		goto err_read;
+
+	/* Put sysace in a sane state by clearing most control reg bits */
+	ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
+		ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
+
+	/* Now we can hook up the irq handler */
+	if (ace->irq != NO_IRQ) {
+		rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
+		if (rc) {
+			/* Failure - fall back to polled mode */
+			dev_err(ace->dev, "request_irq failed\n");
+			ace->irq = NO_IRQ;
+		}
+	}
+
+	/* Enable interrupts */
+	val = ace_in(ace, ACE_CTRL);
+	val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
+	ace_out(ace, ACE_CTRL, val);
+
+	/* Print the identification */
+	dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
+		 (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
+	dev_dbg(ace->dev, "physaddr 0x%llx, mapped to 0x%p, irq=%i\n",
+		(unsigned long long) ace->physaddr, ace->baseaddr, ace->irq);
+
+	ace->media_change = 1;
+	ace_revalidate_disk(ace->gd);
+
+	/* Make the sysace device 'live' */
+	add_disk(ace->gd);
+
+	return 0;
+
+err_read:
+	put_disk(ace->gd);
+err_alloc_disk:
+	blk_cleanup_queue(ace->queue);
+err_blk_initq:
+	iounmap(ace->baseaddr);
+err_ioremap:
+	dev_info(ace->dev, "xsysace: error initializing device at 0x%llx\n",
+		 (unsigned long long) ace->physaddr);
+	return -ENOMEM;
+}
+
+static void __devexit ace_teardown(struct ace_device *ace)
+{
+	if (ace->gd) {
+		del_gendisk(ace->gd);
+		put_disk(ace->gd);
+	}
+
+	if (ace->queue)
+		blk_cleanup_queue(ace->queue);
+
+	tasklet_kill(&ace->fsm_tasklet);
+
+	if (ace->irq != NO_IRQ)
+		free_irq(ace->irq, ace);
+
+	iounmap(ace->baseaddr);
+}
+
+static int __devinit
+ace_alloc(struct device *dev, int id, resource_size_t physaddr,
+	  int irq, int bus_width)
+{
+	struct ace_device *ace;
+	int rc;
+	dev_dbg(dev, "ace_alloc(%p)\n", dev);
+
+	if (!physaddr) {
+		rc = -ENODEV;
+		goto err_noreg;
+	}
+
+	/* Allocate and initialize the ace device structure */
+	ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
+	if (!ace) {
+		rc = -ENOMEM;
+		goto err_alloc;
+	}
+
+	ace->dev = dev;
+	ace->id = id;
+	ace->physaddr = physaddr;
+	ace->irq = irq;
+	ace->bus_width = bus_width;
+
+	/* Call the setup code */
+	rc = ace_setup(ace);
+	if (rc)
+		goto err_setup;
+
+	dev_set_drvdata(dev, ace);
+	return 0;
+
+err_setup:
+	dev_set_drvdata(dev, NULL);
+	kfree(ace);
+err_alloc:
+err_noreg:
+	dev_err(dev, "could not initialize device, err=%i\n", rc);
+	return rc;
+}
+
+static void __devexit ace_free(struct device *dev)
+{
+	struct ace_device *ace = dev_get_drvdata(dev);
+	dev_dbg(dev, "ace_free(%p)\n", dev);
+
+	if (ace) {
+		ace_teardown(ace);
+		dev_set_drvdata(dev, NULL);
+		kfree(ace);
+	}
+}
+
+/* ---------------------------------------------------------------------
+ * Platform Bus Support
+ */
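+
+/*
+ * A hypothetical board file would feed ace_probe() below something like
+ * the following; the address and irq are examples only, not taken from
+ * real hardware:
+ *
+ *	static struct resource ace_resources[] = {
+ *		{ .start = 0x41800000, .end = 0x418000ff,
+ *		  .flags = IORESOURCE_MEM },
+ *		{ .start = 31, .flags = IORESOURCE_IRQ },
+ *	};
+ *	static struct platform_device ace_device = {
+ *		.name		= "xsysace",
+ *		.id		= 0,
+ *		.resource	= ace_resources,
+ *		.num_resources	= ARRAY_SIZE(ace_resources),
+ *	};
+ */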
+	for (i = 0; i < dev->num_resources; i++) {
+		if (dev->resource[i].flags & IORESOURCE_MEM)
+			physaddr = dev->resource[i].start;
+		if (dev->resource[i].flags & IORESOURCE_IRQ)
+			irq = dev->resource[i].start;
+	}
+
+	/* Call the bus-independent setup code */
+	return ace_alloc(&dev->dev, id, physaddr, irq, bus_width);
+}
+
+/*
+ * Platform bus remove() method
+ */
+static int __devexit ace_remove(struct platform_device *dev)
+{
+	ace_free(&dev->dev);
+	return 0;
+}
+
+static struct platform_driver ace_platform_driver = {
+	.probe = ace_probe,
+	.remove = __devexit_p(ace_remove),
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "xsysace",
+	},
+};
+
+/* ---------------------------------------------------------------------
+ * OF_Platform Bus Support
+ */
+
+#if defined(CONFIG_OF)
+static int __devinit
+ace_of_probe(struct of_device *op, const struct of_device_id *match)
+{
+	struct resource res;
+	resource_size_t physaddr;
+	const u32 *id;
+	int irq, bus_width, rc;
+
+	dev_dbg(&op->dev, "ace_of_probe(%p, %p)\n", op, match);
+
+	/* device id */
+	id = of_get_property(op->node, "port-number", NULL);
+
+	/* physaddr */
+	rc = of_address_to_resource(op->node, 0, &res);
+	if (rc) {
+		dev_err(&op->dev, "invalid address\n");
+		return rc;
+	}
+	physaddr = res.start;
+
+	/* irq */
+	irq = irq_of_parse_and_map(op->node, 0);
+
+	/* bus width */
+	bus_width = ACE_BUS_WIDTH_16;
+	if (of_find_property(op->node, "8-bit", NULL))
+		bus_width = ACE_BUS_WIDTH_8;
+
+	/* Call the bus-independent setup code */
+	return ace_alloc(&op->dev, id ? *id : 0, physaddr, irq, bus_width);
+}
+
+static int __devexit ace_of_remove(struct of_device *op)
+{
+	ace_free(&op->dev);
+	return 0;
+}
+
+/* Match table for of_platform binding */
+static struct of_device_id ace_of_match[] __devinitdata = {
+	{ .compatible = "xlnx,opb-sysace-1.00.b", },
+	{ .compatible = "xlnx,opb-sysace-1.00.c", },
+	{ .compatible = "xlnx,xps-sysace-1.00.a", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ace_of_match);
+
+static struct of_platform_driver ace_of_driver = {
+	.owner = THIS_MODULE,
+	.name = "xsysace",
+	.match_table = ace_of_match,
+	.probe = ace_of_probe,
+	.remove = __devexit_p(ace_of_remove),
+	.driver = {
+		.name = "xsysace",
+	},
+};
+
+/* Registration helpers to keep the number of #ifdefs to a minimum */
+static inline int __init ace_of_register(void)
+{
+	pr_debug("xsysace: registering OF binding\n");
+	return of_register_platform_driver(&ace_of_driver);
+}
+
+static inline void __exit ace_of_unregister(void)
+{
+	of_unregister_platform_driver(&ace_of_driver);
+}
+#else /* CONFIG_OF */
+/* CONFIG_OF not enabled; do-nothing helpers */
+static inline int __init ace_of_register(void) { return 0; }
+static inline void __exit ace_of_unregister(void) { }
+#endif /* CONFIG_OF */
+
+/* ---------------------------------------------------------------------
+ * Module init/exit routines
+ */
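+/*
+ * Registration order matters here: claim the block major first, then
+ * the OF binding, then the platform binding.  The error path unwinds
+ * in reverse order, and ace_exit() mirrors the same sequence.
+ */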
+static int __init ace_init(void)
+{
+	int rc;
+
+	ace_major = register_blkdev(ace_major, "xsysace");
+	if (ace_major <= 0) {
+		rc = -ENOMEM;
+		goto err_blk;
+	}
+
+	rc = ace_of_register();
+	if (rc)
+		goto err_of;
+
+	pr_debug("xsysace: registering platform binding\n");
+	rc = platform_driver_register(&ace_platform_driver);
+	if (rc)
+		goto err_plat;
+
+	pr_info("Xilinx SystemACE device driver, major=%i\n", ace_major);
+	return 0;
+
+err_plat:
+	ace_of_unregister();
+err_of:
+	unregister_blkdev(ace_major, "xsysace");
+err_blk:
+	printk(KERN_ERR "xsysace: registration failed; err=%i\n", rc);
+	return rc;
+}
+
+static void __exit ace_exit(void)
+{
+	pr_debug("Unregistering Xilinx SystemACE driver\n");
+	platform_driver_unregister(&ace_platform_driver);
+	ace_of_unregister();
+	unregister_blkdev(ace_major, "xsysace");
+}
+
+module_init(ace_init);
+module_exit(ace_exit);
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
new file mode 100644
index 0000000..80754cd
--- /dev/null
+++ b/drivers/block/z2ram.c
@@ -0,0 +1,406 @@
+/*
+** z2ram - Amiga pseudo-driver to access 16bit-RAM in ZorroII space
+** as a block device, to be used as a RAM disk or swap space
+**
+** Copyright (C) 1994 by Ingo Wilken (Ingo.Wilken@informatik.uni-oldenburg.de)
+**
+** ++Geert: support for zorro_unused_z2ram, better range checking
+** ++roman: translate accesses via an array
+** ++Milan: support for ChipRAM usage
+** ++yambo: converted to 2.0 kernel
+** ++yambo: modularized and support added for 3 minor devices including:
+**          MAJOR  MINOR  DESCRIPTION
+**          -----  -----  ----------------------------------------------
+**          37     0      Use Zorro II and Chip ram
+**          37     1      Use only Zorro II ram
+**          37     2      Use only Chip ram
+**          37     4-7    Use memory list entry 1-4 (first is 0)
+** ++jskov: support for 1-4th memory list entry.
+**
+** Permission to use, copy, modify, and distribute this software and its
+** documentation for any purpose and without fee is hereby granted, provided
+** that the above copyright notice appear in all copies and that both that
+** copyright notice and this permission notice appear in supporting
+** documentation.  This software is provided "as is" without express or
+** implied warranty.
+*/
+
+#define DEVICE_NAME "Z2RAM"
+
+#include <linux/major.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/bitops.h>
+
+#include <asm/setup.h>
+#include <asm/amigahw.h>
+#include <asm/pgtable.h>
+
+#include <linux/zorro.h>
+
+
+extern int m68k_realnum_memory;
+extern struct mem_info m68k_memory[NUM_MEMINFO];
+
+#define Z2MINOR_COMBINED	(0)
+#define Z2MINOR_Z2ONLY		(1)
+#define Z2MINOR_CHIPONLY	(2)
+#define Z2MINOR_MEMLIST1	(4)
+#define Z2MINOR_MEMLIST2	(5)
+#define Z2MINOR_MEMLIST3	(6)
+#define Z2MINOR_MEMLIST4	(7)
+#define Z2MINOR_COUNT		(8) /* Move this down when adding a new minor */
+
+#define Z2RAM_CHUNK1024		( Z2RAM_CHUNKSIZE >> 10 )
+
+static u_long *z2ram_map = NULL;
+static u_long z2ram_size = 0;
+static int z2_count = 0;
+static int chip_count = 0;
+static int list_count = 0;
+static int current_device = -1;
+
+static DEFINE_SPINLOCK(z2ram_lock);
+
+static struct block_device_operations z2_fops;
+static struct gendisk *z2ram_gendisk;
+
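+/*
+ * The driver presents the scattered Zorro II / Chip RAM chunks as one
+ * linear disk.  z2ram_map[] holds the kernel virtual base address of
+ * each Z2RAM_CHUNKSIZE chunk: the upper bits of a request offset index
+ * the map, the Z2RAM_CHUNKMASK bits locate the data within the chunk,
+ * and requests are split where they cross a chunk boundary.
+ */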
+static void do_z2_request(struct request_queue *q)
+{
+	struct request *req;
+	while ((req = elv_next_request(q)) != NULL) {
+		unsigned long start = req->sector << 9;
+		unsigned long len = req->current_nr_sectors << 9;
+
+		if (start + len > z2ram_size) {
+			printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
+				req->sector, req->current_nr_sectors);
+			end_request(req, 0);
+			continue;
+		}
+		while (len) {
+			unsigned long addr = start & Z2RAM_CHUNKMASK;
+			unsigned long size = Z2RAM_CHUNKSIZE - addr;
+			if (len < size)
+				size = len;
+			addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
+			if (rq_data_dir(req) == READ)
+				memcpy(req->buffer, (char *)addr, size);
+			else
+				memcpy((char *)addr, req->buffer, size);
+			start += size;
+			len -= size;
+		}
+		end_request(req, 1);
+	}
+}
+
+static void
+get_z2ram( void )
+{
+	int i;
+
+	for ( i = 0; i < Z2RAM_SIZE / Z2RAM_CHUNKSIZE; i++ )
+	{
+		if ( test_bit( i, zorro_unused_z2ram ) )
+		{
+			z2_count++;
+			z2ram_map[ z2ram_size++ ] =
+				ZTWO_VADDR( Z2RAM_START ) + ( i << Z2RAM_CHUNKSHIFT );
+			clear_bit( i, zorro_unused_z2ram );
+		}
+	}
+
+	return;
+}
+
+static void
+get_chipram( void )
+{
+
+	while ( amiga_chip_avail() > ( Z2RAM_CHUNKSIZE * 4 ) )
+	{
+		chip_count++;
+		z2ram_map[ z2ram_size ] =
+			(u_long)amiga_chip_alloc( Z2RAM_CHUNKSIZE, "z2ram" );
+
+		if ( z2ram_map[ z2ram_size ] == 0 )
+		{
+			break;
+		}
+
+		z2ram_size++;
+	}
+
+	return;
+}
+
+static int z2_open(struct block_device *bdev, fmode_t mode)
+{
+	int device;
+	int max_z2_map = ( Z2RAM_SIZE / Z2RAM_CHUNKSIZE ) *
+		sizeof( z2ram_map[0] );
+	int max_chip_map = ( amiga_chip_size / Z2RAM_CHUNKSIZE ) *
+		sizeof( z2ram_map[0] );
+	int rc = -ENOMEM;
+
+	device = MINOR(bdev->bd_dev);
+
+	if ( current_device != -1 && current_device != device )
+	{
+		rc = -EBUSY;
+		goto err_out;
+	}
+
+	if ( current_device == -1 )
+	{
+		z2_count = 0;
+		chip_count = 0;
+		list_count = 0;
+		z2ram_size = 0;
+
+		/* Use a specific list entry. */
+		if (device >= Z2MINOR_MEMLIST1 && device <= Z2MINOR_MEMLIST4) {
+			int index = device - Z2MINOR_MEMLIST1 + 1;
+			unsigned long size, paddr, vaddr;
+
+			if (index >= m68k_realnum_memory) {
+				printk( KERN_ERR DEVICE_NAME
+					": no such entry in z2ram_map\n" );
+				goto err_out;
+			}
+
+			paddr = m68k_memory[index].addr;
+			size = m68k_memory[index].size & ~(Z2RAM_CHUNKSIZE-1);
+
+#ifdef __powerpc__
+			/* FIXME: ioremap doesn't build correct memory tables. */
+			{
+				vfree(vmalloc (size));
+			}
+
+			vaddr = (unsigned long) __ioremap (paddr, size,
+							   _PAGE_WRITETHRU);
+
+#else
+			vaddr = (unsigned long)z_remap_nocache_nonser(paddr, size);
+#endif
+			z2ram_map =
+				kmalloc((size/Z2RAM_CHUNKSIZE)*sizeof(z2ram_map[0]),
+					GFP_KERNEL);
+			if ( z2ram_map == NULL )
+			{
+				printk( KERN_ERR DEVICE_NAME
+					": cannot get mem for z2ram_map\n" );
+				goto err_out;
+			}
+
+			while (size) {
+				z2ram_map[ z2ram_size++ ] = vaddr;
+				size -= Z2RAM_CHUNKSIZE;
+				vaddr += Z2RAM_CHUNKSIZE;
+				list_count++;
+			}
+
+			if ( z2ram_size != 0 )
+				printk( KERN_INFO DEVICE_NAME
+					": using %iK List Entry %d Memory\n",
+					list_count * Z2RAM_CHUNK1024, index );
+		} else
+
+		switch ( device )
+		{
+		case Z2MINOR_COMBINED:
+
+			z2ram_map = kmalloc( max_z2_map + max_chip_map, GFP_KERNEL );
+			if ( z2ram_map == NULL )
+			{
+				printk( KERN_ERR DEVICE_NAME
+					": cannot get mem for z2ram_map\n" );
+				goto err_out;
+			}
+
+			get_z2ram();
+			get_chipram();
+
+			if ( z2ram_size != 0 )
+				printk( KERN_INFO DEVICE_NAME
+					": using %iK Zorro II RAM and %iK Chip RAM (Total %dK)\n",
+					z2_count * Z2RAM_CHUNK1024,
+					chip_count * Z2RAM_CHUNK1024,
+					( z2_count + chip_count ) * Z2RAM_CHUNK1024 );
+
+			break;
+
+		case Z2MINOR_Z2ONLY:
+			z2ram_map = kmalloc( max_z2_map, GFP_KERNEL );
+			if ( z2ram_map == NULL )
+			{
+				printk( KERN_ERR DEVICE_NAME
+					": cannot get mem for z2ram_map\n" );
+				goto err_out;
+			}
+
+			get_z2ram();
+
+			if ( z2ram_size != 0 )
+				printk( KERN_INFO DEVICE_NAME
+					": using %iK of Zorro II RAM\n",
+					z2_count * Z2RAM_CHUNK1024 );
+
+			break;
+
+		case Z2MINOR_CHIPONLY:
+			z2ram_map = kmalloc( max_chip_map, GFP_KERNEL );
+			if ( z2ram_map == NULL )
+			{
+				printk( KERN_ERR DEVICE_NAME
+					": cannot get mem for z2ram_map\n" );
+				goto err_out;
+			}
+
+			get_chipram();
+
+			if ( z2ram_size != 0 )
+				printk( KERN_INFO DEVICE_NAME
+					": using %iK Chip RAM\n",
+					chip_count * Z2RAM_CHUNK1024 );
+
+			break;
+
+		default:
+			rc = -ENODEV;
+			goto err_out;
+
+			break;
+		}
+
+		if ( z2ram_size == 0 )
+		{
+			printk( KERN_NOTICE DEVICE_NAME
+				": no unused ZII/Chip RAM found\n" );
+			goto err_out_kfree;
+		}
+
+		current_device = device;
+		z2ram_size <<= Z2RAM_CHUNKSHIFT;
+		set_capacity(z2ram_gendisk, z2ram_size >> 9);
+	}
+
+	return 0;
+
+err_out_kfree:
+	kfree(z2ram_map);
+err_out:
+	return rc;
+}
+
+static int
+z2_release(struct gendisk *disk, fmode_t mode)
+{
+	if ( current_device == -1 )
+		return 0;
+
+	/*
+	 * FIXME: unmap memory
+	 */
+
+	return 0;
+}
+
+static struct block_device_operations z2_fops =
+{
+	.owner = THIS_MODULE,
+	.open = z2_open,
+	.release = z2_release,
+};
+
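+/*
+ * All Z2RAM minors share a single gendisk; z2_find() backs the
+ * blk_register_region() call in z2_init() so that opening any of the
+ * Z2MINOR_* device nodes resolves to that one disk, while z2_open()
+ * decides from the minor number which memory pools to claim.
+ */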
memory + */ + + return 0; +} + +static struct block_device_operations z2_fops = +{ + .owner = THIS_MODULE, + .open = z2_open, + .release = z2_release, +}; + +static struct kobject *z2_find(dev_t dev, int *part, void *data) +{ + *part = 0; + return get_disk(z2ram_gendisk); +} + +static struct request_queue *z2_queue; + +static int __init +z2_init(void) +{ + int ret; + + if (!MACH_IS_AMIGA) + return -ENODEV; + + ret = -EBUSY; + if (register_blkdev(Z2RAM_MAJOR, DEVICE_NAME)) + goto err; + + ret = -ENOMEM; + z2ram_gendisk = alloc_disk(1); + if (!z2ram_gendisk) + goto out_disk; + + z2_queue = blk_init_queue(do_z2_request, &z2ram_lock); + if (!z2_queue) + goto out_queue; + + z2ram_gendisk->major = Z2RAM_MAJOR; + z2ram_gendisk->first_minor = 0; + z2ram_gendisk->fops = &z2_fops; + sprintf(z2ram_gendisk->disk_name, "z2ram"); + + z2ram_gendisk->queue = z2_queue; + add_disk(z2ram_gendisk); + blk_register_region(MKDEV(Z2RAM_MAJOR, 0), Z2MINOR_COUNT, THIS_MODULE, + z2_find, NULL, NULL); + + return 0; + +out_queue: + put_disk(z2ram_gendisk); +out_disk: + unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME); +err: + return ret; +} + +static void __exit z2_exit(void) +{ + int i, j; + blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), 256); + unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME); + del_gendisk(z2ram_gendisk); + put_disk(z2ram_gendisk); + blk_cleanup_queue(z2_queue); + + if ( current_device != -1 ) + { + i = 0; + + for ( j = 0 ; j < z2_count; j++ ) + { + set_bit( i++, zorro_unused_z2ram ); + } + + for ( j = 0 ; j < chip_count; j++ ) + { + if ( z2ram_map[ i ] ) + { + amiga_chip_free( (void *) z2ram_map[ i++ ] ); + } + } + + if ( z2ram_map != NULL ) + { + kfree( z2ram_map ); + } + } + + return; +} + +module_init(z2_init); +module_exit(z2_exit); +MODULE_LICENSE("GPL"); |