author | Timothy Pearson <tpearson@raptorengineering.com> | 2019-05-11 15:12:49 -0500
committer | Timothy Pearson <tpearson@raptorengineering.com> | 2019-05-11 15:12:49 -0500
commit | 9e80202352dd49bdd9e67b8b906d86f058431505 (patch)
tree | 5673c17aad6e3833da8c4ff21b5a11f666ec9fbe /src/hw/ide
Diffstat (limited to 'src/hw/ide')
-rw-r--r-- | src/hw/ide/Makefile.objs | 12
-rw-r--r-- | src/hw/ide/ahci.c | 1817
-rw-r--r-- | src/hw/ide/ahci.h | 405
-rw-r--r-- | src/hw/ide/atapi.c | 1369
-rw-r--r-- | src/hw/ide/cmd646.c | 434
-rw-r--r-- | src/hw/ide/core.c | 2776
-rw-r--r-- | src/hw/ide/ich.c | 190
-rw-r--r-- | src/hw/ide/internal.h | 599
-rw-r--r-- | src/hw/ide/isa.c | 134
-rw-r--r-- | src/hw/ide/macio.c | 636
-rw-r--r-- | src/hw/ide/microdrive.c | 637
-rw-r--r-- | src/hw/ide/mmio.c | 183
-rw-r--r-- | src/hw/ide/pci.c | 497
-rw-r--r-- | src/hw/ide/pci.h | 76
-rw-r--r-- | src/hw/ide/piix.c | 305
-rw-r--r-- | src/hw/ide/qdev.c | 367
-rw-r--r-- | src/hw/ide/via.c | 235
17 files changed, 10672 insertions(+), 0 deletions(-)
diff --git a/src/hw/ide/Makefile.objs b/src/hw/ide/Makefile.objs new file mode 100644 index 0000000..729e9bd --- /dev/null +++ b/src/hw/ide/Makefile.objs @@ -0,0 +1,12 @@ +common-obj-$(CONFIG_IDE_CORE) += core.o atapi.o +common-obj-$(CONFIG_IDE_QDEV) += qdev.o +common-obj-$(CONFIG_IDE_PCI) += pci.o +common-obj-$(CONFIG_IDE_ISA) += isa.o +common-obj-$(CONFIG_IDE_PIIX) += piix.o +common-obj-$(CONFIG_IDE_CMD646) += cmd646.o +common-obj-$(CONFIG_IDE_MACIO) += macio.o +common-obj-$(CONFIG_IDE_MMIO) += mmio.o +common-obj-$(CONFIG_IDE_VIA) += via.o +common-obj-$(CONFIG_MICRODRIVE) += microdrive.o +common-obj-$(CONFIG_AHCI) += ahci.o +common-obj-$(CONFIG_AHCI) += ich.o diff --git a/src/hw/ide/ahci.c b/src/hw/ide/ahci.c new file mode 100644 index 0000000..cdc9299 --- /dev/null +++ b/src/hw/ide/ahci.c @@ -0,0 +1,1817 @@ +/* + * QEMU AHCI Emulation + * + * Copyright (c) 2010 qiaochong@loongson.cn + * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com> + * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de> + * Copyright (c) 2010 Alexander Graf <agraf@suse.de> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + * + */ + +#include <hw/hw.h> +#include <hw/pci/msi.h> +#include <hw/i386/pc.h> +#include <hw/pci/pci.h> + +#include "qemu/error-report.h" +#include "sysemu/block-backend.h" +#include "sysemu/dma.h" +#include "internal.h" +#include <hw/ide/pci.h> +#include <hw/ide/ahci.h> + +#define DEBUG_AHCI 0 + +#define DPRINTF(port, fmt, ...) 
\ +do { \ + if (DEBUG_AHCI) { \ + fprintf(stderr, "ahci: %s: [%d] ", __func__, port); \ + fprintf(stderr, fmt, ## __VA_ARGS__); \ + } \ +} while (0) + +static void check_cmd(AHCIState *s, int port); +static int handle_cmd(AHCIState *s, int port, uint8_t slot); +static void ahci_reset_port(AHCIState *s, int port); +static bool ahci_write_fis_d2h(AHCIDevice *ad); +static void ahci_init_d2h(AHCIDevice *ad); +static int ahci_dma_prepare_buf(IDEDMA *dma, int32_t limit); +static bool ahci_map_clb_address(AHCIDevice *ad); +static bool ahci_map_fis_address(AHCIDevice *ad); +static void ahci_unmap_clb_address(AHCIDevice *ad); +static void ahci_unmap_fis_address(AHCIDevice *ad); + + +static uint32_t ahci_port_read(AHCIState *s, int port, int offset) +{ + uint32_t val; + AHCIPortRegs *pr; + pr = &s->dev[port].port_regs; + + switch (offset) { + case PORT_LST_ADDR: + val = pr->lst_addr; + break; + case PORT_LST_ADDR_HI: + val = pr->lst_addr_hi; + break; + case PORT_FIS_ADDR: + val = pr->fis_addr; + break; + case PORT_FIS_ADDR_HI: + val = pr->fis_addr_hi; + break; + case PORT_IRQ_STAT: + val = pr->irq_stat; + break; + case PORT_IRQ_MASK: + val = pr->irq_mask; + break; + case PORT_CMD: + val = pr->cmd; + break; + case PORT_TFDATA: + val = pr->tfdata; + break; + case PORT_SIG: + val = pr->sig; + break; + case PORT_SCR_STAT: + if (s->dev[port].port.ifs[0].blk) { + val = SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP | + SATA_SCR_SSTATUS_SPD_GEN1 | SATA_SCR_SSTATUS_IPM_ACTIVE; + } else { + val = SATA_SCR_SSTATUS_DET_NODEV; + } + break; + case PORT_SCR_CTL: + val = pr->scr_ctl; + break; + case PORT_SCR_ERR: + val = pr->scr_err; + break; + case PORT_SCR_ACT: + val = pr->scr_act; + break; + case PORT_CMD_ISSUE: + val = pr->cmd_issue; + break; + case PORT_RESERVED: + default: + val = 0; + } + DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val); + return val; + +} + +static void ahci_irq_raise(AHCIState *s, AHCIDevice *dev) +{ + DeviceState *dev_state = s->container; + PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state), + TYPE_PCI_DEVICE); + + DPRINTF(0, "raise irq\n"); + + if (pci_dev && msi_enabled(pci_dev)) { + msi_notify(pci_dev, 0); + } else { + qemu_irq_raise(s->irq); + } +} + +static void ahci_irq_lower(AHCIState *s, AHCIDevice *dev) +{ + DeviceState *dev_state = s->container; + PCIDevice *pci_dev = (PCIDevice *) object_dynamic_cast(OBJECT(dev_state), + TYPE_PCI_DEVICE); + + DPRINTF(0, "lower irq\n"); + + if (!pci_dev || !msi_enabled(pci_dev)) { + qemu_irq_lower(s->irq); + } +} + +static void ahci_check_irq(AHCIState *s) +{ + int i; + + DPRINTF(-1, "check irq %#x\n", s->control_regs.irqstatus); + + s->control_regs.irqstatus = 0; + for (i = 0; i < s->ports; i++) { + AHCIPortRegs *pr = &s->dev[i].port_regs; + if (pr->irq_stat & pr->irq_mask) { + s->control_regs.irqstatus |= (1 << i); + } + } + + if (s->control_regs.irqstatus && + (s->control_regs.ghc & HOST_CTL_IRQ_EN)) { + ahci_irq_raise(s, NULL); + } else { + ahci_irq_lower(s, NULL); + } +} + +static void ahci_trigger_irq(AHCIState *s, AHCIDevice *d, + int irq_type) +{ + DPRINTF(d->port_no, "trigger irq %#x -> %x\n", + irq_type, d->port_regs.irq_mask & irq_type); + + d->port_regs.irq_stat |= irq_type; + ahci_check_irq(s); +} + +static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr, + uint32_t wanted) +{ + hwaddr len = wanted; + + if (*ptr) { + dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len); + } + + *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE); + if (len < wanted) { + dma_memory_unmap(as, 
*ptr, len, DMA_DIRECTION_FROM_DEVICE, len); + *ptr = NULL; + } +} + +/** + * Check the cmd register to see if we should start or stop + * the DMA or FIS RX engines. + * + * @ad: Device to engage. + * @allow_stop: Allow device to transition from started to stopped? + * 'no' is useful for migration post_load, which does not expect a transition. + * + * @return 0 on success, -1 on error. + */ +static int ahci_cond_start_engines(AHCIDevice *ad, bool allow_stop) +{ + AHCIPortRegs *pr = &ad->port_regs; + + if (pr->cmd & PORT_CMD_START) { + if (ahci_map_clb_address(ad)) { + pr->cmd |= PORT_CMD_LIST_ON; + } else { + error_report("AHCI: Failed to start DMA engine: " + "bad command list buffer address"); + return -1; + } + } else if (pr->cmd & PORT_CMD_LIST_ON) { + if (allow_stop) { + ahci_unmap_clb_address(ad); + pr->cmd = pr->cmd & ~(PORT_CMD_LIST_ON); + } else { + error_report("AHCI: DMA engine should be off, " + "but appears to still be running"); + return -1; + } + } + + if (pr->cmd & PORT_CMD_FIS_RX) { + if (ahci_map_fis_address(ad)) { + pr->cmd |= PORT_CMD_FIS_ON; + } else { + error_report("AHCI: Failed to start FIS receive engine: " + "bad FIS receive buffer address"); + return -1; + } + } else if (pr->cmd & PORT_CMD_FIS_ON) { + if (allow_stop) { + ahci_unmap_fis_address(ad); + pr->cmd = pr->cmd & ~(PORT_CMD_FIS_ON); + } else { + error_report("AHCI: FIS receive engine should be off, " + "but appears to still be running"); + return -1; + } + } + + return 0; +} + +static void ahci_port_write(AHCIState *s, int port, int offset, uint32_t val) +{ + AHCIPortRegs *pr = &s->dev[port].port_regs; + + DPRINTF(port, "offset: 0x%x val: 0x%x\n", offset, val); + switch (offset) { + case PORT_LST_ADDR: + pr->lst_addr = val; + break; + case PORT_LST_ADDR_HI: + pr->lst_addr_hi = val; + break; + case PORT_FIS_ADDR: + pr->fis_addr = val; + break; + case PORT_FIS_ADDR_HI: + pr->fis_addr_hi = val; + break; + case PORT_IRQ_STAT: + pr->irq_stat &= ~val; + ahci_check_irq(s); + break; + case PORT_IRQ_MASK: + pr->irq_mask = val & 0xfdc000ff; + ahci_check_irq(s); + break; + case PORT_CMD: + /* Block any Read-only fields from being set; + * including LIST_ON and FIS_ON. + * The spec requires to set ICC bits to zero after the ICC change + * is done. We don't support ICC state changes, therefore always + * force the ICC bits to zero. + */ + pr->cmd = (pr->cmd & PORT_CMD_RO_MASK) | + (val & ~(PORT_CMD_RO_MASK|PORT_CMD_ICC_MASK)); + + /* Check FIS RX and CLB engines, allow transition to false: */ + ahci_cond_start_engines(&s->dev[port], true); + + /* XXX usually the FIS would be pending on the bus here and + issuing deferred until the OS enables FIS receival. + Instead, we only submit it once - which works in most + cases, but is a hack. */ + if ((pr->cmd & PORT_CMD_FIS_ON) && + !s->dev[port].init_d2h_sent) { + ahci_init_d2h(&s->dev[port]); + } + + check_cmd(s, port); + break; + case PORT_TFDATA: + /* Read Only. 
*/ + break; + case PORT_SIG: + /* Read Only */ + break; + case PORT_SCR_STAT: + /* Read Only */ + break; + case PORT_SCR_CTL: + if (((pr->scr_ctl & AHCI_SCR_SCTL_DET) == 1) && + ((val & AHCI_SCR_SCTL_DET) == 0)) { + ahci_reset_port(s, port); + } + pr->scr_ctl = val; + break; + case PORT_SCR_ERR: + pr->scr_err &= ~val; + break; + case PORT_SCR_ACT: + /* RW1 */ + pr->scr_act |= val; + break; + case PORT_CMD_ISSUE: + pr->cmd_issue |= val; + check_cmd(s, port); + break; + default: + break; + } +} + +static uint64_t ahci_mem_read_32(void *opaque, hwaddr addr) +{ + AHCIState *s = opaque; + uint32_t val = 0; + + if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) { + switch (addr) { + case HOST_CAP: + val = s->control_regs.cap; + break; + case HOST_CTL: + val = s->control_regs.ghc; + break; + case HOST_IRQ_STAT: + val = s->control_regs.irqstatus; + break; + case HOST_PORTS_IMPL: + val = s->control_regs.impl; + break; + case HOST_VERSION: + val = s->control_regs.version; + break; + } + + DPRINTF(-1, "(addr 0x%08X), val 0x%08X\n", (unsigned) addr, val); + } else if ((addr >= AHCI_PORT_REGS_START_ADDR) && + (addr < (AHCI_PORT_REGS_START_ADDR + + (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) { + val = ahci_port_read(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7, + addr & AHCI_PORT_ADDR_OFFSET_MASK); + } + + return val; +} + + +/** + * AHCI 1.3 section 3 ("HBA Memory Registers") + * Support unaligned 8/16/32 bit reads, and 64 bit aligned reads. + * Caller is responsible for masking unwanted higher order bytes. + */ +static uint64_t ahci_mem_read(void *opaque, hwaddr addr, unsigned size) +{ + hwaddr aligned = addr & ~0x3; + int ofst = addr - aligned; + uint64_t lo = ahci_mem_read_32(opaque, aligned); + uint64_t hi; + uint64_t val; + + /* if < 8 byte read does not cross 4 byte boundary */ + if (ofst + size <= 4) { + val = lo >> (ofst * 8); + } else { + g_assert_cmpint(size, >, 1); + + /* If the 64bit read is unaligned, we will produce undefined + * results. AHCI does not support unaligned 64bit reads. */ + hi = ahci_mem_read_32(opaque, aligned + 4); + val = (hi << 32 | lo) >> (ofst * 8); + } + + DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n", + addr, val, size); + return val; +} + + +static void ahci_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + AHCIState *s = opaque; + + DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n", + addr, val, size); + + /* Only aligned reads are allowed on AHCI */ + if (addr & 3) { + fprintf(stderr, "ahci: Mis-aligned write to addr 0x" + TARGET_FMT_plx "\n", addr); + return; + } + + if (addr < AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR) { + DPRINTF(-1, "(addr 0x%08X), val 0x%08"PRIX64"\n", (unsigned) addr, val); + + switch (addr) { + case HOST_CAP: /* R/WO, RO */ + /* FIXME handle R/WO */ + break; + case HOST_CTL: /* R/W */ + if (val & HOST_CTL_RESET) { + DPRINTF(-1, "HBA Reset\n"); + ahci_reset(s); + } else { + s->control_regs.ghc = (val & 0x3) | HOST_CTL_AHCI_EN; + ahci_check_irq(s); + } + break; + case HOST_IRQ_STAT: /* R/WC, RO */ + s->control_regs.irqstatus &= ~val; + ahci_check_irq(s); + break; + case HOST_PORTS_IMPL: /* R/WO, RO */ + /* FIXME handle R/WO */ + break; + case HOST_VERSION: /* RO */ + /* FIXME report write? 
*/ + break; + default: + DPRINTF(-1, "write to unknown register 0x%x\n", (unsigned)addr); + } + } else if ((addr >= AHCI_PORT_REGS_START_ADDR) && + (addr < (AHCI_PORT_REGS_START_ADDR + + (s->ports * AHCI_PORT_ADDR_OFFSET_LEN)))) { + ahci_port_write(s, (addr - AHCI_PORT_REGS_START_ADDR) >> 7, + addr & AHCI_PORT_ADDR_OFFSET_MASK, val); + } + +} + +static const MemoryRegionOps ahci_mem_ops = { + .read = ahci_mem_read, + .write = ahci_mem_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t ahci_idp_read(void *opaque, hwaddr addr, + unsigned size) +{ + AHCIState *s = opaque; + + if (addr == s->idp_offset) { + /* index register */ + return s->idp_index; + } else if (addr == s->idp_offset + 4) { + /* data register - do memory read at location selected by index */ + return ahci_mem_read(opaque, s->idp_index, size); + } else { + return 0; + } +} + +static void ahci_idp_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + AHCIState *s = opaque; + + if (addr == s->idp_offset) { + /* index register - mask off reserved bits */ + s->idp_index = (uint32_t)val & ((AHCI_MEM_BAR_SIZE - 1) & ~3); + } else if (addr == s->idp_offset + 4) { + /* data register - do memory write at location selected by index */ + ahci_mem_write(opaque, s->idp_index, val, size); + } +} + +static const MemoryRegionOps ahci_idp_ops = { + .read = ahci_idp_read, + .write = ahci_idp_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + + +static void ahci_reg_init(AHCIState *s) +{ + int i; + + s->control_regs.cap = (s->ports - 1) | + (AHCI_NUM_COMMAND_SLOTS << 8) | + (AHCI_SUPPORTED_SPEED_GEN1 << AHCI_SUPPORTED_SPEED) | + HOST_CAP_NCQ | HOST_CAP_AHCI; + + s->control_regs.impl = (1 << s->ports) - 1; + + s->control_regs.version = AHCI_VERSION_1_0; + + for (i = 0; i < s->ports; i++) { + s->dev[i].port_state = STATE_RUN; + } +} + +static void check_cmd(AHCIState *s, int port) +{ + AHCIPortRegs *pr = &s->dev[port].port_regs; + uint8_t slot; + + if ((pr->cmd & PORT_CMD_START) && pr->cmd_issue) { + for (slot = 0; (slot < 32) && pr->cmd_issue; slot++) { + if ((pr->cmd_issue & (1U << slot)) && + !handle_cmd(s, port, slot)) { + pr->cmd_issue &= ~(1U << slot); + } + } + } +} + +static void ahci_check_cmd_bh(void *opaque) +{ + AHCIDevice *ad = opaque; + + qemu_bh_delete(ad->check_bh); + ad->check_bh = NULL; + + if ((ad->busy_slot != -1) && + !(ad->port.ifs[0].status & (BUSY_STAT|DRQ_STAT))) { + /* no longer busy */ + ad->port_regs.cmd_issue &= ~(1 << ad->busy_slot); + ad->busy_slot = -1; + } + + check_cmd(ad->hba, ad->port_no); +} + +static void ahci_init_d2h(AHCIDevice *ad) +{ + IDEState *ide_state = &ad->port.ifs[0]; + AHCIPortRegs *pr = &ad->port_regs; + + if (ad->init_d2h_sent) { + return; + } + + if (ahci_write_fis_d2h(ad)) { + ad->init_d2h_sent = true; + /* We're emulating receiving the first Reg H2D Fis from the device; + * Update the SIG register, but otherwise proceed as normal. 
*/ + pr->sig = ((uint32_t)ide_state->hcyl << 24) | + (ide_state->lcyl << 16) | + (ide_state->sector << 8) | + (ide_state->nsector & 0xFF); + } +} + +static void ahci_set_signature(AHCIDevice *ad, uint32_t sig) +{ + IDEState *s = &ad->port.ifs[0]; + s->hcyl = sig >> 24 & 0xFF; + s->lcyl = sig >> 16 & 0xFF; + s->sector = sig >> 8 & 0xFF; + s->nsector = sig & 0xFF; + + DPRINTF(ad->port_no, "set hcyl:lcyl:sect:nsect = 0x%08x\n", sig); +} + +static void ahci_reset_port(AHCIState *s, int port) +{ + AHCIDevice *d = &s->dev[port]; + AHCIPortRegs *pr = &d->port_regs; + IDEState *ide_state = &d->port.ifs[0]; + int i; + + DPRINTF(port, "reset port\n"); + + ide_bus_reset(&d->port); + ide_state->ncq_queues = AHCI_MAX_CMDS; + + pr->scr_stat = 0; + pr->scr_err = 0; + pr->scr_act = 0; + pr->tfdata = 0x7F; + pr->sig = 0xFFFFFFFF; + d->busy_slot = -1; + d->init_d2h_sent = false; + + ide_state = &s->dev[port].port.ifs[0]; + if (!ide_state->blk) { + return; + } + + /* reset ncq queue */ + for (i = 0; i < AHCI_MAX_CMDS; i++) { + NCQTransferState *ncq_tfs = &s->dev[port].ncq_tfs[i]; + ncq_tfs->halt = false; + if (!ncq_tfs->used) { + continue; + } + + if (ncq_tfs->aiocb) { + blk_aio_cancel(ncq_tfs->aiocb); + ncq_tfs->aiocb = NULL; + } + + /* Maybe we just finished the request thanks to blk_aio_cancel() */ + if (!ncq_tfs->used) { + continue; + } + + qemu_sglist_destroy(&ncq_tfs->sglist); + ncq_tfs->used = 0; + } + + s->dev[port].port_state = STATE_RUN; + if (ide_state->drive_kind == IDE_CD) { + ahci_set_signature(d, SATA_SIGNATURE_CDROM);\ + ide_state->status = SEEK_STAT | WRERR_STAT | READY_STAT; + } else { + ahci_set_signature(d, SATA_SIGNATURE_DISK); + ide_state->status = SEEK_STAT | WRERR_STAT; + } + + ide_state->error = 1; + ahci_init_d2h(d); +} + +static void debug_print_fis(uint8_t *fis, int cmd_len) +{ +#if DEBUG_AHCI + int i; + + fprintf(stderr, "fis:"); + for (i = 0; i < cmd_len; i++) { + if ((i & 0xf) == 0) { + fprintf(stderr, "\n%02x:",i); + } + fprintf(stderr, "%02x ",fis[i]); + } + fprintf(stderr, "\n"); +#endif +} + +static bool ahci_map_fis_address(AHCIDevice *ad) +{ + AHCIPortRegs *pr = &ad->port_regs; + map_page(ad->hba->as, &ad->res_fis, + ((uint64_t)pr->fis_addr_hi << 32) | pr->fis_addr, 256); + return ad->res_fis != NULL; +} + +static void ahci_unmap_fis_address(AHCIDevice *ad) +{ + if (ad->res_fis == NULL) { + DPRINTF(ad->port_no, "Attempt to unmap NULL FIS address\n"); + return; + } + dma_memory_unmap(ad->hba->as, ad->res_fis, 256, + DMA_DIRECTION_FROM_DEVICE, 256); + ad->res_fis = NULL; +} + +static bool ahci_map_clb_address(AHCIDevice *ad) +{ + AHCIPortRegs *pr = &ad->port_regs; + ad->cur_cmd = NULL; + map_page(ad->hba->as, &ad->lst, + ((uint64_t)pr->lst_addr_hi << 32) | pr->lst_addr, 1024); + return ad->lst != NULL; +} + +static void ahci_unmap_clb_address(AHCIDevice *ad) +{ + if (ad->lst == NULL) { + DPRINTF(ad->port_no, "Attempt to unmap NULL CLB address\n"); + return; + } + dma_memory_unmap(ad->hba->as, ad->lst, 1024, + DMA_DIRECTION_FROM_DEVICE, 1024); + ad->lst = NULL; +} + +static void ahci_write_fis_sdb(AHCIState *s, NCQTransferState *ncq_tfs) +{ + AHCIDevice *ad = ncq_tfs->drive; + AHCIPortRegs *pr = &ad->port_regs; + IDEState *ide_state; + SDBFIS *sdb_fis; + + if (!ad->res_fis || + !(pr->cmd & PORT_CMD_FIS_RX)) { + return; + } + + sdb_fis = (SDBFIS *)&ad->res_fis[RES_FIS_SDBFIS]; + ide_state = &ad->port.ifs[0]; + + sdb_fis->type = SATA_FIS_TYPE_SDB; + /* Interrupt pending & Notification bit */ + sdb_fis->flags = 0x40; /* Interrupt bit, always 1 for NCQ */ + sdb_fis->status = 
ide_state->status & 0x77; + sdb_fis->error = ide_state->error; + /* update SAct field in SDB_FIS */ + sdb_fis->payload = cpu_to_le32(ad->finished); + + /* Update shadow registers (except BSY 0x80 and DRQ 0x08) */ + pr->tfdata = (ad->port.ifs[0].error << 8) | + (ad->port.ifs[0].status & 0x77) | + (pr->tfdata & 0x88); + pr->scr_act &= ~ad->finished; + ad->finished = 0; + + /* Trigger IRQ if interrupt bit is set (which currently, it always is) */ + if (sdb_fis->flags & 0x40) { + ahci_trigger_irq(s, ad, PORT_IRQ_SDB_FIS); + } +} + +static void ahci_write_fis_pio(AHCIDevice *ad, uint16_t len) +{ + AHCIPortRegs *pr = &ad->port_regs; + uint8_t *pio_fis; + IDEState *s = &ad->port.ifs[0]; + + if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) { + return; + } + + pio_fis = &ad->res_fis[RES_FIS_PSFIS]; + + pio_fis[0] = SATA_FIS_TYPE_PIO_SETUP; + pio_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0); + pio_fis[2] = s->status; + pio_fis[3] = s->error; + + pio_fis[4] = s->sector; + pio_fis[5] = s->lcyl; + pio_fis[6] = s->hcyl; + pio_fis[7] = s->select; + pio_fis[8] = s->hob_sector; + pio_fis[9] = s->hob_lcyl; + pio_fis[10] = s->hob_hcyl; + pio_fis[11] = 0; + pio_fis[12] = s->nsector & 0xFF; + pio_fis[13] = (s->nsector >> 8) & 0xFF; + pio_fis[14] = 0; + pio_fis[15] = s->status; + pio_fis[16] = len & 255; + pio_fis[17] = len >> 8; + pio_fis[18] = 0; + pio_fis[19] = 0; + + /* Update shadow registers: */ + pr->tfdata = (ad->port.ifs[0].error << 8) | + ad->port.ifs[0].status; + + if (pio_fis[2] & ERR_STAT) { + ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR); + } + + ahci_trigger_irq(ad->hba, ad, PORT_IRQ_PIOS_FIS); +} + +static bool ahci_write_fis_d2h(AHCIDevice *ad) +{ + AHCIPortRegs *pr = &ad->port_regs; + uint8_t *d2h_fis; + int i; + IDEState *s = &ad->port.ifs[0]; + + if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) { + return false; + } + + d2h_fis = &ad->res_fis[RES_FIS_RFIS]; + + d2h_fis[0] = SATA_FIS_TYPE_REGISTER_D2H; + d2h_fis[1] = (ad->hba->control_regs.irqstatus ? (1 << 6) : 0); + d2h_fis[2] = s->status; + d2h_fis[3] = s->error; + + d2h_fis[4] = s->sector; + d2h_fis[5] = s->lcyl; + d2h_fis[6] = s->hcyl; + d2h_fis[7] = s->select; + d2h_fis[8] = s->hob_sector; + d2h_fis[9] = s->hob_lcyl; + d2h_fis[10] = s->hob_hcyl; + d2h_fis[11] = 0; + d2h_fis[12] = s->nsector & 0xFF; + d2h_fis[13] = (s->nsector >> 8) & 0xFF; + for (i = 14; i < 20; i++) { + d2h_fis[i] = 0; + } + + /* Update shadow registers: */ + pr->tfdata = (ad->port.ifs[0].error << 8) | + ad->port.ifs[0].status; + + if (d2h_fis[2] & ERR_STAT) { + ahci_trigger_irq(ad->hba, ad, PORT_IRQ_TF_ERR); + } + + ahci_trigger_irq(ad->hba, ad, PORT_IRQ_D2H_REG_FIS); + return true; +} + +static int prdt_tbl_entry_size(const AHCI_SG *tbl) +{ + /* flags_size is zero-based */ + return (le32_to_cpu(tbl->flags_size) & AHCI_PRDT_SIZE_MASK) + 1; +} + +/** + * Fetch entries in a guest-provided PRDT and convert it into a QEMU SGlist. + * @ad: The AHCIDevice for whom we are building the SGList. + * @sglist: The SGList target to add PRD entries to. + * @cmd: The AHCI Command Header that describes where the PRDT is. + * @limit: The remaining size of the S/ATA transaction, in bytes. + * @offset: The number of bytes already transferred, in bytes. + * + * The AHCI PRDT can describe up to 256GiB. S/ATA only support transactions of + * up to 32MiB as of ATA8-ACS3 rev 1b, assuming a 512 byte sector size. We stop + * building the sglist from the PRDT as soon as we hit @limit bytes, + * which is <= INT32_MAX/2GiB. 
+ */ +static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist, + AHCICmdHdr *cmd, int64_t limit, uint64_t offset) +{ + uint16_t opts = le16_to_cpu(cmd->opts); + uint16_t prdtl = le16_to_cpu(cmd->prdtl); + uint64_t cfis_addr = le64_to_cpu(cmd->tbl_addr); + uint64_t prdt_addr = cfis_addr + 0x80; + dma_addr_t prdt_len = (prdtl * sizeof(AHCI_SG)); + dma_addr_t real_prdt_len = prdt_len; + uint8_t *prdt; + int i; + int r = 0; + uint64_t sum = 0; + int off_idx = -1; + int64_t off_pos = -1; + int tbl_entry_size; + IDEBus *bus = &ad->port; + BusState *qbus = BUS(bus); + + if (!prdtl) { + DPRINTF(ad->port_no, "no sg list given by guest: 0x%08x\n", opts); + return -1; + } + + /* map PRDT */ + if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len, + DMA_DIRECTION_TO_DEVICE))){ + DPRINTF(ad->port_no, "map failed\n"); + return -1; + } + + if (prdt_len < real_prdt_len) { + DPRINTF(ad->port_no, "mapped less than expected\n"); + r = -1; + goto out; + } + + /* Get entries in the PRDT, init a qemu sglist accordingly */ + if (prdtl > 0) { + AHCI_SG *tbl = (AHCI_SG *)prdt; + sum = 0; + for (i = 0; i < prdtl; i++) { + tbl_entry_size = prdt_tbl_entry_size(&tbl[i]); + if (offset < (sum + tbl_entry_size)) { + off_idx = i; + off_pos = offset - sum; + break; + } + sum += tbl_entry_size; + } + if ((off_idx == -1) || (off_pos < 0) || (off_pos > tbl_entry_size)) { + DPRINTF(ad->port_no, "%s: Incorrect offset! " + "off_idx: %d, off_pos: %"PRId64"\n", + __func__, off_idx, off_pos); + r = -1; + goto out; + } + + qemu_sglist_init(sglist, qbus->parent, (prdtl - off_idx), + ad->hba->as); + qemu_sglist_add(sglist, le64_to_cpu(tbl[off_idx].addr) + off_pos, + MIN(prdt_tbl_entry_size(&tbl[off_idx]) - off_pos, + limit)); + + for (i = off_idx + 1; i < prdtl && sglist->size < limit; i++) { + qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr), + MIN(prdt_tbl_entry_size(&tbl[i]), + limit - sglist->size)); + } + } + +out: + dma_memory_unmap(ad->hba->as, prdt, prdt_len, + DMA_DIRECTION_TO_DEVICE, prdt_len); + return r; +} + +static void ncq_err(NCQTransferState *ncq_tfs) +{ + IDEState *ide_state = &ncq_tfs->drive->port.ifs[0]; + + ide_state->error = ABRT_ERR; + ide_state->status = READY_STAT | ERR_STAT; + ncq_tfs->drive->port_regs.scr_err |= (1 << ncq_tfs->tag); + ncq_tfs->used = 0; +} + +static void ncq_finish(NCQTransferState *ncq_tfs) +{ + /* If we didn't error out, set our finished bit. Errored commands + * do not get a bit set for the SDB FIS ACT register, nor do they + * clear the outstanding bit in scr_act (PxSACT). 
*/ + if (!(ncq_tfs->drive->port_regs.scr_err & (1 << ncq_tfs->tag))) { + ncq_tfs->drive->finished |= (1 << ncq_tfs->tag); + } + + ahci_write_fis_sdb(ncq_tfs->drive->hba, ncq_tfs); + + DPRINTF(ncq_tfs->drive->port_no, "NCQ transfer tag %d finished\n", + ncq_tfs->tag); + + block_acct_done(blk_get_stats(ncq_tfs->drive->port.ifs[0].blk), + &ncq_tfs->acct); + qemu_sglist_destroy(&ncq_tfs->sglist); + ncq_tfs->used = 0; +} + +static void ncq_cb(void *opaque, int ret) +{ + NCQTransferState *ncq_tfs = (NCQTransferState *)opaque; + IDEState *ide_state = &ncq_tfs->drive->port.ifs[0]; + + if (ret == -ECANCELED) { + return; + } + + if (ret < 0) { + bool is_read = ncq_tfs->cmd == READ_FPDMA_QUEUED; + BlockErrorAction action = blk_get_error_action(ide_state->blk, + is_read, -ret); + if (action == BLOCK_ERROR_ACTION_STOP) { + ncq_tfs->halt = true; + ide_state->bus->error_status = IDE_RETRY_HBA; + } else if (action == BLOCK_ERROR_ACTION_REPORT) { + ncq_err(ncq_tfs); + } + blk_error_action(ide_state->blk, action, is_read, -ret); + } else { + ide_state->status = READY_STAT | SEEK_STAT; + } + + if (!ncq_tfs->halt) { + ncq_finish(ncq_tfs); + } +} + +static int is_ncq(uint8_t ata_cmd) +{ + /* Based on SATA 3.2 section 13.6.3.2 */ + switch (ata_cmd) { + case READ_FPDMA_QUEUED: + case WRITE_FPDMA_QUEUED: + case NCQ_NON_DATA: + case RECEIVE_FPDMA_QUEUED: + case SEND_FPDMA_QUEUED: + return 1; + default: + return 0; + } +} + +static void execute_ncq_command(NCQTransferState *ncq_tfs) +{ + AHCIDevice *ad = ncq_tfs->drive; + IDEState *ide_state = &ad->port.ifs[0]; + int port = ad->port_no; + + g_assert(is_ncq(ncq_tfs->cmd)); + ncq_tfs->halt = false; + + switch (ncq_tfs->cmd) { + case READ_FPDMA_QUEUED: + DPRINTF(port, "NCQ reading %d sectors from LBA %"PRId64", tag %d\n", + ncq_tfs->sector_count, ncq_tfs->lba, ncq_tfs->tag); + + DPRINTF(port, "tag %d aio read %"PRId64"\n", + ncq_tfs->tag, ncq_tfs->lba); + + dma_acct_start(ide_state->blk, &ncq_tfs->acct, + &ncq_tfs->sglist, BLOCK_ACCT_READ); + ncq_tfs->aiocb = dma_blk_read(ide_state->blk, &ncq_tfs->sglist, + ncq_tfs->lba, ncq_cb, ncq_tfs); + break; + case WRITE_FPDMA_QUEUED: + DPRINTF(port, "NCQ writing %d sectors to LBA %"PRId64", tag %d\n", + ncq_tfs->sector_count, ncq_tfs->lba, ncq_tfs->tag); + + DPRINTF(port, "tag %d aio write %"PRId64"\n", + ncq_tfs->tag, ncq_tfs->lba); + + dma_acct_start(ide_state->blk, &ncq_tfs->acct, + &ncq_tfs->sglist, BLOCK_ACCT_WRITE); + ncq_tfs->aiocb = dma_blk_write(ide_state->blk, &ncq_tfs->sglist, + ncq_tfs->lba, ncq_cb, ncq_tfs); + break; + default: + DPRINTF(port, "error: unsupported NCQ command (0x%02x) received\n", + ncq_tfs->cmd); + qemu_sglist_destroy(&ncq_tfs->sglist); + ncq_err(ncq_tfs); + } +} + + +static void process_ncq_command(AHCIState *s, int port, uint8_t *cmd_fis, + uint8_t slot) +{ + AHCIDevice *ad = &s->dev[port]; + IDEState *ide_state = &ad->port.ifs[0]; + NCQFrame *ncq_fis = (NCQFrame*)cmd_fis; + uint8_t tag = ncq_fis->tag >> 3; + NCQTransferState *ncq_tfs = &ad->ncq_tfs[tag]; + size_t size; + + g_assert(is_ncq(ncq_fis->command)); + if (ncq_tfs->used) { + /* error - already in use */ + fprintf(stderr, "%s: tag %d already used\n", __FUNCTION__, tag); + return; + } + + ncq_tfs->used = 1; + ncq_tfs->drive = ad; + ncq_tfs->slot = slot; + ncq_tfs->cmdh = &((AHCICmdHdr *)ad->lst)[slot]; + ncq_tfs->cmd = ncq_fis->command; + ncq_tfs->lba = ((uint64_t)ncq_fis->lba5 << 40) | + ((uint64_t)ncq_fis->lba4 << 32) | + ((uint64_t)ncq_fis->lba3 << 24) | + ((uint64_t)ncq_fis->lba2 << 16) | + ((uint64_t)ncq_fis->lba1 << 8) | + 
(uint64_t)ncq_fis->lba0; + ncq_tfs->tag = tag; + + /* Sanity-check the NCQ packet */ + if (tag != slot) { + DPRINTF(port, "Warn: NCQ slot (%d) did not match the given tag (%d)\n", + slot, tag); + } + + if (ncq_fis->aux0 || ncq_fis->aux1 || ncq_fis->aux2 || ncq_fis->aux3) { + DPRINTF(port, "Warn: Attempt to use NCQ auxiliary fields.\n"); + } + if (ncq_fis->prio || ncq_fis->icc) { + DPRINTF(port, "Warn: Unsupported attempt to use PRIO/ICC fields\n"); + } + if (ncq_fis->fua & NCQ_FIS_FUA_MASK) { + DPRINTF(port, "Warn: Unsupported attempt to use Force Unit Access\n"); + } + if (ncq_fis->tag & NCQ_FIS_RARC_MASK) { + DPRINTF(port, "Warn: Unsupported attempt to use Rebuild Assist\n"); + } + + ncq_tfs->sector_count = ((ncq_fis->sector_count_high << 8) | + ncq_fis->sector_count_low); + if (!ncq_tfs->sector_count) { + ncq_tfs->sector_count = 0x10000; + } + size = ncq_tfs->sector_count * 512; + ahci_populate_sglist(ad, &ncq_tfs->sglist, ncq_tfs->cmdh, size, 0); + + if (ncq_tfs->sglist.size < size) { + error_report("ahci: PRDT length for NCQ command (0x%zx) " + "is smaller than the requested size (0x%zx)", + ncq_tfs->sglist.size, size); + qemu_sglist_destroy(&ncq_tfs->sglist); + ncq_err(ncq_tfs); + ahci_trigger_irq(ad->hba, ad, PORT_IRQ_OVERFLOW); + return; + } else if (ncq_tfs->sglist.size != size) { + DPRINTF(port, "Warn: PRDTL (0x%zx)" + " does not match requested size (0x%zx)", + ncq_tfs->sglist.size, size); + } + + DPRINTF(port, "NCQ transfer LBA from %"PRId64" to %"PRId64", " + "drive max %"PRId64"\n", + ncq_tfs->lba, ncq_tfs->lba + ncq_tfs->sector_count - 1, + ide_state->nb_sectors - 1); + + execute_ncq_command(ncq_tfs); +} + +static AHCICmdHdr *get_cmd_header(AHCIState *s, uint8_t port, uint8_t slot) +{ + if (port >= s->ports || slot >= AHCI_MAX_CMDS) { + return NULL; + } + + return s->dev[port].lst ? &((AHCICmdHdr *)s->dev[port].lst)[slot] : NULL; +} + +static void handle_reg_h2d_fis(AHCIState *s, int port, + uint8_t slot, uint8_t *cmd_fis) +{ + IDEState *ide_state = &s->dev[port].port.ifs[0]; + AHCICmdHdr *cmd = get_cmd_header(s, port, slot); + uint16_t opts = le16_to_cpu(cmd->opts); + + if (cmd_fis[1] & 0x0F) { + DPRINTF(port, "Port Multiplier not supported." + " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n", + cmd_fis[0], cmd_fis[1], cmd_fis[2]); + return; + } + + if (cmd_fis[1] & 0x70) { + DPRINTF(port, "Reserved flags set in H2D Register FIS." + " cmd_fis[0]=%02x cmd_fis[1]=%02x cmd_fis[2]=%02x\n", + cmd_fis[0], cmd_fis[1], cmd_fis[2]); + return; + } + + if (!(cmd_fis[1] & SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER)) { + switch (s->dev[port].port_state) { + case STATE_RUN: + if (cmd_fis[15] & ATA_SRST) { + s->dev[port].port_state = STATE_RESET; + } + break; + case STATE_RESET: + if (!(cmd_fis[15] & ATA_SRST)) { + ahci_reset_port(s, port); + } + break; + } + return; + } + + /* Check for NCQ command */ + if (is_ncq(cmd_fis[2])) { + process_ncq_command(s, port, cmd_fis, slot); + return; + } + + /* Decompose the FIS: + * AHCI does not interpret FIS packets, it only forwards them. + * SATA 1.0 describes how to decode LBA28 and CHS FIS packets. + * Later specifications, e.g, SATA 3.2, describe LBA48 FIS packets. + * + * ATA4 describes sector number for LBA28/CHS commands. + * ATA6 describes sector number for LBA48 commands. + * ATA8 deprecates CHS fully, describing only LBA28/48. + * + * We dutifully convert the FIS into IDE registers, and allow the + * core layer to interpret them as needed. 
*/ + ide_state->feature = cmd_fis[3]; + ide_state->sector = cmd_fis[4]; /* LBA 7:0 */ + ide_state->lcyl = cmd_fis[5]; /* LBA 15:8 */ + ide_state->hcyl = cmd_fis[6]; /* LBA 23:16 */ + ide_state->select = cmd_fis[7]; /* LBA 27:24 (LBA28) */ + ide_state->hob_sector = cmd_fis[8]; /* LBA 31:24 */ + ide_state->hob_lcyl = cmd_fis[9]; /* LBA 39:32 */ + ide_state->hob_hcyl = cmd_fis[10]; /* LBA 47:40 */ + ide_state->hob_feature = cmd_fis[11]; + ide_state->nsector = (int64_t)((cmd_fis[13] << 8) | cmd_fis[12]); + /* 14, 16, 17, 18, 19: Reserved (SATA 1.0) */ + /* 15: Only valid when UPDATE_COMMAND not set. */ + + /* Copy the ACMD field (ATAPI packet, if any) from the AHCI command + * table to ide_state->io_buffer */ + if (opts & AHCI_CMD_ATAPI) { + memcpy(ide_state->io_buffer, &cmd_fis[AHCI_COMMAND_TABLE_ACMD], 0x10); + debug_print_fis(ide_state->io_buffer, 0x10); + s->dev[port].done_atapi_packet = false; + /* XXX send PIO setup FIS */ + } + + ide_state->error = 0; + + /* Reset transferred byte counter */ + cmd->status = 0; + + /* We're ready to process the command in FIS byte 2. */ + ide_exec_cmd(&s->dev[port].port, cmd_fis[2]); +} + +static int handle_cmd(AHCIState *s, int port, uint8_t slot) +{ + IDEState *ide_state; + uint64_t tbl_addr; + AHCICmdHdr *cmd; + uint8_t *cmd_fis; + dma_addr_t cmd_len; + + if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) { + /* Engine currently busy, try again later */ + DPRINTF(port, "engine busy\n"); + return -1; + } + + if (!s->dev[port].lst) { + DPRINTF(port, "error: lst not given but cmd handled"); + return -1; + } + cmd = get_cmd_header(s, port, slot); + /* remember current slot handle for later */ + s->dev[port].cur_cmd = cmd; + + /* The device we are working for */ + ide_state = &s->dev[port].port.ifs[0]; + if (!ide_state->blk) { + DPRINTF(port, "error: guest accessed unused port"); + return -1; + } + + tbl_addr = le64_to_cpu(cmd->tbl_addr); + cmd_len = 0x80; + cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len, + DMA_DIRECTION_FROM_DEVICE); + if (!cmd_fis) { + DPRINTF(port, "error: guest passed us an invalid cmd fis\n"); + return -1; + } else if (cmd_len != 0x80) { + ahci_trigger_irq(s, &s->dev[port], PORT_IRQ_HBUS_ERR); + DPRINTF(port, "error: dma_memory_map failed: " + "(len(%02"PRIx64") != 0x80)\n", + cmd_len); + goto out; + } + debug_print_fis(cmd_fis, 0x80); + + switch (cmd_fis[0]) { + case SATA_FIS_TYPE_REGISTER_H2D: + handle_reg_h2d_fis(s, port, slot, cmd_fis); + break; + default: + DPRINTF(port, "unknown command cmd_fis[0]=%02x cmd_fis[1]=%02x " + "cmd_fis[2]=%02x\n", cmd_fis[0], cmd_fis[1], + cmd_fis[2]); + break; + } + +out: + dma_memory_unmap(s->as, cmd_fis, cmd_len, DMA_DIRECTION_FROM_DEVICE, + cmd_len); + + if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) { + /* async command, complete later */ + s->dev[port].busy_slot = slot; + return -1; + } + + /* done handling the command */ + return 0; +} + +/* DMA dev <-> ram */ +static void ahci_start_transfer(IDEDMA *dma) +{ + AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); + IDEState *s = &ad->port.ifs[0]; + uint32_t size = (uint32_t)(s->data_end - s->data_ptr); + /* write == ram -> device */ + uint16_t opts = le16_to_cpu(ad->cur_cmd->opts); + int is_write = opts & AHCI_CMD_WRITE; + int is_atapi = opts & AHCI_CMD_ATAPI; + int has_sglist = 0; + + if (is_atapi && !ad->done_atapi_packet) { + /* already prepopulated iobuffer */ + ad->done_atapi_packet = true; + size = 0; + goto out; + } + + if (ahci_dma_prepare_buf(dma, size)) { + has_sglist = 1; + } + + DPRINTF(ad->port_no, "%sing %d 
bytes on %s w/%s sglist\n", + is_write ? "writ" : "read", size, is_atapi ? "atapi" : "ata", + has_sglist ? "" : "o"); + + if (has_sglist && size) { + if (is_write) { + dma_buf_write(s->data_ptr, size, &s->sg); + } else { + dma_buf_read(s->data_ptr, size, &s->sg); + } + } + +out: + /* declare that we processed everything */ + s->data_ptr = s->data_end; + + /* Update number of transferred bytes, destroy sglist */ + dma_buf_commit(s, size); + + s->end_transfer_func(s); + + if (!(s->status & DRQ_STAT)) { + /* done with PIO send/receive */ + ahci_write_fis_pio(ad, le32_to_cpu(ad->cur_cmd->status)); + } +} + +static void ahci_start_dma(IDEDMA *dma, IDEState *s, + BlockCompletionFunc *dma_cb) +{ + AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); + DPRINTF(ad->port_no, "\n"); + s->io_buffer_offset = 0; + dma_cb(s, 0); +} + +static void ahci_restart_dma(IDEDMA *dma) +{ + /* Nothing to do, ahci_start_dma already resets s->io_buffer_offset. */ +} + +/** + * IDE/PIO restarts are handled by the core layer, but NCQ commands + * need an extra kick from the AHCI HBA. + */ +static void ahci_restart(IDEDMA *dma) +{ + AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); + int i; + + for (i = 0; i < AHCI_MAX_CMDS; i++) { + NCQTransferState *ncq_tfs = &ad->ncq_tfs[i]; + if (ncq_tfs->halt) { + execute_ncq_command(ncq_tfs); + } + } +} + +/** + * Called in DMA and PIO R/W chains to read the PRDT. + * Not shared with NCQ pathways. + */ +static int32_t ahci_dma_prepare_buf(IDEDMA *dma, int32_t limit) +{ + AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); + IDEState *s = &ad->port.ifs[0]; + + if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd, + limit, s->io_buffer_offset) == -1) { + DPRINTF(ad->port_no, "ahci_dma_prepare_buf failed.\n"); + return -1; + } + s->io_buffer_size = s->sg.size; + + DPRINTF(ad->port_no, "len=%#x\n", s->io_buffer_size); + return s->io_buffer_size; +} + +/** + * Updates the command header with a bytes-read value. + * Called via dma_buf_commit, for both DMA and PIO paths. + * sglist destruction is handled within dma_buf_commit. 
+ */ +static void ahci_commit_buf(IDEDMA *dma, uint32_t tx_bytes) +{ + AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); + + tx_bytes += le32_to_cpu(ad->cur_cmd->status); + ad->cur_cmd->status = cpu_to_le32(tx_bytes); +} + +static int ahci_dma_rw_buf(IDEDMA *dma, int is_write) +{ + AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); + IDEState *s = &ad->port.ifs[0]; + uint8_t *p = s->io_buffer + s->io_buffer_index; + int l = s->io_buffer_size - s->io_buffer_index; + + if (ahci_populate_sglist(ad, &s->sg, ad->cur_cmd, l, s->io_buffer_offset)) { + return 0; + } + + if (is_write) { + dma_buf_read(p, l, &s->sg); + } else { + dma_buf_write(p, l, &s->sg); + } + + /* free sglist, update byte count */ + dma_buf_commit(s, l); + + s->io_buffer_index += l; + + DPRINTF(ad->port_no, "len=%#x\n", l); + + return 1; +} + +static void ahci_cmd_done(IDEDMA *dma) +{ + AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); + + DPRINTF(ad->port_no, "cmd done\n"); + + /* update d2h status */ + ahci_write_fis_d2h(ad); + + if (!ad->check_bh) { + /* maybe we still have something to process, check later */ + ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad); + qemu_bh_schedule(ad->check_bh); + } +} + +static void ahci_irq_set(void *opaque, int n, int level) +{ +} + +static const IDEDMAOps ahci_dma_ops = { + .start_dma = ahci_start_dma, + .restart = ahci_restart, + .restart_dma = ahci_restart_dma, + .start_transfer = ahci_start_transfer, + .prepare_buf = ahci_dma_prepare_buf, + .commit_buf = ahci_commit_buf, + .rw_buf = ahci_dma_rw_buf, + .cmd_done = ahci_cmd_done, +}; + +void ahci_init(AHCIState *s, DeviceState *qdev) +{ + s->container = qdev; + /* XXX BAR size should be 1k, but that breaks, so bump it to 4k for now */ + memory_region_init_io(&s->mem, OBJECT(qdev), &ahci_mem_ops, s, + "ahci", AHCI_MEM_BAR_SIZE); + memory_region_init_io(&s->idp, OBJECT(qdev), &ahci_idp_ops, s, + "ahci-idp", 32); +} + +void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports) +{ + qemu_irq *irqs; + int i; + + s->as = as; + s->ports = ports; + s->dev = g_new0(AHCIDevice, ports); + ahci_reg_init(s); + irqs = qemu_allocate_irqs(ahci_irq_set, s, s->ports); + for (i = 0; i < s->ports; i++) { + AHCIDevice *ad = &s->dev[i]; + + ide_bus_new(&ad->port, sizeof(ad->port), qdev, i, 1); + ide_init2(&ad->port, irqs[i]); + + ad->hba = s; + ad->port_no = i; + ad->port.dma = &ad->dma; + ad->port.dma->ops = &ahci_dma_ops; + ide_register_restart_cb(&ad->port); + } +} + +void ahci_uninit(AHCIState *s) +{ + g_free(s->dev); +} + +void ahci_reset(AHCIState *s) +{ + AHCIPortRegs *pr; + int i; + + s->control_regs.irqstatus = 0; + /* AHCI Enable (AE) + * The implementation of this bit is dependent upon the value of the + * CAP.SAM bit. If CAP.SAM is '0', then GHC.AE shall be read-write and + * shall have a reset value of '0'. If CAP.SAM is '1', then AE shall be + * read-only and shall have a reset value of '1'. + * + * We set HOST_CAP_AHCI so we must enable AHCI at reset. 
+ */ + s->control_regs.ghc = HOST_CTL_AHCI_EN; + + for (i = 0; i < s->ports; i++) { + pr = &s->dev[i].port_regs; + pr->irq_stat = 0; + pr->irq_mask = 0; + pr->scr_ctl = 0; + pr->cmd = PORT_CMD_SPIN_UP | PORT_CMD_POWER_ON; + ahci_reset_port(s, i); + } +} + +static const VMStateDescription vmstate_ncq_tfs = { + .name = "ncq state", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(sector_count, NCQTransferState), + VMSTATE_UINT64(lba, NCQTransferState), + VMSTATE_UINT8(tag, NCQTransferState), + VMSTATE_UINT8(cmd, NCQTransferState), + VMSTATE_UINT8(slot, NCQTransferState), + VMSTATE_BOOL(used, NCQTransferState), + VMSTATE_BOOL(halt, NCQTransferState), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_ahci_device = { + .name = "ahci port", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_IDE_BUS(port, AHCIDevice), + VMSTATE_IDE_DRIVE(port.ifs[0], AHCIDevice), + VMSTATE_UINT32(port_state, AHCIDevice), + VMSTATE_UINT32(finished, AHCIDevice), + VMSTATE_UINT32(port_regs.lst_addr, AHCIDevice), + VMSTATE_UINT32(port_regs.lst_addr_hi, AHCIDevice), + VMSTATE_UINT32(port_regs.fis_addr, AHCIDevice), + VMSTATE_UINT32(port_regs.fis_addr_hi, AHCIDevice), + VMSTATE_UINT32(port_regs.irq_stat, AHCIDevice), + VMSTATE_UINT32(port_regs.irq_mask, AHCIDevice), + VMSTATE_UINT32(port_regs.cmd, AHCIDevice), + VMSTATE_UINT32(port_regs.tfdata, AHCIDevice), + VMSTATE_UINT32(port_regs.sig, AHCIDevice), + VMSTATE_UINT32(port_regs.scr_stat, AHCIDevice), + VMSTATE_UINT32(port_regs.scr_ctl, AHCIDevice), + VMSTATE_UINT32(port_regs.scr_err, AHCIDevice), + VMSTATE_UINT32(port_regs.scr_act, AHCIDevice), + VMSTATE_UINT32(port_regs.cmd_issue, AHCIDevice), + VMSTATE_BOOL(done_atapi_packet, AHCIDevice), + VMSTATE_INT32(busy_slot, AHCIDevice), + VMSTATE_BOOL(init_d2h_sent, AHCIDevice), + VMSTATE_STRUCT_ARRAY(ncq_tfs, AHCIDevice, AHCI_MAX_CMDS, + 1, vmstate_ncq_tfs, NCQTransferState), + VMSTATE_END_OF_LIST() + }, +}; + +static int ahci_state_post_load(void *opaque, int version_id) +{ + int i, j; + struct AHCIDevice *ad; + NCQTransferState *ncq_tfs; + AHCIState *s = opaque; + + for (i = 0; i < s->ports; i++) { + ad = &s->dev[i]; + + /* Only remap the CLB address if appropriate, disallowing a state + * transition from 'on' to 'off' it should be consistent here. */ + if (ahci_cond_start_engines(ad, false) != 0) { + return -1; + } + + for (j = 0; j < AHCI_MAX_CMDS; j++) { + ncq_tfs = &ad->ncq_tfs[j]; + ncq_tfs->drive = ad; + + if (ncq_tfs->used != ncq_tfs->halt) { + return -1; + } + if (!ncq_tfs->halt) { + continue; + } + if (!is_ncq(ncq_tfs->cmd)) { + return -1; + } + if (ncq_tfs->slot != ncq_tfs->tag) { + return -1; + } + /* If ncq_tfs->halt is justly set, the engine should be engaged, + * and the command list buffer should be mapped. */ + ncq_tfs->cmdh = get_cmd_header(s, i, ncq_tfs->slot); + if (!ncq_tfs->cmdh) { + return -1; + } + ahci_populate_sglist(ncq_tfs->drive, &ncq_tfs->sglist, + ncq_tfs->cmdh, ncq_tfs->sector_count * 512, + 0); + if (ncq_tfs->sector_count != ncq_tfs->sglist.size >> 9) { + return -1; + } + } + + + /* + * If an error is present, ad->busy_slot will be valid and not -1. + * In this case, an operation is waiting to resume and will re-check + * for additional AHCI commands to execute upon completion. + * + * In the case where no error was present, busy_slot will be -1, + * and we should check to see if there are additional commands waiting. 
+ */ + if (ad->busy_slot == -1) { + check_cmd(s, i); + } else { + /* We are in the middle of a command, and may need to access + * the command header in guest memory again. */ + if (ad->busy_slot < 0 || ad->busy_slot >= AHCI_MAX_CMDS) { + return -1; + } + ad->cur_cmd = get_cmd_header(s, i, ad->busy_slot); + } + } + + return 0; +} + +const VMStateDescription vmstate_ahci = { + .name = "ahci", + .version_id = 1, + .post_load = ahci_state_post_load, + .fields = (VMStateField[]) { + VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev, AHCIState, ports, + vmstate_ahci_device, AHCIDevice), + VMSTATE_UINT32(control_regs.cap, AHCIState), + VMSTATE_UINT32(control_regs.ghc, AHCIState), + VMSTATE_UINT32(control_regs.irqstatus, AHCIState), + VMSTATE_UINT32(control_regs.impl, AHCIState), + VMSTATE_UINT32(control_regs.version, AHCIState), + VMSTATE_UINT32(idp_index, AHCIState), + VMSTATE_INT32_EQUAL(ports, AHCIState), + VMSTATE_END_OF_LIST() + }, +}; + +static const VMStateDescription vmstate_sysbus_ahci = { + .name = "sysbus-ahci", + .fields = (VMStateField[]) { + VMSTATE_AHCI(ahci, SysbusAHCIState), + VMSTATE_END_OF_LIST() + }, +}; + +static void sysbus_ahci_reset(DeviceState *dev) +{ + SysbusAHCIState *s = SYSBUS_AHCI(dev); + + ahci_reset(&s->ahci); +} + +static void sysbus_ahci_init(Object *obj) +{ + SysbusAHCIState *s = SYSBUS_AHCI(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + ahci_init(&s->ahci, DEVICE(obj)); + + sysbus_init_mmio(sbd, &s->ahci.mem); + sysbus_init_irq(sbd, &s->ahci.irq); +} + +static void sysbus_ahci_realize(DeviceState *dev, Error **errp) +{ + SysbusAHCIState *s = SYSBUS_AHCI(dev); + + ahci_realize(&s->ahci, dev, &address_space_memory, s->num_ports); +} + +static Property sysbus_ahci_properties[] = { + DEFINE_PROP_UINT32("num-ports", SysbusAHCIState, num_ports, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sysbus_ahci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = sysbus_ahci_realize; + dc->vmsd = &vmstate_sysbus_ahci; + dc->props = sysbus_ahci_properties; + dc->reset = sysbus_ahci_reset; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo sysbus_ahci_info = { + .name = TYPE_SYSBUS_AHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SysbusAHCIState), + .instance_init = sysbus_ahci_init, + .class_init = sysbus_ahci_class_init, +}; + +#define ALLWINNER_AHCI_BISTAFR ((0xa0 - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_BISTCR ((0xa4 - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_BISTFCTR ((0xa8 - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_BISTSR ((0xac - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_BISTDECR ((0xb0 - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_DIAGNR0 ((0xb4 - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_DIAGNR1 ((0xb8 - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_OOBR ((0xbc - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_PHYCS0R ((0xc0 - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_PHYCS1R ((0xc4 - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_PHYCS2R ((0xc8 - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_TIMER1MS ((0xe0 - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_GPARAM1R ((0xe8 - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_GPARAM2R ((0xec - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_PPARAMR ((0xf0 - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_TESTR ((0xf4 - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_VERSIONR ((0xf8 
- ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_IDR ((0xfc - ALLWINNER_AHCI_MMIO_OFF) / 4) +#define ALLWINNER_AHCI_RWCR ((0xfc - ALLWINNER_AHCI_MMIO_OFF) / 4) + +static uint64_t allwinner_ahci_mem_read(void *opaque, hwaddr addr, + unsigned size) +{ + AllwinnerAHCIState *a = opaque; + uint64_t val = a->regs[addr/4]; + + switch (addr / 4) { + case ALLWINNER_AHCI_PHYCS0R: + val |= 0x2 << 28; + break; + case ALLWINNER_AHCI_PHYCS2R: + val &= ~(0x1 << 24); + break; + } + DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n", + addr, val, size); + return val; +} + +static void allwinner_ahci_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + AllwinnerAHCIState *a = opaque; + + DPRINTF(-1, "addr=0x%" HWADDR_PRIx " val=0x%" PRIx64 ", size=%d\n", + addr, val, size); + a->regs[addr/4] = val; +} + +static const MemoryRegionOps allwinner_ahci_mem_ops = { + .read = allwinner_ahci_mem_read, + .write = allwinner_ahci_mem_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void allwinner_ahci_init(Object *obj) +{ + SysbusAHCIState *s = SYSBUS_AHCI(obj); + AllwinnerAHCIState *a = ALLWINNER_AHCI(obj); + + memory_region_init_io(&a->mmio, OBJECT(obj), &allwinner_ahci_mem_ops, a, + "allwinner-ahci", ALLWINNER_AHCI_MMIO_SIZE); + memory_region_add_subregion(&s->ahci.mem, ALLWINNER_AHCI_MMIO_OFF, + &a->mmio); +} + +static const VMStateDescription vmstate_allwinner_ahci = { + .name = "allwinner-ahci", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AllwinnerAHCIState, + ALLWINNER_AHCI_MMIO_SIZE/4), + VMSTATE_END_OF_LIST() + } +}; + +static void allwinner_ahci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->vmsd = &vmstate_allwinner_ahci; +} + +static const TypeInfo allwinner_ahci_info = { + .name = TYPE_ALLWINNER_AHCI, + .parent = TYPE_SYSBUS_AHCI, + .instance_size = sizeof(AllwinnerAHCIState), + .instance_init = allwinner_ahci_init, + .class_init = allwinner_ahci_class_init, +}; + +static void sysbus_ahci_register_types(void) +{ + type_register_static(&sysbus_ahci_info); + type_register_static(&allwinner_ahci_info); +} + +type_init(sysbus_ahci_register_types) + +void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd) +{ + AHCIPCIState *d = ICH_AHCI(dev); + AHCIState *ahci = &d->ahci; + int i; + + for (i = 0; i < ahci->ports; i++) { + if (hd[i] == NULL) { + continue; + } + ide_create_drive(&ahci->dev[i].port, 0, hd[i]); + } + +} diff --git a/src/hw/ide/ahci.h b/src/hw/ide/ahci.h new file mode 100644 index 0000000..bc777ed --- /dev/null +++ b/src/hw/ide/ahci.h @@ -0,0 +1,405 @@ +/* + * QEMU AHCI Emulation + * + * Copyright (c) 2010 qiaochong@loongson.cn + * Copyright (c) 2010 Roland Elek <elek.roland@gmail.com> + * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de> + * Copyright (c) 2010 Alexander Graf <agraf@suse.de> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + * + */ + +#ifndef HW_IDE_AHCI_H +#define HW_IDE_AHCI_H + +#include <hw/sysbus.h> + +#define AHCI_MEM_BAR_SIZE 0x1000 +#define AHCI_MAX_PORTS 32 +#define AHCI_MAX_SG 168 /* hardware max is 64K */ +#define AHCI_DMA_BOUNDARY 0xffffffff +#define AHCI_USE_CLUSTERING 0 +#define AHCI_MAX_CMDS 32 +#define AHCI_CMD_SZ 32 +#define AHCI_CMD_SLOT_SZ (AHCI_MAX_CMDS * AHCI_CMD_SZ) +#define AHCI_RX_FIS_SZ 256 +#define AHCI_CMD_TBL_CDB 0x40 +#define AHCI_CMD_TBL_HDR_SZ 0x80 +#define AHCI_CMD_TBL_SZ (AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16)) +#define AHCI_CMD_TBL_AR_SZ (AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS) +#define AHCI_PORT_PRIV_DMA_SZ (AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ + \ + AHCI_RX_FIS_SZ) + +#define AHCI_IRQ_ON_SG (1U << 31) +#define AHCI_CMD_ATAPI (1 << 5) +#define AHCI_CMD_WRITE (1 << 6) +#define AHCI_CMD_PREFETCH (1 << 7) +#define AHCI_CMD_RESET (1 << 8) +#define AHCI_CMD_CLR_BUSY (1 << 10) + +#define RX_FIS_D2H_REG 0x40 /* offset of D2H Register FIS data */ +#define RX_FIS_SDB 0x58 /* offset of SDB FIS data */ +#define RX_FIS_UNK 0x60 /* offset of Unknown FIS data */ + +/* global controller registers */ +#define HOST_CAP 0x00 /* host capabilities */ +#define HOST_CTL 0x04 /* global host control */ +#define HOST_IRQ_STAT 0x08 /* interrupt status */ +#define HOST_PORTS_IMPL 0x0c /* bitmap of implemented ports */ +#define HOST_VERSION 0x10 /* AHCI spec. version compliancy */ + +/* HOST_CTL bits */ +#define HOST_CTL_RESET (1 << 0) /* reset controller; self-clear */ +#define HOST_CTL_IRQ_EN (1 << 1) /* global IRQ enable */ +#define HOST_CTL_AHCI_EN (1U << 31) /* AHCI enabled */ + +/* HOST_CAP bits */ +#define HOST_CAP_SSC (1 << 14) /* Slumber capable */ +#define HOST_CAP_AHCI (1 << 18) /* AHCI only */ +#define HOST_CAP_CLO (1 << 24) /* Command List Override support */ +#define HOST_CAP_SSS (1 << 27) /* Staggered Spin-up */ +#define HOST_CAP_NCQ (1 << 30) /* Native Command Queueing */ +#define HOST_CAP_64 (1U << 31) /* PCI DAC (64-bit DMA) support */ + +/* registers for each SATA port */ +#define PORT_LST_ADDR 0x00 /* command list DMA addr */ +#define PORT_LST_ADDR_HI 0x04 /* command list DMA addr hi */ +#define PORT_FIS_ADDR 0x08 /* FIS rx buf addr */ +#define PORT_FIS_ADDR_HI 0x0c /* FIS rx buf addr hi */ +#define PORT_IRQ_STAT 0x10 /* interrupt status */ +#define PORT_IRQ_MASK 0x14 /* interrupt enable/disable mask */ +#define PORT_CMD 0x18 /* port command */ +#define PORT_TFDATA 0x20 /* taskfile data */ +#define PORT_SIG 0x24 /* device TF signature */ +#define PORT_SCR_STAT 0x28 /* SATA phy register: SStatus */ +#define PORT_SCR_CTL 0x2c /* SATA phy register: SControl */ +#define PORT_SCR_ERR 0x30 /* SATA phy register: SError */ +#define PORT_SCR_ACT 0x34 /* SATA phy register: SActive */ +#define PORT_CMD_ISSUE 0x38 /* command issue */ +#define PORT_RESERVED 0x3c /* reserved */ + +/* PORT_IRQ_{STAT,MASK} bits */ +#define PORT_IRQ_COLD_PRES (1U << 31) /* cold presence detect */ +#define PORT_IRQ_TF_ERR (1 << 30) /* task file error */ +#define PORT_IRQ_HBUS_ERR (1 << 29) /* host bus fatal error */ +#define PORT_IRQ_HBUS_DATA_ERR (1 << 28) /* host bus data error */ +#define PORT_IRQ_IF_ERR (1 << 27) /* interface fatal error */ +#define PORT_IRQ_IF_NONFATAL (1 << 26) /* interface non-fatal error */ +#define PORT_IRQ_OVERFLOW (1 << 24) /* xfer exhausted available S/G */ +#define PORT_IRQ_BAD_PMP (1 << 23) /* incorrect port multiplier */ + +#define 
PORT_IRQ_PHYRDY (1 << 22) /* PhyRdy changed */ +#define PORT_IRQ_DEV_ILCK (1 << 7) /* device interlock */ +#define PORT_IRQ_CONNECT (1 << 6) /* port connect change status */ +#define PORT_IRQ_SG_DONE (1 << 5) /* descriptor processed */ +#define PORT_IRQ_UNK_FIS (1 << 4) /* unknown FIS rx'd */ +#define PORT_IRQ_SDB_FIS (1 << 3) /* Set Device Bits FIS rx'd */ +#define PORT_IRQ_DMAS_FIS (1 << 2) /* DMA Setup FIS rx'd */ +#define PORT_IRQ_PIOS_FIS (1 << 1) /* PIO Setup FIS rx'd */ +#define PORT_IRQ_D2H_REG_FIS (1 << 0) /* D2H Register FIS rx'd */ + +#define PORT_IRQ_FREEZE (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | \ + PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY | \ + PORT_IRQ_UNK_FIS) +#define PORT_IRQ_ERROR (PORT_IRQ_FREEZE | PORT_IRQ_TF_ERR | \ + PORT_IRQ_HBUS_DATA_ERR) +#define DEF_PORT_IRQ (PORT_IRQ_ERROR | PORT_IRQ_SG_DONE | \ + PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS | \ + PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS) + +/* PORT_CMD bits */ +#define PORT_CMD_ATAPI (1 << 24) /* Device is ATAPI */ +#define PORT_CMD_LIST_ON (1 << 15) /* cmd list DMA engine running */ +#define PORT_CMD_FIS_ON (1 << 14) /* FIS DMA engine running */ +#define PORT_CMD_FIS_RX (1 << 4) /* Enable FIS receive DMA engine */ +#define PORT_CMD_CLO (1 << 3) /* Command list override */ +#define PORT_CMD_POWER_ON (1 << 2) /* Power up device */ +#define PORT_CMD_SPIN_UP (1 << 1) /* Spin up device */ +#define PORT_CMD_START (1 << 0) /* Enable port DMA engine */ + +#define PORT_CMD_ICC_MASK (0xfU << 28) /* i/f ICC state mask */ +#define PORT_CMD_ICC_ACTIVE (0x1 << 28) /* Put i/f in active state */ +#define PORT_CMD_ICC_PARTIAL (0x2 << 28) /* Put i/f in partial state */ +#define PORT_CMD_ICC_SLUMBER (0x6 << 28) /* Put i/f in slumber state */ + +#define PORT_CMD_RO_MASK 0x007dffe0 /* Which CMD bits are read only? */ + +/* ap->flags bits */ +#define AHCI_FLAG_NO_NCQ (1 << 24) +#define AHCI_FLAG_IGN_IRQ_IF_ERR (1 << 25) /* ignore IRQ_IF_ERR */ +#define AHCI_FLAG_HONOR_PI (1 << 26) /* honor PORTS_IMPL */ +#define AHCI_FLAG_IGN_SERR_INTERNAL (1 << 27) /* ignore SERR_INTERNAL */ +#define AHCI_FLAG_32BIT_ONLY (1 << 28) /* force 32bit */ + +#define ATA_SRST (1 << 2) /* software reset */ + +#define STATE_RUN 0 +#define STATE_RESET 1 + +#define SATA_SCR_SSTATUS_DET_NODEV 0x0 +#define SATA_SCR_SSTATUS_DET_DEV_PRESENT_PHY_UP 0x3 + +#define SATA_SCR_SSTATUS_SPD_NODEV 0x00 +#define SATA_SCR_SSTATUS_SPD_GEN1 0x10 + +#define SATA_SCR_SSTATUS_IPM_NODEV 0x000 +#define SATA_SCR_SSTATUS_IPM_ACTIVE 0X100 + +#define AHCI_SCR_SCTL_DET 0xf + +#define SATA_FIS_TYPE_REGISTER_H2D 0x27 +#define SATA_FIS_REG_H2D_UPDATE_COMMAND_REGISTER 0x80 +#define SATA_FIS_TYPE_REGISTER_D2H 0x34 +#define SATA_FIS_TYPE_PIO_SETUP 0x5f +#define SATA_FIS_TYPE_SDB 0xA1 + +#define AHCI_CMD_HDR_CMD_FIS_LEN 0x1f +#define AHCI_CMD_HDR_PRDT_LEN 16 + +#define SATA_SIGNATURE_CDROM 0xeb140101 +#define SATA_SIGNATURE_DISK 0x00000101 + +#define AHCI_GENERIC_HOST_CONTROL_REGS_MAX_ADDR 0x20 + /* Shouldn't this be 0x2c? 
*/ + +#define AHCI_PORT_REGS_START_ADDR 0x100 +#define AHCI_PORT_ADDR_OFFSET_MASK 0x7f +#define AHCI_PORT_ADDR_OFFSET_LEN 0x80 + +#define AHCI_NUM_COMMAND_SLOTS 31 +#define AHCI_SUPPORTED_SPEED 20 +#define AHCI_SUPPORTED_SPEED_GEN1 1 +#define AHCI_VERSION_1_0 0x10000 + +#define AHCI_PROGMODE_MAJOR_REV_1 1 + +#define AHCI_COMMAND_TABLE_ACMD 0x40 + +#define AHCI_PRDT_SIZE_MASK 0x3fffff + +#define IDE_FEATURE_DMA 1 + +#define READ_FPDMA_QUEUED 0x60 +#define WRITE_FPDMA_QUEUED 0x61 +#define NCQ_NON_DATA 0x63 +#define RECEIVE_FPDMA_QUEUED 0x65 +#define SEND_FPDMA_QUEUED 0x64 + +#define NCQ_FIS_FUA_MASK 0x80 +#define NCQ_FIS_RARC_MASK 0x01 + +#define RES_FIS_DSFIS 0x00 +#define RES_FIS_PSFIS 0x20 +#define RES_FIS_RFIS 0x40 +#define RES_FIS_SDBFIS 0x58 +#define RES_FIS_UFIS 0x60 + +#define SATA_CAP_SIZE 0x8 +#define SATA_CAP_REV 0x2 +#define SATA_CAP_BAR 0x4 + +typedef struct AHCIControlRegs { + uint32_t cap; + uint32_t ghc; + uint32_t irqstatus; + uint32_t impl; + uint32_t version; +} AHCIControlRegs; + +typedef struct AHCIPortRegs { + uint32_t lst_addr; + uint32_t lst_addr_hi; + uint32_t fis_addr; + uint32_t fis_addr_hi; + uint32_t irq_stat; + uint32_t irq_mask; + uint32_t cmd; + uint32_t unused0; + uint32_t tfdata; + uint32_t sig; + uint32_t scr_stat; + uint32_t scr_ctl; + uint32_t scr_err; + uint32_t scr_act; + uint32_t cmd_issue; + uint32_t reserved; +} AHCIPortRegs; + +typedef struct AHCICmdHdr { + uint16_t opts; + uint16_t prdtl; + uint32_t status; + uint64_t tbl_addr; + uint32_t reserved[4]; +} QEMU_PACKED AHCICmdHdr; + +typedef struct AHCI_SG { + uint64_t addr; + uint32_t reserved; + uint32_t flags_size; +} QEMU_PACKED AHCI_SG; + +typedef struct AHCIDevice AHCIDevice; + +typedef struct NCQTransferState { + AHCIDevice *drive; + BlockAIOCB *aiocb; + AHCICmdHdr *cmdh; + QEMUSGList sglist; + BlockAcctCookie acct; + uint32_t sector_count; + uint64_t lba; + uint8_t tag; + uint8_t cmd; + uint8_t slot; + bool used; + bool halt; +} NCQTransferState; + +struct AHCIDevice { + IDEDMA dma; + IDEBus port; + int port_no; + uint32_t port_state; + uint32_t finished; + AHCIPortRegs port_regs; + struct AHCIState *hba; + QEMUBH *check_bh; + uint8_t *lst; + uint8_t *res_fis; + bool done_atapi_packet; + int32_t busy_slot; + bool init_d2h_sent; + AHCICmdHdr *cur_cmd; + NCQTransferState ncq_tfs[AHCI_MAX_CMDS]; +}; + +typedef struct AHCIState { + DeviceState *container; + + AHCIDevice *dev; + AHCIControlRegs control_regs; + MemoryRegion mem; + MemoryRegion idp; /* Index-Data Pair I/O port space */ + unsigned idp_offset; /* Offset of index in I/O port space */ + uint32_t idp_index; /* Current IDP index */ + int32_t ports; + qemu_irq irq; + AddressSpace *as; +} AHCIState; + +typedef struct AHCIPCIState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + AHCIState ahci; +} AHCIPCIState; + +#define TYPE_ICH9_AHCI "ich9-ahci" + +#define ICH_AHCI(obj) \ + OBJECT_CHECK(AHCIPCIState, (obj), TYPE_ICH9_AHCI) + +extern const VMStateDescription vmstate_ahci; + +#define VMSTATE_AHCI(_field, _state) { \ + .name = (stringify(_field)), \ + .size = sizeof(AHCIState), \ + .vmsd = &vmstate_ahci, \ + .flags = VMS_STRUCT, \ + .offset = vmstate_offset_value(_state, _field, AHCIState), \ +} + +/** + * NCQFrame is the same as a Register H2D FIS (described in SATA 3.2), + * but some fields have been re-mapped and re-purposed, as seen in + * SATA 3.2 section 13.6.4.1 ("READ FPDMA QUEUED") + * + * cmd_fis[3], feature 7:0, becomes sector count 7:0. + * cmd_fis[7], device 7:0, uses bit 7 as the Force Unit Access bit. 
+ * cmd_fis[11], feature 15:8, becomes sector count 15:8. + * cmd_fis[12], count 7:0, becomes the NCQ TAG (7:3) and RARC bit (0) + * cmd_fis[13], count 15:8, becomes the priority value (7:6) + * bytes 16-19 become an le32 "auxiliary" field. + */ +typedef struct NCQFrame { + uint8_t fis_type; + uint8_t c; + uint8_t command; + uint8_t sector_count_low; /* (feature 7:0) */ + uint8_t lba0; + uint8_t lba1; + uint8_t lba2; + uint8_t fua; /* (device 7:0) */ + uint8_t lba3; + uint8_t lba4; + uint8_t lba5; + uint8_t sector_count_high; /* (feature 15:8) */ + uint8_t tag; /* (count 0:7) */ + uint8_t prio; /* (count 15:8) */ + uint8_t icc; + uint8_t control; + uint8_t aux0; + uint8_t aux1; + uint8_t aux2; + uint8_t aux3; +} QEMU_PACKED NCQFrame; + +typedef struct SDBFIS { + uint8_t type; + uint8_t flags; + uint8_t status; + uint8_t error; + uint32_t payload; +} QEMU_PACKED SDBFIS; + +void ahci_realize(AHCIState *s, DeviceState *qdev, AddressSpace *as, int ports); +void ahci_init(AHCIState *s, DeviceState *qdev); +void ahci_uninit(AHCIState *s); + +void ahci_reset(AHCIState *s); + +void ahci_ide_create_devs(PCIDevice *dev, DriveInfo **hd); + +#define TYPE_SYSBUS_AHCI "sysbus-ahci" +#define SYSBUS_AHCI(obj) OBJECT_CHECK(SysbusAHCIState, (obj), TYPE_SYSBUS_AHCI) + +typedef struct SysbusAHCIState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + AHCIState ahci; + uint32_t num_ports; +} SysbusAHCIState; + +#define TYPE_ALLWINNER_AHCI "allwinner-ahci" +#define ALLWINNER_AHCI(obj) OBJECT_CHECK(AllwinnerAHCIState, (obj), \ + TYPE_ALLWINNER_AHCI) + +#define ALLWINNER_AHCI_MMIO_OFF 0x80 +#define ALLWINNER_AHCI_MMIO_SIZE 0x80 + +struct AllwinnerAHCIState { + /*< private >*/ + SysbusAHCIState parent_obj; + /*< public >*/ + + MemoryRegion mmio; + uint32_t regs[ALLWINNER_AHCI_MMIO_SIZE/4]; +}; + +#endif /* HW_IDE_AHCI_H */ diff --git a/src/hw/ide/atapi.c b/src/hw/ide/atapi.c new file mode 100644 index 0000000..65f8dd4 --- /dev/null +++ b/src/hw/ide/atapi.c @@ -0,0 +1,1369 @@ +/* + * QEMU ATAPI Emulation + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "hw/ide/internal.h" +#include "hw/scsi/scsi.h" +#include "sysemu/block-backend.h" + +static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret); + +static void padstr8(uint8_t *buf, int buf_size, const char *src) +{ + int i; + for(i = 0; i < buf_size; i++) { + if (*src) + buf[i] = *src++; + else + buf[i] = ' '; + } +} + +static inline void cpu_to_ube16(uint8_t *buf, int val) +{ + buf[0] = val >> 8; + buf[1] = val & 0xff; +} + +static inline void cpu_to_ube32(uint8_t *buf, unsigned int val) +{ + buf[0] = val >> 24; + buf[1] = val >> 16; + buf[2] = val >> 8; + buf[3] = val & 0xff; +} + +static inline int ube16_to_cpu(const uint8_t *buf) +{ + return (buf[0] << 8) | buf[1]; +} + +static inline int ube32_to_cpu(const uint8_t *buf) +{ + return (buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]; +} + +static void lba_to_msf(uint8_t *buf, int lba) +{ + lba += 150; + buf[0] = (lba / 75) / 60; + buf[1] = (lba / 75) % 60; + buf[2] = lba % 75; +} + +static inline int media_present(IDEState *s) +{ + return !s->tray_open && s->nb_sectors > 0; +} + +/* XXX: DVDs that could fit on a CD will be reported as a CD */ +static inline int media_is_dvd(IDEState *s) +{ + return (media_present(s) && s->nb_sectors > CD_MAX_SECTORS); +} + +static inline int media_is_cd(IDEState *s) +{ + return (media_present(s) && s->nb_sectors <= CD_MAX_SECTORS); +} + +static void cd_data_to_raw(uint8_t *buf, int lba) +{ + /* sync bytes */ + buf[0] = 0x00; + memset(buf + 1, 0xff, 10); + buf[11] = 0x00; + buf += 12; + /* MSF */ + lba_to_msf(buf, lba); + buf[3] = 0x01; /* mode 1 data */ + buf += 4; + /* data */ + buf += 2048; + /* XXX: ECC not computed */ + memset(buf, 0, 288); +} + +static int +cd_read_sector_sync(IDEState *s) +{ + int ret; + block_acct_start(blk_get_stats(s->blk), &s->acct, + 4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ); + +#ifdef DEBUG_IDE_ATAPI + printf("cd_read_sector_sync: lba=%d\n", s->lba); +#endif + + switch (s->cd_sector_size) { + case 2048: + ret = blk_read(s->blk, (int64_t)s->lba << 2, + s->io_buffer, 4); + break; + case 2352: + ret = blk_read(s->blk, (int64_t)s->lba << 2, + s->io_buffer + 16, 4); + if (ret >= 0) { + cd_data_to_raw(s->io_buffer, s->lba); + } + break; + default: + block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ); + return -EIO; + } + + if (ret < 0) { + block_acct_failed(blk_get_stats(s->blk), &s->acct); + } else { + block_acct_done(blk_get_stats(s->blk), &s->acct); + s->lba++; + s->io_buffer_index = 0; + } + + return ret; +} + +static void cd_read_sector_cb(void *opaque, int ret) +{ + IDEState *s = opaque; + +#ifdef DEBUG_IDE_ATAPI + printf("cd_read_sector_cb: lba=%d ret=%d\n", s->lba, ret); +#endif + + if (ret < 0) { + block_acct_failed(blk_get_stats(s->blk), &s->acct); + ide_atapi_io_error(s, ret); + return; + } + + block_acct_done(blk_get_stats(s->blk), &s->acct); + + if (s->cd_sector_size == 2352) { + cd_data_to_raw(s->io_buffer, s->lba); + } + + s->lba++; + s->io_buffer_index = 0; + s->status &= ~BUSY_STAT; + + ide_atapi_cmd_reply_end(s); +} + +static int cd_read_sector(IDEState *s) +{ + if (s->cd_sector_size != 2048 && s->cd_sector_size != 2352) { + block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ); + return -EINVAL; + } + + s->iov.iov_base = (s->cd_sector_size == 2352) ? 
+ s->io_buffer + 16 : s->io_buffer; + + s->iov.iov_len = 4 * BDRV_SECTOR_SIZE; + qemu_iovec_init_external(&s->qiov, &s->iov, 1); + +#ifdef DEBUG_IDE_ATAPI + printf("cd_read_sector: lba=%d\n", s->lba); +#endif + + block_acct_start(blk_get_stats(s->blk), &s->acct, + 4 * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ); + + ide_buffered_readv(s, (int64_t)s->lba << 2, &s->qiov, 4, + cd_read_sector_cb, s); + + s->status |= BUSY_STAT; + return 0; +} + +void ide_atapi_cmd_ok(IDEState *s) +{ + s->error = 0; + s->status = READY_STAT | SEEK_STAT; + s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD; + ide_transfer_stop(s); + ide_set_irq(s->bus); +} + +void ide_atapi_cmd_error(IDEState *s, int sense_key, int asc) +{ +#ifdef DEBUG_IDE_ATAPI + printf("atapi_cmd_error: sense=0x%x asc=0x%x\n", sense_key, asc); +#endif + s->error = sense_key << 4; + s->status = READY_STAT | ERR_STAT; + s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD; + s->sense_key = sense_key; + s->asc = asc; + ide_transfer_stop(s); + ide_set_irq(s->bus); +} + +void ide_atapi_io_error(IDEState *s, int ret) +{ + /* XXX: handle more errors */ + if (ret == -ENOMEDIUM) { + ide_atapi_cmd_error(s, NOT_READY, + ASC_MEDIUM_NOT_PRESENT); + } else { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_LOGICAL_BLOCK_OOR); + } +} + +static uint16_t atapi_byte_count_limit(IDEState *s) +{ + uint16_t bcl; + + bcl = s->lcyl | (s->hcyl << 8); + if (bcl == 0xffff) { + return 0xfffe; + } + return bcl; +} + +/* The whole ATAPI transfer logic is handled in this function */ +void ide_atapi_cmd_reply_end(IDEState *s) +{ + int byte_count_limit, size, ret; +#ifdef DEBUG_IDE_ATAPI + printf("reply: tx_size=%d elem_tx_size=%d index=%d\n", + s->packet_transfer_size, + s->elementary_transfer_size, + s->io_buffer_index); +#endif + if (s->packet_transfer_size <= 0) { + /* end of transfer */ + ide_atapi_cmd_ok(s); + ide_set_irq(s->bus); +#ifdef DEBUG_IDE_ATAPI + printf("end of transfer, status=0x%x\n", s->status); +#endif + } else { + /* see if a new sector must be read */ + if (s->lba != -1 && s->io_buffer_index >= s->cd_sector_size) { + if (!s->elementary_transfer_size) { + ret = cd_read_sector(s); + if (ret < 0) { + ide_atapi_io_error(s, ret); + } + return; + } else { + /* rebuffering within an elementary transfer is + * only possible with a sync request because we + * end up with a race condition otherwise */ + ret = cd_read_sector_sync(s); + if (ret < 0) { + ide_atapi_io_error(s, ret); + return; + } + } + } + if (s->elementary_transfer_size > 0) { + /* there are some data left to transmit in this elementary + transfer */ + size = s->cd_sector_size - s->io_buffer_index; + if (size > s->elementary_transfer_size) + size = s->elementary_transfer_size; + s->packet_transfer_size -= size; + s->elementary_transfer_size -= size; + s->io_buffer_index += size; + ide_transfer_start(s, s->io_buffer + s->io_buffer_index - size, + size, ide_atapi_cmd_reply_end); + } else { + /* a new transfer is needed */ + s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO; + byte_count_limit = atapi_byte_count_limit(s); +#ifdef DEBUG_IDE_ATAPI + printf("byte_count_limit=%d\n", byte_count_limit); +#endif + size = s->packet_transfer_size; + if (size > byte_count_limit) { + /* byte count limit must be even if this case */ + if (byte_count_limit & 1) + byte_count_limit--; + size = byte_count_limit; + } + s->lcyl = size; + s->hcyl = size >> 8; + s->elementary_transfer_size = size; + /* we cannot transmit more than one sector at a time */ + if (s->lba != -1) { + if 
(size > (s->cd_sector_size - s->io_buffer_index)) + size = (s->cd_sector_size - s->io_buffer_index); + } + s->packet_transfer_size -= size; + s->elementary_transfer_size -= size; + s->io_buffer_index += size; + ide_transfer_start(s, s->io_buffer + s->io_buffer_index - size, + size, ide_atapi_cmd_reply_end); + ide_set_irq(s->bus); +#ifdef DEBUG_IDE_ATAPI + printf("status=0x%x\n", s->status); +#endif + } + } +} + +/* send a reply of 'size' bytes in s->io_buffer to an ATAPI command */ +static void ide_atapi_cmd_reply(IDEState *s, int size, int max_size) +{ + if (size > max_size) + size = max_size; + s->lba = -1; /* no sector read */ + s->packet_transfer_size = size; + s->io_buffer_size = size; /* dma: send the reply data as one chunk */ + s->elementary_transfer_size = 0; + + if (s->atapi_dma) { + block_acct_start(blk_get_stats(s->blk), &s->acct, size, + BLOCK_ACCT_READ); + s->status = READY_STAT | SEEK_STAT | DRQ_STAT; + ide_start_dma(s, ide_atapi_cmd_read_dma_cb); + } else { + s->status = READY_STAT | SEEK_STAT; + s->io_buffer_index = 0; + ide_atapi_cmd_reply_end(s); + } +} + +/* start a CD-CDROM read command */ +static void ide_atapi_cmd_read_pio(IDEState *s, int lba, int nb_sectors, + int sector_size) +{ + s->lba = lba; + s->packet_transfer_size = nb_sectors * sector_size; + s->elementary_transfer_size = 0; + s->io_buffer_index = sector_size; + s->cd_sector_size = sector_size; + + ide_atapi_cmd_reply_end(s); +} + +static void ide_atapi_cmd_check_status(IDEState *s) +{ +#ifdef DEBUG_IDE_ATAPI + printf("atapi_cmd_check_status\n"); +#endif + s->error = MC_ERR | (UNIT_ATTENTION << 4); + s->status = ERR_STAT; + s->nsector = 0; + ide_set_irq(s->bus); +} +/* ATAPI DMA support */ + +/* XXX: handle read errors */ +static void ide_atapi_cmd_read_dma_cb(void *opaque, int ret) +{ + IDEState *s = opaque; + int data_offset, n; + + if (ret < 0) { + ide_atapi_io_error(s, ret); + goto eot; + } + + if (s->io_buffer_size > 0) { + /* + * For a cdrom read sector command (s->lba != -1), + * adjust the lba for the next s->io_buffer_size chunk + * and dma the current chunk. + * For a command != read (s->lba == -1), just transfer + * the reply data. 
+ */ + if (s->lba != -1) { + if (s->cd_sector_size == 2352) { + n = 1; + cd_data_to_raw(s->io_buffer, s->lba); + } else { + n = s->io_buffer_size >> 11; + } + s->lba += n; + } + s->packet_transfer_size -= s->io_buffer_size; + if (s->bus->dma->ops->rw_buf(s->bus->dma, 1) == 0) + goto eot; + } + + if (s->packet_transfer_size <= 0) { + s->status = READY_STAT | SEEK_STAT; + s->nsector = (s->nsector & ~7) | ATAPI_INT_REASON_IO | ATAPI_INT_REASON_CD; + ide_set_irq(s->bus); + goto eot; + } + + s->io_buffer_index = 0; + if (s->cd_sector_size == 2352) { + n = 1; + s->io_buffer_size = s->cd_sector_size; + data_offset = 16; + } else { + n = s->packet_transfer_size >> 11; + if (n > (IDE_DMA_BUF_SECTORS / 4)) + n = (IDE_DMA_BUF_SECTORS / 4); + s->io_buffer_size = n * 2048; + data_offset = 0; + } +#ifdef DEBUG_AIO + printf("aio_read_cd: lba=%u n=%d\n", s->lba, n); +#endif + + s->bus->dma->iov.iov_base = (void *)(s->io_buffer + data_offset); + s->bus->dma->iov.iov_len = n * 4 * 512; + qemu_iovec_init_external(&s->bus->dma->qiov, &s->bus->dma->iov, 1); + + s->bus->dma->aiocb = ide_buffered_readv(s, (int64_t)s->lba << 2, + &s->bus->dma->qiov, n * 4, + ide_atapi_cmd_read_dma_cb, s); + return; + +eot: + if (ret < 0) { + block_acct_failed(blk_get_stats(s->blk), &s->acct); + } else { + block_acct_done(blk_get_stats(s->blk), &s->acct); + } + ide_set_inactive(s, false); +} + +/* start a CD-CDROM read command with DMA */ +/* XXX: test if DMA is available */ +static void ide_atapi_cmd_read_dma(IDEState *s, int lba, int nb_sectors, + int sector_size) +{ + s->lba = lba; + s->packet_transfer_size = nb_sectors * sector_size; + s->io_buffer_size = 0; + s->cd_sector_size = sector_size; + + block_acct_start(blk_get_stats(s->blk), &s->acct, s->packet_transfer_size, + BLOCK_ACCT_READ); + + /* XXX: check if BUSY_STAT should be set */ + s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT; + ide_start_dma(s, ide_atapi_cmd_read_dma_cb); +} + +static void ide_atapi_cmd_read(IDEState *s, int lba, int nb_sectors, + int sector_size) +{ +#ifdef DEBUG_IDE_ATAPI + printf("read %s: LBA=%d nb_sectors=%d\n", s->atapi_dma ? "dma" : "pio", + lba, nb_sectors); +#endif + if (s->atapi_dma) { + ide_atapi_cmd_read_dma(s, lba, nb_sectors, sector_size); + } else { + ide_atapi_cmd_read_pio(s, lba, nb_sectors, sector_size); + } +} + + +/* Called by *_restart_bh when the transfer function points + * to ide_atapi_cmd + */ +void ide_atapi_dma_restart(IDEState *s) +{ + /* + * I'm not sure we have enough stored to restart the command + * safely, so give the guest an error it should recover from. + * I'm assuming most guests will try to recover from something + * listed as a medium error on a CD; it seems to work on Linux. + * This would be more of a problem if we did any other type of + * DMA operation. 
+ */ + ide_atapi_cmd_error(s, MEDIUM_ERROR, ASC_NO_SEEK_COMPLETE); +} + +static inline uint8_t ide_atapi_set_profile(uint8_t *buf, uint8_t *index, + uint16_t profile) +{ + uint8_t *buf_profile = buf + 12; /* start of profiles */ + + buf_profile += ((*index) * 4); /* start of indexed profile */ + cpu_to_ube16 (buf_profile, profile); + buf_profile[2] = ((buf_profile[0] == buf[6]) && (buf_profile[1] == buf[7])); + + /* each profile adds 4 bytes to the response */ + (*index)++; + buf[11] += 4; /* Additional Length */ + + return 4; +} + +static int ide_dvd_read_structure(IDEState *s, int format, + const uint8_t *packet, uint8_t *buf) +{ + switch (format) { + case 0x0: /* Physical format information */ + { + int layer = packet[6]; + uint64_t total_sectors; + + if (layer != 0) + return -ASC_INV_FIELD_IN_CMD_PACKET; + + total_sectors = s->nb_sectors >> 2; + if (total_sectors == 0) { + return -ASC_MEDIUM_NOT_PRESENT; + } + + buf[4] = 1; /* DVD-ROM, part version 1 */ + buf[5] = 0xf; /* 120mm disc, minimum rate unspecified */ + buf[6] = 1; /* one layer, read-only (per MMC-2 spec) */ + buf[7] = 0; /* default densities */ + + /* FIXME: 0x30000 per spec? */ + cpu_to_ube32(buf + 8, 0); /* start sector */ + cpu_to_ube32(buf + 12, total_sectors - 1); /* end sector */ + cpu_to_ube32(buf + 16, total_sectors - 1); /* l0 end sector */ + + /* Size of buffer, not including 2 byte size field */ + stw_be_p(buf, 2048 + 2); + + /* 2k data + 4 byte header */ + return (2048 + 4); + } + + case 0x01: /* DVD copyright information */ + buf[4] = 0; /* no copyright data */ + buf[5] = 0; /* no region restrictions */ + + /* Size of buffer, not including 2 byte size field */ + stw_be_p(buf, 4 + 2); + + /* 4 byte header + 4 byte data */ + return (4 + 4); + + case 0x03: /* BCA information - invalid field for no BCA info */ + return -ASC_INV_FIELD_IN_CMD_PACKET; + + case 0x04: /* DVD disc manufacturing information */ + /* Size of buffer, not including 2 byte size field */ + stw_be_p(buf, 2048 + 2); + + /* 2k data + 4 byte header */ + return (2048 + 4); + + case 0xff: + /* + * This lists all the command capabilities above. Add new ones + * in order and update the length and buffer return values. 
+ */ + + buf[4] = 0x00; /* Physical format */ + buf[5] = 0x40; /* Not writable, is readable */ + stw_be_p(buf + 6, 2048 + 4); + + buf[8] = 0x01; /* Copyright info */ + buf[9] = 0x40; /* Not writable, is readable */ + stw_be_p(buf + 10, 4 + 4); + + buf[12] = 0x03; /* BCA info */ + buf[13] = 0x40; /* Not writable, is readable */ + stw_be_p(buf + 14, 188 + 4); + + buf[16] = 0x04; /* Manufacturing info */ + buf[17] = 0x40; /* Not writable, is readable */ + stw_be_p(buf + 18, 2048 + 4); + + /* Size of buffer, not including 2 byte size field */ + stw_be_p(buf, 16 + 2); + + /* data written + 4 byte header */ + return (16 + 4); + + default: /* TODO: formats beyond DVD-ROM requires */ + return -ASC_INV_FIELD_IN_CMD_PACKET; + } +} + +static unsigned int event_status_media(IDEState *s, + uint8_t *buf) +{ + uint8_t event_code, media_status; + + media_status = 0; + if (s->tray_open) { + media_status = MS_TRAY_OPEN; + } else if (blk_is_inserted(s->blk)) { + media_status = MS_MEDIA_PRESENT; + } + + /* Event notification descriptor */ + event_code = MEC_NO_CHANGE; + if (media_status != MS_TRAY_OPEN) { + if (s->events.new_media) { + event_code = MEC_NEW_MEDIA; + s->events.new_media = false; + } else if (s->events.eject_request) { + event_code = MEC_EJECT_REQUESTED; + s->events.eject_request = false; + } + } + + buf[4] = event_code; + buf[5] = media_status; + + /* These fields are reserved, just clear them. */ + buf[6] = 0; + buf[7] = 0; + + return 8; /* We wrote to 4 extra bytes from the header */ +} + +static void cmd_get_event_status_notification(IDEState *s, + uint8_t *buf) +{ + const uint8_t *packet = buf; + + struct { + uint8_t opcode; + uint8_t polled; /* lsb bit is polled; others are reserved */ + uint8_t reserved2[2]; + uint8_t class; + uint8_t reserved3[2]; + uint16_t len; + uint8_t control; + } QEMU_PACKED *gesn_cdb; + + struct { + uint16_t len; + uint8_t notification_class; + uint8_t supported_events; + } QEMU_PACKED *gesn_event_header; + unsigned int max_len, used_len; + + gesn_cdb = (void *)packet; + gesn_event_header = (void *)buf; + + max_len = be16_to_cpu(gesn_cdb->len); + + /* It is fine by the MMC spec to not support async mode operations */ + if (!(gesn_cdb->polled & 0x01)) { /* asynchronous mode */ + /* Only polling is supported, asynchronous mode is not. */ + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + return; + } + + /* polling mode operation */ + + /* + * These are the supported events. + * + * We currently only support requests of the 'media' type. + * Notification class requests and supported event classes are bitmasks, + * but they are build from the same values as the "notification class" + * field. + */ + gesn_event_header->supported_events = 1 << GESN_MEDIA; + + /* + * We use |= below to set the class field; other bits in this byte + * are reserved now but this is useful to do if we have to use the + * reserved fields later. + */ + gesn_event_header->notification_class = 0; + + /* + * Responses to requests are to be based on request priority. The + * notification_class_request_type enum above specifies the + * priority: upper elements are higher prio than lower ones. 
+ */ + if (gesn_cdb->class & (1 << GESN_MEDIA)) { + gesn_event_header->notification_class |= GESN_MEDIA; + used_len = event_status_media(s, buf); + } else { + gesn_event_header->notification_class = 0x80; /* No event available */ + used_len = sizeof(*gesn_event_header); + } + gesn_event_header->len = cpu_to_be16(used_len + - sizeof(*gesn_event_header)); + ide_atapi_cmd_reply(s, used_len, max_len); +} + +static void cmd_request_sense(IDEState *s, uint8_t *buf) +{ + int max_len = buf[4]; + + memset(buf, 0, 18); + buf[0] = 0x70 | (1 << 7); + buf[2] = s->sense_key; + buf[7] = 10; + buf[12] = s->asc; + + if (s->sense_key == UNIT_ATTENTION) { + s->sense_key = NO_SENSE; + } + + ide_atapi_cmd_reply(s, 18, max_len); +} + +static void cmd_inquiry(IDEState *s, uint8_t *buf) +{ + uint8_t page_code = buf[2]; + int max_len = buf[4]; + + unsigned idx = 0; + unsigned size_idx; + unsigned preamble_len; + + /* If the EVPD (Enable Vital Product Data) bit is set in byte 1, + * we are being asked for a specific page of info indicated by byte 2. */ + if (buf[1] & 0x01) { + preamble_len = 4; + size_idx = 3; + + buf[idx++] = 0x05; /* CD-ROM */ + buf[idx++] = page_code; /* Page Code */ + buf[idx++] = 0x00; /* reserved */ + idx++; /* length (set later) */ + + switch (page_code) { + case 0x00: + /* Supported Pages: List of supported VPD responses. */ + buf[idx++] = 0x00; /* 0x00: Supported Pages, and: */ + buf[idx++] = 0x83; /* 0x83: Device Identification. */ + break; + + case 0x83: + /* Device Identification. Each entry is optional, but the entries + * included here are modeled after libata's VPD responses. + * If the response is given, at least one entry must be present. */ + + /* Entry 1: Serial */ + if (idx + 24 > max_len) { + /* Not enough room for even the first entry: */ + /* 4 byte header + 20 byte string */ + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_DATA_PHASE_ERROR); + return; + } + buf[idx++] = 0x02; /* Ascii */ + buf[idx++] = 0x00; /* Vendor Specific */ + buf[idx++] = 0x00; + buf[idx++] = 20; /* Remaining length */ + padstr8(buf + idx, 20, s->drive_serial_str); + idx += 20; + + /* Entry 2: Drive Model and Serial */ + if (idx + 72 > max_len) { + /* 4 (header) + 8 (vendor) + 60 (model & serial) */ + goto out; + } + buf[idx++] = 0x02; /* Ascii */ + buf[idx++] = 0x01; /* T10 Vendor */ + buf[idx++] = 0x00; + buf[idx++] = 68; + padstr8(buf + idx, 8, "ATA"); /* Generic T10 vendor */ + idx += 8; + padstr8(buf + idx, 40, s->drive_model_str); + idx += 40; + padstr8(buf + idx, 20, s->drive_serial_str); + idx += 20; + + /* Entry 3: WWN */ + if (s->wwn && (idx + 12 <= max_len)) { + /* 4 byte header + 8 byte wwn */ + buf[idx++] = 0x01; /* Binary */ + buf[idx++] = 0x03; /* NAA */ + buf[idx++] = 0x00; + buf[idx++] = 0x08; + stq_be_p(&buf[idx], s->wwn); + idx += 8; + } + break; + + default: + /* SPC-3, revision 23 sec. 6.4 */ + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + return; + } + } else { + preamble_len = 5; + size_idx = 4; + + buf[0] = 0x05; /* CD-ROM */ + buf[1] = 0x80; /* removable */ + buf[2] = 0x00; /* ISO */ + buf[3] = 0x21; /* ATAPI-2 (XXX: put ATAPI-4 ?) */ + /* buf[size_idx] set below. 
*/ + buf[5] = 0; /* reserved */ + buf[6] = 0; /* reserved */ + buf[7] = 0; /* reserved */ + padstr8(buf + 8, 8, "QEMU"); + padstr8(buf + 16, 16, "QEMU DVD-ROM"); + padstr8(buf + 32, 4, s->version); + idx = 36; + } + + out: + buf[size_idx] = idx - preamble_len; + ide_atapi_cmd_reply(s, idx, max_len); + return; +} + +static void cmd_get_configuration(IDEState *s, uint8_t *buf) +{ + uint32_t len; + uint8_t index = 0; + int max_len; + + /* only feature 0 is supported */ + if (buf[2] != 0 || buf[3] != 0) { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + return; + } + + /* XXX: could result in alignment problems in some architectures */ + max_len = ube16_to_cpu(buf + 7); + + /* + * XXX: avoid overflow for io_buffer if max_len is bigger than + * the size of that buffer (dimensioned to max number of + * sectors to transfer at once) + * + * Only a problem if the feature/profiles grow. + */ + if (max_len > 512) { + /* XXX: assume 1 sector */ + max_len = 512; + } + + memset(buf, 0, max_len); + /* + * the number of sectors from the media tells us which profile + * to use as current. 0 means there is no media + */ + if (media_is_dvd(s)) { + cpu_to_ube16(buf + 6, MMC_PROFILE_DVD_ROM); + } else if (media_is_cd(s)) { + cpu_to_ube16(buf + 6, MMC_PROFILE_CD_ROM); + } + + buf[10] = 0x02 | 0x01; /* persistent and current */ + len = 12; /* headers: 8 + 4 */ + len += ide_atapi_set_profile(buf, &index, MMC_PROFILE_DVD_ROM); + len += ide_atapi_set_profile(buf, &index, MMC_PROFILE_CD_ROM); + cpu_to_ube32(buf, len - 4); /* data length */ + + ide_atapi_cmd_reply(s, len, max_len); +} + +static void cmd_mode_sense(IDEState *s, uint8_t *buf) +{ + int action, code; + int max_len; + + max_len = ube16_to_cpu(buf + 7); + action = buf[2] >> 6; + code = buf[2] & 0x3f; + + switch(action) { + case 0: /* current values */ + switch(code) { + case MODE_PAGE_R_W_ERROR: /* error recovery */ + cpu_to_ube16(&buf[0], 16 - 2); + buf[2] = 0x70; + buf[3] = 0; + buf[4] = 0; + buf[5] = 0; + buf[6] = 0; + buf[7] = 0; + + buf[8] = MODE_PAGE_R_W_ERROR; + buf[9] = 16 - 10; + buf[10] = 0x00; + buf[11] = 0x05; + buf[12] = 0x00; + buf[13] = 0x00; + buf[14] = 0x00; + buf[15] = 0x00; + ide_atapi_cmd_reply(s, 16, max_len); + break; + case MODE_PAGE_AUDIO_CTL: + cpu_to_ube16(&buf[0], 24 - 2); + buf[2] = 0x70; + buf[3] = 0; + buf[4] = 0; + buf[5] = 0; + buf[6] = 0; + buf[7] = 0; + + buf[8] = MODE_PAGE_AUDIO_CTL; + buf[9] = 24 - 10; + /* Fill with CDROM audio volume */ + buf[17] = 0; + buf[19] = 0; + buf[21] = 0; + buf[23] = 0; + + ide_atapi_cmd_reply(s, 24, max_len); + break; + case MODE_PAGE_CAPABILITIES: + cpu_to_ube16(&buf[0], 30 - 2); + buf[2] = 0x70; + buf[3] = 0; + buf[4] = 0; + buf[5] = 0; + buf[6] = 0; + buf[7] = 0; + + buf[8] = MODE_PAGE_CAPABILITIES; + buf[9] = 30 - 10; + buf[10] = 0x3b; /* read CDR/CDRW/DVDROM/DVDR/DVDRAM */ + buf[11] = 0x00; + + /* Claim PLAY_AUDIO capability (0x01) since some Linux + code checks for this to automount media. 
*/ + buf[12] = 0x71; + buf[13] = 3 << 5; + buf[14] = (1 << 0) | (1 << 3) | (1 << 5); + if (s->tray_locked) { + buf[14] |= 1 << 1; + } + buf[15] = 0x00; /* No volume & mute control, no changer */ + cpu_to_ube16(&buf[16], 704); /* 4x read speed */ + buf[18] = 0; /* Two volume levels */ + buf[19] = 2; + cpu_to_ube16(&buf[20], 512); /* 512k buffer */ + cpu_to_ube16(&buf[22], 704); /* 4x read speed current */ + buf[24] = 0; + buf[25] = 0; + buf[26] = 0; + buf[27] = 0; + buf[28] = 0; + buf[29] = 0; + ide_atapi_cmd_reply(s, 30, max_len); + break; + default: + goto error_cmd; + } + break; + case 1: /* changeable values */ + goto error_cmd; + case 2: /* default values */ + goto error_cmd; + default: + case 3: /* saved values */ + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_SAVING_PARAMETERS_NOT_SUPPORTED); + break; + } + return; + +error_cmd: + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, ASC_INV_FIELD_IN_CMD_PACKET); +} + +static void cmd_test_unit_ready(IDEState *s, uint8_t *buf) +{ + /* Not Ready Conditions are already handled in ide_atapi_cmd(), so if we + * come here, we know that it's ready. */ + ide_atapi_cmd_ok(s); +} + +static void cmd_prevent_allow_medium_removal(IDEState *s, uint8_t* buf) +{ + s->tray_locked = buf[4] & 1; + blk_lock_medium(s->blk, buf[4] & 1); + ide_atapi_cmd_ok(s); +} + +static void cmd_read(IDEState *s, uint8_t* buf) +{ + int nb_sectors, lba; + + if (buf[0] == GPCMD_READ_10) { + nb_sectors = ube16_to_cpu(buf + 7); + } else { + nb_sectors = ube32_to_cpu(buf + 6); + } + + lba = ube32_to_cpu(buf + 2); + if (nb_sectors == 0) { + ide_atapi_cmd_ok(s); + return; + } + + ide_atapi_cmd_read(s, lba, nb_sectors, 2048); +} + +static void cmd_read_cd(IDEState *s, uint8_t* buf) +{ + int nb_sectors, lba, transfer_request; + + nb_sectors = (buf[6] << 16) | (buf[7] << 8) | buf[8]; + lba = ube32_to_cpu(buf + 2); + + if (nb_sectors == 0) { + ide_atapi_cmd_ok(s); + return; + } + + transfer_request = buf[9]; + switch(transfer_request & 0xf8) { + case 0x00: + /* nothing */ + ide_atapi_cmd_ok(s); + break; + case 0x10: + /* normal read */ + ide_atapi_cmd_read(s, lba, nb_sectors, 2048); + break; + case 0xf8: + /* read all data */ + ide_atapi_cmd_read(s, lba, nb_sectors, 2352); + break; + default: + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + break; + } +} + +static void cmd_seek(IDEState *s, uint8_t* buf) +{ + unsigned int lba; + uint64_t total_sectors = s->nb_sectors >> 2; + + lba = ube32_to_cpu(buf + 2); + if (lba >= total_sectors) { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, ASC_LOGICAL_BLOCK_OOR); + return; + } + + ide_atapi_cmd_ok(s); +} + +static void cmd_start_stop_unit(IDEState *s, uint8_t* buf) +{ + int sense; + bool start = buf[4] & 1; + bool loej = buf[4] & 2; /* load on start, eject on !start */ + int pwrcnd = buf[4] & 0xf0; + + if (pwrcnd) { + /* eject/load only happens for power condition == 0 */ + ide_atapi_cmd_ok(s); + return; + } + + if (loej) { + if (!start && !s->tray_open && s->tray_locked) { + sense = blk_is_inserted(s->blk) + ? 
NOT_READY : ILLEGAL_REQUEST; + ide_atapi_cmd_error(s, sense, ASC_MEDIA_REMOVAL_PREVENTED); + return; + } + + if (s->tray_open != !start) { + blk_eject(s->blk, !start); + s->tray_open = !start; + } + } + + ide_atapi_cmd_ok(s); +} + +static void cmd_mechanism_status(IDEState *s, uint8_t* buf) +{ + int max_len = ube16_to_cpu(buf + 8); + + cpu_to_ube16(buf, 0); + /* no current LBA */ + buf[2] = 0; + buf[3] = 0; + buf[4] = 0; + buf[5] = 1; + cpu_to_ube16(buf + 6, 0); + ide_atapi_cmd_reply(s, 8, max_len); +} + +static void cmd_read_toc_pma_atip(IDEState *s, uint8_t* buf) +{ + int format, msf, start_track, len; + int max_len; + uint64_t total_sectors = s->nb_sectors >> 2; + + max_len = ube16_to_cpu(buf + 7); + format = buf[9] >> 6; + msf = (buf[1] >> 1) & 1; + start_track = buf[6]; + + switch(format) { + case 0: + len = cdrom_read_toc(total_sectors, buf, msf, start_track); + if (len < 0) + goto error_cmd; + ide_atapi_cmd_reply(s, len, max_len); + break; + case 1: + /* multi session : only a single session defined */ + memset(buf, 0, 12); + buf[1] = 0x0a; + buf[2] = 0x01; + buf[3] = 0x01; + ide_atapi_cmd_reply(s, 12, max_len); + break; + case 2: + len = cdrom_read_toc_raw(total_sectors, buf, msf, start_track); + if (len < 0) + goto error_cmd; + ide_atapi_cmd_reply(s, len, max_len); + break; + default: + error_cmd: + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + } +} + +static void cmd_read_cdvd_capacity(IDEState *s, uint8_t* buf) +{ + uint64_t total_sectors = s->nb_sectors >> 2; + + /* NOTE: it is really the number of sectors minus 1 */ + cpu_to_ube32(buf, total_sectors - 1); + cpu_to_ube32(buf + 4, 2048); + ide_atapi_cmd_reply(s, 8, 8); +} + +static void cmd_read_disc_information(IDEState *s, uint8_t* buf) +{ + uint8_t type = buf[1] & 7; + uint32_t max_len = ube16_to_cpu(buf + 7); + + /* Types 1/2 are only defined for Blu-Ray. */ + if (type != 0) { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + return; + } + + memset(buf, 0, 34); + buf[1] = 32; + buf[2] = 0xe; /* last session complete, disc finalized */ + buf[3] = 1; /* first track on disc */ + buf[4] = 1; /* # of sessions */ + buf[5] = 1; /* first track of last session */ + buf[6] = 1; /* last track of last session */ + buf[7] = 0x20; /* unrestricted use */ + buf[8] = 0x00; /* CD-ROM or DVD-ROM */ + /* 9-10-11: most significant byte corresponding bytes 4-5-6 */ + /* 12-23: not meaningful for CD-ROM or DVD-ROM */ + /* 24-31: disc bar code */ + /* 32: disc application code */ + /* 33: number of OPC tables */ + + ide_atapi_cmd_reply(s, 34, max_len); +} + +static void cmd_read_dvd_structure(IDEState *s, uint8_t* buf) +{ + int max_len; + int media = buf[1]; + int format = buf[7]; + int ret; + + max_len = ube16_to_cpu(buf + 8); + + if (format < 0xff) { + if (media_is_cd(s)) { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INCOMPATIBLE_FORMAT); + return; + } else if (!media_present(s)) { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + return; + } + } + + memset(buf, 0, max_len > IDE_DMA_BUF_SECTORS * 512 + 4 ? + IDE_DMA_BUF_SECTORS * 512 + 4 : max_len); + + switch (format) { + case 0x00 ... 
0x7f: + case 0xff: + if (media == 0) { + ret = ide_dvd_read_structure(s, format, buf, buf); + + if (ret < 0) { + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, -ret); + } else { + ide_atapi_cmd_reply(s, ret, max_len); + } + + break; + } + /* TODO: BD support, fall through for now */ + + /* Generic disk structures */ + case 0x80: /* TODO: AACS volume identifier */ + case 0x81: /* TODO: AACS media serial number */ + case 0x82: /* TODO: AACS media identifier */ + case 0x83: /* TODO: AACS media key block */ + case 0x90: /* TODO: List of recognized format layers */ + case 0xc0: /* TODO: Write protection status */ + default: + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, + ASC_INV_FIELD_IN_CMD_PACKET); + break; + } +} + +static void cmd_set_speed(IDEState *s, uint8_t* buf) +{ + ide_atapi_cmd_ok(s); +} + +enum { + /* + * Only commands flagged as ALLOW_UA are allowed to run under a + * unit attention condition. (See MMC-5, section 4.1.6.1) + */ + ALLOW_UA = 0x01, + + /* + * Commands flagged with CHECK_READY can only execute if a medium is present. + * Otherwise they report the Not Ready Condition. (See MMC-5, section + * 4.1.8) + */ + CHECK_READY = 0x02, + + /* + * Commands flagged with NONDATA do not in any circumstances return + * any data via ide_atapi_cmd_reply. These commands are exempt from + * the normal byte_count_limit constraints. + * See ATA8-ACS3 "7.21.5 Byte Count Limit" + */ + NONDATA = 0x04, +}; + +static const struct AtapiCmd { + void (*handler)(IDEState *s, uint8_t *buf); + int flags; +} atapi_cmd_table[0x100] = { + [ 0x00 ] = { cmd_test_unit_ready, CHECK_READY | NONDATA }, + [ 0x03 ] = { cmd_request_sense, ALLOW_UA }, + [ 0x12 ] = { cmd_inquiry, ALLOW_UA }, + [ 0x1b ] = { cmd_start_stop_unit, NONDATA }, /* [1] */ + [ 0x1e ] = { cmd_prevent_allow_medium_removal, NONDATA }, + [ 0x25 ] = { cmd_read_cdvd_capacity, CHECK_READY }, + [ 0x28 ] = { cmd_read, /* (10) */ CHECK_READY }, + [ 0x2b ] = { cmd_seek, CHECK_READY | NONDATA }, + [ 0x43 ] = { cmd_read_toc_pma_atip, CHECK_READY }, + [ 0x46 ] = { cmd_get_configuration, ALLOW_UA }, + [ 0x4a ] = { cmd_get_event_status_notification, ALLOW_UA }, + [ 0x51 ] = { cmd_read_disc_information, CHECK_READY }, + [ 0x5a ] = { cmd_mode_sense, /* (10) */ 0 }, + [ 0xa8 ] = { cmd_read, /* (12) */ CHECK_READY }, + [ 0xad ] = { cmd_read_dvd_structure, CHECK_READY }, + [ 0xbb ] = { cmd_set_speed, NONDATA }, + [ 0xbd ] = { cmd_mechanism_status, 0 }, + [ 0xbe ] = { cmd_read_cd, CHECK_READY }, + /* [1] handler detects and reports not ready condition itself */ +}; + +void ide_atapi_cmd(IDEState *s) +{ + uint8_t *buf = s->io_buffer; + const struct AtapiCmd *cmd = &atapi_cmd_table[s->io_buffer[0]]; + +#ifdef DEBUG_IDE_ATAPI + { + int i; + printf("ATAPI limit=0x%x packet:", s->lcyl | (s->hcyl << 8)); + for(i = 0; i < ATAPI_PACKET_SIZE; i++) { + printf(" %02x", buf[i]); + } + printf("\n"); + } +#endif + + /* + * If there's a UNIT_ATTENTION condition pending, only command flagged with + * ALLOW_UA are allowed to complete. with other commands getting a CHECK + * condition response unless a higher priority status, defined by the drive + * here, is pending. + */ + if (s->sense_key == UNIT_ATTENTION && !(cmd->flags & ALLOW_UA)) { + ide_atapi_cmd_check_status(s); + return; + } + /* + * When a CD gets changed, we have to report an ejected state and + * then a loaded state to guests so that they detect tray + * open/close and media change events. Guests that do not use + * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close + * states rely on this behavior. 
+ */ + if (!(cmd->flags & ALLOW_UA) && + !s->tray_open && blk_is_inserted(s->blk) && s->cdrom_changed) { + + if (s->cdrom_changed == 1) { + ide_atapi_cmd_error(s, NOT_READY, ASC_MEDIUM_NOT_PRESENT); + s->cdrom_changed = 2; + } else { + ide_atapi_cmd_error(s, UNIT_ATTENTION, ASC_MEDIUM_MAY_HAVE_CHANGED); + s->cdrom_changed = 0; + } + + return; + } + + /* Report a Not Ready condition if appropriate for the command */ + if ((cmd->flags & CHECK_READY) && + (!media_present(s) || !blk_is_inserted(s->blk))) + { + ide_atapi_cmd_error(s, NOT_READY, ASC_MEDIUM_NOT_PRESENT); + return; + } + + /* Nondata commands permit the byte_count_limit to be 0. + * If this is a data-transferring PIO command and BCL is 0, + * we abort at the /ATA/ level, not the ATAPI level. + * See ATA8 ACS3 section 7.17.6.49 and 7.21.5 */ + if (cmd->handler && !(cmd->flags & NONDATA)) { + /* TODO: Check IDENTIFY data word 125 for default BCL (currently 0) */ + if (!(atapi_byte_count_limit(s) || s->atapi_dma)) { + /* TODO: Move abort back into core.c and make static inline again */ + ide_abort_command(s); + return; + } + } + + /* Execute the command */ + if (cmd->handler) { + cmd->handler(s, buf); + return; + } + + ide_atapi_cmd_error(s, ILLEGAL_REQUEST, ASC_ILLEGAL_OPCODE); +} diff --git a/src/hw/ide/cmd646.c b/src/hw/ide/cmd646.c new file mode 100644 index 0000000..27f3da2 --- /dev/null +++ b/src/hw/ide/cmd646.c @@ -0,0 +1,434 @@ +/* + * QEMU IDE Emulation: PCI cmd646 support. + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include <hw/hw.h> +#include <hw/i386/pc.h> +#include <hw/pci/pci.h> +#include <hw/isa/isa.h> +#include "sysemu/block-backend.h" +#include "sysemu/sysemu.h" +#include "sysemu/dma.h" + +#include <hw/ide/pci.h> + +/* CMD646 specific */ +#define CFR 0x50 +#define CFR_INTR_CH0 0x04 +#define CNTRL 0x51 +#define CNTRL_EN_CH0 0x04 +#define CNTRL_EN_CH1 0x08 +#define ARTTIM23 0x57 +#define ARTTIM23_INTR_CH1 0x10 +#define MRDMODE 0x71 +#define MRDMODE_INTR_CH0 0x04 +#define MRDMODE_INTR_CH1 0x08 +#define MRDMODE_BLK_CH0 0x10 +#define MRDMODE_BLK_CH1 0x20 +#define UDIDETCR0 0x73 +#define UDIDETCR1 0x7B + +static void cmd646_update_irq(PCIDevice *pd); + +static uint64_t cmd646_cmd_read(void *opaque, hwaddr addr, + unsigned size) +{ + CMD646BAR *cmd646bar = opaque; + + if (addr != 2 || size != 1) { + return ((uint64_t)1 << (size * 8)) - 1; + } + return ide_status_read(cmd646bar->bus, addr + 2); +} + +static void cmd646_cmd_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + CMD646BAR *cmd646bar = opaque; + + if (addr != 2 || size != 1) { + return; + } + ide_cmd_write(cmd646bar->bus, addr + 2, data); +} + +static const MemoryRegionOps cmd646_cmd_ops = { + .read = cmd646_cmd_read, + .write = cmd646_cmd_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t cmd646_data_read(void *opaque, hwaddr addr, + unsigned size) +{ + CMD646BAR *cmd646bar = opaque; + + if (size == 1) { + return ide_ioport_read(cmd646bar->bus, addr); + } else if (addr == 0) { + if (size == 2) { + return ide_data_readw(cmd646bar->bus, addr); + } else { + return ide_data_readl(cmd646bar->bus, addr); + } + } + return ((uint64_t)1 << (size * 8)) - 1; +} + +static void cmd646_data_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + CMD646BAR *cmd646bar = opaque; + + if (size == 1) { + ide_ioport_write(cmd646bar->bus, addr, data); + } else if (addr == 0) { + if (size == 2) { + ide_data_writew(cmd646bar->bus, addr, data); + } else { + ide_data_writel(cmd646bar->bus, addr, data); + } + } +} + +static const MemoryRegionOps cmd646_data_ops = { + .read = cmd646_data_read, + .write = cmd646_data_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void setup_cmd646_bar(PCIIDEState *d, int bus_num) +{ + IDEBus *bus = &d->bus[bus_num]; + CMD646BAR *bar = &d->cmd646_bar[bus_num]; + + bar->bus = bus; + bar->pci_dev = d; + memory_region_init_io(&bar->cmd, OBJECT(d), &cmd646_cmd_ops, bar, + "cmd646-cmd", 4); + memory_region_init_io(&bar->data, OBJECT(d), &cmd646_data_ops, bar, + "cmd646-data", 8); +} + +static void cmd646_update_dma_interrupts(PCIDevice *pd) +{ + /* Sync DMA interrupt status from UDMA interrupt status */ + if (pd->config[MRDMODE] & MRDMODE_INTR_CH0) { + pd->config[CFR] |= CFR_INTR_CH0; + } else { + pd->config[CFR] &= ~CFR_INTR_CH0; + } + + if (pd->config[MRDMODE] & MRDMODE_INTR_CH1) { + pd->config[ARTTIM23] |= ARTTIM23_INTR_CH1; + } else { + pd->config[ARTTIM23] &= ~ARTTIM23_INTR_CH1; + } +} + +static void cmd646_update_udma_interrupts(PCIDevice *pd) +{ + /* Sync UDMA interrupt status from DMA interrupt status */ + if (pd->config[CFR] & CFR_INTR_CH0) { + pd->config[MRDMODE] |= MRDMODE_INTR_CH0; + } else { + pd->config[MRDMODE] &= ~MRDMODE_INTR_CH0; + } + + if (pd->config[ARTTIM23] & ARTTIM23_INTR_CH1) { + pd->config[MRDMODE] |= MRDMODE_INTR_CH1; + } else { + pd->config[MRDMODE] &= ~MRDMODE_INTR_CH1; + } +} + +static uint64_t bmdma_read(void *opaque, hwaddr addr, + unsigned size) +{ + BMDMAState *bm = opaque; + PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev); + uint32_t val; + + 
if (size != 1) { + return ((uint64_t)1 << (size * 8)) - 1; + } + + switch(addr & 3) { + case 0: + val = bm->cmd; + break; + case 1: + val = pci_dev->config[MRDMODE]; + break; + case 2: + val = bm->status; + break; + case 3: + if (bm == &bm->pci_dev->bmdma[0]) { + val = pci_dev->config[UDIDETCR0]; + } else { + val = pci_dev->config[UDIDETCR1]; + } + break; + default: + val = 0xff; + break; + } +#ifdef DEBUG_IDE + printf("bmdma: readb " TARGET_FMT_plx " : 0x%02x\n", addr, val); +#endif + return val; +} + +static void bmdma_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + BMDMAState *bm = opaque; + PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev); + + if (size != 1) { + return; + } + +#ifdef DEBUG_IDE + printf("bmdma: writeb " TARGET_FMT_plx " : 0x%" PRIx64 "\n", addr, val); +#endif + switch(addr & 3) { + case 0: + bmdma_cmd_writeb(bm, val); + break; + case 1: + pci_dev->config[MRDMODE] = + (pci_dev->config[MRDMODE] & ~0x30) | (val & 0x30); + cmd646_update_dma_interrupts(pci_dev); + cmd646_update_irq(pci_dev); + break; + case 2: + bm->status = (val & 0x60) | (bm->status & 1) | (bm->status & ~val & 0x06); + break; + case 3: + if (bm == &bm->pci_dev->bmdma[0]) { + pci_dev->config[UDIDETCR0] = val; + } else { + pci_dev->config[UDIDETCR1] = val; + } + break; + } +} + +static const MemoryRegionOps cmd646_bmdma_ops = { + .read = bmdma_read, + .write = bmdma_write, +}; + +static void bmdma_setup_bar(PCIIDEState *d) +{ + BMDMAState *bm; + int i; + + memory_region_init(&d->bmdma_bar, OBJECT(d), "cmd646-bmdma", 16); + for(i = 0;i < 2; i++) { + bm = &d->bmdma[i]; + memory_region_init_io(&bm->extra_io, OBJECT(d), &cmd646_bmdma_ops, bm, + "cmd646-bmdma-bus", 4); + memory_region_add_subregion(&d->bmdma_bar, i * 8, &bm->extra_io); + memory_region_init_io(&bm->addr_ioport, OBJECT(d), + &bmdma_addr_ioport_ops, bm, + "cmd646-bmdma-ioport", 4); + memory_region_add_subregion(&d->bmdma_bar, i * 8 + 4, &bm->addr_ioport); + } +} + +static void cmd646_update_irq(PCIDevice *pd) +{ + int pci_level; + + pci_level = ((pd->config[MRDMODE] & MRDMODE_INTR_CH0) && + !(pd->config[MRDMODE] & MRDMODE_BLK_CH0)) || + ((pd->config[MRDMODE] & MRDMODE_INTR_CH1) && + !(pd->config[MRDMODE] & MRDMODE_BLK_CH1)); + pci_set_irq(pd, pci_level); +} + +/* the PCI irq level is the logical OR of the two channels */ +static void cmd646_set_irq(void *opaque, int channel, int level) +{ + PCIIDEState *d = opaque; + PCIDevice *pd = PCI_DEVICE(d); + int irq_mask; + + irq_mask = MRDMODE_INTR_CH0 << channel; + if (level) { + pd->config[MRDMODE] |= irq_mask; + } else { + pd->config[MRDMODE] &= ~irq_mask; + } + cmd646_update_dma_interrupts(pd); + cmd646_update_irq(pd); +} + +static void cmd646_reset(void *opaque) +{ + PCIIDEState *d = opaque; + unsigned int i; + + for (i = 0; i < 2; i++) { + ide_bus_reset(&d->bus[i]); + } +} + +static uint32_t cmd646_pci_config_read(PCIDevice *d, + uint32_t address, int len) +{ + return pci_default_read_config(d, address, len); +} + +static void cmd646_pci_config_write(PCIDevice *d, uint32_t addr, uint32_t val, + int l) +{ + uint32_t i; + + pci_default_write_config(d, addr, val, l); + + for (i = addr; i < addr + l; i++) { + switch (i) { + case CFR: + case ARTTIM23: + cmd646_update_udma_interrupts(d); + break; + case MRDMODE: + cmd646_update_dma_interrupts(d); + break; + } + } + + cmd646_update_irq(d); +} + +/* CMD646 PCI IDE controller */ +static void pci_cmd646_ide_realize(PCIDevice *dev, Error **errp) +{ + PCIIDEState *d = PCI_IDE(dev); + uint8_t *pci_conf = dev->config; + qemu_irq *irq; + int i; + + 
pci_conf[PCI_CLASS_PROG] = 0x8f; + + pci_conf[CNTRL] = CNTRL_EN_CH0; // enable IDE0 + if (d->secondary) { + /* XXX: if not enabled, really disable the seconday IDE controller */ + pci_conf[CNTRL] |= CNTRL_EN_CH1; /* enable IDE1 */ + } + + /* Set write-to-clear interrupt bits */ + dev->wmask[CFR] = 0x0; + dev->w1cmask[CFR] = CFR_INTR_CH0; + dev->wmask[ARTTIM23] = 0x0; + dev->w1cmask[ARTTIM23] = ARTTIM23_INTR_CH1; + dev->wmask[MRDMODE] = 0x0; + dev->w1cmask[MRDMODE] = MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1; + + setup_cmd646_bar(d, 0); + setup_cmd646_bar(d, 1); + pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &d->cmd646_bar[0].data); + pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->cmd646_bar[0].cmd); + pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_IO, &d->cmd646_bar[1].data); + pci_register_bar(dev, 3, PCI_BASE_ADDRESS_SPACE_IO, &d->cmd646_bar[1].cmd); + bmdma_setup_bar(d); + pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &d->bmdma_bar); + + /* TODO: RST# value should be 0 */ + pci_conf[PCI_INTERRUPT_PIN] = 0x01; // interrupt on pin 1 + + irq = qemu_allocate_irqs(cmd646_set_irq, d, 2); + for (i = 0; i < 2; i++) { + ide_bus_new(&d->bus[i], sizeof(d->bus[i]), DEVICE(dev), i, 2); + ide_init2(&d->bus[i], irq[i]); + + bmdma_init(&d->bus[i], &d->bmdma[i], d); + d->bmdma[i].bus = &d->bus[i]; + ide_register_restart_cb(&d->bus[i]); + } + + vmstate_register(DEVICE(dev), 0, &vmstate_ide_pci, d); + qemu_register_reset(cmd646_reset, d); +} + +static void pci_cmd646_ide_exitfn(PCIDevice *dev) +{ + PCIIDEState *d = PCI_IDE(dev); + unsigned i; + + for (i = 0; i < 2; ++i) { + memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].extra_io); + memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].addr_ioport); + } +} + +void pci_cmd646_ide_init(PCIBus *bus, DriveInfo **hd_table, + int secondary_ide_enabled) +{ + PCIDevice *dev; + + dev = pci_create(bus, -1, "cmd646-ide"); + qdev_prop_set_uint32(&dev->qdev, "secondary", secondary_ide_enabled); + qdev_init_nofail(&dev->qdev); + + pci_ide_create_devs(dev, hd_table); +} + +static Property cmd646_ide_properties[] = { + DEFINE_PROP_UINT32("secondary", PCIIDEState, secondary, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void cmd646_ide_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pci_cmd646_ide_realize; + k->exit = pci_cmd646_ide_exitfn; + k->vendor_id = PCI_VENDOR_ID_CMD; + k->device_id = PCI_DEVICE_ID_CMD_646; + k->revision = 0x07; + k->class_id = PCI_CLASS_STORAGE_IDE; + k->config_read = cmd646_pci_config_read; + k->config_write = cmd646_pci_config_write; + dc->props = cmd646_ide_properties; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo cmd646_ide_info = { + .name = "cmd646-ide", + .parent = TYPE_PCI_IDE, + .class_init = cmd646_ide_class_init, +}; + +static void cmd646_ide_register_types(void) +{ + type_register_static(&cmd646_ide_info); +} + +type_init(cmd646_ide_register_types) diff --git a/src/hw/ide/core.c b/src/hw/ide/core.c new file mode 100644 index 0000000..da3baab --- /dev/null +++ b/src/hw/ide/core.c @@ -0,0 +1,2776 @@ +/* + * QEMU IDE disk and CD/DVD-ROM Emulator + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include <hw/hw.h> +#include <hw/i386/pc.h> +#include <hw/pci/pci.h> +#include <hw/isa/isa.h> +#include "qemu/error-report.h" +#include "qemu/timer.h" +#include "sysemu/sysemu.h" +#include "sysemu/dma.h" +#include "hw/block/block.h" +#include "sysemu/block-backend.h" + +#include <hw/ide/internal.h> + +/* These values were based on a Seagate ST3500418AS but have been modified + to make more sense in QEMU */ +static const int smart_attributes[][12] = { + /* id, flags, hflags, val, wrst, raw (6 bytes), threshold */ + /* raw read error rate*/ + { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06}, + /* spin up */ + { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + /* start stop count */ + { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14}, + /* remapped sectors */ + { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24}, + /* power on hours */ + { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + /* power cycle count */ + { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + /* airflow-temperature-celsius */ + { 190, 0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32}, +}; + +static int ide_handle_rw_error(IDEState *s, int error, int op); +static void ide_dummy_transfer_stop(IDEState *s); + +static void padstr(char *str, const char *src, int len) +{ + int i, v; + for(i = 0; i < len; i++) { + if (*src) + v = *src++; + else + v = ' '; + str[i^1] = v; + } +} + +static void put_le16(uint16_t *p, unsigned int v) +{ + *p = cpu_to_le16(v); +} + +static void ide_identify_size(IDEState *s) +{ + uint16_t *p = (uint16_t *)s->identify_data; + put_le16(p + 60, s->nb_sectors); + put_le16(p + 61, s->nb_sectors >> 16); + put_le16(p + 100, s->nb_sectors); + put_le16(p + 101, s->nb_sectors >> 16); + put_le16(p + 102, s->nb_sectors >> 32); + put_le16(p + 103, s->nb_sectors >> 48); +} + +static void ide_identify(IDEState *s) +{ + uint16_t *p; + unsigned int oldsize; + IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master; + + p = (uint16_t *)s->identify_data; + if (s->identify_set) { + goto fill_buffer; + } + memset(p, 0, sizeof(s->identify_data)); + + put_le16(p + 0, 0x0040); + put_le16(p + 1, s->cylinders); + put_le16(p + 3, s->heads); + put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */ + put_le16(p + 5, 512); /* XXX: retired, remove ? 
*/ + put_le16(p + 6, s->sectors); + padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */ + put_le16(p + 20, 3); /* XXX: retired, remove ? */ + put_le16(p + 21, 512); /* cache size in sectors */ + put_le16(p + 22, 4); /* ecc bytes */ + padstr((char *)(p + 23), s->version, 8); /* firmware version */ + padstr((char *)(p + 27), s->drive_model_str, 40); /* model */ +#if MAX_MULT_SECTORS > 1 + put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS); +#endif + put_le16(p + 48, 1); /* dword I/O */ + put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */ + put_le16(p + 51, 0x200); /* PIO transfer cycle */ + put_le16(p + 52, 0x200); /* DMA transfer cycle */ + put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */ + put_le16(p + 54, s->cylinders); + put_le16(p + 55, s->heads); + put_le16(p + 56, s->sectors); + oldsize = s->cylinders * s->heads * s->sectors; + put_le16(p + 57, oldsize); + put_le16(p + 58, oldsize >> 16); + if (s->mult_sectors) + put_le16(p + 59, 0x100 | s->mult_sectors); + /* *(p + 60) := nb_sectors -- see ide_identify_size */ + /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */ + put_le16(p + 62, 0x07); /* single word dma0-2 supported */ + put_le16(p + 63, 0x07); /* mdma0-2 supported */ + put_le16(p + 64, 0x03); /* pio3-4 supported */ + put_le16(p + 65, 120); + put_le16(p + 66, 120); + put_le16(p + 67, 120); + put_le16(p + 68, 120); + if (dev && dev->conf.discard_granularity) { + put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */ + } + + if (s->ncq_queues) { + put_le16(p + 75, s->ncq_queues - 1); + /* NCQ supported */ + put_le16(p + 76, (1 << 8)); + } + + put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */ + put_le16(p + 81, 0x16); /* conforms to ata5 */ + /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */ + put_le16(p + 82, (1 << 14) | (1 << 5) | 1); + /* 13=flush_cache_ext,12=flush_cache,10=lba48 */ + put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10)); + /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */ + if (s->wwn) { + put_le16(p + 84, (1 << 14) | (1 << 8) | 0); + } else { + put_le16(p + 84, (1 << 14) | 0); + } + /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */ + if (blk_enable_write_cache(s->blk)) { + put_le16(p + 85, (1 << 14) | (1 << 5) | 1); + } else { + put_le16(p + 85, (1 << 14) | 1); + } + /* 13=flush_cache_ext,12=flush_cache,10=lba48 */ + put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10)); + /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */ + if (s->wwn) { + put_le16(p + 87, (1 << 14) | (1 << 8) | 0); + } else { + put_le16(p + 87, (1 << 14) | 0); + } + put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */ + put_le16(p + 93, 1 | (1 << 14) | 0x2000); + /* *(p + 100) := nb_sectors -- see ide_identify_size */ + /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */ + /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */ + /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */ + + if (dev && dev->conf.physical_block_size) + put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf)); + if (s->wwn) { + /* LE 16-bit words 111-108 contain 64-bit World Wide Name */ + put_le16(p + 108, s->wwn >> 48); + put_le16(p + 109, s->wwn >> 32); + put_le16(p + 110, s->wwn >> 16); + put_le16(p + 111, s->wwn); + } + if (dev && dev->conf.discard_granularity) { + put_le16(p + 169, 1); /* TRIM support */ + } + + ide_identify_size(s); + s->identify_set = 1; + +fill_buffer: + memcpy(s->io_buffer, p, 
sizeof(s->identify_data)); +} + +static void ide_atapi_identify(IDEState *s) +{ + uint16_t *p; + + p = (uint16_t *)s->identify_data; + if (s->identify_set) { + goto fill_buffer; + } + memset(p, 0, sizeof(s->identify_data)); + + /* Removable CDROM, 50us response, 12 byte packets */ + put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0)); + padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */ + put_le16(p + 20, 3); /* buffer type */ + put_le16(p + 21, 512); /* cache size in sectors */ + put_le16(p + 22, 4); /* ecc bytes */ + padstr((char *)(p + 23), s->version, 8); /* firmware version */ + padstr((char *)(p + 27), s->drive_model_str, 40); /* model */ + put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */ +#ifdef USE_DMA_CDROM + put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */ + put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */ + put_le16(p + 62, 7); /* single word dma0-2 supported */ + put_le16(p + 63, 7); /* mdma0-2 supported */ +#else + put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */ + put_le16(p + 53, 3); /* words 64-70, 54-58 valid */ + put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */ +#endif + put_le16(p + 64, 3); /* pio3-4 supported */ + put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */ + put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */ + put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */ + put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */ + + put_le16(p + 71, 30); /* in ns */ + put_le16(p + 72, 30); /* in ns */ + + if (s->ncq_queues) { + put_le16(p + 75, s->ncq_queues - 1); + /* NCQ supported */ + put_le16(p + 76, (1 << 8)); + } + + put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */ + if (s->wwn) { + put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */ + put_le16(p + 87, (1 << 8)); /* WWN enabled */ + } + +#ifdef USE_DMA_CDROM + put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */ +#endif + + if (s->wwn) { + /* LE 16-bit words 111-108 contain 64-bit World Wide Name */ + put_le16(p + 108, s->wwn >> 48); + put_le16(p + 109, s->wwn >> 32); + put_le16(p + 110, s->wwn >> 16); + put_le16(p + 111, s->wwn); + } + + s->identify_set = 1; + +fill_buffer: + memcpy(s->io_buffer, p, sizeof(s->identify_data)); +} + +static void ide_cfata_identify_size(IDEState *s) +{ + uint16_t *p = (uint16_t *)s->identify_data; + put_le16(p + 7, s->nb_sectors >> 16); /* Sectors per card */ + put_le16(p + 8, s->nb_sectors); /* Sectors per card */ + put_le16(p + 60, s->nb_sectors); /* Total LBA sectors */ + put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */ +} + +static void ide_cfata_identify(IDEState *s) +{ + uint16_t *p; + uint32_t cur_sec; + + p = (uint16_t *)s->identify_data; + if (s->identify_set) { + goto fill_buffer; + } + memset(p, 0, sizeof(s->identify_data)); + + cur_sec = s->cylinders * s->heads * s->sectors; + + put_le16(p + 0, 0x848a); /* CF Storage Card signature */ + put_le16(p + 1, s->cylinders); /* Default cylinders */ + put_le16(p + 3, s->heads); /* Default heads */ + put_le16(p + 6, s->sectors); /* Default sectors per track */ + /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */ + /* *(p + 8) := nb_sectors -- see ide_cfata_identify_size */ + padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */ + put_le16(p + 22, 0x0004); /* ECC bytes */ + padstr((char *) (p + 23), s->version, 8); /* Firmware Revision */ + padstr((char *) (p + 27), 
s->drive_model_str, 40);/* Model number */ +#if MAX_MULT_SECTORS > 1 + put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS); +#else + put_le16(p + 47, 0x0000); +#endif + put_le16(p + 49, 0x0f00); /* Capabilities */ + put_le16(p + 51, 0x0002); /* PIO cycle timing mode */ + put_le16(p + 52, 0x0001); /* DMA cycle timing mode */ + put_le16(p + 53, 0x0003); /* Translation params valid */ + put_le16(p + 54, s->cylinders); /* Current cylinders */ + put_le16(p + 55, s->heads); /* Current heads */ + put_le16(p + 56, s->sectors); /* Current sectors */ + put_le16(p + 57, cur_sec); /* Current capacity */ + put_le16(p + 58, cur_sec >> 16); /* Current capacity */ + if (s->mult_sectors) /* Multiple sector setting */ + put_le16(p + 59, 0x100 | s->mult_sectors); + /* *(p + 60) := nb_sectors -- see ide_cfata_identify_size */ + /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */ + put_le16(p + 63, 0x0203); /* Multiword DMA capability */ + put_le16(p + 64, 0x0001); /* Flow Control PIO support */ + put_le16(p + 65, 0x0096); /* Min. Multiword DMA cycle */ + put_le16(p + 66, 0x0096); /* Rec. Multiword DMA cycle */ + put_le16(p + 68, 0x00b4); /* Min. PIO cycle time */ + put_le16(p + 82, 0x400c); /* Command Set supported */ + put_le16(p + 83, 0x7068); /* Command Set supported */ + put_le16(p + 84, 0x4000); /* Features supported */ + put_le16(p + 85, 0x000c); /* Command Set enabled */ + put_le16(p + 86, 0x7044); /* Command Set enabled */ + put_le16(p + 87, 0x4000); /* Features enabled */ + put_le16(p + 91, 0x4060); /* Current APM level */ + put_le16(p + 129, 0x0002); /* Current features option */ + put_le16(p + 130, 0x0005); /* Reassigned sectors */ + put_le16(p + 131, 0x0001); /* Initial power mode */ + put_le16(p + 132, 0x0000); /* User signature */ + put_le16(p + 160, 0x8100); /* Power requirement */ + put_le16(p + 161, 0x8001); /* CF command set */ + + ide_cfata_identify_size(s); + s->identify_set = 1; + +fill_buffer: + memcpy(s->io_buffer, p, sizeof(s->identify_data)); +} + +static void ide_set_signature(IDEState *s) +{ + s->select &= 0xf0; /* clear head */ + /* put signature */ + s->nsector = 1; + s->sector = 1; + if (s->drive_kind == IDE_CD) { + s->lcyl = 0x14; + s->hcyl = 0xeb; + } else if (s->blk) { + s->lcyl = 0; + s->hcyl = 0; + } else { + s->lcyl = 0xff; + s->hcyl = 0xff; + } +} + +typedef struct TrimAIOCB { + BlockAIOCB common; + BlockBackend *blk; + QEMUBH *bh; + int ret; + QEMUIOVector *qiov; + BlockAIOCB *aiocb; + int i, j; +} TrimAIOCB; + +static void trim_aio_cancel(BlockAIOCB *acb) +{ + TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common); + + /* Exit the loop so ide_issue_trim_cb will not continue */ + iocb->j = iocb->qiov->niov - 1; + iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1; + + iocb->ret = -ECANCELED; + + if (iocb->aiocb) { + blk_aio_cancel_async(iocb->aiocb); + iocb->aiocb = NULL; + } +} + +static const AIOCBInfo trim_aiocb_info = { + .aiocb_size = sizeof(TrimAIOCB), + .cancel_async = trim_aio_cancel, +}; + +static void ide_trim_bh_cb(void *opaque) +{ + TrimAIOCB *iocb = opaque; + + iocb->common.cb(iocb->common.opaque, iocb->ret); + + qemu_bh_delete(iocb->bh); + iocb->bh = NULL; + qemu_aio_unref(iocb); +} + +static void ide_issue_trim_cb(void *opaque, int ret) +{ + TrimAIOCB *iocb = opaque; + if (ret >= 0) { + while (iocb->j < iocb->qiov->niov) { + int j = iocb->j; + while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) { + int i = iocb->i; + uint64_t *buffer = iocb->qiov->iov[j].iov_base; + + /* 6-byte LBA + 2-byte range per entry */ + uint64_t entry = 
le64_to_cpu(buffer[i]); + uint64_t sector = entry & 0x0000ffffffffffffULL; + uint16_t count = entry >> 48; + + if (count == 0) { + continue; + } + + /* Got an entry! Submit and exit. */ + iocb->aiocb = blk_aio_discard(iocb->blk, sector, count, + ide_issue_trim_cb, opaque); + return; + } + + iocb->j++; + iocb->i = -1; + } + } else { + iocb->ret = ret; + } + + iocb->aiocb = NULL; + if (iocb->bh) { + qemu_bh_schedule(iocb->bh); + } +} + +BlockAIOCB *ide_issue_trim(BlockBackend *blk, + int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, + BlockCompletionFunc *cb, void *opaque) +{ + TrimAIOCB *iocb; + + iocb = blk_aio_get(&trim_aiocb_info, blk, cb, opaque); + iocb->blk = blk; + iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb); + iocb->ret = 0; + iocb->qiov = qiov; + iocb->i = -1; + iocb->j = 0; + ide_issue_trim_cb(iocb, 0); + return &iocb->common; +} + +void ide_abort_command(IDEState *s) +{ + ide_transfer_stop(s); + s->status = READY_STAT | ERR_STAT; + s->error = ABRT_ERR; +} + +/* prepare data transfer and tell what to do after */ +void ide_transfer_start(IDEState *s, uint8_t *buf, int size, + EndTransferFunc *end_transfer_func) +{ + s->end_transfer_func = end_transfer_func; + s->data_ptr = buf; + s->data_end = buf + size; + if (!(s->status & ERR_STAT)) { + s->status |= DRQ_STAT; + } + if (s->bus->dma->ops->start_transfer) { + s->bus->dma->ops->start_transfer(s->bus->dma); + } +} + +static void ide_cmd_done(IDEState *s) +{ + if (s->bus->dma->ops->cmd_done) { + s->bus->dma->ops->cmd_done(s->bus->dma); + } +} + +void ide_transfer_stop(IDEState *s) +{ + s->end_transfer_func = ide_transfer_stop; + s->data_ptr = s->io_buffer; + s->data_end = s->io_buffer; + s->status &= ~DRQ_STAT; + ide_cmd_done(s); +} + +int64_t ide_get_sector(IDEState *s) +{ + int64_t sector_num; + if (s->select & 0x40) { + /* lba */ + if (!s->lba48) { + sector_num = ((s->select & 0x0f) << 24) | (s->hcyl << 16) | + (s->lcyl << 8) | s->sector; + } else { + sector_num = ((int64_t)s->hob_hcyl << 40) | + ((int64_t) s->hob_lcyl << 32) | + ((int64_t) s->hob_sector << 24) | + ((int64_t) s->hcyl << 16) | + ((int64_t) s->lcyl << 8) | s->sector; + } + } else { + sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors + + (s->select & 0x0f) * s->sectors + (s->sector - 1); + } + return sector_num; +} + +void ide_set_sector(IDEState *s, int64_t sector_num) +{ + unsigned int cyl, r; + if (s->select & 0x40) { + if (!s->lba48) { + s->select = (s->select & 0xf0) | (sector_num >> 24); + s->hcyl = (sector_num >> 16); + s->lcyl = (sector_num >> 8); + s->sector = (sector_num); + } else { + s->sector = sector_num; + s->lcyl = sector_num >> 8; + s->hcyl = sector_num >> 16; + s->hob_sector = sector_num >> 24; + s->hob_lcyl = sector_num >> 32; + s->hob_hcyl = sector_num >> 40; + } + } else { + cyl = sector_num / (s->heads * s->sectors); + r = sector_num % (s->heads * s->sectors); + s->hcyl = cyl >> 8; + s->lcyl = cyl; + s->select = (s->select & 0xf0) | ((r / s->sectors) & 0x0f); + s->sector = (r % s->sectors) + 1; + } +} + +static void ide_rw_error(IDEState *s) { + ide_abort_command(s); + ide_set_irq(s->bus); +} + +static bool ide_sect_range_ok(IDEState *s, + uint64_t sector, uint64_t nb_sectors) +{ + uint64_t total_sectors; + + blk_get_geometry(s->blk, &total_sectors); + if (sector > total_sectors || nb_sectors > total_sectors - sector) { + return false; + } + return true; +} + +static void ide_buffered_readv_cb(void *opaque, int ret) +{ + IDEBufferedRequest *req = opaque; + if (!req->orphaned) { + if (!ret) { + 
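+ /* on success, copy the bounce buffer back into the request's original iovec before completing it */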
qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base, + req->original_qiov->size); + } + req->original_cb(req->original_opaque, ret); + } + QLIST_REMOVE(req, list); + qemu_vfree(req->iov.iov_base); + g_free(req); +} + +#define MAX_BUFFERED_REQS 16 + +BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num, + QEMUIOVector *iov, int nb_sectors, + BlockCompletionFunc *cb, void *opaque) +{ + BlockAIOCB *aioreq; + IDEBufferedRequest *req; + int c = 0; + + QLIST_FOREACH(req, &s->buffered_requests, list) { + c++; + } + if (c > MAX_BUFFERED_REQS) { + return blk_abort_aio_request(s->blk, cb, opaque, -EIO); + } + + req = g_new0(IDEBufferedRequest, 1); + req->original_qiov = iov; + req->original_cb = cb; + req->original_opaque = opaque; + req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size); + req->iov.iov_len = iov->size; + qemu_iovec_init_external(&req->qiov, &req->iov, 1); + + aioreq = blk_aio_readv(s->blk, sector_num, &req->qiov, nb_sectors, + ide_buffered_readv_cb, req); + + QLIST_INSERT_HEAD(&s->buffered_requests, req, list); + return aioreq; +} + +static void ide_sector_read(IDEState *s); + +static void ide_sector_read_cb(void *opaque, int ret) +{ + IDEState *s = opaque; + int n; + + s->pio_aiocb = NULL; + s->status &= ~BUSY_STAT; + + if (ret == -ECANCELED) { + return; + } + if (ret != 0) { + if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO | + IDE_RETRY_READ)) { + return; + } + } + + block_acct_done(blk_get_stats(s->blk), &s->acct); + + n = s->nsector; + if (n > s->req_nb_sectors) { + n = s->req_nb_sectors; + } + + ide_set_sector(s, ide_get_sector(s) + n); + s->nsector -= n; + /* Allow the guest to read the io_buffer */ + ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read); + ide_set_irq(s->bus); +} + +static void ide_sector_read(IDEState *s) +{ + int64_t sector_num; + int n; + + s->status = READY_STAT | SEEK_STAT; + s->error = 0; /* not needed by IDE spec, but needed by Windows */ + sector_num = ide_get_sector(s); + n = s->nsector; + + if (n == 0) { + ide_transfer_stop(s); + return; + } + + s->status |= BUSY_STAT; + + if (n > s->req_nb_sectors) { + n = s->req_nb_sectors; + } + +#if defined(DEBUG_IDE) + printf("sector=%" PRId64 "\n", sector_num); +#endif + + if (!ide_sect_range_ok(s, sector_num, n)) { + ide_rw_error(s); + block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ); + return; + } + + s->iov.iov_base = s->io_buffer; + s->iov.iov_len = n * BDRV_SECTOR_SIZE; + qemu_iovec_init_external(&s->qiov, &s->iov, 1); + + block_acct_start(blk_get_stats(s->blk), &s->acct, + n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ); + s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n, + ide_sector_read_cb, s); +} + +void dma_buf_commit(IDEState *s, uint32_t tx_bytes) +{ + if (s->bus->dma->ops->commit_buf) { + s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes); + } + s->io_buffer_offset += tx_bytes; + qemu_sglist_destroy(&s->sg); +} + +void ide_set_inactive(IDEState *s, bool more) +{ + s->bus->dma->aiocb = NULL; + s->bus->retry_unit = -1; + s->bus->retry_sector_num = 0; + s->bus->retry_nsector = 0; + if (s->bus->dma->ops->set_inactive) { + s->bus->dma->ops->set_inactive(s->bus->dma, more); + } + ide_cmd_done(s); +} + +void ide_dma_error(IDEState *s) +{ + dma_buf_commit(s, 0); + ide_abort_command(s); + ide_set_inactive(s, false); + ide_set_irq(s->bus); +} + +static int ide_handle_rw_error(IDEState *s, int error, int op) +{ + bool is_read = (op & IDE_RETRY_READ) != 0; + BlockErrorAction action = blk_get_error_action(s->blk, is_read, error); + + if (action == 
BLOCK_ERROR_ACTION_STOP) { + assert(s->bus->retry_unit == s->unit); + s->bus->error_status = op; + } else if (action == BLOCK_ERROR_ACTION_REPORT) { + block_acct_failed(blk_get_stats(s->blk), &s->acct); + if (op & IDE_RETRY_DMA) { + ide_dma_error(s); + } else { + ide_rw_error(s); + } + } + blk_error_action(s->blk, action, is_read, error); + return action != BLOCK_ERROR_ACTION_IGNORE; +} + +static void ide_dma_cb(void *opaque, int ret) +{ + IDEState *s = opaque; + int n; + int64_t sector_num; + bool stay_active = false; + + if (ret == -ECANCELED) { + return; + } + if (ret < 0) { + int op = IDE_RETRY_DMA; + + if (s->dma_cmd == IDE_DMA_READ) + op |= IDE_RETRY_READ; + else if (s->dma_cmd == IDE_DMA_TRIM) + op |= IDE_RETRY_TRIM; + + if (ide_handle_rw_error(s, -ret, op)) { + return; + } + } + + n = s->io_buffer_size >> 9; + if (n > s->nsector) { + /* The PRDs were longer than needed for this request. Shorten them so + * we don't get a negative remainder. The Active bit must remain set + * after the request completes. */ + n = s->nsector; + stay_active = true; + } + + sector_num = ide_get_sector(s); + if (n > 0) { + assert(n * 512 == s->sg.size); + dma_buf_commit(s, s->sg.size); + sector_num += n; + ide_set_sector(s, sector_num); + s->nsector -= n; + } + + /* end of transfer ? */ + if (s->nsector == 0) { + s->status = READY_STAT | SEEK_STAT; + ide_set_irq(s->bus); + goto eot; + } + + /* launch next transfer */ + n = s->nsector; + s->io_buffer_index = 0; + s->io_buffer_size = n * 512; + if (s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size) < 512) { + /* The PRDs were too short. Reset the Active bit, but don't raise an + * interrupt. */ + s->status = READY_STAT | SEEK_STAT; + dma_buf_commit(s, 0); + goto eot; + } + +#ifdef DEBUG_AIO + printf("ide_dma_cb: sector_num=%" PRId64 " n=%d, cmd_cmd=%d\n", + sector_num, n, s->dma_cmd); +#endif + + if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) && + !ide_sect_range_ok(s, sector_num, n)) { + ide_dma_error(s); + block_acct_invalid(blk_get_stats(s->blk), s->acct.type); + return; + } + + switch (s->dma_cmd) { + case IDE_DMA_READ: + s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, sector_num, + ide_dma_cb, s); + break; + case IDE_DMA_WRITE: + s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, sector_num, + ide_dma_cb, s); + break; + case IDE_DMA_TRIM: + s->bus->dma->aiocb = dma_blk_io(s->blk, &s->sg, sector_num, + ide_issue_trim, ide_dma_cb, s, + DMA_DIRECTION_TO_DEVICE); + break; + } + return; + +eot: + if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) { + block_acct_done(blk_get_stats(s->blk), &s->acct); + } + ide_set_inactive(s, stay_active); +} + +static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd) +{ + s->status = READY_STAT | SEEK_STAT | DRQ_STAT | BUSY_STAT; + s->io_buffer_size = 0; + s->dma_cmd = dma_cmd; + + switch (dma_cmd) { + case IDE_DMA_READ: + block_acct_start(blk_get_stats(s->blk), &s->acct, + s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ); + break; + case IDE_DMA_WRITE: + block_acct_start(blk_get_stats(s->blk), &s->acct, + s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE); + break; + default: + break; + } + + ide_start_dma(s, ide_dma_cb); +} + +void ide_start_dma(IDEState *s, BlockCompletionFunc *cb) +{ + s->io_buffer_index = 0; + s->bus->retry_unit = s->unit; + s->bus->retry_sector_num = ide_get_sector(s); + s->bus->retry_nsector = s->nsector; + if (s->bus->dma->ops->start_dma) { + s->bus->dma->ops->start_dma(s->bus->dma, s, cb); + } +} + +static void ide_sector_write(IDEState *s); + 
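A note on the DSM/TRIM payload that ide_issue_trim_cb above unpacks with le64_to_cpu(): each 64-bit word of the 512-byte data buffer carries a 48-bit starting LBA in its low bits and a 16-bit sector count in its high bits, stored little-endian. A minimal sketch of building one such range entry is below; the helper name dsm_trim_entry is illustrative only and not part of this patch.

    #include <stdint.h>

    /* Sketch only: pack one DSM/TRIM range entry in the layout decoded by
     * ide_issue_trim_cb -- low 48 bits = starting LBA, high 16 bits =
     * sector count. A count of 0 marks an empty entry and is skipped. */
    static inline uint64_t dsm_trim_entry(uint64_t lba, uint16_t nb_sectors)
    {
        return ((uint64_t)nb_sectors << 48) | (lba & 0x0000ffffffffffffULL);
    }

Sixty-four such words fill one 512-byte sector of the buffer a guest submits with the DATA SET MANAGEMENT command.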
+static void ide_sector_write_timer_cb(void *opaque) +{ + IDEState *s = opaque; + ide_set_irq(s->bus); +} + +static void ide_sector_write_cb(void *opaque, int ret) +{ + IDEState *s = opaque; + int n; + + if (ret == -ECANCELED) { + return; + } + + s->pio_aiocb = NULL; + s->status &= ~BUSY_STAT; + + if (ret != 0) { + if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) { + return; + } + } + + block_acct_done(blk_get_stats(s->blk), &s->acct); + + n = s->nsector; + if (n > s->req_nb_sectors) { + n = s->req_nb_sectors; + } + s->nsector -= n; + + ide_set_sector(s, ide_get_sector(s) + n); + if (s->nsector == 0) { + /* no more sectors to write */ + ide_transfer_stop(s); + } else { + int n1 = s->nsector; + if (n1 > s->req_nb_sectors) { + n1 = s->req_nb_sectors; + } + ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE, + ide_sector_write); + } + + if (win2k_install_hack && ((++s->irq_count % 16) == 0)) { + /* It seems there is a bug in the Windows 2000 installer HDD + IDE driver which fills the disk with empty logs when the + IDE write IRQ comes too early. This hack tries to correct + that at the expense of slower write performances. Use this + option _only_ to install Windows 2000. You must disable it + for normal use. */ + timer_mod(s->sector_write_timer, + qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + (get_ticks_per_sec() / 1000)); + } else { + ide_set_irq(s->bus); + } +} + +static void ide_sector_write(IDEState *s) +{ + int64_t sector_num; + int n; + + s->status = READY_STAT | SEEK_STAT | BUSY_STAT; + sector_num = ide_get_sector(s); +#if defined(DEBUG_IDE) + printf("sector=%" PRId64 "\n", sector_num); +#endif + n = s->nsector; + if (n > s->req_nb_sectors) { + n = s->req_nb_sectors; + } + + if (!ide_sect_range_ok(s, sector_num, n)) { + ide_rw_error(s); + block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE); + return; + } + + s->iov.iov_base = s->io_buffer; + s->iov.iov_len = n * BDRV_SECTOR_SIZE; + qemu_iovec_init_external(&s->qiov, &s->iov, 1); + + block_acct_start(blk_get_stats(s->blk), &s->acct, + n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE); + s->pio_aiocb = blk_aio_writev(s->blk, sector_num, &s->qiov, n, + ide_sector_write_cb, s); +} + +static void ide_flush_cb(void *opaque, int ret) +{ + IDEState *s = opaque; + + s->pio_aiocb = NULL; + + if (ret == -ECANCELED) { + return; + } + if (ret < 0) { + /* XXX: What sector number to set here? 
*/ + if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) { + return; + } + } + + if (s->blk) { + block_acct_done(blk_get_stats(s->blk), &s->acct); + } + s->status = READY_STAT | SEEK_STAT; + ide_cmd_done(s); + ide_set_irq(s->bus); +} + +static void ide_flush_cache(IDEState *s) +{ + if (s->blk == NULL) { + ide_flush_cb(s, 0); + return; + } + + s->status |= BUSY_STAT; + block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH); + s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s); +} + +static void ide_cfata_metadata_inquiry(IDEState *s) +{ + uint16_t *p; + uint32_t spd; + + p = (uint16_t *) s->io_buffer; + memset(p, 0, 0x200); + spd = ((s->mdata_size - 1) >> 9) + 1; + + put_le16(p + 0, 0x0001); /* Data format revision */ + put_le16(p + 1, 0x0000); /* Media property: silicon */ + put_le16(p + 2, s->media_changed); /* Media status */ + put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */ + put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */ + put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */ + put_le16(p + 6, spd >> 16); /* Sectors per device (high) */ +} + +static void ide_cfata_metadata_read(IDEState *s) +{ + uint16_t *p; + + if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) { + s->status = ERR_STAT; + s->error = ABRT_ERR; + return; + } + + p = (uint16_t *) s->io_buffer; + memset(p, 0, 0x200); + + put_le16(p + 0, s->media_changed); /* Media status */ + memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9), + MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9), + s->nsector << 9), 0x200 - 2)); +} + +static void ide_cfata_metadata_write(IDEState *s) +{ + if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) { + s->status = ERR_STAT; + s->error = ABRT_ERR; + return; + } + + s->media_changed = 0; + + memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9), + s->io_buffer + 2, + MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9), + s->nsector << 9), 0x200 - 2)); +} + +/* called when the inserted state of the media has changed */ +static void ide_cd_change_cb(void *opaque, bool load) +{ + IDEState *s = opaque; + uint64_t nb_sectors; + + s->tray_open = !load; + blk_get_geometry(s->blk, &nb_sectors); + s->nb_sectors = nb_sectors; + + /* + * First indicate to the guest that a CD has been removed. That's + * done on the next command the guest sends us. + * + * Then we set UNIT_ATTENTION, by which the guest will + * detect a new CD in the drive. See ide_atapi_cmd() for details. + */ + s->cdrom_changed = 1; + s->events.new_media = true; + s->events.eject_request = false; + ide_set_irq(s->bus); +} + +static void ide_cd_eject_request_cb(void *opaque, bool force) +{ + IDEState *s = opaque; + + s->events.eject_request = true; + if (force) { + s->tray_locked = false; + } + ide_set_irq(s->bus); +} + +static void ide_cmd_lba48_transform(IDEState *s, int lba48) +{ + s->lba48 = lba48; + + /* handle the 'magic' 0 nsector count conversion here. 
to avoid + * fiddling with the rest of the read logic, we just store the + * full sector count in ->nsector and ignore ->hob_nsector from now + */ + if (!s->lba48) { + if (!s->nsector) + s->nsector = 256; + } else { + if (!s->nsector && !s->hob_nsector) + s->nsector = 65536; + else { + int lo = s->nsector; + int hi = s->hob_nsector; + + s->nsector = (hi << 8) | lo; + } + } +} + +static void ide_clear_hob(IDEBus *bus) +{ + /* any write clears HOB high bit of device control register */ + bus->ifs[0].select &= ~(1 << 7); + bus->ifs[1].select &= ~(1 << 7); +} + +void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val) +{ + IDEBus *bus = opaque; + +#ifdef DEBUG_IDE + printf("IDE: write addr=0x%x val=0x%02x\n", addr, val); +#endif + + addr &= 7; + + /* ignore writes to command block while busy with previous command */ + if (addr != 7 && (idebus_active_if(bus)->status & (BUSY_STAT|DRQ_STAT))) + return; + + switch(addr) { + case 0: + break; + case 1: + ide_clear_hob(bus); + /* NOTE: data is written to the two drives */ + bus->ifs[0].hob_feature = bus->ifs[0].feature; + bus->ifs[1].hob_feature = bus->ifs[1].feature; + bus->ifs[0].feature = val; + bus->ifs[1].feature = val; + break; + case 2: + ide_clear_hob(bus); + bus->ifs[0].hob_nsector = bus->ifs[0].nsector; + bus->ifs[1].hob_nsector = bus->ifs[1].nsector; + bus->ifs[0].nsector = val; + bus->ifs[1].nsector = val; + break; + case 3: + ide_clear_hob(bus); + bus->ifs[0].hob_sector = bus->ifs[0].sector; + bus->ifs[1].hob_sector = bus->ifs[1].sector; + bus->ifs[0].sector = val; + bus->ifs[1].sector = val; + break; + case 4: + ide_clear_hob(bus); + bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl; + bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl; + bus->ifs[0].lcyl = val; + bus->ifs[1].lcyl = val; + break; + case 5: + ide_clear_hob(bus); + bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl; + bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl; + bus->ifs[0].hcyl = val; + bus->ifs[1].hcyl = val; + break; + case 6: + /* FIXME: HOB readback uses bit 7 */ + bus->ifs[0].select = (val & ~0x10) | 0xa0; + bus->ifs[1].select = (val | 0x10) | 0xa0; + /* select drive */ + bus->unit = (val >> 4) & 1; + break; + default: + case 7: + /* command */ + ide_exec_cmd(bus, val); + break; + } +} + +static bool cmd_nop(IDEState *s, uint8_t cmd) +{ + return true; +} + +static bool cmd_data_set_management(IDEState *s, uint8_t cmd) +{ + switch (s->feature) { + case DSM_TRIM: + if (s->blk) { + ide_sector_start_dma(s, IDE_DMA_TRIM); + return false; + } + break; + } + + ide_abort_command(s); + return true; +} + +static bool cmd_identify(IDEState *s, uint8_t cmd) +{ + if (s->blk && s->drive_kind != IDE_CD) { + if (s->drive_kind != IDE_CFATA) { + ide_identify(s); + } else { + ide_cfata_identify(s); + } + s->status = READY_STAT | SEEK_STAT; + ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop); + ide_set_irq(s->bus); + return false; + } else { + if (s->drive_kind == IDE_CD) { + ide_set_signature(s); + } + ide_abort_command(s); + } + + return true; +} + +static bool cmd_verify(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_VERIFY_EXT); + + /* do sector number check ? 
*/ + ide_cmd_lba48_transform(s, lba48); + + return true; +} + +static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd) +{ + if (s->drive_kind == IDE_CFATA && s->nsector == 0) { + /* Disable Read and Write Multiple */ + s->mult_sectors = 0; + } else if ((s->nsector & 0xff) != 0 && + ((s->nsector & 0xff) > MAX_MULT_SECTORS || + (s->nsector & (s->nsector - 1)) != 0)) { + ide_abort_command(s); + } else { + s->mult_sectors = s->nsector & 0xff; + } + + return true; +} + +static bool cmd_read_multiple(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_MULTREAD_EXT); + + if (!s->blk || !s->mult_sectors) { + ide_abort_command(s); + return true; + } + + ide_cmd_lba48_transform(s, lba48); + s->req_nb_sectors = s->mult_sectors; + ide_sector_read(s); + return false; +} + +static bool cmd_write_multiple(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_MULTWRITE_EXT); + int n; + + if (!s->blk || !s->mult_sectors) { + ide_abort_command(s); + return true; + } + + ide_cmd_lba48_transform(s, lba48); + + s->req_nb_sectors = s->mult_sectors; + n = MIN(s->nsector, s->req_nb_sectors); + + s->status = SEEK_STAT | READY_STAT; + ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write); + + s->media_changed = 1; + + return false; +} + +static bool cmd_read_pio(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_READ_EXT); + + if (s->drive_kind == IDE_CD) { + ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */ + ide_abort_command(s); + return true; + } + + if (!s->blk) { + ide_abort_command(s); + return true; + } + + ide_cmd_lba48_transform(s, lba48); + s->req_nb_sectors = 1; + ide_sector_read(s); + + return false; +} + +static bool cmd_write_pio(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_WRITE_EXT); + + if (!s->blk) { + ide_abort_command(s); + return true; + } + + ide_cmd_lba48_transform(s, lba48); + + s->req_nb_sectors = 1; + s->status = SEEK_STAT | READY_STAT; + ide_transfer_start(s, s->io_buffer, 512, ide_sector_write); + + s->media_changed = 1; + + return false; +} + +static bool cmd_read_dma(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_READDMA_EXT); + + if (!s->blk) { + ide_abort_command(s); + return true; + } + + ide_cmd_lba48_transform(s, lba48); + ide_sector_start_dma(s, IDE_DMA_READ); + + return false; +} + +static bool cmd_write_dma(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_WRITEDMA_EXT); + + if (!s->blk) { + ide_abort_command(s); + return true; + } + + ide_cmd_lba48_transform(s, lba48); + ide_sector_start_dma(s, IDE_DMA_WRITE); + + s->media_changed = 1; + + return false; +} + +static bool cmd_flush_cache(IDEState *s, uint8_t cmd) +{ + ide_flush_cache(s); + return false; +} + +static bool cmd_seek(IDEState *s, uint8_t cmd) +{ + /* XXX: Check that seek is within bounds */ + return true; +} + +static bool cmd_read_native_max(IDEState *s, uint8_t cmd) +{ + bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT); + + /* Refuse if no sectors are addressable (e.g. medium not inserted) */ + if (s->nb_sectors == 0) { + ide_abort_command(s); + return true; + } + + ide_cmd_lba48_transform(s, lba48); + ide_set_sector(s, s->nb_sectors - 1); + + return true; +} + +static bool cmd_check_power_mode(IDEState *s, uint8_t cmd) +{ + s->nsector = 0xff; /* device active or idle */ + return true; +} + +static bool cmd_set_features(IDEState *s, uint8_t cmd) +{ + uint16_t *identify_data; + + if (!s->blk) { + ide_abort_command(s); + return true; + } + + /* XXX: valid for CDROM ? 
*/ + switch (s->feature) { + case 0x02: /* write cache enable */ + blk_set_enable_write_cache(s->blk, true); + identify_data = (uint16_t *)s->identify_data; + put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1); + return true; + case 0x82: /* write cache disable */ + blk_set_enable_write_cache(s->blk, false); + identify_data = (uint16_t *)s->identify_data; + put_le16(identify_data + 85, (1 << 14) | 1); + ide_flush_cache(s); + return false; + case 0xcc: /* reverting to power-on defaults enable */ + case 0x66: /* reverting to power-on defaults disable */ + case 0xaa: /* read look-ahead enable */ + case 0x55: /* read look-ahead disable */ + case 0x05: /* set advanced power management mode */ + case 0x85: /* disable advanced power management mode */ + case 0x69: /* NOP */ + case 0x67: /* NOP */ + case 0x96: /* NOP */ + case 0x9a: /* NOP */ + case 0x42: /* enable Automatic Acoustic Mode */ + case 0xc2: /* disable Automatic Acoustic Mode */ + return true; + case 0x03: /* set transfer mode */ + { + uint8_t val = s->nsector & 0x07; + identify_data = (uint16_t *)s->identify_data; + + switch (s->nsector >> 3) { + case 0x00: /* pio default */ + case 0x01: /* pio mode */ + put_le16(identify_data + 62, 0x07); + put_le16(identify_data + 63, 0x07); + put_le16(identify_data + 88, 0x3f); + break; + case 0x02: /* sigle word dma mode*/ + put_le16(identify_data + 62, 0x07 | (1 << (val + 8))); + put_le16(identify_data + 63, 0x07); + put_le16(identify_data + 88, 0x3f); + break; + case 0x04: /* mdma mode */ + put_le16(identify_data + 62, 0x07); + put_le16(identify_data + 63, 0x07 | (1 << (val + 8))); + put_le16(identify_data + 88, 0x3f); + break; + case 0x08: /* udma mode */ + put_le16(identify_data + 62, 0x07); + put_le16(identify_data + 63, 0x07); + put_le16(identify_data + 88, 0x3f | (1 << (val + 8))); + break; + default: + goto abort_cmd; + } + return true; + } + } + +abort_cmd: + ide_abort_command(s); + return true; +} + + +/*** ATAPI commands ***/ + +static bool cmd_identify_packet(IDEState *s, uint8_t cmd) +{ + ide_atapi_identify(s); + s->status = READY_STAT | SEEK_STAT; + ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop); + ide_set_irq(s->bus); + return false; +} + +static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd) +{ + ide_set_signature(s); + + if (s->drive_kind == IDE_CD) { + s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet + * devices to return a clear status register + * with READY_STAT *not* set. */ + s->error = 0x01; + } else { + s->status = READY_STAT | SEEK_STAT; + /* The bits of the error register are not as usual for this command! + * They are part of the regular output (this is why ERR_STAT isn't set) + * Device 0 passed, Device 1 passed or not present. 
*/ + s->error = 0x01; + ide_set_irq(s->bus); + } + + return false; +} + +static bool cmd_device_reset(IDEState *s, uint8_t cmd) +{ + ide_set_signature(s); + s->status = 0x00; /* NOTE: READY is _not_ set */ + s->error = 0x01; + + return false; +} + +static bool cmd_packet(IDEState *s, uint8_t cmd) +{ + /* overlapping commands not supported */ + if (s->feature & 0x02) { + ide_abort_command(s); + return true; + } + + s->status = READY_STAT | SEEK_STAT; + s->atapi_dma = s->feature & 1; + s->nsector = 1; + ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE, + ide_atapi_cmd); + return false; +} + + +/*** CF-ATA commands ***/ + +static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd) +{ + s->error = 0x09; /* miscellaneous error */ + s->status = READY_STAT | SEEK_STAT; + ide_set_irq(s->bus); + + return false; +} + +static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd) +{ + /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is + * required for Windows 8 to work with AHCI */ + + if (cmd == CFA_WEAR_LEVEL) { + s->nsector = 0; + } + + if (cmd == CFA_ERASE_SECTORS) { + s->media_changed = 1; + } + + return true; +} + +static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd) +{ + s->status = READY_STAT | SEEK_STAT; + + memset(s->io_buffer, 0, 0x200); + s->io_buffer[0x00] = s->hcyl; /* Cyl MSB */ + s->io_buffer[0x01] = s->lcyl; /* Cyl LSB */ + s->io_buffer[0x02] = s->select; /* Head */ + s->io_buffer[0x03] = s->sector; /* Sector */ + s->io_buffer[0x04] = ide_get_sector(s) >> 16; /* LBA MSB */ + s->io_buffer[0x05] = ide_get_sector(s) >> 8; /* LBA */ + s->io_buffer[0x06] = ide_get_sector(s) >> 0; /* LBA LSB */ + s->io_buffer[0x13] = 0x00; /* Erase flag */ + s->io_buffer[0x18] = 0x00; /* Hot count */ + s->io_buffer[0x19] = 0x00; /* Hot count */ + s->io_buffer[0x1a] = 0x01; /* Hot count */ + + ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop); + ide_set_irq(s->bus); + + return false; +} + +static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd) +{ + switch (s->feature) { + case 0x02: /* Inquiry Metadata Storage */ + ide_cfata_metadata_inquiry(s); + break; + case 0x03: /* Read Metadata Storage */ + ide_cfata_metadata_read(s); + break; + case 0x04: /* Write Metadata Storage */ + ide_cfata_metadata_write(s); + break; + default: + ide_abort_command(s); + return true; + } + + ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop); + s->status = 0x00; /* NOTE: READY is _not_ set */ + ide_set_irq(s->bus); + + return false; +} + +static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd) +{ + switch (s->feature) { + case 0x01: /* sense temperature in device */ + s->nsector = 0x50; /* +20 C */ + break; + default: + ide_abort_command(s); + return true; + } + + return true; +} + + +/*** SMART commands ***/ + +static bool cmd_smart(IDEState *s, uint8_t cmd) +{ + int n; + + if (s->hcyl != 0xc2 || s->lcyl != 0x4f) { + goto abort_cmd; + } + + if (!s->smart_enabled && s->feature != SMART_ENABLE) { + goto abort_cmd; + } + + switch (s->feature) { + case SMART_DISABLE: + s->smart_enabled = 0; + return true; + + case SMART_ENABLE: + s->smart_enabled = 1; + return true; + + case SMART_ATTR_AUTOSAVE: + switch (s->sector) { + case 0x00: + s->smart_autosave = 0; + break; + case 0xf1: + s->smart_autosave = 1; + break; + default: + goto abort_cmd; + } + return true; + + case SMART_STATUS: + if (!s->smart_errors) { + s->hcyl = 0xc2; + s->lcyl = 0x4f; + } else { + s->hcyl = 0x2c; + s->lcyl = 0xf4; + } + return true; + + case SMART_READ_THRESH: + 
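+ /* the reply is a 512-byte structure: a revision word, then one 12-byte slot per attribute carrying the attribute id and its threshold (the last column of smart_attributes), ending in a two's-complement checksum byte */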
memset(s->io_buffer, 0, 0x200); + s->io_buffer[0] = 0x01; /* smart struct version */ + + for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) { + s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0]; + s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11]; + } + + /* checksum */ + for (n = 0; n < 511; n++) { + s->io_buffer[511] += s->io_buffer[n]; + } + s->io_buffer[511] = 0x100 - s->io_buffer[511]; + + s->status = READY_STAT | SEEK_STAT; + ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop); + ide_set_irq(s->bus); + return false; + + case SMART_READ_DATA: + memset(s->io_buffer, 0, 0x200); + s->io_buffer[0] = 0x01; /* smart struct version */ + + for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) { + int i; + for (i = 0; i < 11; i++) { + s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i]; + } + } + + s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00); + if (s->smart_selftest_count == 0) { + s->io_buffer[363] = 0; + } else { + s->io_buffer[363] = + s->smart_selftest_data[3 + + (s->smart_selftest_count - 1) * + 24]; + } + s->io_buffer[364] = 0x20; + s->io_buffer[365] = 0x01; + /* offline data collection capacity: execute + self-test*/ + s->io_buffer[367] = (1 << 4 | 1 << 3 | 1); + s->io_buffer[368] = 0x03; /* smart capability (1) */ + s->io_buffer[369] = 0x00; /* smart capability (2) */ + s->io_buffer[370] = 0x01; /* error logging supported */ + s->io_buffer[372] = 0x02; /* minutes for poll short test */ + s->io_buffer[373] = 0x36; /* minutes for poll ext test */ + s->io_buffer[374] = 0x01; /* minutes for poll conveyance */ + + for (n = 0; n < 511; n++) { + s->io_buffer[511] += s->io_buffer[n]; + } + s->io_buffer[511] = 0x100 - s->io_buffer[511]; + + s->status = READY_STAT | SEEK_STAT; + ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop); + ide_set_irq(s->bus); + return false; + + case SMART_READ_LOG: + switch (s->sector) { + case 0x01: /* summary smart error log */ + memset(s->io_buffer, 0, 0x200); + s->io_buffer[0] = 0x01; + s->io_buffer[1] = 0x00; /* no error entries */ + s->io_buffer[452] = s->smart_errors & 0xff; + s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8; + + for (n = 0; n < 511; n++) { + s->io_buffer[511] += s->io_buffer[n]; + } + s->io_buffer[511] = 0x100 - s->io_buffer[511]; + break; + case 0x06: /* smart self test log */ + memset(s->io_buffer, 0, 0x200); + s->io_buffer[0] = 0x01; + if (s->smart_selftest_count == 0) { + s->io_buffer[508] = 0; + } else { + s->io_buffer[508] = s->smart_selftest_count; + for (n = 2; n < 506; n++) { + s->io_buffer[n] = s->smart_selftest_data[n]; + } + } + + for (n = 0; n < 511; n++) { + s->io_buffer[511] += s->io_buffer[n]; + } + s->io_buffer[511] = 0x100 - s->io_buffer[511]; + break; + default: + goto abort_cmd; + } + s->status = READY_STAT | SEEK_STAT; + ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop); + ide_set_irq(s->bus); + return false; + + case SMART_EXECUTE_OFFLINE: + switch (s->sector) { + case 0: /* off-line routine */ + case 1: /* short self test */ + case 2: /* extended self test */ + s->smart_selftest_count++; + if (s->smart_selftest_count > 21) { + s->smart_selftest_count = 1; + } + n = 2 + (s->smart_selftest_count - 1) * 24; + s->smart_selftest_data[n] = s->sector; + s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */ + s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */ + s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */ + break; + default: + goto abort_cmd; + } + return true; + } + +abort_cmd: + ide_abort_command(s); + return true; +} + +#define 
HD_OK (1u << IDE_HD) +#define CD_OK (1u << IDE_CD) +#define CFA_OK (1u << IDE_CFATA) +#define HD_CFA_OK (HD_OK | CFA_OK) +#define ALL_OK (HD_OK | CD_OK | CFA_OK) + +/* Set the Disk Seek Completed status bit during completion */ +#define SET_DSC (1u << 8) + +/* See ACS-2 T13/2015-D Table B.2 Command codes */ +static const struct { + /* Returns true if the completion code should be run */ + bool (*handler)(IDEState *s, uint8_t cmd); + int flags; +} ide_cmd_table[0x100] = { + /* NOP not implemented, mandatory for CD */ + [CFA_REQ_EXT_ERROR_CODE] = { cmd_cfa_req_ext_error_code, CFA_OK }, + [WIN_DSM] = { cmd_data_set_management, HD_CFA_OK }, + [WIN_DEVICE_RESET] = { cmd_device_reset, CD_OK }, + [WIN_RECAL] = { cmd_nop, HD_CFA_OK | SET_DSC}, + [WIN_READ] = { cmd_read_pio, ALL_OK }, + [WIN_READ_ONCE] = { cmd_read_pio, HD_CFA_OK }, + [WIN_READ_EXT] = { cmd_read_pio, HD_CFA_OK }, + [WIN_READDMA_EXT] = { cmd_read_dma, HD_CFA_OK }, + [WIN_READ_NATIVE_MAX_EXT] = { cmd_read_native_max, HD_CFA_OK | SET_DSC }, + [WIN_MULTREAD_EXT] = { cmd_read_multiple, HD_CFA_OK }, + [WIN_WRITE] = { cmd_write_pio, HD_CFA_OK }, + [WIN_WRITE_ONCE] = { cmd_write_pio, HD_CFA_OK }, + [WIN_WRITE_EXT] = { cmd_write_pio, HD_CFA_OK }, + [WIN_WRITEDMA_EXT] = { cmd_write_dma, HD_CFA_OK }, + [CFA_WRITE_SECT_WO_ERASE] = { cmd_write_pio, CFA_OK }, + [WIN_MULTWRITE_EXT] = { cmd_write_multiple, HD_CFA_OK }, + [WIN_WRITE_VERIFY] = { cmd_write_pio, HD_CFA_OK }, + [WIN_VERIFY] = { cmd_verify, HD_CFA_OK | SET_DSC }, + [WIN_VERIFY_ONCE] = { cmd_verify, HD_CFA_OK | SET_DSC }, + [WIN_VERIFY_EXT] = { cmd_verify, HD_CFA_OK | SET_DSC }, + [WIN_SEEK] = { cmd_seek, HD_CFA_OK | SET_DSC }, + [CFA_TRANSLATE_SECTOR] = { cmd_cfa_translate_sector, CFA_OK }, + [WIN_DIAGNOSE] = { cmd_exec_dev_diagnostic, ALL_OK }, + [WIN_SPECIFY] = { cmd_nop, HD_CFA_OK | SET_DSC }, + [WIN_STANDBYNOW2] = { cmd_nop, HD_CFA_OK }, + [WIN_IDLEIMMEDIATE2] = { cmd_nop, HD_CFA_OK }, + [WIN_STANDBY2] = { cmd_nop, HD_CFA_OK }, + [WIN_SETIDLE2] = { cmd_nop, HD_CFA_OK }, + [WIN_CHECKPOWERMODE2] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC }, + [WIN_SLEEPNOW2] = { cmd_nop, HD_CFA_OK }, + [WIN_PACKETCMD] = { cmd_packet, CD_OK }, + [WIN_PIDENTIFY] = { cmd_identify_packet, CD_OK }, + [WIN_SMART] = { cmd_smart, HD_CFA_OK | SET_DSC }, + [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK }, + [CFA_ERASE_SECTORS] = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC }, + [WIN_MULTREAD] = { cmd_read_multiple, HD_CFA_OK }, + [WIN_MULTWRITE] = { cmd_write_multiple, HD_CFA_OK }, + [WIN_SETMULT] = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC }, + [WIN_READDMA] = { cmd_read_dma, HD_CFA_OK }, + [WIN_READDMA_ONCE] = { cmd_read_dma, HD_CFA_OK }, + [WIN_WRITEDMA] = { cmd_write_dma, HD_CFA_OK }, + [WIN_WRITEDMA_ONCE] = { cmd_write_dma, HD_CFA_OK }, + [CFA_WRITE_MULTI_WO_ERASE] = { cmd_write_multiple, CFA_OK }, + [WIN_STANDBYNOW1] = { cmd_nop, HD_CFA_OK }, + [WIN_IDLEIMMEDIATE] = { cmd_nop, HD_CFA_OK }, + [WIN_STANDBY] = { cmd_nop, HD_CFA_OK }, + [WIN_SETIDLE1] = { cmd_nop, HD_CFA_OK }, + [WIN_CHECKPOWERMODE1] = { cmd_check_power_mode, HD_CFA_OK | SET_DSC }, + [WIN_SLEEPNOW1] = { cmd_nop, HD_CFA_OK }, + [WIN_FLUSH_CACHE] = { cmd_flush_cache, ALL_OK }, + [WIN_FLUSH_CACHE_EXT] = { cmd_flush_cache, HD_CFA_OK }, + [WIN_IDENTIFY] = { cmd_identify, ALL_OK }, + [WIN_SETFEATURES] = { cmd_set_features, ALL_OK | SET_DSC }, + [IBM_SENSE_CONDITION] = { cmd_ibm_sense_condition, CFA_OK | SET_DSC }, + [CFA_WEAR_LEVEL] = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC }, + [WIN_READ_NATIVE_MAX] = { 
cmd_read_native_max, HD_CFA_OK | SET_DSC }, +}; + +static bool ide_cmd_permitted(IDEState *s, uint32_t cmd) +{ + return cmd < ARRAY_SIZE(ide_cmd_table) + && (ide_cmd_table[cmd].flags & (1u << s->drive_kind)); +} + +void ide_exec_cmd(IDEBus *bus, uint32_t val) +{ + IDEState *s; + bool complete; + +#if defined(DEBUG_IDE) + printf("ide: CMD=%02x\n", val); +#endif + s = idebus_active_if(bus); + /* ignore commands to non existent slave */ + if (s != bus->ifs && !s->blk) { + return; + } + + /* Only DEVICE RESET is allowed while BSY or/and DRQ are set */ + if ((s->status & (BUSY_STAT|DRQ_STAT)) && val != WIN_DEVICE_RESET) + return; + + if (!ide_cmd_permitted(s, val)) { + ide_abort_command(s); + ide_set_irq(s->bus); + return; + } + + s->status = READY_STAT | BUSY_STAT; + s->error = 0; + s->io_buffer_offset = 0; + + complete = ide_cmd_table[val].handler(s, val); + if (complete) { + s->status &= ~BUSY_STAT; + assert(!!s->error == !!(s->status & ERR_STAT)); + + if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) { + s->status |= SEEK_STAT; + } + + ide_cmd_done(s); + ide_set_irq(s->bus); + } +} + +uint32_t ide_ioport_read(void *opaque, uint32_t addr1) +{ + IDEBus *bus = opaque; + IDEState *s = idebus_active_if(bus); + uint32_t addr; + int ret, hob; + + addr = addr1 & 7; + /* FIXME: HOB readback uses bit 7, but it's always set right now */ + //hob = s->select & (1 << 7); + hob = 0; + switch(addr) { + case 0: + ret = 0xff; + break; + case 1: + if ((!bus->ifs[0].blk && !bus->ifs[1].blk) || + (s != bus->ifs && !s->blk)) { + ret = 0; + } else if (!hob) { + ret = s->error; + } else { + ret = s->hob_feature; + } + break; + case 2: + if (!bus->ifs[0].blk && !bus->ifs[1].blk) { + ret = 0; + } else if (!hob) { + ret = s->nsector & 0xff; + } else { + ret = s->hob_nsector; + } + break; + case 3: + if (!bus->ifs[0].blk && !bus->ifs[1].blk) { + ret = 0; + } else if (!hob) { + ret = s->sector; + } else { + ret = s->hob_sector; + } + break; + case 4: + if (!bus->ifs[0].blk && !bus->ifs[1].blk) { + ret = 0; + } else if (!hob) { + ret = s->lcyl; + } else { + ret = s->hob_lcyl; + } + break; + case 5: + if (!bus->ifs[0].blk && !bus->ifs[1].blk) { + ret = 0; + } else if (!hob) { + ret = s->hcyl; + } else { + ret = s->hob_hcyl; + } + break; + case 6: + if (!bus->ifs[0].blk && !bus->ifs[1].blk) { + ret = 0; + } else { + ret = s->select; + } + break; + default: + case 7: + if ((!bus->ifs[0].blk && !bus->ifs[1].blk) || + (s != bus->ifs && !s->blk)) { + ret = 0; + } else { + ret = s->status; + } + qemu_irq_lower(bus->irq); + break; + } +#ifdef DEBUG_IDE + printf("ide: read addr=0x%x val=%02x\n", addr1, ret); +#endif + return ret; +} + +uint32_t ide_status_read(void *opaque, uint32_t addr) +{ + IDEBus *bus = opaque; + IDEState *s = idebus_active_if(bus); + int ret; + + if ((!bus->ifs[0].blk && !bus->ifs[1].blk) || + (s != bus->ifs && !s->blk)) { + ret = 0; + } else { + ret = s->status; + } +#ifdef DEBUG_IDE + printf("ide: read status addr=0x%x val=%02x\n", addr, ret); +#endif + return ret; +} + +void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val) +{ + IDEBus *bus = opaque; + IDEState *s; + int i; + +#ifdef DEBUG_IDE + printf("ide: write control addr=0x%x val=%02x\n", addr, val); +#endif + /* common for both drives */ + if (!(bus->cmd & IDE_CMD_RESET) && + (val & IDE_CMD_RESET)) { + /* reset low to high */ + for(i = 0;i < 2; i++) { + s = &bus->ifs[i]; + s->status = BUSY_STAT | SEEK_STAT; + s->error = 0x01; + } + } else if ((bus->cmd & IDE_CMD_RESET) && + !(val & IDE_CMD_RESET)) { + /* high to low */ + for(i = 0;i < 
2; i++) { + s = &bus->ifs[i]; + if (s->drive_kind == IDE_CD) + s->status = 0x00; /* NOTE: READY is _not_ set */ + else + s->status = READY_STAT | SEEK_STAT; + ide_set_signature(s); + } + } + + bus->cmd = val; +} + +/* + * Returns true if the running PIO transfer is a PIO out (i.e. data is + * transferred from the device to the guest), false if it's a PIO in + */ +static bool ide_is_pio_out(IDEState *s) +{ + if (s->end_transfer_func == ide_sector_write || + s->end_transfer_func == ide_atapi_cmd) { + return false; + } else if (s->end_transfer_func == ide_sector_read || + s->end_transfer_func == ide_transfer_stop || + s->end_transfer_func == ide_atapi_cmd_reply_end || + s->end_transfer_func == ide_dummy_transfer_stop) { + return true; + } + + abort(); +} + +void ide_data_writew(void *opaque, uint32_t addr, uint32_t val) +{ + IDEBus *bus = opaque; + IDEState *s = idebus_active_if(bus); + uint8_t *p; + + /* PIO data access allowed only when DRQ bit is set. The result of a write + * during PIO out is indeterminate, just ignore it. */ + if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) { + return; + } + + p = s->data_ptr; + if (p + 2 > s->data_end) { + return; + } + + *(uint16_t *)p = le16_to_cpu(val); + p += 2; + s->data_ptr = p; + if (p >= s->data_end) { + s->status &= ~DRQ_STAT; + s->end_transfer_func(s); + } +} + +uint32_t ide_data_readw(void *opaque, uint32_t addr) +{ + IDEBus *bus = opaque; + IDEState *s = idebus_active_if(bus); + uint8_t *p; + int ret; + + /* PIO data access allowed only when DRQ bit is set. The result of a read + * during PIO in is indeterminate, return 0 and don't move forward. */ + if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) { + return 0; + } + + p = s->data_ptr; + if (p + 2 > s->data_end) { + return 0; + } + + ret = cpu_to_le16(*(uint16_t *)p); + p += 2; + s->data_ptr = p; + if (p >= s->data_end) { + s->status &= ~DRQ_STAT; + s->end_transfer_func(s); + } + return ret; +} + +void ide_data_writel(void *opaque, uint32_t addr, uint32_t val) +{ + IDEBus *bus = opaque; + IDEState *s = idebus_active_if(bus); + uint8_t *p; + + /* PIO data access allowed only when DRQ bit is set. The result of a write + * during PIO out is indeterminate, just ignore it. */ + if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) { + return; + } + + p = s->data_ptr; + if (p + 4 > s->data_end) { + return; + } + + *(uint32_t *)p = le32_to_cpu(val); + p += 4; + s->data_ptr = p; + if (p >= s->data_end) { + s->status &= ~DRQ_STAT; + s->end_transfer_func(s); + } +} + +uint32_t ide_data_readl(void *opaque, uint32_t addr) +{ + IDEBus *bus = opaque; + IDEState *s = idebus_active_if(bus); + uint8_t *p; + int ret; + + /* PIO data access allowed only when DRQ bit is set. The result of a read + * during PIO in is indeterminate, return 0 and don't move forward. 
*/ + if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) { + return 0; + } + + p = s->data_ptr; + if (p + 4 > s->data_end) { + return 0; + } + + ret = cpu_to_le32(*(uint32_t *)p); + p += 4; + s->data_ptr = p; + if (p >= s->data_end) { + s->status &= ~DRQ_STAT; + s->end_transfer_func(s); + } + return ret; +} + +static void ide_dummy_transfer_stop(IDEState *s) +{ + s->data_ptr = s->io_buffer; + s->data_end = s->io_buffer; + s->io_buffer[0] = 0xff; + s->io_buffer[1] = 0xff; + s->io_buffer[2] = 0xff; + s->io_buffer[3] = 0xff; +} + +static void ide_reset(IDEState *s) +{ +#ifdef DEBUG_IDE + printf("ide: reset\n"); +#endif + + if (s->pio_aiocb) { + blk_aio_cancel(s->pio_aiocb); + s->pio_aiocb = NULL; + } + + if (s->drive_kind == IDE_CFATA) + s->mult_sectors = 0; + else + s->mult_sectors = MAX_MULT_SECTORS; + /* ide regs */ + s->feature = 0; + s->error = 0; + s->nsector = 0; + s->sector = 0; + s->lcyl = 0; + s->hcyl = 0; + + /* lba48 */ + s->hob_feature = 0; + s->hob_sector = 0; + s->hob_nsector = 0; + s->hob_lcyl = 0; + s->hob_hcyl = 0; + + s->select = 0xa0; + s->status = READY_STAT | SEEK_STAT; + + s->lba48 = 0; + + /* ATAPI specific */ + s->sense_key = 0; + s->asc = 0; + s->cdrom_changed = 0; + s->packet_transfer_size = 0; + s->elementary_transfer_size = 0; + s->io_buffer_index = 0; + s->cd_sector_size = 0; + s->atapi_dma = 0; + s->tray_locked = 0; + s->tray_open = 0; + /* ATA DMA state */ + s->io_buffer_size = 0; + s->req_nb_sectors = 0; + + ide_set_signature(s); + /* init the transfer handler so that 0xffff is returned on data + accesses */ + s->end_transfer_func = ide_dummy_transfer_stop; + ide_dummy_transfer_stop(s); + s->media_changed = 0; +} + +void ide_bus_reset(IDEBus *bus) +{ + bus->unit = 0; + bus->cmd = 0; + ide_reset(&bus->ifs[0]); + ide_reset(&bus->ifs[1]); + ide_clear_hob(bus); + + /* pending async DMA */ + if (bus->dma->aiocb) { +#ifdef DEBUG_AIO + printf("aio_cancel\n"); +#endif + blk_aio_cancel(bus->dma->aiocb); + bus->dma->aiocb = NULL; + } + + /* reset dma provider too */ + if (bus->dma->ops->reset) { + bus->dma->ops->reset(bus->dma); + } +} + +static bool ide_cd_is_tray_open(void *opaque) +{ + return ((IDEState *)opaque)->tray_open; +} + +static bool ide_cd_is_medium_locked(void *opaque) +{ + return ((IDEState *)opaque)->tray_locked; +} + +static void ide_resize_cb(void *opaque) +{ + IDEState *s = opaque; + uint64_t nb_sectors; + + if (!s->identify_set) { + return; + } + + blk_get_geometry(s->blk, &nb_sectors); + s->nb_sectors = nb_sectors; + + /* Update the identify data buffer. */ + if (s->drive_kind == IDE_CFATA) { + ide_cfata_identify_size(s); + } else { + /* IDE_CD uses a different set of callbacks entirely. 
*/ + assert(s->drive_kind != IDE_CD); + ide_identify_size(s); + } +} + +static const BlockDevOps ide_cd_block_ops = { + .change_media_cb = ide_cd_change_cb, + .eject_request_cb = ide_cd_eject_request_cb, + .is_tray_open = ide_cd_is_tray_open, + .is_medium_locked = ide_cd_is_medium_locked, +}; + +static const BlockDevOps ide_hd_block_ops = { + .resize_cb = ide_resize_cb, +}; + +int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind, + const char *version, const char *serial, const char *model, + uint64_t wwn, + uint32_t cylinders, uint32_t heads, uint32_t secs, + int chs_trans) +{ + uint64_t nb_sectors; + + s->blk = blk; + s->drive_kind = kind; + + blk_get_geometry(blk, &nb_sectors); + s->cylinders = cylinders; + s->heads = heads; + s->sectors = secs; + s->chs_trans = chs_trans; + s->nb_sectors = nb_sectors; + s->wwn = wwn; + /* The SMART values should be preserved across power cycles + but they aren't. */ + s->smart_enabled = 1; + s->smart_autosave = 1; + s->smart_errors = 0; + s->smart_selftest_count = 0; + if (kind == IDE_CD) { + blk_set_dev_ops(blk, &ide_cd_block_ops, s); + blk_set_guest_block_size(blk, 2048); + } else { + if (!blk_is_inserted(s->blk)) { + error_report("Device needs media, but drive is empty"); + return -1; + } + if (blk_is_read_only(blk)) { + error_report("Can't use a read-only drive"); + return -1; + } + blk_set_dev_ops(blk, &ide_hd_block_ops, s); + } + if (serial) { + pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial); + } else { + snprintf(s->drive_serial_str, sizeof(s->drive_serial_str), + "QM%05d", s->drive_serial); + } + if (model) { + pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model); + } else { + switch (kind) { + case IDE_CD: + strcpy(s->drive_model_str, "QEMU DVD-ROM"); + break; + case IDE_CFATA: + strcpy(s->drive_model_str, "QEMU MICRODRIVE"); + break; + default: + strcpy(s->drive_model_str, "QEMU HARDDISK"); + break; + } + } + + if (version) { + pstrcpy(s->version, sizeof(s->version), version); + } else { + pstrcpy(s->version, sizeof(s->version), qemu_hw_version()); + } + + ide_reset(s); + blk_iostatus_enable(blk); + return 0; +} + +static void ide_init1(IDEBus *bus, int unit) +{ + static int drive_serial = 1; + IDEState *s = &bus->ifs[unit]; + + s->bus = bus; + s->unit = unit; + s->drive_serial = drive_serial++; + /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */ + s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4; + s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len); + memset(s->io_buffer, 0, s->io_buffer_total_len); + + s->smart_selftest_data = blk_blockalign(s->blk, 512); + memset(s->smart_selftest_data, 0, 512); + + s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + ide_sector_write_timer_cb, s); +} + +static int ide_nop_int(IDEDMA *dma, int x) +{ + return 0; +} + +static void ide_nop(IDEDMA *dma) +{ +} + +static int32_t ide_nop_int32(IDEDMA *dma, int32_t l) +{ + return 0; +} + +static const IDEDMAOps ide_dma_nop_ops = { + .prepare_buf = ide_nop_int32, + .restart_dma = ide_nop, + .rw_buf = ide_nop_int, +}; + +static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd) +{ + s->unit = s->bus->retry_unit; + ide_set_sector(s, s->bus->retry_sector_num); + s->nsector = s->bus->retry_nsector; + s->bus->dma->ops->restart_dma(s->bus->dma); + s->io_buffer_size = 0; + s->dma_cmd = dma_cmd; + ide_start_dma(s, ide_dma_cb); +} + +static void ide_restart_bh(void *opaque) +{ + IDEBus *bus = opaque; + IDEState *s; + bool is_read; + int error_status; + + qemu_bh_delete(bus->bh); + 
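+ /* the bottom half is one-shot: clearing it below lets ide_restart_cb schedule a new one on the next VM resume */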
bus->bh = NULL; + + error_status = bus->error_status; + if (bus->error_status == 0) { + return; + } + + s = idebus_active_if(bus); + is_read = (bus->error_status & IDE_RETRY_READ) != 0; + + /* The error status must be cleared before resubmitting the request: The + * request may fail again, and this case can only be distinguished if the + * called function can set a new error status. */ + bus->error_status = 0; + + /* The HBA has generically asked to be kicked on retry */ + if (error_status & IDE_RETRY_HBA) { + if (s->bus->dma->ops->restart) { + s->bus->dma->ops->restart(s->bus->dma); + } + } + + if (error_status & IDE_RETRY_DMA) { + if (error_status & IDE_RETRY_TRIM) { + ide_restart_dma(s, IDE_DMA_TRIM); + } else { + ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE); + } + } else if (error_status & IDE_RETRY_PIO) { + if (is_read) { + ide_sector_read(s); + } else { + ide_sector_write(s); + } + } else if (error_status & IDE_RETRY_FLUSH) { + ide_flush_cache(s); + } else { + /* + * We've not got any bits to tell us about ATAPI - but + * we do have the end_transfer_func that tells us what + * we're trying to do. + */ + if (s->end_transfer_func == ide_atapi_cmd) { + ide_atapi_dma_restart(s); + } + } +} + +static void ide_restart_cb(void *opaque, int running, RunState state) +{ + IDEBus *bus = opaque; + + if (!running) + return; + + if (!bus->bh) { + bus->bh = qemu_bh_new(ide_restart_bh, bus); + qemu_bh_schedule(bus->bh); + } +} + +void ide_register_restart_cb(IDEBus *bus) +{ + if (bus->dma->ops->restart_dma) { + qemu_add_vm_change_state_handler(ide_restart_cb, bus); + } +} + +static IDEDMA ide_dma_nop = { + .ops = &ide_dma_nop_ops, + .aiocb = NULL, +}; + +void ide_init2(IDEBus *bus, qemu_irq irq) +{ + int i; + + for(i = 0; i < 2; i++) { + ide_init1(bus, i); + ide_reset(&bus->ifs[i]); + } + bus->irq = irq; + bus->dma = &ide_dma_nop; +} + +static const MemoryRegionPortio ide_portio_list[] = { + { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write }, + { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew }, + { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel }, + PORTIO_END_OF_LIST(), +}; + +static const MemoryRegionPortio ide_portio2_list[] = { + { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write }, + PORTIO_END_OF_LIST(), +}; + +void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2) +{ + /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA + bridge has been setup properly to always register with ISA. 
*/ + isa_register_portio_list(dev, iobase, ide_portio_list, bus, "ide"); + + if (iobase2) { + isa_register_portio_list(dev, iobase2, ide_portio2_list, bus, "ide"); + } +} + +static bool is_identify_set(void *opaque, int version_id) +{ + IDEState *s = opaque; + + return s->identify_set != 0; +} + +static EndTransferFunc* transfer_end_table[] = { + ide_sector_read, + ide_sector_write, + ide_transfer_stop, + ide_atapi_cmd_reply_end, + ide_atapi_cmd, + ide_dummy_transfer_stop, +}; + +static int transfer_end_table_idx(EndTransferFunc *fn) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++) + if (transfer_end_table[i] == fn) + return i; + + return -1; +} + +static int ide_drive_post_load(void *opaque, int version_id) +{ + IDEState *s = opaque; + + if (s->blk && s->identify_set) { + blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5))); + } + return 0; +} + +static int ide_drive_pio_post_load(void *opaque, int version_id) +{ + IDEState *s = opaque; + + if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) { + return -EINVAL; + } + s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx]; + s->data_ptr = s->io_buffer + s->cur_io_buffer_offset; + s->data_end = s->data_ptr + s->cur_io_buffer_len; + s->atapi_dma = s->feature & 1; /* as per cmd_packet */ + + return 0; +} + +static void ide_drive_pio_pre_save(void *opaque) +{ + IDEState *s = opaque; + int idx; + + s->cur_io_buffer_offset = s->data_ptr - s->io_buffer; + s->cur_io_buffer_len = s->data_end - s->data_ptr; + + idx = transfer_end_table_idx(s->end_transfer_func); + if (idx == -1) { + fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n", + __func__); + s->end_transfer_fn_idx = 2; + } else { + s->end_transfer_fn_idx = idx; + } +} + +static bool ide_drive_pio_state_needed(void *opaque) +{ + IDEState *s = opaque; + + return ((s->status & DRQ_STAT) != 0) + || (s->bus->error_status & IDE_RETRY_PIO); +} + +static bool ide_tray_state_needed(void *opaque) +{ + IDEState *s = opaque; + + return s->tray_open || s->tray_locked; +} + +static bool ide_atapi_gesn_needed(void *opaque) +{ + IDEState *s = opaque; + + return s->events.new_media || s->events.eject_request; +} + +static bool ide_error_needed(void *opaque) +{ + IDEBus *bus = opaque; + + return (bus->error_status != 0); +} + +/* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */ +static const VMStateDescription vmstate_ide_atapi_gesn_state = { + .name ="ide_drive/atapi/gesn_state", + .version_id = 1, + .minimum_version_id = 1, + .needed = ide_atapi_gesn_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(events.new_media, IDEState), + VMSTATE_BOOL(events.eject_request, IDEState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_ide_tray_state = { + .name = "ide_drive/tray_state", + .version_id = 1, + .minimum_version_id = 1, + .needed = ide_tray_state_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(tray_open, IDEState), + VMSTATE_BOOL(tray_locked, IDEState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_ide_drive_pio_state = { + .name = "ide_drive/pio_state", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = ide_drive_pio_pre_save, + .post_load = ide_drive_pio_post_load, + .needed = ide_drive_pio_state_needed, + .fields = (VMStateField[]) { + VMSTATE_INT32(req_nb_sectors, IDEState), + VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1, + vmstate_info_uint8, uint8_t), + VMSTATE_INT32(cur_io_buffer_offset, IDEState), + 
VMSTATE_INT32(cur_io_buffer_len, IDEState), + VMSTATE_UINT8(end_transfer_fn_idx, IDEState), + VMSTATE_INT32(elementary_transfer_size, IDEState), + VMSTATE_INT32(packet_transfer_size, IDEState), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_ide_drive = { + .name = "ide_drive", + .version_id = 3, + .minimum_version_id = 0, + .post_load = ide_drive_post_load, + .fields = (VMStateField[]) { + VMSTATE_INT32(mult_sectors, IDEState), + VMSTATE_INT32(identify_set, IDEState), + VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set), + VMSTATE_UINT8(feature, IDEState), + VMSTATE_UINT8(error, IDEState), + VMSTATE_UINT32(nsector, IDEState), + VMSTATE_UINT8(sector, IDEState), + VMSTATE_UINT8(lcyl, IDEState), + VMSTATE_UINT8(hcyl, IDEState), + VMSTATE_UINT8(hob_feature, IDEState), + VMSTATE_UINT8(hob_sector, IDEState), + VMSTATE_UINT8(hob_nsector, IDEState), + VMSTATE_UINT8(hob_lcyl, IDEState), + VMSTATE_UINT8(hob_hcyl, IDEState), + VMSTATE_UINT8(select, IDEState), + VMSTATE_UINT8(status, IDEState), + VMSTATE_UINT8(lba48, IDEState), + VMSTATE_UINT8(sense_key, IDEState), + VMSTATE_UINT8(asc, IDEState), + VMSTATE_UINT8_V(cdrom_changed, IDEState, 3), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_ide_drive_pio_state, + &vmstate_ide_tray_state, + &vmstate_ide_atapi_gesn_state, + NULL + } +}; + +static const VMStateDescription vmstate_ide_error_status = { + .name ="ide_bus/error", + .version_id = 2, + .minimum_version_id = 1, + .needed = ide_error_needed, + .fields = (VMStateField[]) { + VMSTATE_INT32(error_status, IDEBus), + VMSTATE_INT64_V(retry_sector_num, IDEBus, 2), + VMSTATE_UINT32_V(retry_nsector, IDEBus, 2), + VMSTATE_UINT8_V(retry_unit, IDEBus, 2), + VMSTATE_END_OF_LIST() + } +}; + +const VMStateDescription vmstate_ide_bus = { + .name = "ide_bus", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT8(cmd, IDEBus), + VMSTATE_UINT8(unit, IDEBus), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_ide_error_status, + NULL + } +}; + +void ide_drive_get(DriveInfo **hd, int n) +{ + int i; + int highest_bus = drive_get_max_bus(IF_IDE) + 1; + int max_devs = drive_get_max_devs(IF_IDE); + int n_buses = max_devs ? (n / max_devs) : n; + + /* + * Note: The number of actual buses available is not known. + * We compute this based on the size of the DriveInfo* array, n. + * If it is less than max_devs * <num_real_buses>, + * We will stop looking for drives prematurely instead of overfilling + * the array. + */ + + if (highest_bus > n_buses) { + error_report("Too many IDE buses defined (%d > %d)", + highest_bus, n_buses); + exit(1); + } + + for (i = 0; i < n; i++) { + hd[i] = drive_get_by_index(IF_IDE, i); + } +} diff --git a/src/hw/ide/ich.c b/src/hw/ide/ich.c new file mode 100644 index 0000000..16925fa --- /dev/null +++ b/src/hw/ide/ich.c @@ -0,0 +1,190 @@ +/* + * QEMU ICH Emulation + * + * Copyright (c) 2010 Sebastian Herbszt <herbszt@gmx.de> + * Copyright (c) 2010 Alexander Graf <agraf@suse.de> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + * + * + * lspci dump of a ICH-9 real device + * + * 00:1f.2 SATA controller [0106]: Intel Corporation 82801IR/IO/IH (ICH9R/DO/DH) 6 port SATA AHCI Controller [8086:2922] (rev 02) (prog-if 01 [AHCI 1.0]) + * Subsystem: Intel Corporation 82801IR/IO/IH (ICH9R/DO/DH) 6 port SATA AHCI Controller [8086:2922] + * Control: I/O+ Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR- FastB2B- DisINTx+ + * Status: Cap+ 66MHz+ UDF- FastB2B+ ParErr- DEVSEL=medium >TAbort- <TAbort- <MAbort- >SERR- <PERR- INTx- + * Latency: 0 + * Interrupt: pin B routed to IRQ 222 + * Region 0: I/O ports at d000 [size=8] + * Region 1: I/O ports at cc00 [size=4] + * Region 2: I/O ports at c880 [size=8] + * Region 3: I/O ports at c800 [size=4] + * Region 4: I/O ports at c480 [size=32] + * Region 5: Memory at febf9000 (32-bit, non-prefetchable) [size=2K] + * Capabilities: [80] Message Signalled Interrupts: Mask- 64bit- Count=1/16 Enable+ + * Address: fee0f00c Data: 41d9 + * Capabilities: [70] Power Management version 3 + * Flags: PMEClk- DSI- D1- D2- AuxCurrent=0mA PME(D0-,D1-,D2-,D3hot+,D3cold-) + * Status: D0 PME-Enable- DSel=0 DScale=0 PME- + * Capabilities: [a8] SATA HBA <?> + * Capabilities: [b0] Vendor Specific Information <?> + * Kernel driver in use: ahci + * Kernel modules: ahci + * 00: 86 80 22 29 07 04 b0 02 02 01 06 01 00 00 00 00 + * 10: 01 d0 00 00 01 cc 00 00 81 c8 00 00 01 c8 00 00 + * 20: 81 c4 00 00 00 90 bf fe 00 00 00 00 86 80 22 29 + * 30: 00 00 00 00 80 00 00 00 00 00 00 00 0f 02 00 00 + * 40: 00 80 00 80 00 00 00 00 00 00 00 00 00 00 00 00 + * 50: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * 60: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * 70: 01 a8 03 40 08 00 00 00 00 00 00 00 00 00 00 00 + * 80: 05 70 09 00 0c f0 e0 fe d9 41 00 00 00 00 00 00 + * 90: 40 00 0f 82 93 01 00 00 00 00 00 00 00 00 00 00 + * a0: ac 00 00 00 0a 00 12 00 12 b0 10 00 48 00 00 00 + * b0: 09 00 06 20 00 00 00 00 00 00 00 00 00 00 00 00 + * c0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * d0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * e0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * f0: 00 00 00 00 00 00 00 00 86 0f 02 00 00 00 00 00 + * + */ + +#include <hw/hw.h> +#include <hw/pci/msi.h> +#include <hw/i386/pc.h> +#include <hw/pci/pci.h> +#include <hw/isa/isa.h> +#include "sysemu/block-backend.h" +#include "sysemu/dma.h" + +#include <hw/ide/pci.h> +#include <hw/ide/ahci.h> + +#define ICH9_MSI_CAP_OFFSET 0x80 +#define ICH9_SATA_CAP_OFFSET 0xA8 + +#define ICH9_IDP_BAR 4 +#define ICH9_MEM_BAR 5 + +#define ICH9_IDP_INDEX 0x10 +#define ICH9_IDP_INDEX_LOG2 0x04 + +static const VMStateDescription vmstate_ich9_ahci = { + .name = "ich9_ahci", + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, AHCIPCIState), + VMSTATE_AHCI(ahci, AHCIPCIState), + VMSTATE_END_OF_LIST() + }, +}; + +static void pci_ich9_reset(DeviceState *dev) +{ + AHCIPCIState *d = ICH_AHCI(dev); + + ahci_reset(&d->ahci); +} + +static void pci_ich9_ahci_init(Object *obj) +{ + struct AHCIPCIState *d = ICH_AHCI(obj); + + ahci_init(&d->ahci, DEVICE(obj)); +} + +static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp) +{ + struct AHCIPCIState *d; + int sata_cap_offset; + uint8_t *sata_cap; + d = ICH_AHCI(dev); + + ahci_realize(&d->ahci, DEVICE(dev), 
pci_get_address_space(dev), 6); + + pci_config_set_prog_interface(dev->config, AHCI_PROGMODE_MAJOR_REV_1); + + dev->config[PCI_CACHE_LINE_SIZE] = 0x08; /* Cache line size */ + dev->config[PCI_LATENCY_TIMER] = 0x00; /* Latency timer */ + pci_config_set_interrupt_pin(dev->config, 1); + + /* XXX Software should program this register */ + dev->config[0x90] = 1 << 6; /* Address Map Register - AHCI mode */ + + d->ahci.irq = pci_allocate_irq(dev); + + pci_register_bar(dev, ICH9_IDP_BAR, PCI_BASE_ADDRESS_SPACE_IO, + &d->ahci.idp); + pci_register_bar(dev, ICH9_MEM_BAR, PCI_BASE_ADDRESS_SPACE_MEMORY, + &d->ahci.mem); + + sata_cap_offset = pci_add_capability2(dev, PCI_CAP_ID_SATA, + ICH9_SATA_CAP_OFFSET, SATA_CAP_SIZE, + errp); + if (sata_cap_offset < 0) { + return; + } + + sata_cap = dev->config + sata_cap_offset; + pci_set_word(sata_cap + SATA_CAP_REV, 0x10); + pci_set_long(sata_cap + SATA_CAP_BAR, + (ICH9_IDP_BAR + 0x4) | (ICH9_IDP_INDEX_LOG2 << 4)); + d->ahci.idp_offset = ICH9_IDP_INDEX; + + /* Although the AHCI 1.3 specification states that the first capability + * should be PMCAP, the Intel ICH9 data sheet specifies that the ICH9 + * AHCI device puts the MSI capability first, pointing to 0x80. */ + msi_init(dev, ICH9_MSI_CAP_OFFSET, 1, true, false); +} + +static void pci_ich9_uninit(PCIDevice *dev) +{ + struct AHCIPCIState *d; + d = ICH_AHCI(dev); + + msi_uninit(dev); + ahci_uninit(&d->ahci); + qemu_free_irq(d->ahci.irq); +} + +static void ich_ahci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pci_ich9_ahci_realize; + k->exit = pci_ich9_uninit; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = PCI_DEVICE_ID_INTEL_82801IR; + k->revision = 0x02; + k->class_id = PCI_CLASS_STORAGE_SATA; + dc->vmsd = &vmstate_ich9_ahci; + dc->reset = pci_ich9_reset; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo ich_ahci_info = { + .name = TYPE_ICH9_AHCI, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(AHCIPCIState), + .instance_init = pci_ich9_ahci_init, + .class_init = ich_ahci_class_init, +}; + +static void ich_ahci_register_types(void) +{ + type_register_static(&ich_ahci_info); +} + +type_init(ich_ahci_register_types) diff --git a/src/hw/ide/internal.h b/src/hw/ide/internal.h new file mode 100644 index 0000000..2d1e2d2 --- /dev/null +++ b/src/hw/ide/internal.h @@ -0,0 +1,599 @@ +#ifndef HW_IDE_INTERNAL_H +#define HW_IDE_INTERNAL_H + +/* + * QEMU IDE Emulation -- internal header file + * only files in hw/ide/ are supposed to include this file. 
+ * non-internal declarations are in hw/ide.h + */ +#include <hw/ide.h> +#include <hw/isa/isa.h> +#include "sysemu/dma.h" +#include "sysemu/sysemu.h" +#include "hw/block/block.h" +#include "block/scsi.h" + +/* debug IDE devices */ +//#define DEBUG_IDE +//#define DEBUG_IDE_ATAPI +//#define DEBUG_AIO +#define USE_DMA_CDROM + +typedef struct IDEBus IDEBus; +typedef struct IDEDevice IDEDevice; +typedef struct IDEState IDEState; +typedef struct IDEDMA IDEDMA; +typedef struct IDEDMAOps IDEDMAOps; + +#define TYPE_IDE_BUS "IDE" +#define IDE_BUS(obj) OBJECT_CHECK(IDEBus, (obj), TYPE_IDE_BUS) + +/* Bits of HD_STATUS */ +#define ERR_STAT 0x01 +#define INDEX_STAT 0x02 +#define ECC_STAT 0x04 /* Corrected error */ +#define DRQ_STAT 0x08 +#define SEEK_STAT 0x10 +#define SRV_STAT 0x10 +#define WRERR_STAT 0x20 +#define READY_STAT 0x40 +#define BUSY_STAT 0x80 + +/* Bits for HD_ERROR */ +#define MARK_ERR 0x01 /* Bad address mark */ +#define TRK0_ERR 0x02 /* couldn't find track 0 */ +#define ABRT_ERR 0x04 /* Command aborted */ +#define MCR_ERR 0x08 /* media change request */ +#define ID_ERR 0x10 /* ID field not found */ +#define MC_ERR 0x20 /* media changed */ +#define ECC_ERR 0x40 /* Uncorrectable ECC error */ +#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */ +#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */ + +/* Bits of HD_NSECTOR */ +#define CD 0x01 +#define IO 0x02 +#define REL 0x04 +#define TAG_MASK 0xf8 + +#define IDE_CMD_RESET 0x04 +#define IDE_CMD_DISABLE_IRQ 0x02 + +/* ACS-2 T13/2015-D Table B.2 Command codes */ +#define WIN_NOP 0x00 +/* reserved 0x01..0x02 */ +#define CFA_REQ_EXT_ERROR_CODE 0x03 /* CFA Request Extended Error Code */ +/* reserved 0x04..0x05 */ +#define WIN_DSM 0x06 +/* reserved 0x07 */ +#define WIN_DEVICE_RESET 0x08 +/* reserved 0x09..0x0a */ +/* REQUEST SENSE DATA EXT 0x0B */ +/* reserved 0x0C..0x0F */ +#define WIN_RECAL 0x10 /* obsolete since ATA4 */ +/* obsolete since ATA3, retired in ATA4 0x11..0x1F */ +#define WIN_READ 0x20 /* 28-Bit */ +#define WIN_READ_ONCE 0x21 /* 28-Bit w/o retries, obsolete since ATA5 */ +/* obsolete since ATA4 0x22..0x23 */ +#define WIN_READ_EXT 0x24 /* 48-Bit */ +#define WIN_READDMA_EXT 0x25 /* 48-Bit */ +#define WIN_READDMA_QUEUED_EXT 0x26 /* 48-Bit, obsolete since ACS2 */ +#define WIN_READ_NATIVE_MAX_EXT 0x27 /* 48-Bit */ +/* reserved 0x28 */ +#define WIN_MULTREAD_EXT 0x29 /* 48-Bit */ +/* READ STREAM DMA EXT 0x2A */ +/* READ STREAM EXT 0x2B */ +/* reserved 0x2C..0x2E */ +/* READ LOG EXT 0x2F */ +#define WIN_WRITE 0x30 /* 28-Bit */ +#define WIN_WRITE_ONCE 0x31 /* 28-Bit w/o retries, obsolete since ATA5 */ +/* obsolete since ATA4 0x32..0x33 */ +#define WIN_WRITE_EXT 0x34 /* 48-Bit */ +#define WIN_WRITEDMA_EXT 0x35 /* 48-Bit */ +#define WIN_WRITEDMA_QUEUED_EXT 0x36 /* 48-Bit */ +#define WIN_SET_MAX_EXT 0x37 /* 48-Bit, obsolete since ACS2 */ +#define WIN_SET_MAX_EXT 0x37 /* 48-Bit */ +#define CFA_WRITE_SECT_WO_ERASE 0x38 /* CFA Write Sectors without erase */ +#define WIN_MULTWRITE_EXT 0x39 /* 48-Bit */ +/* WRITE STREAM DMA EXT 0x3A */ +/* WRITE STREAM EXT 0x3B */ +#define WIN_WRITE_VERIFY 0x3C /* 28-Bit, obsolete since ATA4 */ +/* WRITE DMA FUA EXT 0x3D */ +/* obsolete since ACS2 0x3E */ +/* WRITE LOG EXT 0x3F */ +#define WIN_VERIFY 0x40 /* 28-Bit - Read Verify Sectors */ +#define WIN_VERIFY_ONCE 0x41 /* 28-Bit - w/o retries, obsolete since ATA5 */ +#define WIN_VERIFY_EXT 0x42 /* 48-Bit */ +/* reserved 0x43..0x44 */ +/* WRITE UNCORRECTABLE EXT 0x45 */ +/* reserved 0x46 */ +/* READ LOG DMA EXT 0x47 */ +/* reserved 0x48..0x4F 
*/ +/* obsolete since ATA4 0x50 */ +/* CONFIGURE STREAM 0x51 */ +/* reserved 0x52..0x56 */ +/* WRITE LOG DMA EXT 0x57 */ +/* reserved 0x58..0x5A */ +/* TRUSTED NON DATA 0x5B */ +/* TRUSTED RECEIVE 0x5C */ +/* TRUSTED RECEIVE DMA 0x5D */ +/* TRUSTED SEND 0x5E */ +/* TRUSTED SEND DMA 0x5F */ +/* READ FPDMA QUEUED 0x60 */ +/* WRITE FPDMA QUEUED 0x61 */ +/* reserved 0x62->0x6F */ +#define WIN_SEEK 0x70 /* obsolete since ATA7 */ +/* reserved 0x71-0x7F */ +/* vendor specific 0x80-0x86 */ +#define CFA_TRANSLATE_SECTOR 0x87 /* CFA Translate Sector */ +/* vendor specific 0x88-0x8F */ +#define WIN_DIAGNOSE 0x90 +#define WIN_SPECIFY 0x91 /* set drive geometry translation, obsolete since ATA6 */ +#define WIN_DOWNLOAD_MICROCODE 0x92 +/* DOWNLOAD MICROCODE DMA 0x93 */ +#define WIN_STANDBYNOW2 0x94 /* retired in ATA4 */ +#define WIN_IDLEIMMEDIATE2 0x95 /* force drive to become "ready", retired in ATA4 */ +#define WIN_STANDBY2 0x96 /* retired in ATA4 */ +#define WIN_SETIDLE2 0x97 /* retired in ATA4 */ +#define WIN_CHECKPOWERMODE2 0x98 /* retired in ATA4 */ +#define WIN_SLEEPNOW2 0x99 /* retired in ATA4 */ +/* vendor specific 0x9A */ +/* reserved 0x9B..0x9F */ +#define WIN_PACKETCMD 0xA0 /* Send a packet command. */ +#define WIN_PIDENTIFY 0xA1 /* identify ATAPI device */ +#define WIN_QUEUED_SERVICE 0xA2 /* obsolete since ACS2 */ +/* reserved 0xA3..0xAF */ +#define WIN_SMART 0xB0 /* self-monitoring and reporting */ +/* Device Configuration Overlay 0xB1 */ +/* reserved 0xB2..0xB3 */ +/* Sanitize Device 0xB4 */ +/* reserved 0xB5 */ +/* NV Cache 0xB6 */ +/* reserved for CFA 0xB7..0xBB */ +#define CFA_ACCESS_METADATA_STORAGE 0xB8 +/* reserved 0xBC..0xBF */ +#define CFA_ERASE_SECTORS 0xC0 /* microdrives implement as NOP */ +/* vendor specific 0xC1..0xC3 */ +#define WIN_MULTREAD 0xC4 /* read sectors using multiple mode*/ +#define WIN_MULTWRITE 0xC5 /* write sectors using multiple mode */ +#define WIN_SETMULT 0xC6 /* enable/disable multiple mode */ +#define WIN_READDMA_QUEUED 0xC7 /* read sectors using Queued DMA transfers, obsolete since ACS2 */ +#define WIN_READDMA 0xC8 /* read sectors using DMA transfers */ +#define WIN_READDMA_ONCE 0xC9 /* 28-Bit - w/o retries, obsolete since ATA5 */ +#define WIN_WRITEDMA 0xCA /* write sectors using DMA transfers */ +#define WIN_WRITEDMA_ONCE 0xCB /* 28-Bit - w/o retries, obsolete since ATA5 */ +#define WIN_WRITEDMA_QUEUED 0xCC /* write sectors using Queued DMA transfers, obsolete since ACS2 */ +#define CFA_WRITE_MULTI_WO_ERASE 0xCD /* CFA Write multiple without erase */ +/* WRITE MULTIPLE FUA EXT 0xCE */ +/* reserved 0xCF..0xDO */ +/* CHECK MEDIA CARD TYPE 0xD1 */ +/* reserved for media card pass through 0xD2..0xD4 */ +/* reserved 0xD5..0xD9 */ +#define WIN_GETMEDIASTATUS 0xDA /* obsolete since ATA8 */ +/* obsolete since ATA3, retired in ATA4 0xDB..0xDD */ +#define WIN_DOORLOCK 0xDE /* lock door on removable drives, obsolete since ATA8 */ +#define WIN_DOORUNLOCK 0xDF /* unlock door on removable drives, obsolete since ATA8 */ +#define WIN_STANDBYNOW1 0xE0 +#define WIN_IDLEIMMEDIATE 0xE1 /* force drive to become "ready" */ +#define WIN_STANDBY 0xE2 /* Set device in Standby Mode */ +#define WIN_SETIDLE1 0xE3 +#define WIN_READ_BUFFER 0xE4 /* force read only 1 sector */ +#define WIN_CHECKPOWERMODE1 0xE5 +#define WIN_SLEEPNOW1 0xE6 +#define WIN_FLUSH_CACHE 0xE7 +#define WIN_WRITE_BUFFER 0xE8 /* force write only 1 sector */ +/* READ BUFFER DMA 0xE9 */ +#define WIN_FLUSH_CACHE_EXT 0xEA /* 48-Bit */ +/* WRITE BUFFER DMA 0xEB */ +#define WIN_IDENTIFY 0xEC /* ask drive to identify 
itself */ +#define WIN_MEDIAEJECT 0xED /* obsolete since ATA8 */ +/* obsolete since ATA4 0xEE */ +#define WIN_SETFEATURES 0xEF /* set special drive features */ +#define IBM_SENSE_CONDITION 0xF0 /* measure disk temperature, vendor specific */ +#define WIN_SECURITY_SET_PASS 0xF1 +#define WIN_SECURITY_UNLOCK 0xF2 +#define WIN_SECURITY_ERASE_PREPARE 0xF3 +#define WIN_SECURITY_ERASE_UNIT 0xF4 +#define WIN_SECURITY_FREEZE_LOCK 0xF5 +#define CFA_WEAR_LEVEL 0xF5 /* microdrives implement as NOP; not specified in T13! */ +#define WIN_SECURITY_DISABLE 0xF6 +/* vendor specific 0xF7 */ +#define WIN_READ_NATIVE_MAX 0xF8 /* return the native maximum address */ +#define WIN_SET_MAX 0xF9 +/* vendor specific 0xFA..0xFF */ + +/* set to 1 set disable mult support */ +#define MAX_MULT_SECTORS 16 + +#define IDE_DMA_BUF_SECTORS 256 + +/* feature values for Data Set Management */ +#define DSM_TRIM 0x01 + +#if (IDE_DMA_BUF_SECTORS < MAX_MULT_SECTORS) +#error "IDE_DMA_BUF_SECTORS must be bigger or equal to MAX_MULT_SECTORS" +#endif + +/* ATAPI defines */ + +#define ATAPI_PACKET_SIZE 12 + +/* The generic packet command opcodes for CD/DVD Logical Units, + * From Table 57 of the SFF8090 Ver. 3 (Mt. Fuji) draft standard. */ +#define GPCMD_BLANK 0xa1 +#define GPCMD_CLOSE_TRACK 0x5b +#define GPCMD_FLUSH_CACHE 0x35 +#define GPCMD_FORMAT_UNIT 0x04 +#define GPCMD_GET_CONFIGURATION 0x46 +#define GPCMD_GET_EVENT_STATUS_NOTIFICATION 0x4a +#define GPCMD_GET_PERFORMANCE 0xac +#define GPCMD_INQUIRY 0x12 +#define GPCMD_LOAD_UNLOAD 0xa6 +#define GPCMD_MECHANISM_STATUS 0xbd +#define GPCMD_MODE_SELECT_10 0x55 +#define GPCMD_MODE_SENSE_10 0x5a +#define GPCMD_PAUSE_RESUME 0x4b +#define GPCMD_PLAY_AUDIO_10 0x45 +#define GPCMD_PLAY_AUDIO_MSF 0x47 +#define GPCMD_PLAY_AUDIO_TI 0x48 +#define GPCMD_PLAY_CD 0xbc +#define GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e +#define GPCMD_READ_10 0x28 +#define GPCMD_READ_12 0xa8 +#define GPCMD_READ_CDVD_CAPACITY 0x25 +#define GPCMD_READ_CD 0xbe +#define GPCMD_READ_CD_MSF 0xb9 +#define GPCMD_READ_DISC_INFO 0x51 +#define GPCMD_READ_DVD_STRUCTURE 0xad +#define GPCMD_READ_FORMAT_CAPACITIES 0x23 +#define GPCMD_READ_HEADER 0x44 +#define GPCMD_READ_TRACK_RZONE_INFO 0x52 +#define GPCMD_READ_SUBCHANNEL 0x42 +#define GPCMD_READ_TOC_PMA_ATIP 0x43 +#define GPCMD_REPAIR_RZONE_TRACK 0x58 +#define GPCMD_REPORT_KEY 0xa4 +#define GPCMD_REQUEST_SENSE 0x03 +#define GPCMD_RESERVE_RZONE_TRACK 0x53 +#define GPCMD_SCAN 0xba +#define GPCMD_SEEK 0x2b +#define GPCMD_SEND_DVD_STRUCTURE 0xad +#define GPCMD_SEND_EVENT 0xa2 +#define GPCMD_SEND_KEY 0xa3 +#define GPCMD_SEND_OPC 0x54 +#define GPCMD_SET_READ_AHEAD 0xa7 +#define GPCMD_SET_STREAMING 0xb6 +#define GPCMD_START_STOP_UNIT 0x1b +#define GPCMD_STOP_PLAY_SCAN 0x4e +#define GPCMD_TEST_UNIT_READY 0x00 +#define GPCMD_VERIFY_10 0x2f +#define GPCMD_WRITE_10 0x2a +#define GPCMD_WRITE_AND_VERIFY_10 0x2e +/* This is listed as optional in ATAPI 2.6, but is (curiously) + * missing from Mt. Fuji, Table 57. It _is_ mentioned in Mt. Fuji + * Table 377 as an MMC command for SCSi devices though... Most ATAPI + * drives support it. */ +#define GPCMD_SET_SPEED 0xbb +/* This seems to be a SCSI specific CD-ROM opcode + * to play data at track/index */ +#define GPCMD_PLAYAUDIO_TI 0x48 +/* + * From MS Media Status Notification Support Specification. For + * older drives only. 
+ */ +#define GPCMD_GET_MEDIA_STATUS 0xda +#define GPCMD_MODE_SENSE_6 0x1a + +#define ATAPI_INT_REASON_CD 0x01 /* 0 = data transfer */ +#define ATAPI_INT_REASON_IO 0x02 /* 1 = transfer to the host */ +#define ATAPI_INT_REASON_REL 0x04 +#define ATAPI_INT_REASON_TAG 0xf8 + +/* same constants as bochs */ +#define ASC_NO_SEEK_COMPLETE 0x02 +#define ASC_ILLEGAL_OPCODE 0x20 +#define ASC_LOGICAL_BLOCK_OOR 0x21 +#define ASC_INV_FIELD_IN_CMD_PACKET 0x24 +#define ASC_MEDIUM_MAY_HAVE_CHANGED 0x28 +#define ASC_INCOMPATIBLE_FORMAT 0x30 +#define ASC_MEDIUM_NOT_PRESENT 0x3a +#define ASC_SAVING_PARAMETERS_NOT_SUPPORTED 0x39 +#define ASC_DATA_PHASE_ERROR 0x4b +#define ASC_MEDIA_REMOVAL_PREVENTED 0x53 + +#define CFA_NO_ERROR 0x00 +#define CFA_MISC_ERROR 0x09 +#define CFA_INVALID_COMMAND 0x20 +#define CFA_INVALID_ADDRESS 0x21 +#define CFA_ADDRESS_OVERFLOW 0x2f + +#define SMART_READ_DATA 0xd0 +#define SMART_READ_THRESH 0xd1 +#define SMART_ATTR_AUTOSAVE 0xd2 +#define SMART_SAVE_ATTR 0xd3 +#define SMART_EXECUTE_OFFLINE 0xd4 +#define SMART_READ_LOG 0xd5 +#define SMART_WRITE_LOG 0xd6 +#define SMART_ENABLE 0xd8 +#define SMART_DISABLE 0xd9 +#define SMART_STATUS 0xda + +typedef enum { IDE_HD, IDE_CD, IDE_CFATA } IDEDriveKind; + +typedef void EndTransferFunc(IDEState *); + +typedef void DMAStartFunc(IDEDMA *, IDEState *, BlockCompletionFunc *); +typedef void DMAVoidFunc(IDEDMA *); +typedef int DMAIntFunc(IDEDMA *, int); +typedef int32_t DMAInt32Func(IDEDMA *, int32_t len); +typedef void DMAu32Func(IDEDMA *, uint32_t); +typedef void DMAStopFunc(IDEDMA *, bool); +typedef void DMARestartFunc(void *, int, RunState); + +struct unreported_events { + bool eject_request; + bool new_media; +}; + +enum ide_dma_cmd { + IDE_DMA_READ, + IDE_DMA_WRITE, + IDE_DMA_TRIM, +}; + +#define ide_cmd_is_read(s) \ + ((s)->dma_cmd == IDE_DMA_READ) + +typedef struct IDEBufferedRequest { + QLIST_ENTRY(IDEBufferedRequest) list; + struct iovec iov; + QEMUIOVector qiov; + QEMUIOVector *original_qiov; + BlockCompletionFunc *original_cb; + void *original_opaque; + bool orphaned; +} IDEBufferedRequest; + +/* NOTE: IDEState represents in fact one drive */ +struct IDEState { + IDEBus *bus; + uint8_t unit; + /* ide config */ + IDEDriveKind drive_kind; + int cylinders, heads, sectors, chs_trans; + int64_t nb_sectors; + int mult_sectors; + int identify_set; + uint8_t identify_data[512]; + int drive_serial; + char drive_serial_str[21]; + char drive_model_str[41]; + uint64_t wwn; + /* ide regs */ + uint8_t feature; + uint8_t error; + uint32_t nsector; + uint8_t sector; + uint8_t lcyl; + uint8_t hcyl; + /* other part of tf for lba48 support */ + uint8_t hob_feature; + uint8_t hob_nsector; + uint8_t hob_sector; + uint8_t hob_lcyl; + uint8_t hob_hcyl; + + uint8_t select; + uint8_t status; + + /* set for lba48 access */ + uint8_t lba48; + BlockBackend *blk; + char version[9]; + /* ATAPI specific */ + struct unreported_events events; + uint8_t sense_key; + uint8_t asc; + bool tray_open; + bool tray_locked; + uint8_t cdrom_changed; + int packet_transfer_size; + int elementary_transfer_size; + int32_t io_buffer_index; + int lba; + int cd_sector_size; + int atapi_dma; /* true if dma is requested for the packet cmd */ + BlockAcctCookie acct; + BlockAIOCB *pio_aiocb; + struct iovec iov; + QEMUIOVector qiov; + QLIST_HEAD(, IDEBufferedRequest) buffered_requests; + /* ATA DMA state */ + uint64_t io_buffer_offset; + int32_t io_buffer_size; + QEMUSGList sg; + /* PIO transfer handling */ + int req_nb_sectors; /* number of sectors per interrupt */ + EndTransferFunc 
*end_transfer_func; + uint8_t *data_ptr; + uint8_t *data_end; + uint8_t *io_buffer; + /* PIO save/restore */ + int32_t io_buffer_total_len; + int32_t cur_io_buffer_offset; + int32_t cur_io_buffer_len; + uint8_t end_transfer_fn_idx; + QEMUTimer *sector_write_timer; /* only used for win2k install hack */ + uint32_t irq_count; /* counts IRQs when using win2k install hack */ + /* CF-ATA extended error */ + uint8_t ext_error; + /* CF-ATA metadata storage */ + uint32_t mdata_size; + uint8_t *mdata_storage; + int media_changed; + enum ide_dma_cmd dma_cmd; + /* SMART */ + uint8_t smart_enabled; + uint8_t smart_autosave; + int smart_errors; + uint8_t smart_selftest_count; + uint8_t *smart_selftest_data; + /* AHCI */ + int ncq_queues; +}; + +struct IDEDMAOps { + DMAStartFunc *start_dma; + DMAVoidFunc *start_transfer; + DMAInt32Func *prepare_buf; + DMAu32Func *commit_buf; + DMAIntFunc *rw_buf; + DMAVoidFunc *restart; + DMAVoidFunc *restart_dma; + DMAStopFunc *set_inactive; + DMAVoidFunc *cmd_done; + DMAVoidFunc *reset; +}; + +struct IDEDMA { + const struct IDEDMAOps *ops; + struct iovec iov; + QEMUIOVector qiov; + BlockAIOCB *aiocb; +}; + +struct IDEBus { + BusState qbus; + IDEDevice *master; + IDEDevice *slave; + IDEState ifs[2]; + QEMUBH *bh; + + int bus_id; + int max_units; + IDEDMA *dma; + uint8_t unit; + uint8_t cmd; + qemu_irq irq; + + int error_status; + uint8_t retry_unit; + int64_t retry_sector_num; + uint32_t retry_nsector; +}; + +#define TYPE_IDE_DEVICE "ide-device" +#define IDE_DEVICE(obj) \ + OBJECT_CHECK(IDEDevice, (obj), TYPE_IDE_DEVICE) +#define IDE_DEVICE_CLASS(klass) \ + OBJECT_CLASS_CHECK(IDEDeviceClass, (klass), TYPE_IDE_DEVICE) +#define IDE_DEVICE_GET_CLASS(obj) \ + OBJECT_GET_CLASS(IDEDeviceClass, (obj), TYPE_IDE_DEVICE) + +typedef struct IDEDeviceClass { + DeviceClass parent_class; + int (*init)(IDEDevice *dev); +} IDEDeviceClass; + +struct IDEDevice { + DeviceState qdev; + uint32_t unit; + BlockConf conf; + int chs_trans; + char *version; + char *serial; + char *model; + uint64_t wwn; +}; + +/* These are used for the error_status field of IDEBus */ +#define IDE_RETRY_DMA 0x08 +#define IDE_RETRY_PIO 0x10 +#define IDE_RETRY_READ 0x20 +#define IDE_RETRY_FLUSH 0x40 +#define IDE_RETRY_TRIM 0x80 +#define IDE_RETRY_HBA 0x100 + +static inline IDEState *idebus_active_if(IDEBus *bus) +{ + return bus->ifs + bus->unit; +} + +static inline void ide_set_irq(IDEBus *bus) +{ + if (!(bus->cmd & IDE_CMD_DISABLE_IRQ)) { + qemu_irq_raise(bus->irq); + } +} + +/* hw/ide/core.c */ +extern const VMStateDescription vmstate_ide_bus; + +#define VMSTATE_IDE_BUS(_field, _state) \ + VMSTATE_STRUCT(_field, _state, 1, vmstate_ide_bus, IDEBus) + +#define VMSTATE_IDE_BUS_ARRAY(_field, _state, _num) \ + VMSTATE_STRUCT_ARRAY(_field, _state, _num, 1, vmstate_ide_bus, IDEBus) + +extern const VMStateDescription vmstate_ide_drive; + +#define VMSTATE_IDE_DRIVES(_field, _state) \ + VMSTATE_STRUCT_ARRAY(_field, _state, 2, 3, vmstate_ide_drive, IDEState) + +#define VMSTATE_IDE_DRIVE(_field, _state) \ + VMSTATE_STRUCT(_field, _state, 1, vmstate_ide_drive, IDEState) + +void ide_bus_reset(IDEBus *bus); +int64_t ide_get_sector(IDEState *s); +void ide_set_sector(IDEState *s, int64_t sector_num); + +void ide_start_dma(IDEState *s, BlockCompletionFunc *cb); +void dma_buf_commit(IDEState *s, uint32_t tx_bytes); +void ide_dma_error(IDEState *s); +void ide_abort_command(IDEState *s); + +void ide_atapi_cmd_ok(IDEState *s); +void ide_atapi_cmd_error(IDEState *s, int sense_key, int asc); +void ide_atapi_dma_restart(IDEState *s); 
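The IDE_RETRY_* bits defined above are the same flags that ide_restart_bh() in core.c (earlier in this patch) decodes when a request is resubmitted after a VM stop or I/O error. The standalone sketch below illustrates that encode/decode flow; it is not part of the patch, and the helper names mark_dma_retry() and describe_retry() are invented for illustration only. The decode follows the same priority order as ide_restart_bh(): DMA (with an optional TRIM variant) first, then PIO, then FLUSH, with the ATAPI case left to end_transfer_func; IDE_RETRY_HBA merely asks for the HBA to be kicked and is handled before any of these.

#include <stdbool.h>
#include <stdio.h>

#define IDE_RETRY_DMA   0x08
#define IDE_RETRY_PIO   0x10
#define IDE_RETRY_READ  0x20
#define IDE_RETRY_FLUSH 0x40
#define IDE_RETRY_TRIM  0x80
#define IDE_RETRY_HBA   0x100   /* only requests an HBA kick on restart */

/* Encode a failed DMA request: direction and TRIM are OR'ed into the DMA bit. */
static int mark_dma_retry(bool is_read, bool is_trim)
{
    int status = IDE_RETRY_DMA;
    if (is_read) {
        status |= IDE_RETRY_READ;
    }
    if (is_trim) {
        status |= IDE_RETRY_TRIM;
    }
    return status;
}

/* Decode in the same order ide_restart_bh() uses. */
static const char *describe_retry(int status)
{
    if (status & IDE_RETRY_DMA) {
        if (status & IDE_RETRY_TRIM) {
            return "restart DMA TRIM";
        }
        return (status & IDE_RETRY_READ) ? "restart DMA read"
                                         : "restart DMA write";
    } else if (status & IDE_RETRY_PIO) {
        return (status & IDE_RETRY_READ) ? "restart PIO read"
                                         : "restart PIO write";
    } else if (status & IDE_RETRY_FLUSH) {
        return "restart FLUSH CACHE";
    }
    return "ATAPI restart decided by end_transfer_func";
}

int main(void)
{
    /* A failed DMA read is resubmitted as a DMA read. */
    printf("%s\n", describe_retry(mark_dma_retry(true, false)));
    return 0;
}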
+void ide_atapi_io_error(IDEState *s, int ret); + +void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val); +uint32_t ide_ioport_read(void *opaque, uint32_t addr1); +uint32_t ide_status_read(void *opaque, uint32_t addr); +void ide_cmd_write(void *opaque, uint32_t addr, uint32_t val); +void ide_data_writew(void *opaque, uint32_t addr, uint32_t val); +uint32_t ide_data_readw(void *opaque, uint32_t addr); +void ide_data_writel(void *opaque, uint32_t addr, uint32_t val); +uint32_t ide_data_readl(void *opaque, uint32_t addr); + +int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind, + const char *version, const char *serial, const char *model, + uint64_t wwn, + uint32_t cylinders, uint32_t heads, uint32_t secs, + int chs_trans); +void ide_init2(IDEBus *bus, qemu_irq irq); +void ide_init_ioport(IDEBus *bus, ISADevice *isa, int iobase, int iobase2); +void ide_register_restart_cb(IDEBus *bus); + +void ide_exec_cmd(IDEBus *bus, uint32_t val); + +void ide_transfer_start(IDEState *s, uint8_t *buf, int size, + EndTransferFunc *end_transfer_func); +void ide_transfer_stop(IDEState *s); +void ide_set_inactive(IDEState *s, bool more); +BlockAIOCB *ide_issue_trim(BlockBackend *blk, + int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, + BlockCompletionFunc *cb, void *opaque); +BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num, + QEMUIOVector *iov, int nb_sectors, + BlockCompletionFunc *cb, void *opaque); + +/* hw/ide/atapi.c */ +void ide_atapi_cmd(IDEState *s); +void ide_atapi_cmd_reply_end(IDEState *s); + +/* hw/ide/qdev.c */ +void ide_bus_new(IDEBus *idebus, size_t idebus_size, DeviceState *dev, + int bus_id, int max_units); +IDEDevice *ide_create_drive(IDEBus *bus, int unit, DriveInfo *drive); + +#endif /* HW_IDE_INTERNAL_H */ diff --git a/src/hw/ide/isa.c b/src/hw/ide/isa.c new file mode 100644 index 0000000..9f80503 --- /dev/null +++ b/src/hw/ide/isa.c @@ -0,0 +1,134 @@ +/* + * QEMU IDE Emulation: ISA Bus support. + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include <hw/hw.h> +#include <hw/i386/pc.h> +#include <hw/isa/isa.h> +#include "sysemu/block-backend.h" +#include "sysemu/dma.h" + +#include <hw/ide/internal.h> + +/***********************************************************/ +/* ISA IDE definitions */ + +#define TYPE_ISA_IDE "isa-ide" +#define ISA_IDE(obj) OBJECT_CHECK(ISAIDEState, (obj), TYPE_ISA_IDE) + +typedef struct ISAIDEState { + ISADevice parent_obj; + + IDEBus bus; + uint32_t iobase; + uint32_t iobase2; + uint32_t isairq; + qemu_irq irq; +} ISAIDEState; + +static void isa_ide_reset(DeviceState *d) +{ + ISAIDEState *s = ISA_IDE(d); + + ide_bus_reset(&s->bus); +} + +static const VMStateDescription vmstate_ide_isa = { + .name = "isa-ide", + .version_id = 3, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_IDE_BUS(bus, ISAIDEState), + VMSTATE_IDE_DRIVES(bus.ifs, ISAIDEState), + VMSTATE_END_OF_LIST() + } +}; + +static void isa_ide_realizefn(DeviceState *dev, Error **errp) +{ + ISADevice *isadev = ISA_DEVICE(dev); + ISAIDEState *s = ISA_IDE(dev); + + ide_bus_new(&s->bus, sizeof(s->bus), dev, 0, 2); + ide_init_ioport(&s->bus, isadev, s->iobase, s->iobase2); + isa_init_irq(isadev, &s->irq, s->isairq); + ide_init2(&s->bus, s->irq); + vmstate_register(dev, 0, &vmstate_ide_isa, s); + ide_register_restart_cb(&s->bus); +} + +ISADevice *isa_ide_init(ISABus *bus, int iobase, int iobase2, int isairq, + DriveInfo *hd0, DriveInfo *hd1) +{ + DeviceState *dev; + ISADevice *isadev; + ISAIDEState *s; + + isadev = isa_create(bus, TYPE_ISA_IDE); + dev = DEVICE(isadev); + qdev_prop_set_uint32(dev, "iobase", iobase); + qdev_prop_set_uint32(dev, "iobase2", iobase2); + qdev_prop_set_uint32(dev, "irq", isairq); + qdev_init_nofail(dev); + + s = ISA_IDE(dev); + if (hd0) { + ide_create_drive(&s->bus, 0, hd0); + } + if (hd1) { + ide_create_drive(&s->bus, 1, hd1); + } + return isadev; +} + +static Property isa_ide_properties[] = { + DEFINE_PROP_UINT32("iobase", ISAIDEState, iobase, 0x1f0), + DEFINE_PROP_UINT32("iobase2", ISAIDEState, iobase2, 0x3f6), + DEFINE_PROP_UINT32("irq", ISAIDEState, isairq, 14), + DEFINE_PROP_END_OF_LIST(), +}; + +static void isa_ide_class_initfn(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = isa_ide_realizefn; + dc->fw_name = "ide"; + dc->reset = isa_ide_reset; + dc->props = isa_ide_properties; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo isa_ide_info = { + .name = TYPE_ISA_IDE, + .parent = TYPE_ISA_DEVICE, + .instance_size = sizeof(ISAIDEState), + .class_init = isa_ide_class_initfn, +}; + +static void isa_ide_register_types(void) +{ + type_register_static(&isa_ide_info); +} + +type_init(isa_ide_register_types) diff --git a/src/hw/ide/macio.c b/src/hw/ide/macio.c new file mode 100644 index 0000000..3ee962f --- /dev/null +++ b/src/hw/ide/macio.c @@ -0,0 +1,636 @@ +/* + * QEMU IDE Emulation: MacIO support. + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "hw/hw.h" +#include "hw/ppc/mac.h" +#include "hw/ppc/mac_dbdma.h" +#include "sysemu/block-backend.h" +#include "sysemu/dma.h" + +#include <hw/ide/internal.h> + +/* debug MACIO */ +// #define DEBUG_MACIO + +#ifdef DEBUG_MACIO +static const int debug_macio = 1; +#else +static const int debug_macio = 0; +#endif + +#define MACIO_DPRINTF(fmt, ...) do { \ + if (debug_macio) { \ + printf(fmt , ## __VA_ARGS__); \ + } \ + } while (0) + + +/***********************************************************/ +/* MacIO based PowerPC IDE */ + +#define MACIO_PAGE_SIZE 4096 + +/* + * Unaligned DMA read/write access functions required for OS X/Darwin which + * don't perform DMA transactions on sector boundaries. These functions are + * modelled on bdrv_co_do_preadv()/bdrv_co_do_pwritev() and so should be + * easy to remove if the unaligned block APIs are ever exposed. 
+ */ + +static void pmac_dma_read(BlockBackend *blk, + int64_t offset, unsigned int bytes, + void (*cb)(void *opaque, int ret), void *opaque) +{ + DBDMA_io *io = opaque; + MACIOIDEState *m = io->opaque; + IDEState *s = idebus_active_if(&m->bus); + dma_addr_t dma_addr, dma_len; + void *mem; + int64_t sector_num; + int nsector; + uint64_t align = BDRV_SECTOR_SIZE; + size_t head_bytes, tail_bytes; + + qemu_iovec_destroy(&io->iov); + qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1); + + sector_num = (offset >> 9); + nsector = (io->len >> 9); + + MACIO_DPRINTF("--- DMA read transfer (0x%" HWADDR_PRIx ",0x%x): " + "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len, + sector_num, nsector); + + dma_addr = io->addr; + dma_len = io->len; + mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len, + DMA_DIRECTION_FROM_DEVICE); + + if (offset & (align - 1)) { + head_bytes = offset & (align - 1); + + MACIO_DPRINTF("--- DMA unaligned head: sector %" PRId64 ", " + "discarding %zu bytes\n", sector_num, head_bytes); + + qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes); + + bytes += offset & (align - 1); + offset = offset & ~(align - 1); + } + + qemu_iovec_add(&io->iov, mem, io->len); + + if ((offset + bytes) & (align - 1)) { + tail_bytes = (offset + bytes) & (align - 1); + + MACIO_DPRINTF("--- DMA unaligned tail: sector %" PRId64 ", " + "discarding bytes %zu\n", sector_num, tail_bytes); + + qemu_iovec_add(&io->iov, &io->tail_remainder, align - tail_bytes); + bytes = ROUND_UP(bytes, align); + } + + s->io_buffer_size -= io->len; + s->io_buffer_index += io->len; + + io->len = 0; + + MACIO_DPRINTF("--- Block read transfer - sector_num: %" PRIx64 " " + "nsector: %x\n", (offset >> 9), (bytes >> 9)); + + m->aiocb = blk_aio_readv(blk, (offset >> 9), &io->iov, (bytes >> 9), + cb, io); +} + +static void pmac_dma_write(BlockBackend *blk, + int64_t offset, int bytes, + void (*cb)(void *opaque, int ret), void *opaque) +{ + DBDMA_io *io = opaque; + MACIOIDEState *m = io->opaque; + IDEState *s = idebus_active_if(&m->bus); + dma_addr_t dma_addr, dma_len; + void *mem; + int64_t sector_num; + int nsector; + uint64_t align = BDRV_SECTOR_SIZE; + size_t head_bytes, tail_bytes; + bool unaligned_head = false, unaligned_tail = false; + + qemu_iovec_destroy(&io->iov); + qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1); + + sector_num = (offset >> 9); + nsector = (io->len >> 9); + + MACIO_DPRINTF("--- DMA write transfer (0x%" HWADDR_PRIx ",0x%x): " + "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len, + sector_num, nsector); + + dma_addr = io->addr; + dma_len = io->len; + mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len, + DMA_DIRECTION_TO_DEVICE); + + if (offset & (align - 1)) { + head_bytes = offset & (align - 1); + sector_num = ((offset & ~(align - 1)) >> 9); + + MACIO_DPRINTF("--- DMA unaligned head: pre-reading head sector %" + PRId64 "\n", sector_num); + + blk_pread(s->blk, (sector_num << 9), &io->head_remainder, align); + + qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes); + qemu_iovec_add(&io->iov, mem, io->len); + + bytes += offset & (align - 1); + offset = offset & ~(align - 1); + + unaligned_head = true; + } + + if ((offset + bytes) & (align - 1)) { + tail_bytes = (offset + bytes) & (align - 1); + sector_num = (((offset + bytes) & ~(align - 1)) >> 9); + + MACIO_DPRINTF("--- DMA unaligned tail: pre-reading tail sector %" + PRId64 "\n", sector_num); + + blk_pread(s->blk, (sector_num << 9), &io->tail_remainder, align); + + if (!unaligned_head) { + 
qemu_iovec_add(&io->iov, mem, io->len); + } + + qemu_iovec_add(&io->iov, &io->tail_remainder + tail_bytes, + align - tail_bytes); + + bytes = ROUND_UP(bytes, align); + + unaligned_tail = true; + } + + if (!unaligned_head && !unaligned_tail) { + qemu_iovec_add(&io->iov, mem, io->len); + } + + s->io_buffer_size -= io->len; + s->io_buffer_index += io->len; + + io->len = 0; + + MACIO_DPRINTF("--- Block write transfer - sector_num: %" PRIx64 " " + "nsector: %x\n", (offset >> 9), (bytes >> 9)); + + m->aiocb = blk_aio_writev(blk, (offset >> 9), &io->iov, (bytes >> 9), + cb, io); +} + +static void pmac_dma_trim(BlockBackend *blk, + int64_t offset, int bytes, + void (*cb)(void *opaque, int ret), void *opaque) +{ + DBDMA_io *io = opaque; + MACIOIDEState *m = io->opaque; + IDEState *s = idebus_active_if(&m->bus); + dma_addr_t dma_addr, dma_len; + void *mem; + + qemu_iovec_destroy(&io->iov); + qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1); + + dma_addr = io->addr; + dma_len = io->len; + mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len, + DMA_DIRECTION_TO_DEVICE); + + qemu_iovec_add(&io->iov, mem, io->len); + s->io_buffer_size -= io->len; + s->io_buffer_index += io->len; + io->len = 0; + + m->aiocb = ide_issue_trim(blk, (offset >> 9), &io->iov, (bytes >> 9), + cb, io); +} + +static void pmac_ide_atapi_transfer_cb(void *opaque, int ret) +{ + DBDMA_io *io = opaque; + MACIOIDEState *m = io->opaque; + IDEState *s = idebus_active_if(&m->bus); + int64_t offset; + + MACIO_DPRINTF("pmac_ide_atapi_transfer_cb\n"); + + if (ret < 0) { + MACIO_DPRINTF("DMA error: %d\n", ret); + ide_atapi_io_error(s, ret); + goto done; + } + + if (!m->dma_active) { + MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n", + s->nsector, io->len, s->status); + /* data not ready yet, wait for the channel to get restarted */ + io->processing = false; + return; + } + + if (s->io_buffer_size <= 0) { + MACIO_DPRINTF("End of IDE transfer\n"); + ide_atapi_cmd_ok(s); + m->dma_active = false; + goto done; + } + + if (io->len == 0) { + MACIO_DPRINTF("End of DMA transfer\n"); + goto done; + } + + if (s->lba == -1) { + /* Non-block ATAPI transfer - just copy to RAM */ + s->io_buffer_size = MIN(s->io_buffer_size, io->len); + cpu_physical_memory_write(io->addr, s->io_buffer, s->io_buffer_size); + ide_atapi_cmd_ok(s); + m->dma_active = false; + goto done; + } + + /* Calculate current offset */ + offset = (int64_t)(s->lba << 11) + s->io_buffer_index; + + pmac_dma_read(s->blk, offset, io->len, pmac_ide_atapi_transfer_cb, io); + return; + +done: + if (ret < 0) { + block_acct_failed(blk_get_stats(s->blk), &s->acct); + } else { + block_acct_done(blk_get_stats(s->blk), &s->acct); + } + io->dma_end(opaque); + + return; +} + +static void pmac_ide_transfer_cb(void *opaque, int ret) +{ + DBDMA_io *io = opaque; + MACIOIDEState *m = io->opaque; + IDEState *s = idebus_active_if(&m->bus); + int64_t offset; + + MACIO_DPRINTF("pmac_ide_transfer_cb\n"); + + if (ret < 0) { + MACIO_DPRINTF("DMA error: %d\n", ret); + m->aiocb = NULL; + ide_dma_error(s); + goto done; + } + + if (!m->dma_active) { + MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n", + s->nsector, io->len, s->status); + /* data not ready yet, wait for the channel to get restarted */ + io->processing = false; + return; + } + + if (s->io_buffer_size <= 0) { + MACIO_DPRINTF("End of IDE transfer\n"); + s->status = READY_STAT | SEEK_STAT; + ide_set_irq(s->bus); + m->dma_active = false; + goto done; + } + + if (io->len == 0) { + MACIO_DPRINTF("End of DMA transfer\n"); + goto done; + } + 
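+    /*
+     * Each pass recomputes the byte offset from the current task-file
+     * sector plus io_buffer_index and hands the DBDMA request to one of
+     * the pmac_dma_* helpers above.  Those helpers absorb Darwin's
+     * sub-sector head/tail bytes (throw-away padding on reads,
+     * read-modify-write on writes) so the block layer only sees
+     * BDRV_SECTOR_SIZE-aligned requests; their completion re-enters this
+     * callback, which finishes once io->len or s->io_buffer_size hits zero.
+     */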
+ /* Calculate number of sectors */ + offset = (ide_get_sector(s) << 9) + s->io_buffer_index; + + switch (s->dma_cmd) { + case IDE_DMA_READ: + pmac_dma_read(s->blk, offset, io->len, pmac_ide_transfer_cb, io); + break; + case IDE_DMA_WRITE: + pmac_dma_write(s->blk, offset, io->len, pmac_ide_transfer_cb, io); + break; + case IDE_DMA_TRIM: + pmac_dma_trim(s->blk, offset, io->len, pmac_ide_transfer_cb, io); + break; + } + + return; + +done: + if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) { + if (ret < 0) { + block_acct_failed(blk_get_stats(s->blk), &s->acct); + } else { + block_acct_done(blk_get_stats(s->blk), &s->acct); + } + } + io->dma_end(opaque); +} + +static void pmac_ide_transfer(DBDMA_io *io) +{ + MACIOIDEState *m = io->opaque; + IDEState *s = idebus_active_if(&m->bus); + + MACIO_DPRINTF("\n"); + + if (s->drive_kind == IDE_CD) { + block_acct_start(blk_get_stats(s->blk), &s->acct, io->len, + BLOCK_ACCT_READ); + + pmac_ide_atapi_transfer_cb(io, 0); + return; + } + + switch (s->dma_cmd) { + case IDE_DMA_READ: + block_acct_start(blk_get_stats(s->blk), &s->acct, io->len, + BLOCK_ACCT_READ); + break; + case IDE_DMA_WRITE: + block_acct_start(blk_get_stats(s->blk), &s->acct, io->len, + BLOCK_ACCT_WRITE); + break; + default: + break; + } + + pmac_ide_transfer_cb(io, 0); +} + +static void pmac_ide_flush(DBDMA_io *io) +{ + MACIOIDEState *m = io->opaque; + + if (m->aiocb) { + blk_drain_all(); + } +} + +/* PowerMac IDE memory IO */ +static void pmac_ide_writeb (void *opaque, + hwaddr addr, uint32_t val) +{ + MACIOIDEState *d = opaque; + + addr = (addr & 0xFFF) >> 4; + switch (addr) { + case 1 ... 7: + ide_ioport_write(&d->bus, addr, val); + break; + case 8: + case 22: + ide_cmd_write(&d->bus, 0, val); + break; + default: + break; + } +} + +static uint32_t pmac_ide_readb (void *opaque,hwaddr addr) +{ + uint8_t retval; + MACIOIDEState *d = opaque; + + addr = (addr & 0xFFF) >> 4; + switch (addr) { + case 1 ... 
7: + retval = ide_ioport_read(&d->bus, addr); + break; + case 8: + case 22: + retval = ide_status_read(&d->bus, 0); + break; + default: + retval = 0xFF; + break; + } + return retval; +} + +static void pmac_ide_writew (void *opaque, + hwaddr addr, uint32_t val) +{ + MACIOIDEState *d = opaque; + + addr = (addr & 0xFFF) >> 4; + val = bswap16(val); + if (addr == 0) { + ide_data_writew(&d->bus, 0, val); + } +} + +static uint32_t pmac_ide_readw (void *opaque,hwaddr addr) +{ + uint16_t retval; + MACIOIDEState *d = opaque; + + addr = (addr & 0xFFF) >> 4; + if (addr == 0) { + retval = ide_data_readw(&d->bus, 0); + } else { + retval = 0xFFFF; + } + retval = bswap16(retval); + return retval; +} + +static void pmac_ide_writel (void *opaque, + hwaddr addr, uint32_t val) +{ + MACIOIDEState *d = opaque; + + addr = (addr & 0xFFF) >> 4; + val = bswap32(val); + if (addr == 0) { + ide_data_writel(&d->bus, 0, val); + } +} + +static uint32_t pmac_ide_readl (void *opaque,hwaddr addr) +{ + uint32_t retval; + MACIOIDEState *d = opaque; + + addr = (addr & 0xFFF) >> 4; + if (addr == 0) { + retval = ide_data_readl(&d->bus, 0); + } else { + retval = 0xFFFFFFFF; + } + retval = bswap32(retval); + return retval; +} + +static const MemoryRegionOps pmac_ide_ops = { + .old_mmio = { + .write = { + pmac_ide_writeb, + pmac_ide_writew, + pmac_ide_writel, + }, + .read = { + pmac_ide_readb, + pmac_ide_readw, + pmac_ide_readl, + }, + }, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_pmac = { + .name = "ide", + .version_id = 3, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_IDE_BUS(bus, MACIOIDEState), + VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState), + VMSTATE_END_OF_LIST() + } +}; + +static void macio_ide_reset(DeviceState *dev) +{ + MACIOIDEState *d = MACIO_IDE(dev); + + ide_bus_reset(&d->bus); +} + +static int ide_nop_int(IDEDMA *dma, int x) +{ + return 0; +} + +static int32_t ide_nop_int32(IDEDMA *dma, int32_t l) +{ + return 0; +} + +static void ide_dbdma_start(IDEDMA *dma, IDEState *s, + BlockCompletionFunc *cb) +{ + MACIOIDEState *m = container_of(dma, MACIOIDEState, dma); + + s->io_buffer_index = 0; + if (s->drive_kind == IDE_CD) { + s->io_buffer_size = s->packet_transfer_size; + } else { + s->io_buffer_size = s->nsector * BDRV_SECTOR_SIZE; + } + + MACIO_DPRINTF("\n\n------------ IDE transfer\n"); + MACIO_DPRINTF("buffer_size: %x buffer_index: %x\n", + s->io_buffer_size, s->io_buffer_index); + MACIO_DPRINTF("lba: %x size: %x\n", s->lba, s->io_buffer_size); + MACIO_DPRINTF("-------------------------\n"); + + m->dma_active = true; + DBDMA_kick(m->dbdma); +} + +static const IDEDMAOps dbdma_ops = { + .start_dma = ide_dbdma_start, + .prepare_buf = ide_nop_int32, + .rw_buf = ide_nop_int, +}; + +static void macio_ide_realizefn(DeviceState *dev, Error **errp) +{ + MACIOIDEState *s = MACIO_IDE(dev); + + ide_init2(&s->bus, s->irq); + + /* Register DMA callbacks */ + s->dma.ops = &dbdma_ops; + s->bus.dma = &s->dma; +} + +static void macio_ide_initfn(Object *obj) +{ + SysBusDevice *d = SYS_BUS_DEVICE(obj); + MACIOIDEState *s = MACIO_IDE(obj); + + ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2); + memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000); + sysbus_init_mmio(d, &s->mem); + sysbus_init_irq(d, &s->irq); + sysbus_init_irq(d, &s->dma_irq); +} + +static void macio_ide_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = macio_ide_realizefn; + dc->reset = macio_ide_reset; + dc->vmsd = &vmstate_pmac; + 
set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo macio_ide_type_info = { + .name = TYPE_MACIO_IDE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MACIOIDEState), + .instance_init = macio_ide_initfn, + .class_init = macio_ide_class_init, +}; + +static void macio_ide_register_types(void) +{ + type_register_static(&macio_ide_type_info); +} + +/* hd_table must contain 2 block drivers */ +void macio_ide_init_drives(MACIOIDEState *s, DriveInfo **hd_table) +{ + int i; + + for (i = 0; i < 2; i++) { + if (hd_table[i]) { + ide_create_drive(&s->bus, i, hd_table[i]); + } + } +} + +void macio_ide_register_dma(MACIOIDEState *s, void *dbdma, int channel) +{ + s->dbdma = dbdma; + DBDMA_register_channel(dbdma, channel, s->dma_irq, + pmac_ide_transfer, pmac_ide_flush, s); +} + +type_init(macio_ide_register_types) diff --git a/src/hw/ide/microdrive.c b/src/hw/ide/microdrive.c new file mode 100644 index 0000000..6639dd4 --- /dev/null +++ b/src/hw/ide/microdrive.c @@ -0,0 +1,637 @@ +/* + * QEMU IDE Emulation: microdrive (CF / PCMCIA) + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include <hw/hw.h> +#include <hw/i386/pc.h> +#include <hw/pcmcia.h> +#include "sysemu/block-backend.h" +#include "sysemu/dma.h" + +#include <hw/ide/internal.h> + +#define TYPE_MICRODRIVE "microdrive" +#define MICRODRIVE(obj) OBJECT_CHECK(MicroDriveState, (obj), TYPE_MICRODRIVE) + +/***********************************************************/ +/* CF-ATA Microdrive */ + +#define METADATA_SIZE 0x20 + +/* DSCM-1XXXX Microdrive hard disk with CF+ II / PCMCIA interface. 
*/ + +typedef struct MicroDriveState { + /*< private >*/ + PCMCIACardState parent_obj; + /*< public >*/ + + IDEBus bus; + uint32_t attr_base; + uint32_t io_base; + + /* Card state */ + uint8_t opt; + uint8_t stat; + uint8_t pins; + + uint8_t ctrl; + uint16_t io; + uint8_t cycle; +} MicroDriveState; + +/* Register bitfields */ +enum md_opt { + OPT_MODE_MMAP = 0, + OPT_MODE_IOMAP16 = 1, + OPT_MODE_IOMAP1 = 2, + OPT_MODE_IOMAP2 = 3, + OPT_MODE = 0x3f, + OPT_LEVIREQ = 0x40, + OPT_SRESET = 0x80, +}; +enum md_cstat { + STAT_INT = 0x02, + STAT_PWRDWN = 0x04, + STAT_XE = 0x10, + STAT_IOIS8 = 0x20, + STAT_SIGCHG = 0x40, + STAT_CHANGED = 0x80, +}; +enum md_pins { + PINS_MRDY = 0x02, + PINS_CRDY = 0x20, +}; +enum md_ctrl { + CTRL_IEN = 0x02, + CTRL_SRST = 0x04, +}; + +static inline void md_interrupt_update(MicroDriveState *s) +{ + PCMCIACardState *card = PCMCIA_CARD(s); + + if (card->slot == NULL) { + return; + } + + qemu_set_irq(card->slot->irq, + !(s->stat & STAT_INT) && /* Inverted */ + !(s->ctrl & (CTRL_IEN | CTRL_SRST)) && + !(s->opt & OPT_SRESET)); +} + +static void md_set_irq(void *opaque, int irq, int level) +{ + MicroDriveState *s = opaque; + + if (level) { + s->stat |= STAT_INT; + } else { + s->stat &= ~STAT_INT; + } + + md_interrupt_update(s); +} + +static void md_reset(DeviceState *dev) +{ + MicroDriveState *s = MICRODRIVE(dev); + + s->opt = OPT_MODE_MMAP; + s->stat = 0; + s->pins = 0; + s->cycle = 0; + s->ctrl = 0; + ide_bus_reset(&s->bus); +} + +static uint8_t md_attr_read(PCMCIACardState *card, uint32_t at) +{ + MicroDriveState *s = MICRODRIVE(card); + PCMCIACardClass *pcc = PCMCIA_CARD_GET_CLASS(card); + + if (at < s->attr_base) { + if (at < pcc->cis_len) { + return pcc->cis[at]; + } else { + return 0x00; + } + } + + at -= s->attr_base; + + switch (at) { + case 0x00: /* Configuration Option Register */ + return s->opt; + case 0x02: /* Card Configuration Status Register */ + if (s->ctrl & CTRL_IEN) { + return s->stat & ~STAT_INT; + } else { + return s->stat; + } + case 0x04: /* Pin Replacement Register */ + return (s->pins & PINS_CRDY) | 0x0c; + case 0x06: /* Socket and Copy Register */ + return 0x00; +#ifdef VERBOSE + default: + printf("%s: Bad attribute space register %02x\n", __FUNCTION__, at); +#endif + } + + return 0; +} + +static void md_attr_write(PCMCIACardState *card, uint32_t at, uint8_t value) +{ + MicroDriveState *s = MICRODRIVE(card); + + at -= s->attr_base; + + switch (at) { + case 0x00: /* Configuration Option Register */ + s->opt = value & 0xcf; + if (value & OPT_SRESET) { + device_reset(DEVICE(s)); + } + md_interrupt_update(s); + break; + case 0x02: /* Card Configuration Status Register */ + if ((s->stat ^ value) & STAT_PWRDWN) { + s->pins |= PINS_CRDY; + } + s->stat &= 0x82; + s->stat |= value & 0x74; + md_interrupt_update(s); + /* Word 170 in Identify Device must be equal to STAT_XE */ + break; + case 0x04: /* Pin Replacement Register */ + s->pins &= PINS_CRDY; + s->pins |= value & PINS_MRDY; + break; + case 0x06: /* Socket and Copy Register */ + break; + default: + printf("%s: Bad attribute space register %02x\n", __FUNCTION__, at); + } +} + +static uint16_t md_common_read(PCMCIACardState *card, uint32_t at) +{ + MicroDriveState *s = MICRODRIVE(card); + IDEState *ifs; + uint16_t ret; + at -= s->io_base; + + switch (s->opt & OPT_MODE) { + case OPT_MODE_MMAP: + if ((at & ~0x3ff) == 0x400) { + at = 0; + } + break; + case OPT_MODE_IOMAP16: + at &= 0xf; + break; + case OPT_MODE_IOMAP1: + if ((at & ~0xf) == 0x3f0) { + at -= 0x3e8; + } else if ((at & ~0xf) == 0x1f0) { + at 
-= 0x1f0; + } + break; + case OPT_MODE_IOMAP2: + if ((at & ~0xf) == 0x370) { + at -= 0x368; + } else if ((at & ~0xf) == 0x170) { + at -= 0x170; + } + } + + switch (at) { + case 0x0: /* Even RD Data */ + case 0x8: + return ide_data_readw(&s->bus, 0); + + /* TODO: 8-bit accesses */ + if (s->cycle) { + ret = s->io >> 8; + } else { + s->io = ide_data_readw(&s->bus, 0); + ret = s->io & 0xff; + } + s->cycle = !s->cycle; + return ret; + case 0x9: /* Odd RD Data */ + return s->io >> 8; + case 0xd: /* Error */ + return ide_ioport_read(&s->bus, 0x1); + case 0xe: /* Alternate Status */ + ifs = idebus_active_if(&s->bus); + if (ifs->blk) { + return ifs->status; + } else { + return 0; + } + case 0xf: /* Device Address */ + ifs = idebus_active_if(&s->bus); + return 0xc2 | ((~ifs->select << 2) & 0x3c); + default: + return ide_ioport_read(&s->bus, at); + } + + return 0; +} + +static void md_common_write(PCMCIACardState *card, uint32_t at, uint16_t value) +{ + MicroDriveState *s = MICRODRIVE(card); + at -= s->io_base; + + switch (s->opt & OPT_MODE) { + case OPT_MODE_MMAP: + if ((at & ~0x3ff) == 0x400) { + at = 0; + } + break; + case OPT_MODE_IOMAP16: + at &= 0xf; + break; + case OPT_MODE_IOMAP1: + if ((at & ~0xf) == 0x3f0) { + at -= 0x3e8; + } else if ((at & ~0xf) == 0x1f0) { + at -= 0x1f0; + } + break; + case OPT_MODE_IOMAP2: + if ((at & ~0xf) == 0x370) { + at -= 0x368; + } else if ((at & ~0xf) == 0x170) { + at -= 0x170; + } + } + + switch (at) { + case 0x0: /* Even WR Data */ + case 0x8: + ide_data_writew(&s->bus, 0, value); + break; + + /* TODO: 8-bit accesses */ + if (s->cycle) { + ide_data_writew(&s->bus, 0, s->io | (value << 8)); + } else { + s->io = value & 0xff; + } + s->cycle = !s->cycle; + break; + case 0x9: + s->io = value & 0xff; + s->cycle = !s->cycle; + break; + case 0xd: /* Features */ + ide_ioport_write(&s->bus, 0x1, value); + break; + case 0xe: /* Device Control */ + s->ctrl = value; + if (value & CTRL_SRST) { + device_reset(DEVICE(s)); + } + md_interrupt_update(s); + break; + default: + if (s->stat & STAT_PWRDWN) { + s->pins |= PINS_CRDY; + s->stat &= ~STAT_PWRDWN; + } + ide_ioport_write(&s->bus, at, value); + } +} + +static const VMStateDescription vmstate_microdrive = { + .name = "microdrive", + .version_id = 3, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT8(opt, MicroDriveState), + VMSTATE_UINT8(stat, MicroDriveState), + VMSTATE_UINT8(pins, MicroDriveState), + VMSTATE_UINT8(ctrl, MicroDriveState), + VMSTATE_UINT16(io, MicroDriveState), + VMSTATE_UINT8(cycle, MicroDriveState), + VMSTATE_IDE_BUS(bus, MicroDriveState), + VMSTATE_IDE_DRIVES(bus.ifs, MicroDriveState), + VMSTATE_END_OF_LIST() + } +}; + +static const uint8_t dscm1xxxx_cis[0x14a] = { + [0x000] = CISTPL_DEVICE, /* 5V Device Information */ + [0x002] = 0x03, /* Tuple length = 4 bytes */ + [0x004] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */ + [0x006] = 0x01, /* Size = 2K bytes */ + [0x008] = CISTPL_ENDMARK, + + [0x00a] = CISTPL_DEVICE_OC, /* Additional Device Information */ + [0x00c] = 0x04, /* Tuple length = 4 byest */ + [0x00e] = 0x03, /* Conditions: Ext = 0, Vcc 3.3V, MWAIT = 1 */ + [0x010] = 0xdb, /* ID: DTYPE_FUNCSPEC, non WP, DSPEED_150NS */ + [0x012] = 0x01, /* Size = 2K bytes */ + [0x014] = CISTPL_ENDMARK, + + [0x016] = CISTPL_JEDEC_C, /* JEDEC ID */ + [0x018] = 0x02, /* Tuple length = 2 bytes */ + [0x01a] = 0xdf, /* PC Card ATA with no Vpp required */ + [0x01c] = 0x01, + + [0x01e] = CISTPL_MANFID, /* Manufacture ID */ + [0x020] = 0x04, /* Tuple length = 4 bytes */ + [0x022] = 0xa4, /* 
TPLMID_MANF = 00a4 (IBM) */ + [0x024] = 0x00, + [0x026] = 0x00, /* PLMID_CARD = 0000 */ + [0x028] = 0x00, + + [0x02a] = CISTPL_VERS_1, /* Level 1 Version */ + [0x02c] = 0x12, /* Tuple length = 23 bytes */ + [0x02e] = 0x04, /* Major Version = JEIDA 4.2 / PCMCIA 2.1 */ + [0x030] = 0x01, /* Minor Version = 1 */ + [0x032] = 'I', + [0x034] = 'B', + [0x036] = 'M', + [0x038] = 0x00, + [0x03a] = 'm', + [0x03c] = 'i', + [0x03e] = 'c', + [0x040] = 'r', + [0x042] = 'o', + [0x044] = 'd', + [0x046] = 'r', + [0x048] = 'i', + [0x04a] = 'v', + [0x04c] = 'e', + [0x04e] = 0x00, + [0x050] = CISTPL_ENDMARK, + + [0x052] = CISTPL_FUNCID, /* Function ID */ + [0x054] = 0x02, /* Tuple length = 2 bytes */ + [0x056] = 0x04, /* TPLFID_FUNCTION = Fixed Disk */ + [0x058] = 0x01, /* TPLFID_SYSINIT: POST = 1, ROM = 0 */ + + [0x05a] = CISTPL_FUNCE, /* Function Extension */ + [0x05c] = 0x02, /* Tuple length = 2 bytes */ + [0x05e] = 0x01, /* TPLFE_TYPE = Disk Device Interface */ + [0x060] = 0x01, /* TPLFE_DATA = PC Card ATA Interface */ + + [0x062] = CISTPL_FUNCE, /* Function Extension */ + [0x064] = 0x03, /* Tuple length = 3 bytes */ + [0x066] = 0x02, /* TPLFE_TYPE = Basic PC Card ATA Interface */ + [0x068] = 0x08, /* TPLFE_DATA: Rotating, Unique, Single */ + [0x06a] = 0x0f, /* TPLFE_DATA: Sleep, Standby, Idle, Auto */ + + [0x06c] = CISTPL_CONFIG, /* Configuration */ + [0x06e] = 0x05, /* Tuple length = 5 bytes */ + [0x070] = 0x01, /* TPCC_RASZ = 2 bytes, TPCC_RMSZ = 1 byte */ + [0x072] = 0x07, /* TPCC_LAST = 7 */ + [0x074] = 0x00, /* TPCC_RADR = 0200 */ + [0x076] = 0x02, + [0x078] = 0x0f, /* TPCC_RMSK = 200, 202, 204, 206 */ + + [0x07a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x07c] = 0x0b, /* Tuple length = 11 bytes */ + [0x07e] = 0xc0, /* TPCE_INDX = Memory Mode, Default, Iface */ + [0x080] = 0xc0, /* TPCE_IF = Memory, no BVDs, no WP, READY */ + [0x082] = 0xa1, /* TPCE_FS = Vcc only, no I/O, Memory, Misc */ + [0x084] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ + [0x086] = 0x55, /* NomV: 5.0 V */ + [0x088] = 0x4d, /* MinV: 4.5 V */ + [0x08a] = 0x5d, /* MaxV: 5.5 V */ + [0x08c] = 0x4e, /* Peakl: 450 mA */ + [0x08e] = 0x08, /* TPCE_MS = 1 window, 1 byte, Host address */ + [0x090] = 0x00, /* Window descriptor: Window length = 0 */ + [0x092] = 0x20, /* TPCE_MI: support power down mode, RW */ + + [0x094] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x096] = 0x06, /* Tuple length = 6 bytes */ + [0x098] = 0x00, /* TPCE_INDX = Memory Mode, no Default */ + [0x09a] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x09c] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x09e] = 0xb5, /* NomV: 3.3 V */ + [0x0a0] = 0x1e, + [0x0a2] = 0x3e, /* Peakl: 350 mA */ + + [0x0a4] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0a6] = 0x0d, /* Tuple length = 13 bytes */ + [0x0a8] = 0xc1, /* TPCE_INDX = I/O and Memory Mode, Default */ + [0x0aa] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ + [0x0ac] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ + [0x0ae] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ + [0x0b0] = 0x55, /* NomV: 5.0 V */ + [0x0b2] = 0x4d, /* MinV: 4.5 V */ + [0x0b4] = 0x5d, /* MaxV: 5.5 V */ + [0x0b6] = 0x4e, /* Peakl: 450 mA */ + [0x0b8] = 0x64, /* TPCE_IO = 16-byte boundary, 16/8 accesses */ + [0x0ba] = 0xf0, /* TPCE_IR = MASK, Level, Pulse, Share */ + [0x0bc] = 0xff, /* IRQ0..IRQ7 supported */ + [0x0be] = 0xff, /* IRQ8..IRQ15 supported */ + [0x0c0] = 0x20, /* TPCE_MI = support power down mode */ + + [0x0c2] = 
CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0c4] = 0x06, /* Tuple length = 6 bytes */ + [0x0c6] = 0x01, /* TPCE_INDX = I/O and Memory Mode */ + [0x0c8] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x0ca] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x0cc] = 0xb5, /* NomV: 3.3 V */ + [0x0ce] = 0x1e, + [0x0d0] = 0x3e, /* Peakl: 350 mA */ + + [0x0d2] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0d4] = 0x12, /* Tuple length = 18 bytes */ + [0x0d6] = 0xc2, /* TPCE_INDX = I/O Primary Mode */ + [0x0d8] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ + [0x0da] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ + [0x0dc] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ + [0x0de] = 0x55, /* NomV: 5.0 V */ + [0x0e0] = 0x4d, /* MinV: 4.5 V */ + [0x0e2] = 0x5d, /* MaxV: 5.5 V */ + [0x0e4] = 0x4e, /* Peakl: 450 mA */ + [0x0e6] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */ + [0x0e8] = 0x61, /* Range: 2 fields, 2 bytes addr, 1 byte len */ + [0x0ea] = 0xf0, /* Field 1 address = 0x01f0 */ + [0x0ec] = 0x01, + [0x0ee] = 0x07, /* Address block length = 8 */ + [0x0f0] = 0xf6, /* Field 2 address = 0x03f6 */ + [0x0f2] = 0x03, + [0x0f4] = 0x01, /* Address block length = 2 */ + [0x0f6] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */ + [0x0f8] = 0x20, /* TPCE_MI = support power down mode */ + + [0x0fa] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x0fc] = 0x06, /* Tuple length = 6 bytes */ + [0x0fe] = 0x02, /* TPCE_INDX = I/O Primary Mode, no Default */ + [0x100] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x102] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x104] = 0xb5, /* NomV: 3.3 V */ + [0x106] = 0x1e, + [0x108] = 0x3e, /* Peakl: 350 mA */ + + [0x10a] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x10c] = 0x12, /* Tuple length = 18 bytes */ + [0x10e] = 0xc3, /* TPCE_INDX = I/O Secondary Mode, Default */ + [0x110] = 0x41, /* TPCE_IF = I/O and Memory, no BVD, no WP */ + [0x112] = 0x99, /* TPCE_FS = Vcc only, I/O, Interrupt, Misc */ + [0x114] = 0x27, /* NomV = 1, MinV = 1, MaxV = 1, Peakl = 1 */ + [0x116] = 0x55, /* NomV: 5.0 V */ + [0x118] = 0x4d, /* MinV: 4.5 V */ + [0x11a] = 0x5d, /* MaxV: 5.5 V */ + [0x11c] = 0x4e, /* Peakl: 450 mA */ + [0x11e] = 0xea, /* TPCE_IO = 1K boundary, 16/8 access, Range */ + [0x120] = 0x61, /* Range: 2 fields, 2 byte addr, 1 byte len */ + [0x122] = 0x70, /* Field 1 address = 0x0170 */ + [0x124] = 0x01, + [0x126] = 0x07, /* Address block length = 8 */ + [0x128] = 0x76, /* Field 2 address = 0x0376 */ + [0x12a] = 0x03, + [0x12c] = 0x01, /* Address block length = 2 */ + [0x12e] = 0xee, /* TPCE_IR = IRQ E, Level, Pulse, Share */ + [0x130] = 0x20, /* TPCE_MI = support power down mode */ + + [0x132] = CISTPL_CFTABLE_ENTRY, /* 16-bit PC Card Configuration */ + [0x134] = 0x06, /* Tuple length = 6 bytes */ + [0x136] = 0x03, /* TPCE_INDX = I/O Secondary Mode */ + [0x138] = 0x01, /* TPCE_FS = Vcc only, no I/O, no Memory */ + [0x13a] = 0x21, /* NomV = 1, MinV = 0, MaxV = 0, Peakl = 1 */ + [0x13c] = 0xb5, /* NomV: 3.3 V */ + [0x13e] = 0x1e, + [0x140] = 0x3e, /* Peakl: 350 mA */ + + [0x142] = CISTPL_NO_LINK, /* No Link */ + [0x144] = 0x00, /* Tuple length = 0 bytes */ + + [0x146] = CISTPL_END, /* Tuple End */ +}; + +#define TYPE_DSCM1XXXX "dscm1xxxx" + +static int dscm1xxxx_attach(PCMCIACardState *card) +{ + MicroDriveState *md = MICRODRIVE(card); + PCMCIACardClass *pcc = PCMCIA_CARD_GET_CLASS(card); + + md->attr_base = pcc->cis[0x74] | (pcc->cis[0x76] << 
8); + md->io_base = 0x0; + + device_reset(DEVICE(md)); + md_interrupt_update(md); + + return 0; +} + +static int dscm1xxxx_detach(PCMCIACardState *card) +{ + MicroDriveState *md = MICRODRIVE(card); + + device_reset(DEVICE(md)); + return 0; +} + +PCMCIACardState *dscm1xxxx_init(DriveInfo *dinfo) +{ + MicroDriveState *md; + + md = MICRODRIVE(object_new(TYPE_DSCM1XXXX)); + qdev_init_nofail(DEVICE(md)); + + if (dinfo != NULL) { + ide_create_drive(&md->bus, 0, dinfo); + } + md->bus.ifs[0].drive_kind = IDE_CFATA; + md->bus.ifs[0].mdata_size = METADATA_SIZE; + md->bus.ifs[0].mdata_storage = g_malloc0(METADATA_SIZE); + + return PCMCIA_CARD(md); +} + +static void dscm1xxxx_class_init(ObjectClass *oc, void *data) +{ + PCMCIACardClass *pcc = PCMCIA_CARD_CLASS(oc); + + pcc->cis = dscm1xxxx_cis; + pcc->cis_len = sizeof(dscm1xxxx_cis); + + pcc->attach = dscm1xxxx_attach; + pcc->detach = dscm1xxxx_detach; +} + +static const TypeInfo dscm1xxxx_type_info = { + .name = TYPE_DSCM1XXXX, + .parent = TYPE_MICRODRIVE, + .class_init = dscm1xxxx_class_init, +}; + +static void microdrive_realize(DeviceState *dev, Error **errp) +{ + MicroDriveState *md = MICRODRIVE(dev); + + ide_init2(&md->bus, qemu_allocate_irq(md_set_irq, md, 0)); +} + +static void microdrive_init(Object *obj) +{ + MicroDriveState *md = MICRODRIVE(obj); + + ide_bus_new(&md->bus, sizeof(md->bus), DEVICE(obj), 0, 1); +} + +static void microdrive_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PCMCIACardClass *pcc = PCMCIA_CARD_CLASS(oc); + + pcc->attr_read = md_attr_read; + pcc->attr_write = md_attr_write; + pcc->common_read = md_common_read; + pcc->common_write = md_common_write; + pcc->io_read = md_common_read; + pcc->io_write = md_common_write; + + dc->realize = microdrive_realize; + dc->reset = md_reset; + dc->vmsd = &vmstate_microdrive; +} + +static const TypeInfo microdrive_type_info = { + .name = TYPE_MICRODRIVE, + .parent = TYPE_PCMCIA_CARD, + .instance_size = sizeof(MicroDriveState), + .instance_init = microdrive_init, + .abstract = true, + .class_init = microdrive_class_init, +}; + +static void microdrive_register_types(void) +{ + type_register_static(&microdrive_type_info); + type_register_static(&dscm1xxxx_type_info); +} + +type_init(microdrive_register_types) diff --git a/src/hw/ide/mmio.c b/src/hw/ide/mmio.c new file mode 100644 index 0000000..b6ce62a --- /dev/null +++ b/src/hw/ide/mmio.c @@ -0,0 +1,183 @@ +/* + * QEMU IDE Emulation: mmio support (for embedded). + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "hw/hw.h" +#include "hw/sysbus.h" +#include "sysemu/block-backend.h" +#include "sysemu/dma.h" + +#include <hw/ide/internal.h> + +/***********************************************************/ +/* MMIO based ide port + * This emulates IDE device connected directly to the CPU bus without + * dedicated ide controller, which is often seen on embedded boards. + */ + +#define TYPE_MMIO_IDE "mmio-ide" +#define MMIO_IDE(obj) OBJECT_CHECK(MMIOState, (obj), TYPE_MMIO_IDE) + +typedef struct MMIOIDEState { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + IDEBus bus; + + uint32_t shift; + qemu_irq irq; + MemoryRegion iomem1, iomem2; +} MMIOState; + +static void mmio_ide_reset(DeviceState *dev) +{ + MMIOState *s = MMIO_IDE(dev); + + ide_bus_reset(&s->bus); +} + +static uint64_t mmio_ide_read(void *opaque, hwaddr addr, + unsigned size) +{ + MMIOState *s = opaque; + addr >>= s->shift; + if (addr & 7) + return ide_ioport_read(&s->bus, addr); + else + return ide_data_readw(&s->bus, 0); +} + +static void mmio_ide_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + MMIOState *s = opaque; + addr >>= s->shift; + if (addr & 7) + ide_ioport_write(&s->bus, addr, val); + else + ide_data_writew(&s->bus, 0, val); +} + +static const MemoryRegionOps mmio_ide_ops = { + .read = mmio_ide_read, + .write = mmio_ide_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static uint64_t mmio_ide_status_read(void *opaque, hwaddr addr, + unsigned size) +{ + MMIOState *s= opaque; + return ide_status_read(&s->bus, 0); +} + +static void mmio_ide_cmd_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + MMIOState *s = opaque; + ide_cmd_write(&s->bus, 0, val); +} + +static const MemoryRegionOps mmio_ide_cs_ops = { + .read = mmio_ide_status_read, + .write = mmio_ide_cmd_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static const VMStateDescription vmstate_ide_mmio = { + .name = "mmio-ide", + .version_id = 3, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_IDE_BUS(bus, MMIOState), + VMSTATE_IDE_DRIVES(bus.ifs, MMIOState), + VMSTATE_END_OF_LIST() + } +}; + +static void mmio_ide_realizefn(DeviceState *dev, Error **errp) +{ + SysBusDevice *d = SYS_BUS_DEVICE(dev); + MMIOState *s = MMIO_IDE(dev); + + ide_init2(&s->bus, s->irq); + + memory_region_init_io(&s->iomem1, OBJECT(s), &mmio_ide_ops, s, + "ide-mmio.1", 16 << s->shift); + memory_region_init_io(&s->iomem2, OBJECT(s), &mmio_ide_cs_ops, s, + "ide-mmio.2", 2 << s->shift); + sysbus_init_mmio(d, &s->iomem1); + sysbus_init_mmio(d, &s->iomem2); +} + +static void mmio_ide_initfn(Object *obj) +{ + SysBusDevice *d = SYS_BUS_DEVICE(obj); + MMIOState *s = MMIO_IDE(obj); + + ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2); + sysbus_init_irq(d, &s->irq); +} + +static Property mmio_ide_properties[] = { + DEFINE_PROP_UINT32("shift", MMIOState, shift, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void mmio_ide_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = mmio_ide_realizefn; + dc->reset = mmio_ide_reset; + dc->props = mmio_ide_properties; + dc->vmsd = &vmstate_ide_mmio; +} + +static const TypeInfo mmio_ide_type_info = { + .name = TYPE_MMIO_IDE, + .parent = TYPE_SYS_BUS_DEVICE, + 
.instance_size = sizeof(MMIOState), + .instance_init = mmio_ide_initfn, + .class_init = mmio_ide_class_init, +}; + +static void mmio_ide_register_types(void) +{ + type_register_static(&mmio_ide_type_info); +} + +void mmio_ide_init_drives(DeviceState *dev, DriveInfo *hd0, DriveInfo *hd1) +{ + MMIOState *s = MMIO_IDE(dev); + + if (hd0 != NULL) { + ide_create_drive(&s->bus, 0, hd0); + } + if (hd1 != NULL) { + ide_create_drive(&s->bus, 1, hd1); + } +} + +type_init(mmio_ide_register_types) diff --git a/src/hw/ide/pci.c b/src/hw/ide/pci.c new file mode 100644 index 0000000..37dbc29 --- /dev/null +++ b/src/hw/ide/pci.c @@ -0,0 +1,497 @@ +/* + * QEMU IDE Emulation: PCI Bus support. + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include <hw/hw.h> +#include <hw/i386/pc.h> +#include <hw/pci/pci.h> +#include <hw/isa/isa.h> +#include "sysemu/block-backend.h" +#include "sysemu/dma.h" +#include "qemu/error-report.h" +#include <hw/ide/pci.h> + +#define BMDMA_PAGE_SIZE 4096 + +#define BM_MIGRATION_COMPAT_STATUS_BITS \ + (IDE_RETRY_DMA | IDE_RETRY_PIO | \ + IDE_RETRY_READ | IDE_RETRY_FLUSH) + +static void bmdma_start_dma(IDEDMA *dma, IDEState *s, + BlockCompletionFunc *dma_cb) +{ + BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma); + + bm->dma_cb = dma_cb; + bm->cur_prd_last = 0; + bm->cur_prd_addr = 0; + bm->cur_prd_len = 0; + + if (bm->status & BM_STATUS_DMAING) { + bm->dma_cb(bmdma_active_if(bm), 0); + } +} + +/** + * Prepare an sglist based on available PRDs. + * @limit: How many bytes to prepare total. + * + * Returns the number of bytes prepared, -1 on error. + * IDEState.io_buffer_size will contain the number of bytes described + * by the PRDs, whether or not we added them to the sglist. 
+ */ +static int32_t bmdma_prepare_buf(IDEDMA *dma, int32_t limit) +{ + BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma); + IDEState *s = bmdma_active_if(bm); + PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev); + struct { + uint32_t addr; + uint32_t size; + } prd; + int l, len; + + pci_dma_sglist_init(&s->sg, pci_dev, + s->nsector / (BMDMA_PAGE_SIZE / 512) + 1); + s->io_buffer_size = 0; + for(;;) { + if (bm->cur_prd_len == 0) { + /* end of table (with a fail safe of one page) */ + if (bm->cur_prd_last || + (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) { + return s->sg.size; + } + pci_dma_read(pci_dev, bm->cur_addr, &prd, 8); + bm->cur_addr += 8; + prd.addr = le32_to_cpu(prd.addr); + prd.size = le32_to_cpu(prd.size); + len = prd.size & 0xfffe; + if (len == 0) + len = 0x10000; + bm->cur_prd_len = len; + bm->cur_prd_addr = prd.addr; + bm->cur_prd_last = (prd.size & 0x80000000); + } + l = bm->cur_prd_len; + if (l > 0) { + uint64_t sg_len; + + /* Don't add extra bytes to the SGList; consume any remaining + * PRDs from the guest, but ignore them. */ + sg_len = MIN(limit - s->sg.size, bm->cur_prd_len); + if (sg_len) { + qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len); + } + + bm->cur_prd_addr += l; + bm->cur_prd_len -= l; + s->io_buffer_size += l; + } + } + + qemu_sglist_destroy(&s->sg); + s->io_buffer_size = 0; + return -1; +} + +/* return 0 if buffer completed */ +static int bmdma_rw_buf(IDEDMA *dma, int is_write) +{ + BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma); + IDEState *s = bmdma_active_if(bm); + PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev); + struct { + uint32_t addr; + uint32_t size; + } prd; + int l, len; + + for(;;) { + l = s->io_buffer_size - s->io_buffer_index; + if (l <= 0) + break; + if (bm->cur_prd_len == 0) { + /* end of table (with a fail safe of one page) */ + if (bm->cur_prd_last || + (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) + return 0; + pci_dma_read(pci_dev, bm->cur_addr, &prd, 8); + bm->cur_addr += 8; + prd.addr = le32_to_cpu(prd.addr); + prd.size = le32_to_cpu(prd.size); + len = prd.size & 0xfffe; + if (len == 0) + len = 0x10000; + bm->cur_prd_len = len; + bm->cur_prd_addr = prd.addr; + bm->cur_prd_last = (prd.size & 0x80000000); + } + if (l > bm->cur_prd_len) + l = bm->cur_prd_len; + if (l > 0) { + if (is_write) { + pci_dma_write(pci_dev, bm->cur_prd_addr, + s->io_buffer + s->io_buffer_index, l); + } else { + pci_dma_read(pci_dev, bm->cur_prd_addr, + s->io_buffer + s->io_buffer_index, l); + } + bm->cur_prd_addr += l; + bm->cur_prd_len -= l; + s->io_buffer_index += l; + } + } + return 1; +} + +static void bmdma_set_inactive(IDEDMA *dma, bool more) +{ + BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma); + + bm->dma_cb = NULL; + if (more) { + bm->status |= BM_STATUS_DMAING; + } else { + bm->status &= ~BM_STATUS_DMAING; + } +} + +static void bmdma_restart_dma(IDEDMA *dma) +{ + BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma); + + bm->cur_addr = bm->addr; +} + +static void bmdma_cancel(BMDMAState *bm) +{ + if (bm->status & BM_STATUS_DMAING) { + /* cancel DMA request */ + bmdma_set_inactive(&bm->dma, false); + } +} + +static void bmdma_reset(IDEDMA *dma) +{ + BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma); + +#ifdef DEBUG_IDE + printf("ide: dma_reset\n"); +#endif + bmdma_cancel(bm); + bm->cmd = 0; + bm->status = 0; + bm->addr = 0; + bm->cur_addr = 0; + bm->cur_prd_last = 0; + bm->cur_prd_addr = 0; + bm->cur_prd_len = 0; +} + +static void bmdma_irq(void *opaque, int n, int level) +{ + BMDMAState *bm = opaque; + + if (!level) { + /* pass through lower */ + 
qemu_set_irq(bm->irq, level); + return; + } + + bm->status |= BM_STATUS_INT; + + /* trigger the real irq */ + qemu_set_irq(bm->irq, level); +} + +void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val) +{ +#ifdef DEBUG_IDE + printf("%s: 0x%08x\n", __func__, val); +#endif + + /* Ignore writes to SSBM if it keeps the old value */ + if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) { + if (!(val & BM_CMD_START)) { + /* First invoke the callbacks of all buffered requests + * and flag those requests as orphaned. Ideally there + * are no unbuffered (scatter-gather DMA or write) requests + * pending and we can avoid having to drain. */ + IDEBufferedRequest *req; + IDEState *s = idebus_active_if(bm->bus); + QLIST_FOREACH(req, &s->buffered_requests, list) { + if (!req->orphaned) { +#ifdef DEBUG_IDE + printf("%s: invoking cb %p of buffered request %p with" + " -ECANCELED\n", __func__, req->original_cb, req); +#endif + req->original_cb(req->original_opaque, -ECANCELED); + } + req->orphaned = true; + } + /* + * We can't cancel scatter-gather DMA in the middle of the + * operation, or a partial (not full) DMA transfer would reach + * the storage, so we wait for completion instead (we behave as + * if the DMA had already completed by the time the guest tried + * to cancel it by clearing BM_CMD_START in bmdma_cmd_writeb). + * + * In the future we will be able to cancel the I/O safely, once + * the whole DMA operation is submitted to disk as a single aio + * request using preadv/pwritev. + */ + if (bm->bus->dma->aiocb) { +#ifdef DEBUG_IDE + printf("%s: draining all remaining requests\n", __func__); +#endif + blk_drain_all(); + assert(bm->bus->dma->aiocb == NULL); + } + bm->status &= ~BM_STATUS_DMAING; + } else { + bm->cur_addr = bm->addr; + if (!(bm->status & BM_STATUS_DMAING)) { + bm->status |= BM_STATUS_DMAING; + /* start dma transfer if possible */ + if (bm->dma_cb) + bm->dma_cb(bmdma_active_if(bm), 0); + } + } + } + + bm->cmd = val & 0x09; +} + +static uint64_t bmdma_addr_read(void *opaque, hwaddr addr, + unsigned width) +{ + BMDMAState *bm = opaque; + uint32_t mask = (1ULL << (width * 8)) - 1; + uint64_t data; + + data = (bm->addr >> (addr * 8)) & mask; +#ifdef DEBUG_IDE + printf("%s: 0x%08x\n", __func__, (unsigned)data); +#endif + return data; +} + +static void bmdma_addr_write(void *opaque, hwaddr addr, + uint64_t data, unsigned width) +{ + BMDMAState *bm = opaque; + int shift = addr * 8; + uint32_t mask = (1ULL << (width * 8)) - 1; + +#ifdef DEBUG_IDE + printf("%s: 0x%08x\n", __func__, (unsigned)data); +#endif + bm->addr &= ~(mask << shift); + bm->addr |= ((data & mask) << shift) & ~3; +} + +MemoryRegionOps bmdma_addr_ioport_ops = { + .read = bmdma_addr_read, + .write = bmdma_addr_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static bool ide_bmdma_current_needed(void *opaque) +{ + BMDMAState *bm = opaque; + + return (bm->cur_prd_len != 0); +} + +static bool ide_bmdma_status_needed(void *opaque) +{ + BMDMAState *bm = opaque; + + /* Older versions abused some bits in the status register for internal + * error state. 
If any of these bits are set, we must add a subsection to + * transfer the real status register */ + uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS; + + return ((bm->status & abused_bits) != 0); +} + +static void ide_bmdma_pre_save(void *opaque) +{ + BMDMAState *bm = opaque; + uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS; + + bm->migration_retry_unit = bm->bus->retry_unit; + bm->migration_retry_sector_num = bm->bus->retry_sector_num; + bm->migration_retry_nsector = bm->bus->retry_nsector; + bm->migration_compat_status = + (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits); +} + +/* This function accesses bm->bus->error_status which is loaded only after + * BMDMA itself. This is why the function is called from ide_pci_post_load + * instead of being registered with VMState where it would run too early. */ +static int ide_bmdma_post_load(void *opaque, int version_id) +{ + BMDMAState *bm = opaque; + uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS; + + if (bm->status == 0) { + bm->status = bm->migration_compat_status & ~abused_bits; + bm->bus->error_status |= bm->migration_compat_status & abused_bits; + } + if (bm->bus->error_status) { + bm->bus->retry_sector_num = bm->migration_retry_sector_num; + bm->bus->retry_nsector = bm->migration_retry_nsector; + bm->bus->retry_unit = bm->migration_retry_unit; + } + + return 0; +} + +static const VMStateDescription vmstate_bmdma_current = { + .name = "ide bmdma_current", + .version_id = 1, + .minimum_version_id = 1, + .needed = ide_bmdma_current_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT32(cur_addr, BMDMAState), + VMSTATE_UINT32(cur_prd_last, BMDMAState), + VMSTATE_UINT32(cur_prd_addr, BMDMAState), + VMSTATE_UINT32(cur_prd_len, BMDMAState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_bmdma_status = { + .name ="ide bmdma/status", + .version_id = 1, + .minimum_version_id = 1, + .needed = ide_bmdma_status_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(status, BMDMAState), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_bmdma = { + .name = "ide bmdma", + .version_id = 3, + .minimum_version_id = 0, + .pre_save = ide_bmdma_pre_save, + .fields = (VMStateField[]) { + VMSTATE_UINT8(cmd, BMDMAState), + VMSTATE_UINT8(migration_compat_status, BMDMAState), + VMSTATE_UINT32(addr, BMDMAState), + VMSTATE_INT64(migration_retry_sector_num, BMDMAState), + VMSTATE_UINT32(migration_retry_nsector, BMDMAState), + VMSTATE_UINT8(migration_retry_unit, BMDMAState), + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription*[]) { + &vmstate_bmdma_current, + &vmstate_bmdma_status, + NULL + } +}; + +static int ide_pci_post_load(void *opaque, int version_id) +{ + PCIIDEState *d = opaque; + int i; + + for(i = 0; i < 2; i++) { + /* current versions always store 0/1, but older version + stored bigger values. 
We only need last bit */ + d->bmdma[i].migration_retry_unit &= 1; + ide_bmdma_post_load(&d->bmdma[i], -1); + } + + return 0; +} + +const VMStateDescription vmstate_ide_pci = { + .name = "ide", + .version_id = 3, + .minimum_version_id = 0, + .post_load = ide_pci_post_load, + .fields = (VMStateField[]) { + VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState), + VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0, + vmstate_bmdma, BMDMAState), + VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2), + VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState), + VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState), + VMSTATE_END_OF_LIST() + } +}; + +void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table) +{ + PCIIDEState *d = PCI_IDE(dev); + static const int bus[4] = { 0, 0, 1, 1 }; + static const int unit[4] = { 0, 1, 0, 1 }; + int i; + + for (i = 0; i < 4; i++) { + if (hd_table[i] == NULL) + continue; + ide_create_drive(d->bus+bus[i], unit[i], hd_table[i]); + } +} + +static const struct IDEDMAOps bmdma_ops = { + .start_dma = bmdma_start_dma, + .prepare_buf = bmdma_prepare_buf, + .rw_buf = bmdma_rw_buf, + .restart_dma = bmdma_restart_dma, + .set_inactive = bmdma_set_inactive, + .reset = bmdma_reset, +}; + +void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d) +{ + if (bus->dma == &bm->dma) { + return; + } + + bm->dma.ops = &bmdma_ops; + bus->dma = &bm->dma; + bm->irq = bus->irq; + bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0); + bm->pci_dev = d; +} + +static const TypeInfo pci_ide_type_info = { + .name = TYPE_PCI_IDE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(PCIIDEState), + .abstract = true, +}; + +static void pci_ide_register_types(void) +{ + type_register_static(&pci_ide_type_info); +} + +type_init(pci_ide_register_types) diff --git a/src/hw/ide/pci.h b/src/hw/ide/pci.h new file mode 100644 index 0000000..0f2d4b9 --- /dev/null +++ b/src/hw/ide/pci.h @@ -0,0 +1,76 @@ +#ifndef HW_IDE_PCI_H +#define HW_IDE_PCI_H + +#include <hw/ide/internal.h> + +#define BM_STATUS_DMAING 0x01 +#define BM_STATUS_ERROR 0x02 +#define BM_STATUS_INT 0x04 + +#define BM_CMD_START 0x01 +#define BM_CMD_READ 0x08 + +typedef struct BMDMAState { + IDEDMA dma; + uint8_t cmd; + uint8_t status; + uint32_t addr; + + IDEBus *bus; + /* current transfer state */ + uint32_t cur_addr; + uint32_t cur_prd_last; + uint32_t cur_prd_addr; + uint32_t cur_prd_len; + BlockCompletionFunc *dma_cb; + MemoryRegion addr_ioport; + MemoryRegion extra_io; + qemu_irq irq; + + /* Bit 0-2 and 7: BM status register + * Bit 3-6: bus->error_status */ + uint8_t migration_compat_status; + uint8_t migration_retry_unit; + int64_t migration_retry_sector_num; + uint32_t migration_retry_nsector; + + struct PCIIDEState *pci_dev; +} BMDMAState; + +typedef struct CMD646BAR { + MemoryRegion cmd; + MemoryRegion data; + IDEBus *bus; + struct PCIIDEState *pci_dev; +} CMD646BAR; + +#define TYPE_PCI_IDE "pci-ide" +#define PCI_IDE(obj) OBJECT_CHECK(PCIIDEState, (obj), TYPE_PCI_IDE) + +typedef struct PCIIDEState { + /*< private >*/ + PCIDevice parent_obj; + /*< public >*/ + + IDEBus bus[2]; + BMDMAState bmdma[2]; + uint32_t secondary; /* used only for cmd646 */ + MemoryRegion bmdma_bar; + CMD646BAR cmd646_bar[2]; /* used only for cmd646 */ +} PCIIDEState; + + +static inline IDEState *bmdma_active_if(BMDMAState *bmdma) +{ + assert(bmdma->bus->retry_unit != (uint8_t)-1); + return bmdma->bus->ifs + bmdma->bus->retry_unit; +} + + +void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d); +void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val); +extern MemoryRegionOps bmdma_addr_ioport_ops; 
+void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table); + +extern const VMStateDescription vmstate_ide_pci; +#endif diff --git a/src/hw/ide/piix.c b/src/hw/ide/piix.c new file mode 100644 index 0000000..5a26c86 --- /dev/null +++ b/src/hw/ide/piix.c @@ -0,0 +1,305 @@ +/* + * QEMU IDE Emulation: PCI PIIX3/4 support. + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include <hw/hw.h> +#include <hw/i386/pc.h> +#include <hw/pci/pci.h> +#include <hw/isa/isa.h> +#include "sysemu/block-backend.h" +#include "sysemu/sysemu.h" +#include "sysemu/dma.h" + +#include <hw/ide/pci.h> + +static uint64_t bmdma_read(void *opaque, hwaddr addr, unsigned size) +{ + BMDMAState *bm = opaque; + uint32_t val; + + if (size != 1) { + return ((uint64_t)1 << (size * 8)) - 1; + } + + switch(addr & 3) { + case 0: + val = bm->cmd; + break; + case 2: + val = bm->status; + break; + default: + val = 0xff; + break; + } +#ifdef DEBUG_IDE + printf("bmdma: readb 0x%02x : 0x%02x\n", (uint8_t)addr, val); +#endif + return val; +} + +static void bmdma_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + BMDMAState *bm = opaque; + + if (size != 1) { + return; + } + +#ifdef DEBUG_IDE + printf("bmdma: writeb 0x%02x : 0x%02x\n", (uint8_t)addr, (uint8_t)val); +#endif + switch(addr & 3) { + case 0: + bmdma_cmd_writeb(bm, val); + break; + case 2: + bm->status = (val & 0x60) | (bm->status & 1) | (bm->status & ~val & 0x06); + break; + } +} + +static const MemoryRegionOps piix_bmdma_ops = { + .read = bmdma_read, + .write = bmdma_write, +}; + +static void bmdma_setup_bar(PCIIDEState *d) +{ + int i; + + memory_region_init(&d->bmdma_bar, OBJECT(d), "piix-bmdma-container", 16); + for(i = 0;i < 2; i++) { + BMDMAState *bm = &d->bmdma[i]; + + memory_region_init_io(&bm->extra_io, OBJECT(d), &piix_bmdma_ops, bm, + "piix-bmdma", 4); + memory_region_add_subregion(&d->bmdma_bar, i * 8, &bm->extra_io); + memory_region_init_io(&bm->addr_ioport, OBJECT(d), + &bmdma_addr_ioport_ops, bm, "bmdma", 4); + memory_region_add_subregion(&d->bmdma_bar, i * 8 + 4, &bm->addr_ioport); + } +} + +static void piix3_reset(void *opaque) +{ + PCIIDEState *d = opaque; + PCIDevice *pd = PCI_DEVICE(d); + uint8_t *pci_conf = pd->config; + int i; + + for (i = 0; i < 2; i++) { + ide_bus_reset(&d->bus[i]); + } + + /* TODO: this is the default. do not override. */ + pci_conf[PCI_COMMAND] = 0x00; + /* TODO: this is the default. do not override. 
*/ + pci_conf[PCI_COMMAND + 1] = 0x00; + /* TODO: use pci_set_word */ + pci_conf[PCI_STATUS] = PCI_STATUS_FAST_BACK; + pci_conf[PCI_STATUS + 1] = PCI_STATUS_DEVSEL_MEDIUM >> 8; + pci_conf[0x20] = 0x01; /* BMIBA: 20-23h */ +} + +static void pci_piix_init_ports(PCIIDEState *d) { + static const struct { + int iobase; + int iobase2; + int isairq; + } port_info[] = { + {0x1f0, 0x3f6, 14}, + {0x170, 0x376, 15}, + }; + int i; + + for (i = 0; i < 2; i++) { + ide_bus_new(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 2); + ide_init_ioport(&d->bus[i], NULL, port_info[i].iobase, + port_info[i].iobase2); + ide_init2(&d->bus[i], isa_get_irq(NULL, port_info[i].isairq)); + + bmdma_init(&d->bus[i], &d->bmdma[i], d); + d->bmdma[i].bus = &d->bus[i]; + ide_register_restart_cb(&d->bus[i]); + } +} + +static void pci_piix_ide_realize(PCIDevice *dev, Error **errp) +{ + PCIIDEState *d = PCI_IDE(dev); + uint8_t *pci_conf = dev->config; + + pci_conf[PCI_CLASS_PROG] = 0x80; // legacy ATA mode + + qemu_register_reset(piix3_reset, d); + + bmdma_setup_bar(d); + pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &d->bmdma_bar); + + vmstate_register(DEVICE(dev), 0, &vmstate_ide_pci, d); + + pci_piix_init_ports(d); +} + +int pci_piix3_xen_ide_unplug(DeviceState *dev) +{ + PCIIDEState *pci_ide; + DriveInfo *di; + int i; + IDEDevice *idedev; + + pci_ide = PCI_IDE(dev); + + for (i = 0; i < 4; i++) { + di = drive_get_by_index(IF_IDE, i); + if (di != NULL && !di->media_cd) { + BlockBackend *blk = blk_by_legacy_dinfo(di); + DeviceState *ds = blk_get_attached_dev(blk); + if (ds) { + blk_detach_dev(blk, ds); + } + pci_ide->bus[di->bus].ifs[di->unit].blk = NULL; + if (!(i % 2)) { + idedev = pci_ide->bus[di->bus].master; + } else { + idedev = pci_ide->bus[di->bus].slave; + } + idedev->conf.blk = NULL; + blk_unref(blk); + } + } + qdev_reset_all(DEVICE(dev)); + return 0; +} + +PCIDevice *pci_piix3_xen_ide_init(PCIBus *bus, DriveInfo **hd_table, int devfn) +{ + PCIDevice *dev; + + dev = pci_create_simple(bus, devfn, "piix3-ide-xen"); + pci_ide_create_devs(dev, hd_table); + return dev; +} + +static void pci_piix_ide_exitfn(PCIDevice *dev) +{ + PCIIDEState *d = PCI_IDE(dev); + unsigned i; + + for (i = 0; i < 2; ++i) { + memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].extra_io); + memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].addr_ioport); + } +} + +/* hd_table must contain 4 block drivers */ +/* NOTE: for the PIIX3, the IRQs and IOports are hardcoded */ +PCIDevice *pci_piix3_ide_init(PCIBus *bus, DriveInfo **hd_table, int devfn) +{ + PCIDevice *dev; + + dev = pci_create_simple(bus, devfn, "piix3-ide"); + pci_ide_create_devs(dev, hd_table); + return dev; +} + +/* hd_table must contain 4 block drivers */ +/* NOTE: for the PIIX4, the IRQs and IOports are hardcoded */ +PCIDevice *pci_piix4_ide_init(PCIBus *bus, DriveInfo **hd_table, int devfn) +{ + PCIDevice *dev; + + dev = pci_create_simple(bus, devfn, "piix4-ide"); + pci_ide_create_devs(dev, hd_table); + return dev; +} + +static void piix3_ide_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pci_piix_ide_realize; + k->exit = pci_piix_ide_exitfn; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = PCI_DEVICE_ID_INTEL_82371SB_1; + k->class_id = PCI_CLASS_STORAGE_IDE; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + dc->hotpluggable = false; +} + +static const TypeInfo piix3_ide_info = { + .name = "piix3-ide", + .parent = TYPE_PCI_IDE, + .class_init = 
piix3_ide_class_init, +}; + +static void piix3_ide_xen_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pci_piix_ide_realize; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = PCI_DEVICE_ID_INTEL_82371SB_1; + k->class_id = PCI_CLASS_STORAGE_IDE; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo piix3_ide_xen_info = { + .name = "piix3-ide-xen", + .parent = TYPE_PCI_IDE, + .class_init = piix3_ide_xen_class_init, +}; + +static void piix4_ide_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = pci_piix_ide_realize; + k->exit = pci_piix_ide_exitfn; + k->vendor_id = PCI_VENDOR_ID_INTEL; + k->device_id = PCI_DEVICE_ID_INTEL_82371AB; + k->class_id = PCI_CLASS_STORAGE_IDE; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); + dc->hotpluggable = false; +} + +static const TypeInfo piix4_ide_info = { + .name = "piix4-ide", + .parent = TYPE_PCI_IDE, + .class_init = piix4_ide_class_init, +}; + +static void piix_ide_register_types(void) +{ + type_register_static(&piix3_ide_info); + type_register_static(&piix3_ide_xen_info); + type_register_static(&piix4_ide_info); +} + +type_init(piix_ide_register_types) diff --git a/src/hw/ide/qdev.c b/src/hw/ide/qdev.c new file mode 100644 index 0000000..788b361 --- /dev/null +++ b/src/hw/ide/qdev.c @@ -0,0 +1,367 @@ +/* + * ide bus support for qdev. + * + * Copyright (c) 2009 Gerd Hoffmann <kraxel@redhat.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ +#include <hw/hw.h> +#include "sysemu/dma.h" +#include "qemu/error-report.h" +#include <hw/ide/internal.h> +#include "sysemu/block-backend.h" +#include "sysemu/blockdev.h" +#include "hw/block/block.h" +#include "sysemu/sysemu.h" +#include "qapi/visitor.h" + +/* --------------------------------- */ + +static char *idebus_get_fw_dev_path(DeviceState *dev); + +static Property ide_props[] = { + DEFINE_PROP_UINT32("unit", IDEDevice, unit, -1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ide_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + k->get_fw_dev_path = idebus_get_fw_dev_path; +} + +static const TypeInfo ide_bus_info = { + .name = TYPE_IDE_BUS, + .parent = TYPE_BUS, + .instance_size = sizeof(IDEBus), + .class_init = ide_bus_class_init, +}; + +void ide_bus_new(IDEBus *idebus, size_t idebus_size, DeviceState *dev, + int bus_id, int max_units) +{ + qbus_create_inplace(idebus, idebus_size, TYPE_IDE_BUS, dev, NULL); + idebus->bus_id = bus_id; + idebus->max_units = max_units; +} + +static char *idebus_get_fw_dev_path(DeviceState *dev) +{ + char path[30]; + + snprintf(path, sizeof(path), "%s@%x", qdev_fw_name(dev), + ((IDEBus*)dev->parent_bus)->bus_id); + + return g_strdup(path); +} + +static int ide_qdev_init(DeviceState *qdev) +{ + IDEDevice *dev = IDE_DEVICE(qdev); + IDEDeviceClass *dc = IDE_DEVICE_GET_CLASS(dev); + IDEBus *bus = DO_UPCAST(IDEBus, qbus, qdev->parent_bus); + + if (!dev->conf.blk) { + error_report("No drive specified"); + goto err; + } + if (dev->unit == -1) { + dev->unit = bus->master ? 1 : 0; + } + + if (dev->unit >= bus->max_units) { + error_report("Can't create IDE unit %d, bus supports only %d units", + dev->unit, bus->max_units); + goto err; + } + + switch (dev->unit) { + case 0: + if (bus->master) { + error_report("IDE unit %d is in use", dev->unit); + goto err; + } + bus->master = dev; + break; + case 1: + if (bus->slave) { + error_report("IDE unit %d is in use", dev->unit); + goto err; + } + bus->slave = dev; + break; + default: + error_report("Invalid IDE unit %d", dev->unit); + goto err; + } + return dc->init(dev); + +err: + return -1; +} + +IDEDevice *ide_create_drive(IDEBus *bus, int unit, DriveInfo *drive) +{ + DeviceState *dev; + + dev = qdev_create(&bus->qbus, drive->media_cd ? 
"ide-cd" : "ide-hd"); + qdev_prop_set_uint32(dev, "unit", unit); + qdev_prop_set_drive_nofail(dev, "drive", blk_by_legacy_dinfo(drive)); + qdev_init_nofail(dev); + return DO_UPCAST(IDEDevice, qdev, dev); +} + +int ide_get_geometry(BusState *bus, int unit, + int16_t *cyls, int8_t *heads, int8_t *secs) +{ + IDEState *s = &DO_UPCAST(IDEBus, qbus, bus)->ifs[unit]; + + if (s->drive_kind != IDE_HD || !s->blk) { + return -1; + } + + *cyls = s->cylinders; + *heads = s->heads; + *secs = s->sectors; + return 0; +} + +int ide_get_bios_chs_trans(BusState *bus, int unit) +{ + return DO_UPCAST(IDEBus, qbus, bus)->ifs[unit].chs_trans; +} + +/* --------------------------------- */ + +typedef struct IDEDrive { + IDEDevice dev; +} IDEDrive; + +static int ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind) +{ + IDEBus *bus = DO_UPCAST(IDEBus, qbus, dev->qdev.parent_bus); + IDEState *s = bus->ifs + dev->unit; + Error *err = NULL; + + if (dev->conf.discard_granularity == -1) { + dev->conf.discard_granularity = 512; + } else if (dev->conf.discard_granularity && + dev->conf.discard_granularity != 512) { + error_report("discard_granularity must be 512 for ide"); + return -1; + } + + blkconf_blocksizes(&dev->conf); + if (dev->conf.logical_block_size != 512) { + error_report("logical_block_size must be 512 for IDE"); + return -1; + } + + blkconf_serial(&dev->conf, &dev->serial); + if (kind != IDE_CD) { + blkconf_geometry(&dev->conf, &dev->chs_trans, 65536, 16, 255, &err); + if (err) { + error_report_err(err); + return -1; + } + } + + if (ide_init_drive(s, dev->conf.blk, kind, + dev->version, dev->serial, dev->model, dev->wwn, + dev->conf.cyls, dev->conf.heads, dev->conf.secs, + dev->chs_trans) < 0) { + return -1; + } + + if (!dev->version) { + dev->version = g_strdup(s->version); + } + if (!dev->serial) { + dev->serial = g_strdup(s->drive_serial_str); + } + + add_boot_device_path(dev->conf.bootindex, &dev->qdev, + dev->unit ? "/disk@1" : "/disk@0"); + + return 0; +} + +static void ide_dev_get_bootindex(Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + IDEDevice *d = IDE_DEVICE(obj); + + visit_type_int32(v, &d->conf.bootindex, name, errp); +} + +static void ide_dev_set_bootindex(Object *obj, Visitor *v, void *opaque, + const char *name, Error **errp) +{ + IDEDevice *d = IDE_DEVICE(obj); + int32_t boot_index; + Error *local_err = NULL; + + visit_type_int32(v, &boot_index, name, &local_err); + if (local_err) { + goto out; + } + /* check whether bootindex is present in fw_boot_order list */ + check_boot_index(boot_index, &local_err); + if (local_err) { + goto out; + } + /* change bootindex to a new one */ + d->conf.bootindex = boot_index; + + if (d->unit != -1) { + add_boot_device_path(d->conf.bootindex, &d->qdev, + d->unit ? "/disk@1" : "/disk@0"); + } +out: + if (local_err) { + error_propagate(errp, local_err); + } +} + +static void ide_dev_instance_init(Object *obj) +{ + object_property_add(obj, "bootindex", "int32", + ide_dev_get_bootindex, + ide_dev_set_bootindex, NULL, NULL, NULL); + object_property_set_int(obj, -1, "bootindex", NULL); +} + +static int ide_hd_initfn(IDEDevice *dev) +{ + return ide_dev_initfn(dev, IDE_HD); +} + +static int ide_cd_initfn(IDEDevice *dev) +{ + return ide_dev_initfn(dev, IDE_CD); +} + +static int ide_drive_initfn(IDEDevice *dev) +{ + DriveInfo *dinfo = blk_legacy_dinfo(dev->conf.blk); + + return ide_dev_initfn(dev, dinfo && dinfo->media_cd ? 
IDE_CD : IDE_HD); +} + +#define DEFINE_IDE_DEV_PROPERTIES() \ + DEFINE_BLOCK_PROPERTIES(IDEDrive, dev.conf), \ + DEFINE_PROP_STRING("ver", IDEDrive, dev.version), \ + DEFINE_PROP_UINT64("wwn", IDEDrive, dev.wwn, 0), \ + DEFINE_PROP_STRING("serial", IDEDrive, dev.serial),\ + DEFINE_PROP_STRING("model", IDEDrive, dev.model) + +static Property ide_hd_properties[] = { + DEFINE_IDE_DEV_PROPERTIES(), + DEFINE_BLOCK_CHS_PROPERTIES(IDEDrive, dev.conf), + DEFINE_PROP_BIOS_CHS_TRANS("bios-chs-trans", + IDEDrive, dev.chs_trans, BIOS_ATA_TRANSLATION_AUTO), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ide_hd_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IDEDeviceClass *k = IDE_DEVICE_CLASS(klass); + k->init = ide_hd_initfn; + dc->fw_name = "drive"; + dc->desc = "virtual IDE disk"; + dc->props = ide_hd_properties; +} + +static const TypeInfo ide_hd_info = { + .name = "ide-hd", + .parent = TYPE_IDE_DEVICE, + .instance_size = sizeof(IDEDrive), + .class_init = ide_hd_class_init, +}; + +static Property ide_cd_properties[] = { + DEFINE_IDE_DEV_PROPERTIES(), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ide_cd_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IDEDeviceClass *k = IDE_DEVICE_CLASS(klass); + k->init = ide_cd_initfn; + dc->fw_name = "drive"; + dc->desc = "virtual IDE CD-ROM"; + dc->props = ide_cd_properties; +} + +static const TypeInfo ide_cd_info = { + .name = "ide-cd", + .parent = TYPE_IDE_DEVICE, + .instance_size = sizeof(IDEDrive), + .class_init = ide_cd_class_init, +}; + +static Property ide_drive_properties[] = { + DEFINE_IDE_DEV_PROPERTIES(), + DEFINE_PROP_END_OF_LIST(), +}; + +static void ide_drive_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + IDEDeviceClass *k = IDE_DEVICE_CLASS(klass); + k->init = ide_drive_initfn; + dc->fw_name = "drive"; + dc->desc = "virtual IDE disk or CD-ROM (legacy)"; + dc->props = ide_drive_properties; +} + +static const TypeInfo ide_drive_info = { + .name = "ide-drive", + .parent = TYPE_IDE_DEVICE, + .instance_size = sizeof(IDEDrive), + .class_init = ide_drive_class_init, +}; + +static void ide_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *k = DEVICE_CLASS(klass); + k->init = ide_qdev_init; + set_bit(DEVICE_CATEGORY_STORAGE, k->categories); + k->bus_type = TYPE_IDE_BUS; + k->props = ide_props; +} + +static const TypeInfo ide_device_type_info = { + .name = TYPE_IDE_DEVICE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(IDEDevice), + .abstract = true, + .class_size = sizeof(IDEDeviceClass), + .class_init = ide_device_class_init, + .instance_init = ide_dev_instance_init, +}; + +static void ide_register_types(void) +{ + type_register_static(&ide_bus_info); + type_register_static(&ide_hd_info); + type_register_static(&ide_cd_info); + type_register_static(&ide_drive_info); + type_register_static(&ide_device_type_info); +} + +type_init(ide_register_types) diff --git a/src/hw/ide/via.c b/src/hw/ide/via.c new file mode 100644 index 0000000..e2da9ef --- /dev/null +++ b/src/hw/ide/via.c @@ -0,0 +1,235 @@ +/* + * QEMU IDE Emulation: PCI VIA82C686B support. + * + * Copyright (c) 2003 Fabrice Bellard + * Copyright (c) 2006 Openedhand Ltd. 
+ * Copyright (c) 2010 Huacai Chen <zltjiangshi@gmail.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include <hw/hw.h> +#include <hw/i386/pc.h> +#include <hw/pci/pci.h> +#include <hw/isa/isa.h> +#include "sysemu/block-backend.h" +#include "sysemu/sysemu.h" +#include "sysemu/dma.h" + +#include <hw/ide/pci.h> + +static uint64_t bmdma_read(void *opaque, hwaddr addr, + unsigned size) +{ + BMDMAState *bm = opaque; + uint32_t val; + + if (size != 1) { + return ((uint64_t)1 << (size * 8)) - 1; + } + + switch (addr & 3) { + case 0: + val = bm->cmd; + break; + case 2: + val = bm->status; + break; + default: + val = 0xff; + break; + } +#ifdef DEBUG_IDE + printf("bmdma: readb 0x%02x : 0x%02x\n", addr, val); +#endif + return val; +} + +static void bmdma_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + BMDMAState *bm = opaque; + + if (size != 1) { + return; + } + +#ifdef DEBUG_IDE + printf("bmdma: writeb 0x%02x : 0x%02x\n", addr, val); +#endif + switch (addr & 3) { + case 0: + bmdma_cmd_writeb(bm, val); + break; + case 2: + bm->status = (val & 0x60) | (bm->status & 1) | (bm->status & ~val & 0x06); + break; + default:; + } +} + +static const MemoryRegionOps via_bmdma_ops = { + .read = bmdma_read, + .write = bmdma_write, +}; + +static void bmdma_setup_bar(PCIIDEState *d) +{ + int i; + + memory_region_init(&d->bmdma_bar, OBJECT(d), "via-bmdma-container", 16); + for(i = 0;i < 2; i++) { + BMDMAState *bm = &d->bmdma[i]; + + memory_region_init_io(&bm->extra_io, OBJECT(d), &via_bmdma_ops, bm, + "via-bmdma", 4); + memory_region_add_subregion(&d->bmdma_bar, i * 8, &bm->extra_io); + memory_region_init_io(&bm->addr_ioport, OBJECT(d), + &bmdma_addr_ioport_ops, bm, "bmdma", 4); + memory_region_add_subregion(&d->bmdma_bar, i * 8 + 4, &bm->addr_ioport); + } +} + +static void via_reset(void *opaque) +{ + PCIIDEState *d = opaque; + PCIDevice *pd = PCI_DEVICE(d); + uint8_t *pci_conf = pd->config; + int i; + + for (i = 0; i < 2; i++) { + ide_bus_reset(&d->bus[i]); + } + + pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_WAIT); + pci_set_word(pci_conf + PCI_STATUS, PCI_STATUS_FAST_BACK | + PCI_STATUS_DEVSEL_MEDIUM); + + pci_set_long(pci_conf + PCI_BASE_ADDRESS_0, 0x000001f0); + pci_set_long(pci_conf + PCI_BASE_ADDRESS_1, 0x000003f4); + pci_set_long(pci_conf + PCI_BASE_ADDRESS_2, 0x00000170); + pci_set_long(pci_conf + PCI_BASE_ADDRESS_3, 0x00000374); + pci_set_long(pci_conf + PCI_BASE_ADDRESS_4, 0x0000cc01); /* BMIBA: 20-23h */ + pci_set_long(pci_conf + 
PCI_INTERRUPT_LINE, 0x0000010e); + + /* IDE chip enable, IDE configuration 1/2, IDE FIFO Configuration*/ + pci_set_long(pci_conf + 0x40, 0x0a090600); + /* IDE misc configuration 1/2/3 */ + pci_set_long(pci_conf + 0x44, 0x00c00068); + /* IDE Timing control */ + pci_set_long(pci_conf + 0x48, 0xa8a8a8a8); + /* IDE Address Setup Time */ + pci_set_long(pci_conf + 0x4c, 0x000000ff); + /* UltraDMA Extended Timing Control*/ + pci_set_long(pci_conf + 0x50, 0x07070707); + /* UltraDMA FIFO Control */ + pci_set_long(pci_conf + 0x54, 0x00000004); + /* IDE primary sector size */ + pci_set_long(pci_conf + 0x60, 0x00000200); + /* IDE secondary sector size */ + pci_set_long(pci_conf + 0x68, 0x00000200); + /* PCI PM Block */ + pci_set_long(pci_conf + 0xc0, 0x00020001); +} + +static void vt82c686b_init_ports(PCIIDEState *d) { + static const struct { + int iobase; + int iobase2; + int isairq; + } port_info[] = { + {0x1f0, 0x3f6, 14}, + {0x170, 0x376, 15}, + }; + int i; + + for (i = 0; i < 2; i++) { + ide_bus_new(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 2); + ide_init_ioport(&d->bus[i], NULL, port_info[i].iobase, + port_info[i].iobase2); + ide_init2(&d->bus[i], isa_get_irq(NULL, port_info[i].isairq)); + + bmdma_init(&d->bus[i], &d->bmdma[i], d); + d->bmdma[i].bus = &d->bus[i]; + ide_register_restart_cb(&d->bus[i]); + } +} + +/* via ide func */ +static void vt82c686b_ide_realize(PCIDevice *dev, Error **errp) +{ + PCIIDEState *d = PCI_IDE(dev); + uint8_t *pci_conf = dev->config; + + pci_config_set_prog_interface(pci_conf, 0x8a); /* legacy ATA mode */ + pci_set_long(pci_conf + PCI_CAPABILITY_LIST, 0x000000c0); + + qemu_register_reset(via_reset, d); + bmdma_setup_bar(d); + pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &d->bmdma_bar); + + vmstate_register(DEVICE(dev), 0, &vmstate_ide_pci, d); + + vt82c686b_init_ports(d); +} + +static void vt82c686b_ide_exitfn(PCIDevice *dev) +{ + PCIIDEState *d = PCI_IDE(dev); + unsigned i; + + for (i = 0; i < 2; ++i) { + memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].extra_io); + memory_region_del_subregion(&d->bmdma_bar, &d->bmdma[i].addr_ioport); + } +} + +void vt82c686b_ide_init(PCIBus *bus, DriveInfo **hd_table, int devfn) +{ + PCIDevice *dev; + + dev = pci_create_simple(bus, devfn, "via-ide"); + pci_ide_create_devs(dev, hd_table); +} + +static void via_ide_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + + k->realize = vt82c686b_ide_realize; + k->exit = vt82c686b_ide_exitfn; + k->vendor_id = PCI_VENDOR_ID_VIA; + k->device_id = PCI_DEVICE_ID_VIA_IDE; + k->revision = 0x06; + k->class_id = PCI_CLASS_STORAGE_IDE; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static const TypeInfo via_ide_info = { + .name = "via-ide", + .parent = TYPE_PCI_IDE, + .class_init = via_ide_class_init, +}; + +static void via_ide_register_types(void) +{ + type_register_static(&via_ide_info); +} + +type_init(via_ide_register_types) |
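
For reference, the BMDMA engine emulated by bmdma_cmd_writeb() and bmdma_prepare_buf() above consumes a guest-built PRD table: each 8-byte entry holds a 32-bit physical buffer address and a 32-bit size word whose low 16 bits give the byte count (0 meaning 64 KiB) and whose bit 31 marks the last entry. The following guest-side fragment is a minimal illustrative sketch, not part of the imported sources; the register layout (command at +0, status at +2, PRD pointer at +4) and the bit values mirror pci.h and piix.c, while the outb()/outl() helpers and the physical addresses passed in are assumptions made for the example.

/* Illustrative guest-side sketch: build a one-entry PRD table and start a
 * BMDMA read, using the layout parsed by bmdma_prepare_buf(). */
#include <stdint.h>

extern void outb(uint16_t port, uint8_t val);   /* assumed port-I/O helpers */
extern void outl(uint16_t port, uint32_t val);

struct prd_entry {
    uint32_t addr;   /* physical address of the data buffer */
    uint32_t size;   /* bits 0-15: byte count (0 = 64 KiB), bit 31: end of table */
};

#define PRD_EOT        0x80000000u
#define BM_CMD_START   0x01   /* same values as pci.h */
#define BM_CMD_READ    0x08   /* device-to-memory transfer */
#define BM_STATUS_INT  0x04

/* prd must live at guest-physical address prd_phys; the data buffer at buf_phys. */
static void start_bmdma_read(uint16_t bmdma_base, struct prd_entry *prd,
                             uint32_t prd_phys, uint32_t buf_phys, uint16_t len)
{
    prd[0].addr = buf_phys;
    prd[0].size = (uint32_t)len | PRD_EOT;            /* single, last descriptor */

    outl(bmdma_base + 4, prd_phys);                   /* PRD table base (bmdma_addr_ioport_ops) */
    outb(bmdma_base + 2, BM_STATUS_INT);              /* write 1 to clear a stale interrupt */
    outb(bmdma_base + 0, BM_CMD_READ | BM_CMD_START); /* start the engine */
}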