Diffstat (limited to 'src/hw/ppc')
-rw-r--r--  src/hw/ppc/Makefile.objs     |   23
-rw-r--r--  src/hw/ppc/e500-ccsr.h       |   17
-rw-r--r--  src/hw/ppc/e500.c            | 1081
-rw-r--r--  src/hw/ppc/e500.h            |   29
-rw-r--r--  src/hw/ppc/e500plat.c        |   68
-rw-r--r--  src/hw/ppc/mac.h             |  186
-rw-r--r--  src/hw/ppc/mac_newworld.c    |  530
-rw-r--r--  src/hw/ppc/mac_oldworld.c    |  373
-rw-r--r--  src/hw/ppc/mpc8544_guts.c    |  140
-rw-r--r--  src/hw/ppc/mpc8544ds.c       |   60
-rw-r--r--  src/hw/ppc/ppc.c             | 1339
-rw-r--r--  src/hw/ppc/ppc405.h          |   81
-rw-r--r--  src/hw/ppc/ppc405_boards.c   |  660
-rw-r--r--  src/hw/ppc/ppc405_uc.c       | 2551
-rw-r--r--  src/hw/ppc/ppc440_bamboo.c   |  299
-rw-r--r--  src/hw/ppc/ppc4xx_devs.c     |  734
-rw-r--r--  src/hw/ppc/ppc4xx_pci.c      |  392
-rw-r--r--  src/hw/ppc/ppc_booke.c       |  365
-rw-r--r--  src/hw/ppc/ppce500_spin.c    |  224
-rw-r--r--  src/hw/ppc/prep.c            |  674
-rw-r--r--  src/hw/ppc/spapr.c           | 2464
-rw-r--r--  src/hw/ppc/spapr_drc.c       |  802
-rw-r--r--  src/hw/ppc/spapr_events.c    |  596
-rw-r--r--  src/hw/ppc/spapr_hcall.c     | 1058
-rw-r--r--  src/hw/ppc/spapr_iommu.c     |  511
-rw-r--r--  src/hw/ppc/spapr_pci.c       | 1843
-rw-r--r--  src/hw/ppc/spapr_pci_vfio.c  |  280
-rw-r--r--  src/hw/ppc/spapr_rng.c       |  186
-rw-r--r--  src/hw/ppc/spapr_rtas.c      |  769
-rw-r--r--  src/hw/ppc/spapr_rtc.c       |  211
-rw-r--r--  src/hw/ppc/spapr_vio.c       |  705
-rw-r--r--  src/hw/ppc/virtex_ml507.c    |  306
32 files changed, 19557 insertions(+), 0 deletions(-)
diff --git a/src/hw/ppc/Makefile.objs b/src/hw/ppc/Makefile.objs
new file mode 100644
index 0000000..c1ffc77
--- /dev/null
+++ b/src/hw/ppc/Makefile.objs
@@ -0,0 +1,23 @@
+# shared objects
+obj-y += ppc.o ppc_booke.o
+# IBM pSeries (sPAPR)
+obj-$(CONFIG_PSERIES) += spapr.o spapr_vio.o spapr_events.o
+obj-$(CONFIG_PSERIES) += spapr_hcall.o spapr_iommu.o spapr_rtas.o
+obj-$(CONFIG_PSERIES) += spapr_pci.o spapr_rtc.o spapr_drc.o spapr_rng.o
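+# spapr_pci_vfio needs CONFIG_PCI, CONFIG_PSERIES and CONFIG_LINUX all enabled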
+ifeq ($(CONFIG_PCI)$(CONFIG_PSERIES)$(CONFIG_LINUX), yyy)
+obj-y += spapr_pci_vfio.o
+endif
+# PowerPC 4xx boards
+obj-y += ppc405_boards.o ppc4xx_devs.o ppc405_uc.o ppc440_bamboo.o
+obj-y += ppc4xx_pci.o
+# PReP
+obj-$(CONFIG_PREP) += prep.o
+# OldWorld PowerMac
+obj-$(CONFIG_MAC) += mac_oldworld.o
+# NewWorld PowerMac
+obj-$(CONFIG_MAC) += mac_newworld.o
+# e500
+obj-$(CONFIG_E500) += e500.o mpc8544ds.o e500plat.o
+obj-$(CONFIG_E500) += mpc8544_guts.o ppce500_spin.o
+# PowerPC 440 Xilinx ML507 reference board.
+obj-$(CONFIG_XILINX) += virtex_ml507.o
diff --git a/src/hw/ppc/e500-ccsr.h b/src/hw/ppc/e500-ccsr.h
new file mode 100644
index 0000000..12a2ba4
--- /dev/null
+++ b/src/hw/ppc/e500-ccsr.h
@@ -0,0 +1,17 @@
+#ifndef E500_CCSR_H
+#define E500_CCSR_H
+
+#include "hw/sysbus.h"
+
+typedef struct PPCE500CCSRState {
+ /*< private >*/
+ SysBusDevice parent;
+ /*< public >*/
+
+ MemoryRegion ccsr_space;
+} PPCE500CCSRState;
+
+#define TYPE_CCSR "e500-ccsr"
+#define CCSR(obj) OBJECT_CHECK(PPCE500CCSRState, (obj), TYPE_CCSR)
+
+#endif /* E500_CCSR_H */
diff --git a/src/hw/ppc/e500.c b/src/hw/ppc/e500.c
new file mode 100644
index 0000000..b3418db
--- /dev/null
+++ b/src/hw/ppc/e500.c
@@ -0,0 +1,1081 @@
+/*
+ * QEMU PowerPC e500-based platforms
+ *
+ * Copyright (C) 2009 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Yu Liu, <yu.liu@freescale.com>
+ *
+ * This file is derived from hw/ppc440_bamboo.c,
+ * the copyright for that material belongs to the original owners.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "config.h"
+#include "qemu-common.h"
+#include "e500.h"
+#include "e500-ccsr.h"
+#include "net/net.h"
+#include "qemu/config-file.h"
+#include "hw/hw.h"
+#include "hw/char/serial.h"
+#include "hw/pci/pci.h"
+#include "hw/boards.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "kvm_ppc.h"
+#include "sysemu/device_tree.h"
+#include "hw/ppc/openpic.h"
+#include "hw/ppc/ppc.h"
+#include "hw/loader.h"
+#include "elf.h"
+#include "hw/sysbus.h"
+#include "exec/address-spaces.h"
+#include "qemu/host-utils.h"
+#include "hw/pci-host/ppce500.h"
+#include "qemu/error-report.h"
+#include "hw/platform-bus.h"
+#include "hw/net/fsl_etsec/etsec.h"
+
+#define EPAPR_MAGIC (0x45504150)
+#define BINARY_DEVICE_TREE_FILE "mpc8544ds.dtb"
+#define DTC_LOAD_PAD 0x1800000
+#define DTC_PAD_MASK 0xFFFFF
+#define DTB_MAX_SIZE (8 * 1024 * 1024)
+#define INITRD_LOAD_PAD 0x2000000
+#define INITRD_PAD_MASK 0xFFFFFF
+
+#define RAM_SIZES_ALIGN (64UL << 20)
+
+/* TODO: parameterize */
+#define MPC8544_CCSRBAR_SIZE 0x00100000ULL
+#define MPC8544_MPIC_REGS_OFFSET 0x40000ULL
+#define MPC8544_MSI_REGS_OFFSET 0x41600ULL
+#define MPC8544_SERIAL0_REGS_OFFSET 0x4500ULL
+#define MPC8544_SERIAL1_REGS_OFFSET 0x4600ULL
+#define MPC8544_PCI_REGS_OFFSET 0x8000ULL
+#define MPC8544_PCI_REGS_SIZE 0x1000ULL
+#define MPC8544_UTIL_OFFSET 0xe0000ULL
+#define MPC8XXX_GPIO_OFFSET 0x000FF000ULL
+#define MPC8XXX_GPIO_IRQ 47
+
+struct boot_info
+{
+ uint32_t dt_base;
+ uint32_t dt_size;
+ uint32_t entry;
+};
+
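+/*
+ * Build the flat "interrupt-map" property for the PCI host node: one
+ * 7-cell entry per slot/pin pair (3-cell child unit address, child
+ * interrupt pin, MPIC phandle, parent interrupt number and sense),
+ * with the routing taken from ppce500_pci_map_irq_slot().
+ */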
+static uint32_t *pci_map_create(void *fdt, uint32_t mpic, int first_slot,
+ int nr_slots, int *len)
+{
+ int i = 0;
+ int slot;
+ int pci_irq;
+ int host_irq;
+ int last_slot = first_slot + nr_slots;
+ uint32_t *pci_map;
+
+ *len = nr_slots * 4 * 7 * sizeof(uint32_t);
+ pci_map = g_malloc(*len);
+
+ for (slot = first_slot; slot < last_slot; slot++) {
+ for (pci_irq = 0; pci_irq < 4; pci_irq++) {
+ pci_map[i++] = cpu_to_be32(slot << 11);
+ pci_map[i++] = cpu_to_be32(0x0);
+ pci_map[i++] = cpu_to_be32(0x0);
+ pci_map[i++] = cpu_to_be32(pci_irq + 1);
+ pci_map[i++] = cpu_to_be32(mpic);
+ host_irq = ppce500_pci_map_irq_slot(slot, pci_irq);
+ pci_map[i++] = cpu_to_be32(host_irq + 1);
+ pci_map[i++] = cpu_to_be32(0x1);
+ }
+ }
+
+ assert((i * sizeof(uint32_t)) == *len);
+
+ return pci_map;
+}
+
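+/*
+ * Add an ns16550-compatible serial node below the SoC node, register it
+ * under /aliases, and optionally advertise it as the default console via
+ * /chosen/linux,stdout-path.
+ */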
+static void dt_serial_create(void *fdt, unsigned long long offset,
+ const char *soc, const char *mpic,
+ const char *alias, int idx, bool defcon)
+{
+ char ser[128];
+
+ snprintf(ser, sizeof(ser), "%s/serial@%llx", soc, offset);
+ qemu_fdt_add_subnode(fdt, ser);
+ qemu_fdt_setprop_string(fdt, ser, "device_type", "serial");
+ qemu_fdt_setprop_string(fdt, ser, "compatible", "ns16550");
+ qemu_fdt_setprop_cells(fdt, ser, "reg", offset, 0x100);
+ qemu_fdt_setprop_cell(fdt, ser, "cell-index", idx);
+ qemu_fdt_setprop_cell(fdt, ser, "clock-frequency", 0);
+ qemu_fdt_setprop_cells(fdt, ser, "interrupts", 42, 2);
+ qemu_fdt_setprop_phandle(fdt, ser, "interrupt-parent", mpic);
+ qemu_fdt_setprop_string(fdt, "/aliases", alias, ser);
+
+ if (defcon) {
+ qemu_fdt_setprop_string(fdt, "/chosen", "linux,stdout-path", ser);
+ }
+}
+
+static void create_dt_mpc8xxx_gpio(void *fdt, const char *soc, const char *mpic)
+{
+ hwaddr mmio0 = MPC8XXX_GPIO_OFFSET;
+ int irq0 = MPC8XXX_GPIO_IRQ;
+ gchar *node = g_strdup_printf("%s/gpio@%"PRIx64, soc, mmio0);
+ gchar *poweroff = g_strdup_printf("%s/power-off", soc);
+ int gpio_ph;
+
+ qemu_fdt_add_subnode(fdt, node);
+ qemu_fdt_setprop_string(fdt, node, "compatible", "fsl,qoriq-gpio");
+ qemu_fdt_setprop_cells(fdt, node, "reg", mmio0, 0x1000);
+ qemu_fdt_setprop_cells(fdt, node, "interrupts", irq0, 0x2);
+ qemu_fdt_setprop_phandle(fdt, node, "interrupt-parent", mpic);
+ qemu_fdt_setprop_cells(fdt, node, "#gpio-cells", 2);
+ qemu_fdt_setprop(fdt, node, "gpio-controller", NULL, 0);
+ gpio_ph = qemu_fdt_alloc_phandle(fdt);
+ qemu_fdt_setprop_cell(fdt, node, "phandle", gpio_ph);
+ qemu_fdt_setprop_cell(fdt, node, "linux,phandle", gpio_ph);
+
+ /* Power Off Pin */
+ qemu_fdt_add_subnode(fdt, poweroff);
+ qemu_fdt_setprop_string(fdt, poweroff, "compatible", "gpio-poweroff");
+ qemu_fdt_setprop_cells(fdt, poweroff, "gpios", gpio_ph, 0, 0);
+
+ g_free(node);
+ g_free(poweroff);
+}
+
+typedef struct PlatformDevtreeData {
+ void *fdt;
+ const char *mpic;
+ int irq_start;
+ const char *node;
+ PlatformBusDevice *pbus;
+} PlatformDevtreeData;
+
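+/*
+ * Describe one dynamically instantiated eTSEC in the device tree: an
+ * "fsl,etsec2" node under /platform with a queue-group subnode carrying
+ * its MMIO window and its three interrupt lines.
+ */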
+static int create_devtree_etsec(SysBusDevice *sbdev, PlatformDevtreeData *data)
+{
+ eTSEC *etsec = ETSEC_COMMON(sbdev);
+ PlatformBusDevice *pbus = data->pbus;
+ hwaddr mmio0 = platform_bus_get_mmio_addr(pbus, sbdev, 0);
+ int irq0 = platform_bus_get_irqn(pbus, sbdev, 0);
+ int irq1 = platform_bus_get_irqn(pbus, sbdev, 1);
+ int irq2 = platform_bus_get_irqn(pbus, sbdev, 2);
+ gchar *node = g_strdup_printf("/platform/ethernet@%"PRIx64, mmio0);
+ gchar *group = g_strdup_printf("%s/queue-group", node);
+ void *fdt = data->fdt;
+
+ assert((int64_t)mmio0 >= 0);
+ assert(irq0 >= 0);
+ assert(irq1 >= 0);
+ assert(irq2 >= 0);
+
+ qemu_fdt_add_subnode(fdt, node);
+ qemu_fdt_setprop_string(fdt, node, "device_type", "network");
+ qemu_fdt_setprop_string(fdt, node, "compatible", "fsl,etsec2");
+ qemu_fdt_setprop_string(fdt, node, "model", "eTSEC");
+ qemu_fdt_setprop(fdt, node, "local-mac-address", etsec->conf.macaddr.a, 6);
+ qemu_fdt_setprop_cells(fdt, node, "fixed-link", 0, 1, 1000, 0, 0);
+
+ qemu_fdt_add_subnode(fdt, group);
+ qemu_fdt_setprop_cells(fdt, group, "reg", mmio0, 0x1000);
+ qemu_fdt_setprop_cells(fdt, group, "interrupts",
+ data->irq_start + irq0, 0x2,
+ data->irq_start + irq1, 0x2,
+ data->irq_start + irq2, 0x2);
+
+ g_free(node);
+ g_free(group);
+
+ return 0;
+}
+
+static int sysbus_device_create_devtree(SysBusDevice *sbdev, void *opaque)
+{
+ PlatformDevtreeData *data = opaque;
+ bool matched = false;
+
+ if (object_dynamic_cast(OBJECT(sbdev), TYPE_ETSEC_COMMON)) {
+ create_devtree_etsec(sbdev, data);
+ matched = true;
+ }
+
+ if (!matched) {
+ error_report("Device %s is not supported by this machine yet.",
+ qdev_fw_name(DEVICE(sbdev)));
+ exit(1);
+ }
+
+ return 0;
+}
+
+static void platform_bus_create_devtree(PPCE500Params *params, void *fdt,
+ const char *mpic)
+{
+ gchar *node = g_strdup_printf("/platform@%"PRIx64, params->platform_bus_base);
+ const char platcomp[] = "qemu,platform\0simple-bus";
+ uint64_t addr = params->platform_bus_base;
+ uint64_t size = params->platform_bus_size;
+ int irq_start = params->platform_bus_first_irq;
+ PlatformBusDevice *pbus;
+ DeviceState *dev;
+
+ /* Create a /platform node that we can put all devices into */
+
+ qemu_fdt_add_subnode(fdt, node);
+ qemu_fdt_setprop(fdt, node, "compatible", platcomp, sizeof(platcomp));
+
+    /* Our platform bus region fits into 32 bits, so one cell is enough
+       for the child address and size */
+ qemu_fdt_setprop_cells(fdt, node, "#size-cells", 1);
+ qemu_fdt_setprop_cells(fdt, node, "#address-cells", 1);
+ qemu_fdt_setprop_cells(fdt, node, "ranges", 0, addr >> 32, addr, size);
+
+ qemu_fdt_setprop_phandle(fdt, node, "interrupt-parent", mpic);
+
+ dev = qdev_find_recursive(sysbus_get_default(), TYPE_PLATFORM_BUS_DEVICE);
+ pbus = PLATFORM_BUS_DEVICE(dev);
+
+ /* We can only create dt nodes for dynamic devices when they're ready */
+ if (pbus->done_gathering) {
+ PlatformDevtreeData data = {
+ .fdt = fdt,
+ .mpic = mpic,
+ .irq_start = irq_start,
+ .node = node,
+ .pbus = pbus,
+ };
+
+ /* Loop through all dynamic sysbus devices and create nodes for them */
+ foreach_dynamic_sysbus_device(sysbus_device_create_devtree, &data);
+ }
+
+ g_free(node);
+}
+
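+/*
+ * Build the guest device tree (or load the one given via -dtb) and, unless
+ * this is a dry run, copy it into guest memory at 'addr'. Returns the size
+ * of the blob, or a negative value on error.
+ */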
+static int ppce500_load_device_tree(MachineState *machine,
+ PPCE500Params *params,
+ hwaddr addr,
+ hwaddr initrd_base,
+ hwaddr initrd_size,
+ hwaddr kernel_base,
+ hwaddr kernel_size,
+ bool dry_run)
+{
+ CPUPPCState *env = first_cpu->env_ptr;
+ int ret = -1;
+ uint64_t mem_reg_property[] = { 0, cpu_to_be64(machine->ram_size) };
+ int fdt_size;
+ void *fdt;
+ uint8_t hypercall[16];
+ uint32_t clock_freq = 400000000;
+ uint32_t tb_freq = 400000000;
+ int i;
+ char compatible_sb[] = "fsl,mpc8544-immr\0simple-bus";
+ char soc[128];
+ char mpic[128];
+ uint32_t mpic_ph;
+ uint32_t msi_ph;
+ char gutil[128];
+ char pci[128];
+ char msi[128];
+ uint32_t *pci_map = NULL;
+ int len;
+ uint32_t pci_ranges[14] =
+ {
+ 0x2000000, 0x0, params->pci_mmio_bus_base,
+ params->pci_mmio_base >> 32, params->pci_mmio_base,
+ 0x0, 0x20000000,
+
+ 0x1000000, 0x0, 0x0,
+ params->pci_pio_base >> 32, params->pci_pio_base,
+ 0x0, 0x10000,
+ };
+ QemuOpts *machine_opts = qemu_get_machine_opts();
+ const char *dtb_file = qemu_opt_get(machine_opts, "dtb");
+ const char *toplevel_compat = qemu_opt_get(machine_opts, "dt_compatible");
+
+ if (dtb_file) {
+ char *filename;
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_file);
+ if (!filename) {
+ goto out;
+ }
+
+ fdt = load_device_tree(filename, &fdt_size);
+ g_free(filename);
+ if (!fdt) {
+ goto out;
+ }
+ goto done;
+ }
+
+ fdt = create_device_tree(&fdt_size);
+ if (fdt == NULL) {
+ goto out;
+ }
+
+ /* Manipulate device tree in memory. */
+ qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 2);
+ qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 2);
+
+ qemu_fdt_add_subnode(fdt, "/memory");
+ qemu_fdt_setprop_string(fdt, "/memory", "device_type", "memory");
+ qemu_fdt_setprop(fdt, "/memory", "reg", mem_reg_property,
+ sizeof(mem_reg_property));
+
+ qemu_fdt_add_subnode(fdt, "/chosen");
+ if (initrd_size) {
+ ret = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start",
+ initrd_base);
+ if (ret < 0) {
+ fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n");
+ }
+
+ ret = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end",
+ (initrd_base + initrd_size));
+ if (ret < 0) {
+ fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n");
+ }
+
+ }
+
+ if (kernel_base != -1ULL) {
+ qemu_fdt_setprop_cells(fdt, "/chosen", "qemu,boot-kernel",
+ kernel_base >> 32, kernel_base,
+ kernel_size >> 32, kernel_size);
+ }
+
+ ret = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs",
+ machine->kernel_cmdline);
+ if (ret < 0)
+ fprintf(stderr, "couldn't set /chosen/bootargs\n");
+
+ if (kvm_enabled()) {
+ /* Read out host's frequencies */
+ clock_freq = kvmppc_get_clockfreq();
+ tb_freq = kvmppc_get_tbfreq();
+
+ /* indicate KVM hypercall interface */
+ qemu_fdt_add_subnode(fdt, "/hypervisor");
+ qemu_fdt_setprop_string(fdt, "/hypervisor", "compatible",
+ "linux,kvm");
+ kvmppc_get_hypercall(env, hypercall, sizeof(hypercall));
+ qemu_fdt_setprop(fdt, "/hypervisor", "hcall-instructions",
+ hypercall, sizeof(hypercall));
+ /* if KVM supports the idle hcall, set property indicating this */
+ if (kvmppc_get_hasidle(env)) {
+ qemu_fdt_setprop(fdt, "/hypervisor", "has-idle", NULL, 0);
+ }
+ }
+
+ /* Create CPU nodes */
+ qemu_fdt_add_subnode(fdt, "/cpus");
+ qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 1);
+ qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0);
+
+ /* We need to generate the cpu nodes in reverse order, so Linux can pick
+ the first node as boot node and be happy */
+ for (i = smp_cpus - 1; i >= 0; i--) {
+ CPUState *cpu;
+ PowerPCCPU *pcpu;
+ char cpu_name[128];
+ uint64_t cpu_release_addr = params->spin_base + (i * 0x20);
+
+ cpu = qemu_get_cpu(i);
+ if (cpu == NULL) {
+ continue;
+ }
+ env = cpu->env_ptr;
+ pcpu = POWERPC_CPU(cpu);
+
+ snprintf(cpu_name, sizeof(cpu_name), "/cpus/PowerPC,8544@%x",
+ ppc_get_vcpu_dt_id(pcpu));
+ qemu_fdt_add_subnode(fdt, cpu_name);
+ qemu_fdt_setprop_cell(fdt, cpu_name, "clock-frequency", clock_freq);
+ qemu_fdt_setprop_cell(fdt, cpu_name, "timebase-frequency", tb_freq);
+ qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu");
+ qemu_fdt_setprop_cell(fdt, cpu_name, "reg",
+ ppc_get_vcpu_dt_id(pcpu));
+ qemu_fdt_setprop_cell(fdt, cpu_name, "d-cache-line-size",
+ env->dcache_line_size);
+ qemu_fdt_setprop_cell(fdt, cpu_name, "i-cache-line-size",
+ env->icache_line_size);
+ qemu_fdt_setprop_cell(fdt, cpu_name, "d-cache-size", 0x8000);
+ qemu_fdt_setprop_cell(fdt, cpu_name, "i-cache-size", 0x8000);
+ qemu_fdt_setprop_cell(fdt, cpu_name, "bus-frequency", 0);
+ if (cpu->cpu_index) {
+ qemu_fdt_setprop_string(fdt, cpu_name, "status", "disabled");
+ qemu_fdt_setprop_string(fdt, cpu_name, "enable-method",
+ "spin-table");
+ qemu_fdt_setprop_u64(fdt, cpu_name, "cpu-release-addr",
+ cpu_release_addr);
+ } else {
+ qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay");
+ }
+ }
+
+ qemu_fdt_add_subnode(fdt, "/aliases");
+ /* XXX These should go into their respective devices' code */
+ snprintf(soc, sizeof(soc), "/soc@%"PRIx64, params->ccsrbar_base);
+ qemu_fdt_add_subnode(fdt, soc);
+ qemu_fdt_setprop_string(fdt, soc, "device_type", "soc");
+ qemu_fdt_setprop(fdt, soc, "compatible", compatible_sb,
+ sizeof(compatible_sb));
+ qemu_fdt_setprop_cell(fdt, soc, "#address-cells", 1);
+ qemu_fdt_setprop_cell(fdt, soc, "#size-cells", 1);
+ qemu_fdt_setprop_cells(fdt, soc, "ranges", 0x0,
+ params->ccsrbar_base >> 32, params->ccsrbar_base,
+ MPC8544_CCSRBAR_SIZE);
+ /* XXX should contain a reasonable value */
+ qemu_fdt_setprop_cell(fdt, soc, "bus-frequency", 0);
+
+ snprintf(mpic, sizeof(mpic), "%s/pic@%llx", soc, MPC8544_MPIC_REGS_OFFSET);
+ qemu_fdt_add_subnode(fdt, mpic);
+ qemu_fdt_setprop_string(fdt, mpic, "device_type", "open-pic");
+ qemu_fdt_setprop_string(fdt, mpic, "compatible", "fsl,mpic");
+ qemu_fdt_setprop_cells(fdt, mpic, "reg", MPC8544_MPIC_REGS_OFFSET,
+ 0x40000);
+ qemu_fdt_setprop_cell(fdt, mpic, "#address-cells", 0);
+ qemu_fdt_setprop_cell(fdt, mpic, "#interrupt-cells", 2);
+ mpic_ph = qemu_fdt_alloc_phandle(fdt);
+ qemu_fdt_setprop_cell(fdt, mpic, "phandle", mpic_ph);
+ qemu_fdt_setprop_cell(fdt, mpic, "linux,phandle", mpic_ph);
+ qemu_fdt_setprop(fdt, mpic, "interrupt-controller", NULL, 0);
+
+ /*
+     * We have to generate ser1 first, because Linux takes the first
+     * serial device it finds in the dt as its output device, and we
+     * add devices to the dt in reverse order.
+ */
+ if (serial_hds[1]) {
+ dt_serial_create(fdt, MPC8544_SERIAL1_REGS_OFFSET,
+ soc, mpic, "serial1", 1, false);
+ }
+
+ if (serial_hds[0]) {
+ dt_serial_create(fdt, MPC8544_SERIAL0_REGS_OFFSET,
+ soc, mpic, "serial0", 0, true);
+ }
+
+ snprintf(gutil, sizeof(gutil), "%s/global-utilities@%llx", soc,
+ MPC8544_UTIL_OFFSET);
+ qemu_fdt_add_subnode(fdt, gutil);
+ qemu_fdt_setprop_string(fdt, gutil, "compatible", "fsl,mpc8544-guts");
+ qemu_fdt_setprop_cells(fdt, gutil, "reg", MPC8544_UTIL_OFFSET, 0x1000);
+ qemu_fdt_setprop(fdt, gutil, "fsl,has-rstcr", NULL, 0);
+
+ snprintf(msi, sizeof(msi), "/%s/msi@%llx", soc, MPC8544_MSI_REGS_OFFSET);
+ qemu_fdt_add_subnode(fdt, msi);
+ qemu_fdt_setprop_string(fdt, msi, "compatible", "fsl,mpic-msi");
+ qemu_fdt_setprop_cells(fdt, msi, "reg", MPC8544_MSI_REGS_OFFSET, 0x200);
+ msi_ph = qemu_fdt_alloc_phandle(fdt);
+ qemu_fdt_setprop_cells(fdt, msi, "msi-available-ranges", 0x0, 0x100);
+ qemu_fdt_setprop_phandle(fdt, msi, "interrupt-parent", mpic);
+ qemu_fdt_setprop_cells(fdt, msi, "interrupts",
+ 0xe0, 0x0,
+ 0xe1, 0x0,
+ 0xe2, 0x0,
+ 0xe3, 0x0,
+ 0xe4, 0x0,
+ 0xe5, 0x0,
+ 0xe6, 0x0,
+ 0xe7, 0x0);
+ qemu_fdt_setprop_cell(fdt, msi, "phandle", msi_ph);
+ qemu_fdt_setprop_cell(fdt, msi, "linux,phandle", msi_ph);
+
+ snprintf(pci, sizeof(pci), "/pci@%llx",
+ params->ccsrbar_base + MPC8544_PCI_REGS_OFFSET);
+ qemu_fdt_add_subnode(fdt, pci);
+ qemu_fdt_setprop_cell(fdt, pci, "cell-index", 0);
+ qemu_fdt_setprop_string(fdt, pci, "compatible", "fsl,mpc8540-pci");
+ qemu_fdt_setprop_string(fdt, pci, "device_type", "pci");
+ qemu_fdt_setprop_cells(fdt, pci, "interrupt-map-mask", 0xf800, 0x0,
+ 0x0, 0x7);
+ pci_map = pci_map_create(fdt, qemu_fdt_get_phandle(fdt, mpic),
+ params->pci_first_slot, params->pci_nr_slots,
+ &len);
+ qemu_fdt_setprop(fdt, pci, "interrupt-map", pci_map, len);
+ qemu_fdt_setprop_phandle(fdt, pci, "interrupt-parent", mpic);
+ qemu_fdt_setprop_cells(fdt, pci, "interrupts", 24, 2);
+ qemu_fdt_setprop_cells(fdt, pci, "bus-range", 0, 255);
+ for (i = 0; i < 14; i++) {
+ pci_ranges[i] = cpu_to_be32(pci_ranges[i]);
+ }
+ qemu_fdt_setprop_cell(fdt, pci, "fsl,msi", msi_ph);
+ qemu_fdt_setprop(fdt, pci, "ranges", pci_ranges, sizeof(pci_ranges));
+ qemu_fdt_setprop_cells(fdt, pci, "reg",
+ (params->ccsrbar_base + MPC8544_PCI_REGS_OFFSET) >> 32,
+ (params->ccsrbar_base + MPC8544_PCI_REGS_OFFSET),
+ 0, 0x1000);
+ qemu_fdt_setprop_cell(fdt, pci, "clock-frequency", 66666666);
+ qemu_fdt_setprop_cell(fdt, pci, "#interrupt-cells", 1);
+ qemu_fdt_setprop_cell(fdt, pci, "#size-cells", 2);
+ qemu_fdt_setprop_cell(fdt, pci, "#address-cells", 3);
+ qemu_fdt_setprop_string(fdt, "/aliases", "pci0", pci);
+
+ if (params->has_mpc8xxx_gpio) {
+ create_dt_mpc8xxx_gpio(fdt, soc, mpic);
+ }
+
+ if (params->has_platform_bus) {
+ platform_bus_create_devtree(params, fdt, mpic);
+ }
+
+ params->fixup_devtree(params, fdt);
+
+ if (toplevel_compat) {
+ qemu_fdt_setprop(fdt, "/", "compatible", toplevel_compat,
+ strlen(toplevel_compat) + 1);
+ }
+
+done:
+ if (!dry_run) {
+ qemu_fdt_dumpdtb(fdt, fdt_size);
+ cpu_physical_memory_write(addr, fdt, fdt_size);
+ }
+ ret = fdt_size;
+
+out:
+ g_free(pci_map);
+
+ return ret;
+}
+
+typedef struct DeviceTreeParams {
+ MachineState *machine;
+ PPCE500Params params;
+ hwaddr addr;
+ hwaddr initrd_base;
+ hwaddr initrd_size;
+ hwaddr kernel_base;
+ hwaddr kernel_size;
+ Notifier notifier;
+} DeviceTreeParams;
+
+static void ppce500_reset_device_tree(void *opaque)
+{
+ DeviceTreeParams *p = opaque;
+ ppce500_load_device_tree(p->machine, &p->params, p->addr, p->initrd_base,
+ p->initrd_size, p->kernel_base, p->kernel_size,
+ false);
+}
+
+static void ppce500_init_notify(Notifier *notifier, void *data)
+{
+ DeviceTreeParams *p = container_of(notifier, DeviceTreeParams, notifier);
+ ppce500_reset_device_tree(p);
+}
+
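+/*
+ * Hook device tree generation into machine reset (and into the
+ * machine-init-done notifier, so dynamically created sysbus devices are
+ * included), then do a dry run so the caller learns how large the blob
+ * will be.
+ */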
+static int ppce500_prep_device_tree(MachineState *machine,
+ PPCE500Params *params,
+ hwaddr addr,
+ hwaddr initrd_base,
+ hwaddr initrd_size,
+ hwaddr kernel_base,
+ hwaddr kernel_size)
+{
+ DeviceTreeParams *p = g_new(DeviceTreeParams, 1);
+ p->machine = machine;
+ p->params = *params;
+ p->addr = addr;
+ p->initrd_base = initrd_base;
+ p->initrd_size = initrd_size;
+ p->kernel_base = kernel_base;
+ p->kernel_size = kernel_size;
+
+ qemu_register_reset(ppce500_reset_device_tree, p);
+ p->notifier.notify = ppce500_init_notify;
+ qemu_add_machine_init_done_notifier(&p->notifier);
+
+ /* Issue the device tree loader once, so that we get the size of the blob */
+ return ppce500_load_device_tree(machine, params, addr, initrd_base,
+ initrd_size, kernel_base, kernel_size,
+ true);
+}
+
+/* Create -kernel TLB entries for BookE. */
+static inline hwaddr booke206_page_size_to_tlb(uint64_t size)
+{
+ return 63 - clz64(size >> 10);
+}
+
+static int booke206_initial_map_tsize(CPUPPCState *env)
+{
+ struct boot_info *bi = env->load_info;
+ hwaddr dt_end;
+ int ps;
+
+ /* Our initial TLB entry needs to cover everything from 0 to
+ the device tree top */
+ dt_end = bi->dt_base + bi->dt_size;
+ ps = booke206_page_size_to_tlb(dt_end) + 1;
+ if (ps & 1) {
+ /* e500v2 can only do even TLB size bits */
+ ps++;
+ }
+ return ps;
+}
+
+static uint64_t mmubooke_initial_mapsize(CPUPPCState *env)
+{
+ int tsize;
+
+ tsize = booke206_initial_map_tsize(env);
+ return (1ULL << 10 << tsize);
+}
+
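+/*
+ * Map the start of RAM 1:1 through TLB1 entry 0 with full permissions so
+ * the guest can run until it installs its own translations; the entry is
+ * sized by booke206_initial_map_tsize() to cover memory up to the top of
+ * the device tree.
+ */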
+static void mmubooke_create_initial_mapping(CPUPPCState *env)
+{
+ ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 0);
+ hwaddr size;
+ int ps;
+
+ ps = booke206_initial_map_tsize(env);
+ size = (ps << MAS1_TSIZE_SHIFT);
+ tlb->mas1 = MAS1_VALID | size;
+ tlb->mas2 = 0;
+ tlb->mas7_3 = 0;
+ tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX;
+
+ env->tlb_dirty = true;
+}
+
+static void ppce500_cpu_reset_sec(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
+
+ cpu_reset(cs);
+
+ /* Secondary CPU starts in halted state for now. Needs to change when
+ implementing non-kernel boot. */
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+}
+
+static void ppce500_cpu_reset(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ struct boot_info *bi = env->load_info;
+
+ cpu_reset(cs);
+
+ /* Set initial guest state. */
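+    /*
+     * Register usage follows the ePAPR boot convention: r3 points at the
+     * device tree, r6 holds the ePAPR magic and r7 the size of the
+     * initially mapped area; r1 is an initial stack pointer just below
+     * 16 MiB.
+     */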
+ cs->halted = 0;
+ env->gpr[1] = (16<<20) - 8;
+ env->gpr[3] = bi->dt_base;
+ env->gpr[4] = 0;
+ env->gpr[5] = 0;
+ env->gpr[6] = EPAPR_MAGIC;
+ env->gpr[7] = mmubooke_initial_mapsize(env);
+ env->gpr[8] = 0;
+ env->gpr[9] = 0;
+ env->nip = bi->entry;
+ mmubooke_create_initial_mapping(env);
+}
+
+static DeviceState *ppce500_init_mpic_qemu(PPCE500Params *params,
+ qemu_irq **irqs)
+{
+ DeviceState *dev;
+ SysBusDevice *s;
+ int i, j, k;
+
+ dev = qdev_create(NULL, TYPE_OPENPIC);
+ qdev_prop_set_uint32(dev, "model", params->mpic_version);
+ qdev_prop_set_uint32(dev, "nb_cpus", smp_cpus);
+
+ qdev_init_nofail(dev);
+ s = SYS_BUS_DEVICE(dev);
+
+ k = 0;
+ for (i = 0; i < smp_cpus; i++) {
+ for (j = 0; j < OPENPIC_OUTPUT_NB; j++) {
+ sysbus_connect_irq(s, k++, irqs[i][j]);
+ }
+ }
+
+ return dev;
+}
+
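+/*
+ * Try to realize an in-kernel MPIC and connect every vcpu to it; returns
+ * NULL if the in-kernel model cannot be realized, so the caller can fall
+ * back to the emulated OpenPIC.
+ */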
+static DeviceState *ppce500_init_mpic_kvm(PPCE500Params *params,
+ qemu_irq **irqs, Error **errp)
+{
+ Error *err = NULL;
+ DeviceState *dev;
+ CPUState *cs;
+
+ dev = qdev_create(NULL, TYPE_KVM_OPENPIC);
+ qdev_prop_set_uint32(dev, "model", params->mpic_version);
+
+ object_property_set_bool(OBJECT(dev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ object_unparent(OBJECT(dev));
+ return NULL;
+ }
+
+ CPU_FOREACH(cs) {
+ if (kvm_openpic_connect_vcpu(dev, cs)) {
+ fprintf(stderr, "%s: failed to connect vcpu to irqchip\n",
+ __func__);
+ abort();
+ }
+ }
+
+ return dev;
+}
+
+static qemu_irq *ppce500_init_mpic(MachineState *machine, PPCE500Params *params,
+ MemoryRegion *ccsr, qemu_irq **irqs)
+{
+ qemu_irq *mpic;
+ DeviceState *dev = NULL;
+ SysBusDevice *s;
+ int i;
+
+ mpic = g_new0(qemu_irq, 256);
+
+ if (kvm_enabled()) {
+ Error *err = NULL;
+
+ if (machine_kernel_irqchip_allowed(machine)) {
+ dev = ppce500_init_mpic_kvm(params, irqs, &err);
+ }
+ if (machine_kernel_irqchip_required(machine) && !dev) {
+ error_report("kernel_irqchip requested but unavailable: %s",
+ error_get_pretty(err));
+ exit(1);
+ }
+ }
+
+ if (!dev) {
+ dev = ppce500_init_mpic_qemu(params, irqs);
+ }
+
+ for (i = 0; i < 256; i++) {
+ mpic[i] = qdev_get_gpio_in(dev, i);
+ }
+
+ s = SYS_BUS_DEVICE(dev);
+ memory_region_add_subregion(ccsr, MPC8544_MPIC_REGS_OFFSET,
+ s->mmio[0].memory);
+
+ return mpic;
+}
+
+static void ppce500_power_off(void *opaque, int line, int on)
+{
+ if (on) {
+ qemu_system_shutdown_request();
+ }
+}
+
+void ppce500_init(MachineState *machine, PPCE500Params *params)
+{
+ MemoryRegion *address_space_mem = get_system_memory();
+ MemoryRegion *ram = g_new(MemoryRegion, 1);
+ PCIBus *pci_bus;
+ CPUPPCState *env = NULL;
+ uint64_t loadaddr;
+ hwaddr kernel_base = -1LL;
+ int kernel_size = 0;
+ hwaddr dt_base = 0;
+ hwaddr initrd_base = 0;
+ int initrd_size = 0;
+ hwaddr cur_base = 0;
+ char *filename;
+ hwaddr bios_entry = 0;
+ target_long bios_size;
+ struct boot_info *boot_info;
+ int dt_size;
+ int i;
+    /* The IRQ numbers for pins INTA, INTB, INTC and INTD are 1, 2, 3 and
+     * 4 respectively */
+ unsigned int pci_irq_nrs[PCI_NUM_PINS] = {1, 2, 3, 4};
+ qemu_irq **irqs, *mpic;
+ DeviceState *dev;
+ CPUPPCState *firstenv = NULL;
+ MemoryRegion *ccsr_addr_space;
+ SysBusDevice *s;
+ PPCE500CCSRState *ccsr;
+
+ /* Setup CPUs */
+ if (machine->cpu_model == NULL) {
+ machine->cpu_model = "e500v2_v30";
+ }
+
+ irqs = g_malloc0(smp_cpus * sizeof(qemu_irq *));
+ irqs[0] = g_malloc0(smp_cpus * sizeof(qemu_irq) * OPENPIC_OUTPUT_NB);
+ for (i = 0; i < smp_cpus; i++) {
+ PowerPCCPU *cpu;
+ CPUState *cs;
+ qemu_irq *input;
+
+ cpu = cpu_ppc_init(machine->cpu_model);
+ if (cpu == NULL) {
+ fprintf(stderr, "Unable to initialize CPU!\n");
+ exit(1);
+ }
+ env = &cpu->env;
+ cs = CPU(cpu);
+
+ if (!firstenv) {
+ firstenv = env;
+ }
+
+ irqs[i] = irqs[0] + (i * OPENPIC_OUTPUT_NB);
+ input = (qemu_irq *)env->irq_inputs;
+ irqs[i][OPENPIC_OUTPUT_INT] = input[PPCE500_INPUT_INT];
+ irqs[i][OPENPIC_OUTPUT_CINT] = input[PPCE500_INPUT_CINT];
+ env->spr_cb[SPR_BOOKE_PIR].default_value = cs->cpu_index = i;
+ env->mpic_iack = params->ccsrbar_base +
+ MPC8544_MPIC_REGS_OFFSET + 0xa0;
+
+ ppc_booke_timers_init(cpu, 400000000, PPC_TIMER_E500);
+
+ /* Register reset handler */
+ if (!i) {
+ /* Primary CPU */
+ struct boot_info *boot_info;
+ boot_info = g_malloc0(sizeof(struct boot_info));
+ qemu_register_reset(ppce500_cpu_reset, cpu);
+ env->load_info = boot_info;
+ } else {
+ /* Secondary CPUs */
+ qemu_register_reset(ppce500_cpu_reset_sec, cpu);
+ }
+ }
+
+ env = firstenv;
+
+    /* Fix up memory size on an alignment boundary */
+ ram_size &= ~(RAM_SIZES_ALIGN - 1);
+ machine->ram_size = ram_size;
+
+ /* Register Memory */
+ memory_region_allocate_system_memory(ram, NULL, "mpc8544ds.ram", ram_size);
+ memory_region_add_subregion(address_space_mem, 0, ram);
+
+ dev = qdev_create(NULL, "e500-ccsr");
+ object_property_add_child(qdev_get_machine(), "e500-ccsr",
+ OBJECT(dev), NULL);
+ qdev_init_nofail(dev);
+ ccsr = CCSR(dev);
+ ccsr_addr_space = &ccsr->ccsr_space;
+ memory_region_add_subregion(address_space_mem, params->ccsrbar_base,
+ ccsr_addr_space);
+
+ mpic = ppce500_init_mpic(machine, params, ccsr_addr_space, irqs);
+
+ /* Serial */
+ if (serial_hds[0]) {
+ serial_mm_init(ccsr_addr_space, MPC8544_SERIAL0_REGS_OFFSET,
+ 0, mpic[42], 399193,
+ serial_hds[0], DEVICE_BIG_ENDIAN);
+ }
+
+ if (serial_hds[1]) {
+ serial_mm_init(ccsr_addr_space, MPC8544_SERIAL1_REGS_OFFSET,
+ 0, mpic[42], 399193,
+ serial_hds[1], DEVICE_BIG_ENDIAN);
+ }
+
+ /* General Utility device */
+ dev = qdev_create(NULL, "mpc8544-guts");
+ qdev_init_nofail(dev);
+ s = SYS_BUS_DEVICE(dev);
+ memory_region_add_subregion(ccsr_addr_space, MPC8544_UTIL_OFFSET,
+ sysbus_mmio_get_region(s, 0));
+
+ /* PCI */
+ dev = qdev_create(NULL, "e500-pcihost");
+ qdev_prop_set_uint32(dev, "first_slot", params->pci_first_slot);
+ qdev_prop_set_uint32(dev, "first_pin_irq", pci_irq_nrs[0]);
+ qdev_init_nofail(dev);
+ s = SYS_BUS_DEVICE(dev);
+ for (i = 0; i < PCI_NUM_PINS; i++) {
+ sysbus_connect_irq(s, i, mpic[pci_irq_nrs[i]]);
+ }
+
+ memory_region_add_subregion(ccsr_addr_space, MPC8544_PCI_REGS_OFFSET,
+ sysbus_mmio_get_region(s, 0));
+
+ pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci.0");
+ if (!pci_bus)
+ printf("couldn't create PCI controller!\n");
+
+ if (pci_bus) {
+ /* Register network interfaces. */
+ for (i = 0; i < nb_nics; i++) {
+ pci_nic_init_nofail(&nd_table[i], pci_bus, "virtio", NULL);
+ }
+ }
+
+ /* Register spinning region */
+ sysbus_create_simple("e500-spin", params->spin_base, NULL);
+
+ if (cur_base < (32 * 1024 * 1024)) {
+ /* u-boot occupies memory up to 32MB, so load blobs above */
+ cur_base = (32 * 1024 * 1024);
+ }
+
+ if (params->has_mpc8xxx_gpio) {
+ qemu_irq poweroff_irq;
+
+ dev = qdev_create(NULL, "mpc8xxx_gpio");
+ s = SYS_BUS_DEVICE(dev);
+ qdev_init_nofail(dev);
+ sysbus_connect_irq(s, 0, mpic[MPC8XXX_GPIO_IRQ]);
+ memory_region_add_subregion(ccsr_addr_space, MPC8XXX_GPIO_OFFSET,
+ sysbus_mmio_get_region(s, 0));
+
+ /* Power Off GPIO at Pin 0 */
+ poweroff_irq = qemu_allocate_irq(ppce500_power_off, NULL, 0);
+ qdev_connect_gpio_out(dev, 0, poweroff_irq);
+ }
+
+ /* Platform Bus Device */
+ if (params->has_platform_bus) {
+ dev = qdev_create(NULL, TYPE_PLATFORM_BUS_DEVICE);
+ dev->id = TYPE_PLATFORM_BUS_DEVICE;
+ qdev_prop_set_uint32(dev, "num_irqs", params->platform_bus_num_irqs);
+ qdev_prop_set_uint32(dev, "mmio_size", params->platform_bus_size);
+ qdev_init_nofail(dev);
+ s = SYS_BUS_DEVICE(dev);
+
+ for (i = 0; i < params->platform_bus_num_irqs; i++) {
+ int irqn = params->platform_bus_first_irq + i;
+ sysbus_connect_irq(s, i, mpic[irqn]);
+ }
+
+ memory_region_add_subregion(address_space_mem,
+ params->platform_bus_base,
+ sysbus_mmio_get_region(s, 0));
+ }
+
+ /* Load kernel. */
+ if (machine->kernel_filename) {
+ kernel_base = cur_base;
+ kernel_size = load_image_targphys(machine->kernel_filename,
+ cur_base,
+ ram_size - cur_base);
+ if (kernel_size < 0) {
+ fprintf(stderr, "qemu: could not load kernel '%s'\n",
+ machine->kernel_filename);
+ exit(1);
+ }
+
+ cur_base += kernel_size;
+ }
+
+ /* Load initrd. */
+ if (machine->initrd_filename) {
+ initrd_base = (cur_base + INITRD_LOAD_PAD) & ~INITRD_PAD_MASK;
+ initrd_size = load_image_targphys(machine->initrd_filename, initrd_base,
+ ram_size - initrd_base);
+
+ if (initrd_size < 0) {
+ fprintf(stderr, "qemu: could not load initial ram disk '%s'\n",
+ machine->initrd_filename);
+ exit(1);
+ }
+
+ cur_base = initrd_base + initrd_size;
+ }
+
+ /*
+ * Smart firmware defaults ahead!
+ *
+ * We use the following table to select which payload to execute.
+ *
+ * -kernel | -bios | payload
+ * ---------+-------+---------
+ * N | Y | u-boot
+ * N | N | u-boot
+ * Y | Y | u-boot
+ * Y | N | kernel
+ *
+ * This ensures backwards compatibility with how we used to expose
+ * -kernel to users but allows them to run through u-boot as well.
+ */
+ if (bios_name == NULL) {
+ if (machine->kernel_filename) {
+ bios_name = machine->kernel_filename;
+ } else {
+ bios_name = "u-boot.e500";
+ }
+ }
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+
+ bios_size = load_elf(filename, NULL, NULL, &bios_entry, &loadaddr, NULL,
+ 1, PPC_ELF_MACHINE, 0);
+ if (bios_size < 0) {
+ /*
+ * Hrm. No ELF image? Try a uImage, maybe someone is giving us an
+ * ePAPR compliant kernel
+ */
+ kernel_size = load_uimage(filename, &bios_entry, &loadaddr, NULL,
+ NULL, NULL);
+ if (kernel_size < 0) {
+ fprintf(stderr, "qemu: could not load firmware '%s'\n", filename);
+ exit(1);
+ }
+ }
+ g_free(filename);
+
+ /* Reserve space for dtb */
+ dt_base = (loadaddr + bios_size + DTC_LOAD_PAD) & ~DTC_PAD_MASK;
+
+ dt_size = ppce500_prep_device_tree(machine, params, dt_base,
+ initrd_base, initrd_size,
+ kernel_base, kernel_size);
+ if (dt_size < 0) {
+ fprintf(stderr, "couldn't load device tree\n");
+ exit(1);
+ }
+ assert(dt_size < DTB_MAX_SIZE);
+
+ boot_info = env->load_info;
+ boot_info->entry = bios_entry;
+ boot_info->dt_base = dt_base;
+ boot_info->dt_size = dt_size;
+}
+
+static int e500_ccsr_initfn(SysBusDevice *dev)
+{
+ PPCE500CCSRState *ccsr;
+
+ ccsr = CCSR(dev);
+ memory_region_init(&ccsr->ccsr_space, OBJECT(ccsr), "e500-ccsr",
+ MPC8544_CCSRBAR_SIZE);
+ return 0;
+}
+
+static void e500_ccsr_class_init(ObjectClass *klass, void *data)
+{
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+ k->init = e500_ccsr_initfn;
+}
+
+static const TypeInfo e500_ccsr_info = {
+ .name = TYPE_CCSR,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(PPCE500CCSRState),
+ .class_init = e500_ccsr_class_init,
+};
+
+static void e500_register_types(void)
+{
+ type_register_static(&e500_ccsr_info);
+}
+
+type_init(e500_register_types)
diff --git a/src/hw/ppc/e500.h b/src/hw/ppc/e500.h
new file mode 100644
index 0000000..ef224ea
--- /dev/null
+++ b/src/hw/ppc/e500.h
@@ -0,0 +1,29 @@
+#ifndef PPCE500_H
+#define PPCE500_H
+
+#include "hw/boards.h"
+
+typedef struct PPCE500Params {
+ int pci_first_slot;
+ int pci_nr_slots;
+
+ /* required -- must at least add toplevel board compatible */
+ void (*fixup_devtree)(struct PPCE500Params *params, void *fdt);
+
+ int mpic_version;
+ bool has_mpc8xxx_gpio;
+ bool has_platform_bus;
+ hwaddr platform_bus_base;
+ hwaddr platform_bus_size;
+ int platform_bus_first_irq;
+ int platform_bus_num_irqs;
+ hwaddr ccsrbar_base;
+ hwaddr pci_pio_base;
+ hwaddr pci_mmio_base;
+ hwaddr pci_mmio_bus_base;
+ hwaddr spin_base;
+} PPCE500Params;
+
+void ppce500_init(MachineState *machine, PPCE500Params *params);
+
+#endif
diff --git a/src/hw/ppc/e500plat.c b/src/hw/ppc/e500plat.c
new file mode 100644
index 0000000..384b6e8
--- /dev/null
+++ b/src/hw/ppc/e500plat.c
@@ -0,0 +1,68 @@
+/*
+ * Generic device-tree-driven paravirt PPC e500 platform
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "config.h"
+#include "qemu-common.h"
+#include "e500.h"
+#include "hw/boards.h"
+#include "sysemu/device_tree.h"
+#include "hw/pci/pci.h"
+#include "hw/ppc/openpic.h"
+#include "kvm_ppc.h"
+
+static void e500plat_fixup_devtree(PPCE500Params *params, void *fdt)
+{
+ const char model[] = "QEMU ppce500";
+ const char compatible[] = "fsl,qemu-e500";
+
+ qemu_fdt_setprop(fdt, "/", "model", model, sizeof(model));
+ qemu_fdt_setprop(fdt, "/", "compatible", compatible,
+ sizeof(compatible));
+}
+
+static void e500plat_init(MachineState *machine)
+{
+ PPCE500Params params = {
+ .pci_first_slot = 0x1,
+ .pci_nr_slots = PCI_SLOT_MAX - 1,
+ .fixup_devtree = e500plat_fixup_devtree,
+ .mpic_version = OPENPIC_MODEL_FSL_MPIC_42,
+ .has_mpc8xxx_gpio = true,
+ .has_platform_bus = true,
+ .platform_bus_base = 0xf00000000ULL,
+ .platform_bus_size = (128ULL * 1024 * 1024),
+ .platform_bus_first_irq = 5,
+ .platform_bus_num_irqs = 10,
+ .ccsrbar_base = 0xFE0000000ULL,
+ .pci_pio_base = 0xFE1000000ULL,
+ .pci_mmio_base = 0xC00000000ULL,
+ .pci_mmio_bus_base = 0xE0000000ULL,
+ .spin_base = 0xFEF000000ULL,
+ };
+
+    /* Older KVM versions don't support EPR, which breaks guests when we
+       announce MPIC variants that support EPR. Revert to an older variant
+       for those. */
+ if (kvm_enabled() && !kvmppc_has_cap_epr()) {
+ params.mpic_version = OPENPIC_MODEL_FSL_MPIC_20;
+ }
+
+ ppce500_init(machine, &params);
+}
+
+static void e500plat_machine_init(MachineClass *mc)
+{
+ mc->desc = "generic paravirt e500 platform";
+ mc->init = e500plat_init;
+ mc->max_cpus = 32;
+ mc->has_dynamic_sysbus = true;
+}
+
+DEFINE_MACHINE("ppce500", e500plat_machine_init)
diff --git a/src/hw/ppc/mac.h b/src/hw/ppc/mac.h
new file mode 100644
index 0000000..e375ed2
--- /dev/null
+++ b/src/hw/ppc/mac.h
@@ -0,0 +1,186 @@
+/*
+ * QEMU PowerMac emulation shared definitions and prototypes
+ *
+ * Copyright (c) 2004-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#if !defined(__PPC_MAC_H__)
+#define __PPC_MAC_H__
+
+#include "exec/memory.h"
+#include "hw/sysbus.h"
+#include "hw/ide/internal.h"
+#include "hw/input/adb.h"
+
+/* SMP is not enabled, for now */
+#define MAX_CPUS 1
+
+#define BIOS_SIZE (1024 * 1024)
+#define NVRAM_SIZE 0x2000
+#define PROM_FILENAME "openbios-ppc"
+#define PROM_ADDR 0xfff00000
+
+#define KERNEL_LOAD_ADDR 0x01000000
+#define KERNEL_GAP 0x00100000
+
+#define ESCC_CLOCK 3686400
+
+/* Cuda */
+#define TYPE_CUDA "cuda"
+#define CUDA(obj) OBJECT_CHECK(CUDAState, (obj), TYPE_CUDA)
+
+/**
+ * CUDATimer:
+ * @counter_value: counter value at load time
+ */
+typedef struct CUDATimer {
+ int index;
+ uint16_t latch;
+ uint16_t counter_value;
+ int64_t load_time;
+ int64_t next_irq_time;
+ uint64_t frequency;
+ QEMUTimer *timer;
+} CUDATimer;
+
+/**
+ * CUDAState:
+ * @b: B-side data
+ * @a: A-side data
+ * @dirb: B-side direction (1=output)
+ * @dira: A-side direction (1=output)
+ * @sr: Shift register
+ * @acr: Auxiliary control register
+ * @pcr: Peripheral control register
+ * @ifr: Interrupt flag register
+ * @ier: Interrupt enable register
+ * @anh: A-side data, no handshake
+ * @last_b: last value of B register
+ * @last_acr: last value of ACR register
+ */
+typedef struct CUDAState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ MemoryRegion mem;
+ /* cuda registers */
+ uint8_t b;
+ uint8_t a;
+ uint8_t dirb;
+ uint8_t dira;
+ uint8_t sr;
+ uint8_t acr;
+ uint8_t pcr;
+ uint8_t ifr;
+ uint8_t ier;
+ uint8_t anh;
+
+ ADBBusState adb_bus;
+ CUDATimer timers[2];
+
+ uint32_t tick_offset;
+ uint64_t frequency;
+
+ uint8_t last_b;
+ uint8_t last_acr;
+
+ /* MacOS 9 is racy and requires a delay upon setting the SR_INT bit */
+ QEMUTimer *sr_delay_timer;
+
+ int data_in_size;
+ int data_in_index;
+ int data_out_index;
+
+ qemu_irq irq;
+ uint8_t autopoll;
+ uint8_t data_in[128];
+ uint8_t data_out[16];
+ QEMUTimer *adb_poll_timer;
+} CUDAState;
+
+/* MacIO */
+#define TYPE_OLDWORLD_MACIO "macio-oldworld"
+#define TYPE_NEWWORLD_MACIO "macio-newworld"
+
+#define TYPE_MACIO_IDE "macio-ide"
+#define MACIO_IDE(obj) OBJECT_CHECK(MACIOIDEState, (obj), TYPE_MACIO_IDE)
+
+typedef struct MACIOIDEState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ qemu_irq irq;
+ qemu_irq dma_irq;
+
+ MemoryRegion mem;
+ IDEBus bus;
+ BlockAIOCB *aiocb;
+ IDEDMA dma;
+ void *dbdma;
+ bool dma_active;
+} MACIOIDEState;
+
+void macio_ide_init_drives(MACIOIDEState *ide, DriveInfo **hd_table);
+void macio_ide_register_dma(MACIOIDEState *ide, void *dbdma, int channel);
+
+void macio_init(PCIDevice *dev,
+ MemoryRegion *pic_mem,
+ MemoryRegion *escc_mem);
+
+/* Heathrow PIC */
+qemu_irq *heathrow_pic_init(MemoryRegion **pmem,
+ int nb_cpus, qemu_irq **irqs);
+
+/* Grackle PCI */
+#define TYPE_GRACKLE_PCI_HOST_BRIDGE "grackle-pcihost"
+PCIBus *pci_grackle_init(uint32_t base, qemu_irq *pic,
+ MemoryRegion *address_space_mem,
+ MemoryRegion *address_space_io);
+
+/* UniNorth PCI */
+PCIBus *pci_pmac_init(qemu_irq *pic,
+ MemoryRegion *address_space_mem,
+ MemoryRegion *address_space_io);
+PCIBus *pci_pmac_u3_init(qemu_irq *pic,
+ MemoryRegion *address_space_mem,
+ MemoryRegion *address_space_io);
+
+/* Mac NVRAM */
+#define TYPE_MACIO_NVRAM "macio-nvram"
+#define MACIO_NVRAM(obj) \
+ OBJECT_CHECK(MacIONVRAMState, (obj), TYPE_MACIO_NVRAM)
+
+typedef struct MacIONVRAMState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ uint32_t size;
+ uint32_t it_shift;
+
+ MemoryRegion mem;
+ uint8_t *data;
+} MacIONVRAMState;
+
+void pmac_format_nvram_partition (MacIONVRAMState *nvr, int len);
+#endif /* !defined(__PPC_MAC_H__) */
diff --git a/src/hw/ppc/mac_newworld.c b/src/hw/ppc/mac_newworld.c
new file mode 100644
index 0000000..1b9a573
--- /dev/null
+++ b/src/hw/ppc/mac_newworld.c
@@ -0,0 +1,530 @@
+/*
+ * QEMU PowerPC CHRP (currently NewWorld PowerMac) hardware System Emulator
+ *
+ * Copyright (c) 2004-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * PCI bus layout on a real G5 (U3 based):
+ *
+ * 0000:f0:0b.0 Host bridge [0600]: Apple Computer Inc. U3 AGP [106b:004b]
+ * 0000:f0:10.0 VGA compatible controller [0300]: ATI Technologies Inc RV350 AP [Radeon 9600] [1002:4150]
+ * 0001:00:00.0 Host bridge [0600]: Apple Computer Inc. CPC945 HT Bridge [106b:004a]
+ * 0001:00:01.0 PCI bridge [0604]: Advanced Micro Devices [AMD] AMD-8131 PCI-X Bridge [1022:7450] (rev 12)
+ * 0001:00:02.0 PCI bridge [0604]: Advanced Micro Devices [AMD] AMD-8131 PCI-X Bridge [1022:7450] (rev 12)
+ * 0001:00:03.0 PCI bridge [0604]: Apple Computer Inc. K2 HT-PCI Bridge [106b:0045]
+ * 0001:00:04.0 PCI bridge [0604]: Apple Computer Inc. K2 HT-PCI Bridge [106b:0046]
+ * 0001:00:05.0 PCI bridge [0604]: Apple Computer Inc. K2 HT-PCI Bridge [106b:0047]
+ * 0001:00:06.0 PCI bridge [0604]: Apple Computer Inc. K2 HT-PCI Bridge [106b:0048]
+ * 0001:00:07.0 PCI bridge [0604]: Apple Computer Inc. K2 HT-PCI Bridge [106b:0049]
+ * 0001:01:07.0 Class [ff00]: Apple Computer Inc. K2 KeyLargo Mac/IO [106b:0041] (rev 20)
+ * 0001:01:08.0 USB Controller [0c03]: Apple Computer Inc. K2 KeyLargo USB [106b:0040]
+ * 0001:01:09.0 USB Controller [0c03]: Apple Computer Inc. K2 KeyLargo USB [106b:0040]
+ * 0001:02:0b.0 USB Controller [0c03]: NEC Corporation USB [1033:0035] (rev 43)
+ * 0001:02:0b.1 USB Controller [0c03]: NEC Corporation USB [1033:0035] (rev 43)
+ * 0001:02:0b.2 USB Controller [0c03]: NEC Corporation USB 2.0 [1033:00e0] (rev 04)
+ * 0001:03:0d.0 Class [ff00]: Apple Computer Inc. K2 ATA/100 [106b:0043]
+ * 0001:03:0e.0 FireWire (IEEE 1394) [0c00]: Apple Computer Inc. K2 FireWire [106b:0042]
+ * 0001:04:0f.0 Ethernet controller [0200]: Apple Computer Inc. K2 GMAC (Sun GEM) [106b:004c]
+ * 0001:05:0c.0 IDE interface [0101]: Broadcom K2 SATA [1166:0240]
+ *
+ */
+#include "hw/hw.h"
+#include "hw/ppc/ppc.h"
+#include "hw/ppc/mac.h"
+#include "hw/input/adb.h"
+#include "hw/ppc/mac_dbdma.h"
+#include "hw/timer/m48t59.h"
+#include "hw/pci/pci.h"
+#include "net/net.h"
+#include "sysemu/sysemu.h"
+#include "hw/boards.h"
+#include "hw/nvram/fw_cfg.h"
+#include "hw/char/escc.h"
+#include "hw/ppc/openpic.h"
+#include "hw/ide.h"
+#include "hw/loader.h"
+#include "elf.h"
+#include "sysemu/kvm.h"
+#include "kvm_ppc.h"
+#include "hw/usb.h"
+#include "sysemu/block-backend.h"
+#include "exec/address-spaces.h"
+#include "hw/sysbus.h"
+
+#define MAX_IDE_BUS 2
+#define CFG_ADDR 0xf0000510
+#define TBFREQ (100UL * 1000UL * 1000UL)
+#define CLOCKFREQ (266UL * 1000UL * 1000UL)
+#define BUSFREQ (100UL * 1000UL * 1000UL)
+
+/* debug UniNorth */
+//#define DEBUG_UNIN
+
+#ifdef DEBUG_UNIN
+#define UNIN_DPRINTF(fmt, ...) \
+ do { printf("UNIN: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define UNIN_DPRINTF(fmt, ...)
+#endif
+
+/* UniN device */
+static void unin_write(void *opaque, hwaddr addr, uint64_t value,
+ unsigned size)
+{
+ UNIN_DPRINTF("write addr " TARGET_FMT_plx " val %"PRIx64"\n", addr, value);
+ if (addr == 0x0) {
+ *(int*)opaque = value;
+ }
+}
+
+static uint64_t unin_read(void *opaque, hwaddr addr, unsigned size)
+{
+ uint32_t value;
+
+ value = 0;
+ switch (addr) {
+ case 0:
+ value = *(int*)opaque;
+ }
+
+ UNIN_DPRINTF("readl addr " TARGET_FMT_plx " val %x\n", addr, value);
+
+ return value;
+}
+
+static const MemoryRegionOps unin_ops = {
+ .read = unin_read,
+ .write = unin_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void fw_cfg_boot_set(void *opaque, const char *boot_device,
+ Error **errp)
+{
+ fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]);
+}
+
+static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
+{
+ return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR;
+}
+
+static hwaddr round_page(hwaddr addr)
+{
+ return (addr + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
+}
+
+static void ppc_core99_reset(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ cpu_reset(CPU(cpu));
+ /* 970 CPUs want to get their initial IP as part of their boot protocol */
+ cpu->env.nip = PROM_ADDR + 0x100;
+}
+
+/* PowerPC Mac99 hardware initialisation */
+static void ppc_core99_init(MachineState *machine)
+{
+ ram_addr_t ram_size = machine->ram_size;
+ const char *kernel_filename = machine->kernel_filename;
+ const char *kernel_cmdline = machine->kernel_cmdline;
+ const char *initrd_filename = machine->initrd_filename;
+ const char *boot_device = machine->boot_order;
+ PowerPCCPU *cpu = NULL;
+ CPUPPCState *env = NULL;
+ char *filename;
+ qemu_irq *pic, **openpic_irqs;
+ MemoryRegion *isa = g_new(MemoryRegion, 1);
+ MemoryRegion *unin_memory = g_new(MemoryRegion, 1);
+ MemoryRegion *unin2_memory = g_new(MemoryRegion, 1);
+ int linux_boot, i, j, k;
+ MemoryRegion *ram = g_new(MemoryRegion, 1), *bios = g_new(MemoryRegion, 1);
+ hwaddr kernel_base, initrd_base, cmdline_base = 0;
+ long kernel_size, initrd_size;
+ PCIBus *pci_bus;
+ PCIDevice *macio;
+ MACIOIDEState *macio_ide;
+ BusState *adb_bus;
+ MacIONVRAMState *nvr;
+ int bios_size;
+ MemoryRegion *pic_mem, *escc_mem;
+ MemoryRegion *escc_bar = g_new(MemoryRegion, 1);
+ int ppc_boot_device;
+ DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS];
+ void *fw_cfg;
+ int machine_arch;
+ SysBusDevice *s;
+ DeviceState *dev;
+ int *token = g_new(int, 1);
+ hwaddr nvram_addr = 0xFFF04000;
+ uint64_t tbfreq;
+
+ linux_boot = (kernel_filename != NULL);
+
+ /* init CPUs */
+ if (machine->cpu_model == NULL) {
+#ifdef TARGET_PPC64
+ machine->cpu_model = "970fx";
+#else
+ machine->cpu_model = "G4";
+#endif
+ }
+ for (i = 0; i < smp_cpus; i++) {
+ cpu = cpu_ppc_init(machine->cpu_model);
+ if (cpu == NULL) {
+ fprintf(stderr, "Unable to find PowerPC CPU definition\n");
+ exit(1);
+ }
+ env = &cpu->env;
+
+ /* Set time-base frequency to 100 Mhz */
+ cpu_ppc_tb_init(env, TBFREQ);
+ qemu_register_reset(ppc_core99_reset, cpu);
+ }
+
+ /* allocate RAM */
+ memory_region_allocate_system_memory(ram, NULL, "ppc_core99.ram", ram_size);
+ memory_region_add_subregion(get_system_memory(), 0, ram);
+
+ /* allocate and load BIOS */
+ memory_region_init_ram(bios, NULL, "ppc_core99.bios", BIOS_SIZE,
+ &error_fatal);
+ vmstate_register_ram_global(bios);
+
+ if (bios_name == NULL)
+ bios_name = PROM_FILENAME;
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+ memory_region_set_readonly(bios, true);
+ memory_region_add_subregion(get_system_memory(), PROM_ADDR, bios);
+
+ /* Load OpenBIOS (ELF) */
+ if (filename) {
+ bios_size = load_elf(filename, NULL, NULL, NULL,
+ NULL, NULL, 1, PPC_ELF_MACHINE, 0);
+
+ g_free(filename);
+ } else {
+ bios_size = -1;
+ }
+ if (bios_size < 0 || bios_size > BIOS_SIZE) {
+ hw_error("qemu: could not load PowerPC bios '%s'\n", bios_name);
+ exit(1);
+ }
+
+ if (linux_boot) {
+ uint64_t lowaddr = 0;
+ int bswap_needed;
+
+#ifdef BSWAP_NEEDED
+ bswap_needed = 1;
+#else
+ bswap_needed = 0;
+#endif
+ kernel_base = KERNEL_LOAD_ADDR;
+
+ kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL,
+ NULL, &lowaddr, NULL, 1, PPC_ELF_MACHINE, 0);
+ if (kernel_size < 0)
+ kernel_size = load_aout(kernel_filename, kernel_base,
+ ram_size - kernel_base, bswap_needed,
+ TARGET_PAGE_SIZE);
+ if (kernel_size < 0)
+ kernel_size = load_image_targphys(kernel_filename,
+ kernel_base,
+ ram_size - kernel_base);
+ if (kernel_size < 0) {
+ hw_error("qemu: could not load kernel '%s'\n", kernel_filename);
+ exit(1);
+ }
+ /* load initrd */
+ if (initrd_filename) {
+ initrd_base = round_page(kernel_base + kernel_size + KERNEL_GAP);
+ initrd_size = load_image_targphys(initrd_filename, initrd_base,
+ ram_size - initrd_base);
+ if (initrd_size < 0) {
+ hw_error("qemu: could not load initial ram disk '%s'\n",
+ initrd_filename);
+ exit(1);
+ }
+ cmdline_base = round_page(initrd_base + initrd_size);
+ } else {
+ initrd_base = 0;
+ initrd_size = 0;
+ cmdline_base = round_page(kernel_base + kernel_size + KERNEL_GAP);
+ }
+ ppc_boot_device = 'm';
+ } else {
+ kernel_base = 0;
+ kernel_size = 0;
+ initrd_base = 0;
+ initrd_size = 0;
+ ppc_boot_device = '\0';
+        /* We consider that NewWorld PowerMacs never have a floppy drive.
+ * For now, OHW cannot boot from the network.
+ */
+ for (i = 0; boot_device[i] != '\0'; i++) {
+ if (boot_device[i] >= 'c' && boot_device[i] <= 'f') {
+ ppc_boot_device = boot_device[i];
+ break;
+ }
+ }
+ if (ppc_boot_device == '\0') {
+ fprintf(stderr, "No valid boot device for Mac99 machine\n");
+ exit(1);
+ }
+ }
+
+ /* Register 8 MB of ISA IO space */
+ memory_region_init_alias(isa, NULL, "isa_mmio",
+ get_system_io(), 0, 0x00800000);
+ memory_region_add_subregion(get_system_memory(), 0xf2000000, isa);
+
+ /* UniN init: XXX should be a real device */
+ memory_region_init_io(unin_memory, NULL, &unin_ops, token, "unin", 0x1000);
+ memory_region_add_subregion(get_system_memory(), 0xf8000000, unin_memory);
+
+ memory_region_init_io(unin2_memory, NULL, &unin_ops, token, "unin", 0x1000);
+ memory_region_add_subregion(get_system_memory(), 0xf3000000, unin2_memory);
+
+ openpic_irqs = g_malloc0(smp_cpus * sizeof(qemu_irq *));
+ openpic_irqs[0] =
+ g_malloc0(smp_cpus * sizeof(qemu_irq) * OPENPIC_OUTPUT_NB);
+ for (i = 0; i < smp_cpus; i++) {
+ /* Mac99 IRQ connection between OpenPIC outputs pins
+ * and PowerPC input pins
+ */
+ switch (PPC_INPUT(env)) {
+ case PPC_FLAGS_INPUT_6xx:
+ openpic_irqs[i] = openpic_irqs[0] + (i * OPENPIC_OUTPUT_NB);
+ openpic_irqs[i][OPENPIC_OUTPUT_INT] =
+ ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT];
+ openpic_irqs[i][OPENPIC_OUTPUT_CINT] =
+ ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT];
+ openpic_irqs[i][OPENPIC_OUTPUT_MCK] =
+ ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_MCP];
+ /* Not connected ? */
+ openpic_irqs[i][OPENPIC_OUTPUT_DEBUG] = NULL;
+ /* Check this */
+ openpic_irqs[i][OPENPIC_OUTPUT_RESET] =
+ ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_HRESET];
+ break;
+#if defined(TARGET_PPC64)
+ case PPC_FLAGS_INPUT_970:
+ openpic_irqs[i] = openpic_irqs[0] + (i * OPENPIC_OUTPUT_NB);
+ openpic_irqs[i][OPENPIC_OUTPUT_INT] =
+ ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT];
+ openpic_irqs[i][OPENPIC_OUTPUT_CINT] =
+ ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_INT];
+ openpic_irqs[i][OPENPIC_OUTPUT_MCK] =
+ ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_MCP];
+ /* Not connected ? */
+ openpic_irqs[i][OPENPIC_OUTPUT_DEBUG] = NULL;
+ /* Check this */
+ openpic_irqs[i][OPENPIC_OUTPUT_RESET] =
+ ((qemu_irq *)env->irq_inputs)[PPC970_INPUT_HRESET];
+ break;
+#endif /* defined(TARGET_PPC64) */
+ default:
+ hw_error("Bus model not supported on mac99 machine\n");
+ exit(1);
+ }
+ }
+
+ pic = g_new0(qemu_irq, 64);
+
+ dev = qdev_create(NULL, TYPE_OPENPIC);
+ qdev_prop_set_uint32(dev, "model", OPENPIC_MODEL_RAVEN);
+ qdev_init_nofail(dev);
+ s = SYS_BUS_DEVICE(dev);
+ pic_mem = s->mmio[0].memory;
+ k = 0;
+ for (i = 0; i < smp_cpus; i++) {
+ for (j = 0; j < OPENPIC_OUTPUT_NB; j++) {
+ sysbus_connect_irq(s, k++, openpic_irqs[i][j]);
+ }
+ }
+
+ for (i = 0; i < 64; i++) {
+ pic[i] = qdev_get_gpio_in(dev, i);
+ }
+
+ if (PPC_INPUT(env) == PPC_FLAGS_INPUT_970) {
+ /* 970 gets a U3 bus */
+ pci_bus = pci_pmac_u3_init(pic, get_system_memory(), get_system_io());
+ machine_arch = ARCH_MAC99_U3;
+ } else {
+ pci_bus = pci_pmac_init(pic, get_system_memory(), get_system_io());
+ machine_arch = ARCH_MAC99;
+ }
+
+ machine->usb |= defaults_enabled() && !machine->usb_disabled;
+
+ /* Timebase Frequency */
+ if (kvm_enabled()) {
+ tbfreq = kvmppc_get_tbfreq();
+ } else {
+ tbfreq = TBFREQ;
+ }
+
+ /* init basic PC hardware */
+ escc_mem = escc_init(0, pic[0x25], pic[0x24],
+ serial_hds[0], serial_hds[1], ESCC_CLOCK, 4);
+ memory_region_init_alias(escc_bar, NULL, "escc-bar",
+ escc_mem, 0, memory_region_size(escc_mem));
+
+ macio = pci_create(pci_bus, -1, TYPE_NEWWORLD_MACIO);
+ dev = DEVICE(macio);
+ qdev_connect_gpio_out(dev, 0, pic[0x19]); /* CUDA */
+ qdev_connect_gpio_out(dev, 1, pic[0x0d]); /* IDE */
+ qdev_connect_gpio_out(dev, 2, pic[0x02]); /* IDE DMA */
+ qdev_connect_gpio_out(dev, 3, pic[0x0e]); /* IDE */
+ qdev_connect_gpio_out(dev, 4, pic[0x03]); /* IDE DMA */
+ qdev_prop_set_uint64(dev, "frequency", tbfreq);
+ macio_init(macio, pic_mem, escc_bar);
+
+ /* We only emulate 2 out of 3 IDE controllers for now */
+ ide_drive_get(hd, ARRAY_SIZE(hd));
+
+ macio_ide = MACIO_IDE(object_resolve_path_component(OBJECT(macio),
+ "ide[0]"));
+ macio_ide_init_drives(macio_ide, hd);
+
+ macio_ide = MACIO_IDE(object_resolve_path_component(OBJECT(macio),
+ "ide[1]"));
+ macio_ide_init_drives(macio_ide, &hd[MAX_IDE_DEVS]);
+
+ dev = DEVICE(object_resolve_path_component(OBJECT(macio), "cuda"));
+ adb_bus = qdev_get_child_bus(dev, "adb.0");
+ dev = qdev_create(adb_bus, TYPE_ADB_KEYBOARD);
+ qdev_init_nofail(dev);
+ dev = qdev_create(adb_bus, TYPE_ADB_MOUSE);
+ qdev_init_nofail(dev);
+
+ if (machine->usb) {
+ pci_create_simple(pci_bus, -1, "pci-ohci");
+
+ /* U3 needs to use USB for input because Linux doesn't support via-cuda
+ on PPC64 */
+ if (machine_arch == ARCH_MAC99_U3) {
+ USBBus *usb_bus = usb_bus_find(-1);
+
+ usb_create_simple(usb_bus, "usb-kbd");
+ usb_create_simple(usb_bus, "usb-mouse");
+ }
+ }
+
+ pci_vga_init(pci_bus);
+
+ if (graphic_depth != 15 && graphic_depth != 32 && graphic_depth != 8) {
+ graphic_depth = 15;
+ }
+
+ for (i = 0; i < nb_nics; i++) {
+ pci_nic_init_nofail(&nd_table[i], pci_bus, "ne2k_pci", NULL);
+ }
+
+ /* The NewWorld NVRAM is not located in the MacIO device */
+#ifdef CONFIG_KVM
+ if (kvm_enabled() && getpagesize() > 4096) {
+ /* We can't combine read-write and read-only in a single page, so
+ move the NVRAM out of ROM again for KVM */
+ nvram_addr = 0xFFE00000;
+ }
+#endif
+ dev = qdev_create(NULL, TYPE_MACIO_NVRAM);
+ qdev_prop_set_uint32(dev, "size", 0x2000);
+ qdev_prop_set_uint32(dev, "it_shift", 1);
+ qdev_init_nofail(dev);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, nvram_addr);
+ nvr = MACIO_NVRAM(dev);
+ pmac_format_nvram_partition(nvr, 0x2000);
+ /* No PCI init: the BIOS will do it */
+
+ fw_cfg = fw_cfg_init_mem(CFG_ADDR, CFG_ADDR + 2);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus);
+ fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, machine_arch);
+ fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base);
+ fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size);
+ if (kernel_cmdline) {
+ fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, cmdline_base);
+ pstrcpy_targphys("cmdline", cmdline_base, TARGET_PAGE_SIZE, kernel_cmdline);
+ } else {
+ fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, 0);
+ }
+ fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_base);
+ fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, ppc_boot_device);
+
+ fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_WIDTH, graphic_width);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_HEIGHT, graphic_height);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_DEPTH, graphic_depth);
+
+ fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_IS_KVM, kvm_enabled());
+ if (kvm_enabled()) {
+#ifdef CONFIG_KVM
+ uint8_t *hypercall;
+
+ hypercall = g_malloc(16);
+ kvmppc_get_hypercall(env, hypercall, 16);
+ fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16);
+ fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid());
+#endif
+ }
+ fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, tbfreq);
+ /* Mac OS X requires a "known good" clock-frequency value; pass it one. */
+ fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_CLOCKFREQ, CLOCKFREQ);
+ fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_BUSFREQ, BUSFREQ);
+ fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_NVRAM_ADDR, nvram_addr);
+
+ qemu_register_boot_set(fw_cfg_boot_set, fw_cfg);
+}
+
+static int core99_kvm_type(const char *arg)
+{
+ /* Always force PR KVM */
+ return 2;
+}
+
+static void core99_machine_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->desc = "Mac99 based PowerMAC";
+ mc->init = ppc_core99_init;
+ mc->max_cpus = MAX_CPUS;
+ mc->default_boot_order = "cd";
+ mc->kvm_type = core99_kvm_type;
+}
+
+static const TypeInfo core99_machine_info = {
+ .name = MACHINE_TYPE_NAME("mac99"),
+ .parent = TYPE_MACHINE,
+ .class_init = core99_machine_class_init,
+};
+
+static void mac_machine_register_types(void)
+{
+ type_register_static(&core99_machine_info);
+}
+
+type_init(mac_machine_register_types)
diff --git a/src/hw/ppc/mac_oldworld.c b/src/hw/ppc/mac_oldworld.c
new file mode 100644
index 0000000..21eaf0e
--- /dev/null
+++ b/src/hw/ppc/mac_oldworld.c
@@ -0,0 +1,373 @@
+
+/*
+ * QEMU OldWorld PowerMac (currently ~G3 Beige) hardware System Emulator
+ *
+ * Copyright (c) 2004-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/ppc/ppc.h"
+#include "mac.h"
+#include "hw/input/adb.h"
+#include "hw/timer/m48t59.h"
+#include "sysemu/sysemu.h"
+#include "net/net.h"
+#include "hw/isa/isa.h"
+#include "hw/pci/pci.h"
+#include "hw/boards.h"
+#include "hw/nvram/fw_cfg.h"
+#include "hw/char/escc.h"
+#include "hw/ide.h"
+#include "hw/loader.h"
+#include "elf.h"
+#include "sysemu/kvm.h"
+#include "kvm_ppc.h"
+#include "sysemu/block-backend.h"
+#include "exec/address-spaces.h"
+
+#define MAX_IDE_BUS 2
+#define CFG_ADDR 0xf0000510
+#define TBFREQ 16600000UL
+#define CLOCKFREQ 266000000UL
+#define BUSFREQ 66000000UL
+
+static void fw_cfg_boot_set(void *opaque, const char *boot_device,
+ Error **errp)
+{
+ fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]);
+}
+
+static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
+{
+ return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR;
+}
+
+static hwaddr round_page(hwaddr addr)
+{
+ return (addr + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
+}
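+
+/* Illustrative example (assuming 4 KiB target pages, i.e. TARGET_PAGE_SIZE
+ * == 0x1000): round_page(0x1234) == 0x2000, and
+ * translate_kernel_address(NULL, 0xc0000000) == KERNEL_LOAD_ADDR, since the
+ * upper four address bits are masked off before the load address is added.
+ */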
+
+static void ppc_heathrow_reset(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ cpu_reset(CPU(cpu));
+}
+
+static void ppc_heathrow_init(MachineState *machine)
+{
+ ram_addr_t ram_size = machine->ram_size;
+ const char *kernel_filename = machine->kernel_filename;
+ const char *kernel_cmdline = machine->kernel_cmdline;
+ const char *initrd_filename = machine->initrd_filename;
+ const char *boot_device = machine->boot_order;
+ MemoryRegion *sysmem = get_system_memory();
+ PowerPCCPU *cpu = NULL;
+ CPUPPCState *env = NULL;
+ char *filename;
+ qemu_irq *pic, **heathrow_irqs;
+ int linux_boot, i;
+ MemoryRegion *ram = g_new(MemoryRegion, 1);
+ MemoryRegion *bios = g_new(MemoryRegion, 1);
+ MemoryRegion *isa = g_new(MemoryRegion, 1);
+ uint32_t kernel_base, initrd_base, cmdline_base = 0;
+ int32_t kernel_size, initrd_size;
+ PCIBus *pci_bus;
+ PCIDevice *macio;
+ MACIOIDEState *macio_ide;
+ DeviceState *dev;
+ BusState *adb_bus;
+ int bios_size;
+ MemoryRegion *pic_mem;
+ MemoryRegion *escc_mem, *escc_bar = g_new(MemoryRegion, 1);
+ uint16_t ppc_boot_device;
+ DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS];
+ void *fw_cfg;
+ uint64_t tbfreq;
+
+ linux_boot = (kernel_filename != NULL);
+
+ /* init CPUs */
+ if (machine->cpu_model == NULL)
+ machine->cpu_model = "G3";
+ for (i = 0; i < smp_cpus; i++) {
+ cpu = cpu_ppc_init(machine->cpu_model);
+ if (cpu == NULL) {
+ fprintf(stderr, "Unable to find PowerPC CPU definition\n");
+ exit(1);
+ }
+ env = &cpu->env;
+
+        /* Set time-base frequency to 16.6 MHz */
+ cpu_ppc_tb_init(env, TBFREQ);
+ qemu_register_reset(ppc_heathrow_reset, cpu);
+ }
+
+ /* allocate RAM */
+ if (ram_size > (2047 << 20)) {
+ fprintf(stderr,
+ "qemu: Too much memory for this machine: %d MB, maximum 2047 MB\n",
+ ((unsigned int)ram_size / (1 << 20)));
+ exit(1);
+ }
+
+ memory_region_allocate_system_memory(ram, NULL, "ppc_heathrow.ram",
+ ram_size);
+ memory_region_add_subregion(sysmem, 0, ram);
+
+ /* allocate and load BIOS */
+ memory_region_init_ram(bios, NULL, "ppc_heathrow.bios", BIOS_SIZE,
+ &error_fatal);
+ vmstate_register_ram_global(bios);
+
+ if (bios_name == NULL)
+ bios_name = PROM_FILENAME;
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+ memory_region_set_readonly(bios, true);
+ memory_region_add_subregion(sysmem, PROM_ADDR, bios);
+
+ /* Load OpenBIOS (ELF) */
+ if (filename) {
+ bios_size = load_elf(filename, 0, NULL, NULL, NULL, NULL,
+ 1, PPC_ELF_MACHINE, 0);
+ g_free(filename);
+ } else {
+ bios_size = -1;
+ }
+ if (bios_size < 0 || bios_size > BIOS_SIZE) {
+ hw_error("qemu: could not load PowerPC bios '%s'\n", bios_name);
+ exit(1);
+ }
+
+ if (linux_boot) {
+ uint64_t lowaddr = 0;
+ int bswap_needed;
+
+#ifdef BSWAP_NEEDED
+ bswap_needed = 1;
+#else
+ bswap_needed = 0;
+#endif
+ kernel_base = KERNEL_LOAD_ADDR;
+ kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL,
+ NULL, &lowaddr, NULL, 1, PPC_ELF_MACHINE, 0);
+ if (kernel_size < 0)
+ kernel_size = load_aout(kernel_filename, kernel_base,
+ ram_size - kernel_base, bswap_needed,
+ TARGET_PAGE_SIZE);
+ if (kernel_size < 0)
+ kernel_size = load_image_targphys(kernel_filename,
+ kernel_base,
+ ram_size - kernel_base);
+ if (kernel_size < 0) {
+ hw_error("qemu: could not load kernel '%s'\n",
+ kernel_filename);
+ exit(1);
+ }
+ /* load initrd */
+ if (initrd_filename) {
+ initrd_base = round_page(kernel_base + kernel_size + KERNEL_GAP);
+ initrd_size = load_image_targphys(initrd_filename, initrd_base,
+ ram_size - initrd_base);
+ if (initrd_size < 0) {
+ hw_error("qemu: could not load initial ram disk '%s'\n",
+ initrd_filename);
+ exit(1);
+ }
+ cmdline_base = round_page(initrd_base + initrd_size);
+ } else {
+ initrd_base = 0;
+ initrd_size = 0;
+ cmdline_base = round_page(kernel_base + kernel_size + KERNEL_GAP);
+ }
+ ppc_boot_device = 'm';
+ } else {
+ kernel_base = 0;
+ kernel_size = 0;
+ initrd_base = 0;
+ initrd_size = 0;
+ ppc_boot_device = '\0';
+ for (i = 0; boot_device[i] != '\0'; i++) {
+ /* TOFIX: for now, the second IDE channel is not properly
+             * used by OHW. The Mac floppy disks are not emulated.
+ * For now, OHW cannot boot from the network.
+ */
+#if 0
+ if (boot_device[i] >= 'a' && boot_device[i] <= 'f') {
+ ppc_boot_device = boot_device[i];
+ break;
+ }
+#else
+ if (boot_device[i] >= 'c' && boot_device[i] <= 'd') {
+ ppc_boot_device = boot_device[i];
+ break;
+ }
+#endif
+ }
+ if (ppc_boot_device == '\0') {
+ fprintf(stderr, "No valid boot device for G3 Beige machine\n");
+ exit(1);
+ }
+ }
+
+ /* Register 2 MB of ISA IO space */
+ memory_region_init_alias(isa, NULL, "isa_mmio",
+ get_system_io(), 0, 0x00200000);
+ memory_region_add_subregion(sysmem, 0xfe000000, isa);
+
+ /* XXX: we register only 1 output pin for heathrow PIC */
+ heathrow_irqs = g_malloc0(smp_cpus * sizeof(qemu_irq *));
+ heathrow_irqs[0] =
+ g_malloc0(smp_cpus * sizeof(qemu_irq) * 1);
+ /* Connect the heathrow PIC outputs to the 6xx bus */
+ for (i = 0; i < smp_cpus; i++) {
+ switch (PPC_INPUT(env)) {
+ case PPC_FLAGS_INPUT_6xx:
+ heathrow_irqs[i] = heathrow_irqs[0] + (i * 1);
+ heathrow_irqs[i][0] =
+ ((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT];
+ break;
+ default:
+ hw_error("Bus model not supported on OldWorld Mac machine\n");
+ }
+ }
+
+ /* Timebase Frequency */
+ if (kvm_enabled()) {
+ tbfreq = kvmppc_get_tbfreq();
+ } else {
+ tbfreq = TBFREQ;
+ }
+
+ /* init basic PC hardware */
+ if (PPC_INPUT(env) != PPC_FLAGS_INPUT_6xx) {
+ hw_error("Only 6xx bus is supported on heathrow machine\n");
+ }
+ pic = heathrow_pic_init(&pic_mem, 1, heathrow_irqs);
+ pci_bus = pci_grackle_init(0xfec00000, pic,
+ get_system_memory(),
+ get_system_io());
+ pci_vga_init(pci_bus);
+
+ escc_mem = escc_init(0, pic[0x0f], pic[0x10], serial_hds[0],
+ serial_hds[1], ESCC_CLOCK, 4);
+ memory_region_init_alias(escc_bar, NULL, "escc-bar",
+ escc_mem, 0, memory_region_size(escc_mem));
+
+    for (i = 0; i < nb_nics; i++) {
+        pci_nic_init_nofail(&nd_table[i], pci_bus, "ne2k_pci", NULL);
+    }
+
+
+ ide_drive_get(hd, ARRAY_SIZE(hd));
+
+ macio = pci_create(pci_bus, -1, TYPE_OLDWORLD_MACIO);
+ dev = DEVICE(macio);
+ qdev_connect_gpio_out(dev, 0, pic[0x12]); /* CUDA */
+ qdev_connect_gpio_out(dev, 1, pic[0x0D]); /* IDE-0 */
+ qdev_connect_gpio_out(dev, 2, pic[0x02]); /* IDE-0 DMA */
+ qdev_connect_gpio_out(dev, 3, pic[0x0E]); /* IDE-1 */
+ qdev_connect_gpio_out(dev, 4, pic[0x03]); /* IDE-1 DMA */
+ qdev_prop_set_uint64(dev, "frequency", tbfreq);
+ macio_init(macio, pic_mem, escc_bar);
+
+ macio_ide = MACIO_IDE(object_resolve_path_component(OBJECT(macio),
+ "ide[0]"));
+ macio_ide_init_drives(macio_ide, hd);
+
+ macio_ide = MACIO_IDE(object_resolve_path_component(OBJECT(macio),
+ "ide[1]"));
+ macio_ide_init_drives(macio_ide, &hd[MAX_IDE_DEVS]);
+
+ dev = DEVICE(object_resolve_path_component(OBJECT(macio), "cuda"));
+ adb_bus = qdev_get_child_bus(dev, "adb.0");
+ dev = qdev_create(adb_bus, TYPE_ADB_KEYBOARD);
+ qdev_init_nofail(dev);
+ dev = qdev_create(adb_bus, TYPE_ADB_MOUSE);
+ qdev_init_nofail(dev);
+
+ if (usb_enabled()) {
+ pci_create_simple(pci_bus, -1, "pci-ohci");
+ }
+
+ if (graphic_depth != 15 && graphic_depth != 32 && graphic_depth != 8)
+ graphic_depth = 15;
+
+ /* No PCI init: the BIOS will do it */
+
+ fw_cfg = fw_cfg_init_mem(CFG_ADDR, CFG_ADDR + 2);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus);
+ fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_MACHINE_ID, ARCH_HEATHROW);
+ fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, kernel_base);
+ fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size);
+ if (kernel_cmdline) {
+ fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, cmdline_base);
+ pstrcpy_targphys("cmdline", cmdline_base, TARGET_PAGE_SIZE, kernel_cmdline);
+ } else {
+ fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_CMDLINE, 0);
+ }
+ fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_base);
+ fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_BOOT_DEVICE, ppc_boot_device);
+
+ fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_WIDTH, graphic_width);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_HEIGHT, graphic_height);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_PPC_DEPTH, graphic_depth);
+
+ fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_IS_KVM, kvm_enabled());
+ if (kvm_enabled()) {
+#ifdef CONFIG_KVM
+ uint8_t *hypercall;
+
+ hypercall = g_malloc(16);
+ kvmppc_get_hypercall(env, hypercall, 16);
+ fw_cfg_add_bytes(fw_cfg, FW_CFG_PPC_KVM_HC, hypercall, 16);
+ fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_KVM_PID, getpid());
+#endif
+ }
+ fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_TBFREQ, tbfreq);
+ /* Mac OS X requires a "known good" clock-frequency value; pass it one. */
+ fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_CLOCKFREQ, CLOCKFREQ);
+ fw_cfg_add_i32(fw_cfg, FW_CFG_PPC_BUSFREQ, BUSFREQ);
+
+ qemu_register_boot_set(fw_cfg_boot_set, fw_cfg);
+}
+
+static int heathrow_kvm_type(const char *arg)
+{
+ /* Always force PR KVM */
+ return 2;
+}
+
+static void heathrow_machine_init(MachineClass *mc)
+{
+ mc->desc = "Heathrow based PowerMAC";
+ mc->init = ppc_heathrow_init;
+ mc->max_cpus = MAX_CPUS;
+#ifndef TARGET_PPC64
+ mc->is_default = 1;
+#endif
+ /* TOFIX "cad" when Mac floppy is implemented */
+ mc->default_boot_order = "cd";
+ mc->kvm_type = heathrow_kvm_type;
+}
+
+DEFINE_MACHINE("g3beige", heathrow_machine_init)
diff --git a/src/hw/ppc/mpc8544_guts.c b/src/hw/ppc/mpc8544_guts.c
new file mode 100644
index 0000000..a10abe9
--- /dev/null
+++ b/src/hw/ppc/mpc8544_guts.c
@@ -0,0 +1,140 @@
+/*
+ * QEMU PowerPC MPC8544 global util pseudo-device
+ *
+ * Copyright (C) 2011 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Alexander Graf, <alex@csgraf.de>
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * *****************************************************************
+ *
+ * This device is documented in the MPC8544 reference manual (file name
+ * "MPC8544ERM.pdf"), which is freely available on the web.
+ *
+ */
+
+#include "hw/hw.h"
+#include "sysemu/sysemu.h"
+#include "hw/sysbus.h"
+
+#define MPC8544_GUTS_MMIO_SIZE 0x1000
+#define MPC8544_GUTS_RSTCR_RESET 0x02
+
+#define MPC8544_GUTS_ADDR_PORPLLSR 0x00
+#define MPC8544_GUTS_ADDR_PORBMSR 0x04
+#define MPC8544_GUTS_ADDR_PORIMPSCR 0x08
+#define MPC8544_GUTS_ADDR_PORDEVSR 0x0C
+#define MPC8544_GUTS_ADDR_PORDBGMSR 0x10
+#define MPC8544_GUTS_ADDR_PORDEVSR2 0x14
+#define MPC8544_GUTS_ADDR_GPPORCR 0x20
+#define MPC8544_GUTS_ADDR_GPIOCR 0x30
+#define MPC8544_GUTS_ADDR_GPOUTDR 0x40
+#define MPC8544_GUTS_ADDR_GPINDR 0x50
+#define MPC8544_GUTS_ADDR_PMUXCR 0x60
+#define MPC8544_GUTS_ADDR_DEVDISR 0x70
+#define MPC8544_GUTS_ADDR_POWMGTCSR 0x80
+#define MPC8544_GUTS_ADDR_MCPSUMR 0x90
+#define MPC8544_GUTS_ADDR_RSTRSCR 0x94
+#define MPC8544_GUTS_ADDR_PVR 0xA0
+#define MPC8544_GUTS_ADDR_SVR 0xA4
+#define MPC8544_GUTS_ADDR_RSTCR 0xB0
+#define MPC8544_GUTS_ADDR_IOVSELSR 0xC0
+#define MPC8544_GUTS_ADDR_DDRCSR 0xB20
+#define MPC8544_GUTS_ADDR_DDRCDR 0xB24
+#define MPC8544_GUTS_ADDR_DDRCLKDR 0xB28
+#define MPC8544_GUTS_ADDR_CLKOCR 0xE00
+#define MPC8544_GUTS_ADDR_SRDS1CR1 0xF04
+#define MPC8544_GUTS_ADDR_SRDS2CR1 0xF10
+#define MPC8544_GUTS_ADDR_SRDS2CR3 0xF18
+
+#define TYPE_MPC8544_GUTS "mpc8544-guts"
+#define MPC8544_GUTS(obj) OBJECT_CHECK(GutsState, (obj), TYPE_MPC8544_GUTS)
+
+struct GutsState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ /*< public >*/
+
+ MemoryRegion iomem;
+};
+
+typedef struct GutsState GutsState;
+
+static uint64_t mpc8544_guts_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ uint32_t value = 0;
+ PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
+ CPUPPCState *env = &cpu->env;
+
+ addr &= MPC8544_GUTS_MMIO_SIZE - 1;
+ switch (addr) {
+ case MPC8544_GUTS_ADDR_PVR:
+ value = env->spr[SPR_PVR];
+ break;
+ case MPC8544_GUTS_ADDR_SVR:
+ value = env->spr[SPR_E500_SVR];
+ break;
+ default:
+ fprintf(stderr, "guts: Unknown register read: %x\n", (int)addr);
+ break;
+ }
+
+ return value;
+}
+
+static void mpc8544_guts_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ addr &= MPC8544_GUTS_MMIO_SIZE - 1;
+
+ switch (addr) {
+ case MPC8544_GUTS_ADDR_RSTCR:
+ if (value & MPC8544_GUTS_RSTCR_RESET) {
+ qemu_system_reset_request();
+ }
+ break;
+ default:
+ fprintf(stderr, "guts: Unknown register write: %x = %x\n",
+ (int)addr, (unsigned)value);
+ break;
+ }
+}
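+
+/*
+ * Example guest interaction (illustrative; offsets are relative to this
+ * block's MMIO window, which e500 boards typically map at CCSRBAR + 0xe0000):
+ * a 32-bit read at offset 0xA4 (SVR) returns SPR_E500_SVR, and a 32-bit
+ * write of 0x2 to offset 0xB0 (RSTCR) requests a system reset.
+ */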
+
+static const MemoryRegionOps mpc8544_guts_ops = {
+ .read = mpc8544_guts_read,
+ .write = mpc8544_guts_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
+static void mpc8544_guts_initfn(Object *obj)
+{
+ SysBusDevice *d = SYS_BUS_DEVICE(obj);
+ GutsState *s = MPC8544_GUTS(obj);
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &mpc8544_guts_ops, s,
+ "mpc8544.guts", MPC8544_GUTS_MMIO_SIZE);
+ sysbus_init_mmio(d, &s->iomem);
+}
+
+static const TypeInfo mpc8544_guts_info = {
+ .name = TYPE_MPC8544_GUTS,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(GutsState),
+ .instance_init = mpc8544_guts_initfn,
+};
+
+static void mpc8544_guts_register_types(void)
+{
+ type_register_static(&mpc8544_guts_info);
+}
+
+type_init(mpc8544_guts_register_types)
diff --git a/src/hw/ppc/mpc8544ds.c b/src/hw/ppc/mpc8544ds.c
new file mode 100644
index 0000000..0afbd34
--- /dev/null
+++ b/src/hw/ppc/mpc8544ds.c
@@ -0,0 +1,60 @@
+/*
+ * Support for the PPC e500-based mpc8544ds board
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "config.h"
+#include "qemu-common.h"
+#include "e500.h"
+#include "hw/boards.h"
+#include "sysemu/device_tree.h"
+#include "hw/ppc/openpic.h"
+#include "qemu/error-report.h"
+
+static void mpc8544ds_fixup_devtree(PPCE500Params *params, void *fdt)
+{
+ const char model[] = "MPC8544DS";
+ const char compatible[] = "MPC8544DS\0MPC85xxDS";
+
+ qemu_fdt_setprop(fdt, "/", "model", model, sizeof(model));
+ qemu_fdt_setprop(fdt, "/", "compatible", compatible,
+ sizeof(compatible));
+}
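+
+/*
+ * Note: the embedded '\0' in the "compatible" string above yields a
+ * two-entry device-tree string list, i.e. the equivalent of the dts
+ * fragment:
+ *
+ *     compatible = "MPC8544DS", "MPC85xxDS";
+ */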
+
+static void mpc8544ds_init(MachineState *machine)
+{
+ PPCE500Params params = {
+ .pci_first_slot = 0x11,
+ .pci_nr_slots = 2,
+ .fixup_devtree = mpc8544ds_fixup_devtree,
+ .mpic_version = OPENPIC_MODEL_FSL_MPIC_20,
+ .ccsrbar_base = 0xE0000000ULL,
+ .pci_mmio_base = 0xC0000000ULL,
+ .pci_mmio_bus_base = 0xC0000000ULL,
+ .pci_pio_base = 0xE1000000ULL,
+ .spin_base = 0xEF000000ULL,
+ };
+
+ if (machine->ram_size > 0xc0000000) {
+ error_report("The MPC8544DS board only supports up to 3GB of RAM");
+ exit(1);
+ }
+
+ ppce500_init(machine, &params);
+}
+
+
+static void ppce500_machine_init(MachineClass *mc)
+{
+ mc->desc = "mpc8544ds";
+ mc->init = mpc8544ds_init;
+ mc->max_cpus = 15;
+}
+
+DEFINE_MACHINE("mpc8544ds", ppce500_machine_init)
diff --git a/src/hw/ppc/ppc.c b/src/hw/ppc/ppc.c
new file mode 100644
index 0000000..2c604ef
--- /dev/null
+++ b/src/hw/ppc/ppc.c
@@ -0,0 +1,1339 @@
+/*
+ * QEMU generic PowerPC hardware System Emulator
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/ppc/ppc.h"
+#include "hw/ppc/ppc_e500.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/cpus.h"
+#include "hw/timer/m48t59.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "hw/loader.h"
+#include "sysemu/kvm.h"
+#include "kvm_ppc.h"
+#include "trace.h"
+
+//#define PPC_DEBUG_IRQ
+//#define PPC_DEBUG_TB
+
+#ifdef PPC_DEBUG_IRQ
+# define LOG_IRQ(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
+#else
+# define LOG_IRQ(...) do { } while (0)
+#endif
+
+
+#ifdef PPC_DEBUG_TB
+# define LOG_TB(...) qemu_log(__VA_ARGS__)
+#else
+# define LOG_TB(...) do { } while (0)
+#endif
+
+static void cpu_ppc_tb_stop (CPUPPCState *env);
+static void cpu_ppc_tb_start (CPUPPCState *env);
+
+void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ unsigned int old_pending = env->pending_interrupts;
+
+ if (level) {
+ env->pending_interrupts |= 1 << n_IRQ;
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else {
+ env->pending_interrupts &= ~(1 << n_IRQ);
+ if (env->pending_interrupts == 0) {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+ }
+
+ if (old_pending != env->pending_interrupts) {
+#ifdef CONFIG_KVM
+ kvmppc_set_interrupt(cpu, n_IRQ, level);
+#endif
+ }
+
+ LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32
+ "req %08x\n", __func__, env, n_IRQ, level,
+ env->pending_interrupts, CPU(cpu)->interrupt_request);
+}
+
+/* PowerPC 6xx / 7xx internal IRQ controller */
+static void ppc6xx_set_irq(void *opaque, int pin, int level)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ int cur_level;
+
+ LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
+ env, pin, level);
+ cur_level = (env->irq_input_state >> pin) & 1;
+ /* Don't generate spurious events */
+ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
+ CPUState *cs = CPU(cpu);
+
+ switch (pin) {
+ case PPC6xx_INPUT_TBEN:
+ /* Level sensitive - active high */
+ LOG_IRQ("%s: %s the time base\n",
+ __func__, level ? "start" : "stop");
+ if (level) {
+ cpu_ppc_tb_start(env);
+ } else {
+ cpu_ppc_tb_stop(env);
+ }
+            break;
+        case PPC6xx_INPUT_INT:
+ /* Level sensitive - active high */
+ LOG_IRQ("%s: set the external IRQ state to %d\n",
+ __func__, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
+ break;
+ case PPC6xx_INPUT_SMI:
+ /* Level sensitive - active high */
+ LOG_IRQ("%s: set the SMI IRQ state to %d\n",
+ __func__, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
+ break;
+ case PPC6xx_INPUT_MCP:
+ /* Negative edge sensitive */
+            /* XXX: TODO: actual reaction may depend on HID0 status
+ * 603/604/740/750: check HID0[EMCP]
+ */
+ if (cur_level == 1 && level == 0) {
+ LOG_IRQ("%s: raise machine check state\n",
+ __func__);
+ ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
+ }
+ break;
+ case PPC6xx_INPUT_CKSTP_IN:
+ /* Level sensitive - active low */
+ /* XXX: TODO: relay the signal to CKSTP_OUT pin */
+ /* XXX: Note that the only way to restart the CPU is to reset it */
+ if (level) {
+ LOG_IRQ("%s: stop the CPU\n", __func__);
+ cs->halted = 1;
+ }
+ break;
+ case PPC6xx_INPUT_HRESET:
+ /* Level sensitive - active low */
+ if (level) {
+ LOG_IRQ("%s: reset the CPU\n", __func__);
+ cpu_interrupt(cs, CPU_INTERRUPT_RESET);
+ }
+ break;
+ case PPC6xx_INPUT_SRESET:
+ LOG_IRQ("%s: set the RESET IRQ state to %d\n",
+ __func__, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
+ break;
+ default:
+ /* Unknown pin - do nothing */
+ LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
+ return;
+ }
+ if (level)
+ env->irq_input_state |= 1 << pin;
+ else
+ env->irq_input_state &= ~(1 << pin);
+ }
+}
+
+void ppc6xx_irq_init(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu,
+ PPC6xx_INPUT_NB);
+}
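+
+/*
+ * Usage sketch (illustrative): once ppc6xx_irq_init() has run, a board model
+ * can drive one of these pins with, e.g.,
+ *
+ *     qemu_set_irq(((qemu_irq *)env->irq_inputs)[PPC6xx_INPUT_INT], 1);
+ *
+ * which lands in ppc6xx_set_irq() above and raises PPC_INTERRUPT_EXT.
+ */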
+
+#if defined(TARGET_PPC64)
+/* PowerPC 970 internal IRQ controller */
+static void ppc970_set_irq(void *opaque, int pin, int level)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ int cur_level;
+
+ LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
+ env, pin, level);
+ cur_level = (env->irq_input_state >> pin) & 1;
+ /* Don't generate spurious events */
+ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
+ CPUState *cs = CPU(cpu);
+
+ switch (pin) {
+ case PPC970_INPUT_INT:
+ /* Level sensitive - active high */
+ LOG_IRQ("%s: set the external IRQ state to %d\n",
+ __func__, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
+ break;
+ case PPC970_INPUT_THINT:
+ /* Level sensitive - active high */
+ LOG_IRQ("%s: set the SMI IRQ state to %d\n", __func__,
+ level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
+ break;
+ case PPC970_INPUT_MCP:
+ /* Negative edge sensitive */
+            /* XXX: TODO: actual reaction may depend on HID0 status
+ * 603/604/740/750: check HID0[EMCP]
+ */
+ if (cur_level == 1 && level == 0) {
+ LOG_IRQ("%s: raise machine check state\n",
+ __func__);
+ ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
+ }
+ break;
+ case PPC970_INPUT_CKSTP:
+ /* Level sensitive - active low */
+ /* XXX: TODO: relay the signal to CKSTP_OUT pin */
+ if (level) {
+ LOG_IRQ("%s: stop the CPU\n", __func__);
+ cs->halted = 1;
+ } else {
+ LOG_IRQ("%s: restart the CPU\n", __func__);
+ cs->halted = 0;
+ qemu_cpu_kick(cs);
+ }
+ break;
+ case PPC970_INPUT_HRESET:
+ /* Level sensitive - active low */
+ if (level) {
+ cpu_interrupt(cs, CPU_INTERRUPT_RESET);
+ }
+ break;
+ case PPC970_INPUT_SRESET:
+ LOG_IRQ("%s: set the RESET IRQ state to %d\n",
+ __func__, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
+ break;
+ case PPC970_INPUT_TBEN:
+ LOG_IRQ("%s: set the TBEN state to %d\n", __func__,
+ level);
+ /* XXX: TODO */
+ break;
+ default:
+ /* Unknown pin - do nothing */
+ LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
+ return;
+ }
+ if (level)
+ env->irq_input_state |= 1 << pin;
+ else
+ env->irq_input_state &= ~(1 << pin);
+ }
+}
+
+void ppc970_irq_init(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu,
+ PPC970_INPUT_NB);
+}
+
+/* POWER7 internal IRQ controller */
+static void power7_set_irq(void *opaque, int pin, int level)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+
+ LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
+ env, pin, level);
+
+ switch (pin) {
+ case POWER7_INPUT_INT:
+ /* Level sensitive - active high */
+ LOG_IRQ("%s: set the external IRQ state to %d\n",
+ __func__, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
+ break;
+ default:
+ /* Unknown pin - do nothing */
+ LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
+ return;
+ }
+ if (level) {
+ env->irq_input_state |= 1 << pin;
+ } else {
+ env->irq_input_state &= ~(1 << pin);
+ }
+}
+
+void ppcPOWER7_irq_init(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu,
+ POWER7_INPUT_NB);
+}
+#endif /* defined(TARGET_PPC64) */
+
+/* PowerPC 40x internal IRQ controller */
+static void ppc40x_set_irq(void *opaque, int pin, int level)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ int cur_level;
+
+ LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
+ env, pin, level);
+ cur_level = (env->irq_input_state >> pin) & 1;
+ /* Don't generate spurious events */
+ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
+ CPUState *cs = CPU(cpu);
+
+ switch (pin) {
+ case PPC40x_INPUT_RESET_SYS:
+ if (level) {
+ LOG_IRQ("%s: reset the PowerPC system\n",
+ __func__);
+ ppc40x_system_reset(cpu);
+ }
+ break;
+ case PPC40x_INPUT_RESET_CHIP:
+ if (level) {
+ LOG_IRQ("%s: reset the PowerPC chip\n", __func__);
+ ppc40x_chip_reset(cpu);
+ }
+ break;
+ case PPC40x_INPUT_RESET_CORE:
+ /* XXX: TODO: update DBSR[MRR] */
+ if (level) {
+ LOG_IRQ("%s: reset the PowerPC core\n", __func__);
+ ppc40x_core_reset(cpu);
+ }
+ break;
+ case PPC40x_INPUT_CINT:
+ /* Level sensitive - active high */
+ LOG_IRQ("%s: set the critical IRQ state to %d\n",
+ __func__, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
+ break;
+ case PPC40x_INPUT_INT:
+ /* Level sensitive - active high */
+ LOG_IRQ("%s: set the external IRQ state to %d\n",
+ __func__, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
+ break;
+ case PPC40x_INPUT_HALT:
+ /* Level sensitive - active low */
+ if (level) {
+ LOG_IRQ("%s: stop the CPU\n", __func__);
+ cs->halted = 1;
+ } else {
+ LOG_IRQ("%s: restart the CPU\n", __func__);
+ cs->halted = 0;
+ qemu_cpu_kick(cs);
+ }
+ break;
+ case PPC40x_INPUT_DEBUG:
+ /* Level sensitive - active high */
+ LOG_IRQ("%s: set the debug pin state to %d\n",
+ __func__, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
+ break;
+ default:
+ /* Unknown pin - do nothing */
+ LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
+ return;
+ }
+ if (level)
+ env->irq_input_state |= 1 << pin;
+ else
+ env->irq_input_state &= ~(1 << pin);
+ }
+}
+
+void ppc40x_irq_init(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq,
+ cpu, PPC40x_INPUT_NB);
+}
+
+/* PowerPC E500 internal IRQ controller */
+static void ppce500_set_irq(void *opaque, int pin, int level)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ int cur_level;
+
+ LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
+ env, pin, level);
+ cur_level = (env->irq_input_state >> pin) & 1;
+ /* Don't generate spurious events */
+ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
+ switch (pin) {
+ case PPCE500_INPUT_MCK:
+ if (level) {
+ LOG_IRQ("%s: reset the PowerPC system\n",
+ __func__);
+ qemu_system_reset_request();
+ }
+ break;
+ case PPCE500_INPUT_RESET_CORE:
+ if (level) {
+ LOG_IRQ("%s: reset the PowerPC core\n", __func__);
+ ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
+ }
+ break;
+ case PPCE500_INPUT_CINT:
+ /* Level sensitive - active high */
+ LOG_IRQ("%s: set the critical IRQ state to %d\n",
+ __func__, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
+ break;
+ case PPCE500_INPUT_INT:
+ /* Level sensitive - active high */
+ LOG_IRQ("%s: set the core IRQ state to %d\n",
+ __func__, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
+ break;
+ case PPCE500_INPUT_DEBUG:
+ /* Level sensitive - active high */
+ LOG_IRQ("%s: set the debug pin state to %d\n",
+ __func__, level);
+ ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
+ break;
+ default:
+ /* Unknown pin - do nothing */
+ LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
+ return;
+ }
+ if (level)
+ env->irq_input_state |= 1 << pin;
+ else
+ env->irq_input_state &= ~(1 << pin);
+ }
+}
+
+void ppce500_irq_init(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq,
+ cpu, PPCE500_INPUT_NB);
+}
+
+/* Enable or Disable the E500 EPR capability */
+void ppce500_set_mpic_proxy(bool enabled)
+{
+ CPUState *cs;
+
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+ cpu->env.mpic_proxy = enabled;
+ if (kvm_enabled()) {
+ kvmppc_set_mpic_proxy(cpu, enabled);
+ }
+ }
+}
+
+/*****************************************************************************/
+/* PowerPC time base and decrementer emulation */
+
+uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
+{
+ /* TB time in tb periods */
+ return muldiv64(vmclk, tb_env->tb_freq, get_ticks_per_sec()) + tb_offset;
+}
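+
+/* Example (illustrative values): with tb_freq = 16.6 MHz, tb_offset = 0 and
+ * vmclk = 2000000000 ns (two seconds of virtual time), the time base is
+ * muldiv64(2000000000, 16600000, 1000000000) = 33200000 ticks.
+ */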
+
+uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+ uint64_t tb;
+
+ if (kvm_enabled()) {
+ return env->spr[SPR_TBL];
+ }
+
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
+ LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
+
+ return tb;
+}
+
+static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+ uint64_t tb;
+
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
+ LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
+
+ return tb >> 32;
+}
+
+uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
+{
+ if (kvm_enabled()) {
+ return env->spr[SPR_TBU];
+ }
+
+ return _cpu_ppc_load_tbu(env);
+}
+
+static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
+ int64_t *tb_offsetp, uint64_t value)
+{
+ *tb_offsetp = value - muldiv64(vmclk, tb_env->tb_freq, get_ticks_per_sec());
+ LOG_TB("%s: tb %016" PRIx64 " offset %08" PRIx64 "\n",
+ __func__, value, *tb_offsetp);
+}
+
+void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+ uint64_t tb;
+
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
+ tb &= 0xFFFFFFFF00000000ULL;
+ cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ &tb_env->tb_offset, tb | (uint64_t)value);
+}
+
+static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+ uint64_t tb;
+
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
+ tb &= 0x00000000FFFFFFFFULL;
+ cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ &tb_env->tb_offset, ((uint64_t)value << 32) | tb);
+}
+
+void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
+{
+ _cpu_ppc_store_tbu(env, value);
+}
+
+uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+ uint64_t tb;
+
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
+ LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
+
+ return tb;
+}
+
+uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+ uint64_t tb;
+
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
+ LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);
+
+ return tb >> 32;
+}
+
+void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+ uint64_t tb;
+
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
+ tb &= 0xFFFFFFFF00000000ULL;
+ cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ &tb_env->atb_offset, tb | (uint64_t)value);
+}
+
+void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+ uint64_t tb;
+
+ tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
+ tb &= 0x00000000FFFFFFFFULL;
+ cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ &tb_env->atb_offset, ((uint64_t)value << 32) | tb);
+}
+
+static void cpu_ppc_tb_stop (CPUPPCState *env)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+ uint64_t tb, atb, vmclk;
+
+ /* If the time base is already frozen, do nothing */
+ if (tb_env->tb_freq != 0) {
+ vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ /* Get the time base */
+ tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
+ /* Get the alternate time base */
+ atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
+ /* Store the time base value (ie compute the current offset) */
+ cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
+ /* Store the alternate time base value (compute the current offset) */
+ cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
+ /* Set the time base frequency to zero */
+ tb_env->tb_freq = 0;
+ /* Now, the time bases are frozen to tb_offset / atb_offset value */
+ }
+}
+
+static void cpu_ppc_tb_start (CPUPPCState *env)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+ uint64_t tb, atb, vmclk;
+
+ /* If the time base is not frozen, do nothing */
+ if (tb_env->tb_freq == 0) {
+ vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ /* Get the time base from tb_offset */
+ tb = tb_env->tb_offset;
+ /* Get the alternate time base from atb_offset */
+ atb = tb_env->atb_offset;
+ /* Restore the tb frequency from the decrementer frequency */
+ tb_env->tb_freq = tb_env->decr_freq;
+ /* Store the time base value */
+ cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
+ /* Store the alternate time base value */
+ cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
+ }
+}
+
+bool ppc_decr_clear_on_delivery(CPUPPCState *env)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+ int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
+ return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
+}
+
+static inline uint32_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+ uint32_t decr;
+ int64_t diff;
+
+ diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ if (diff >= 0) {
+ decr = muldiv64(diff, tb_env->decr_freq, get_ticks_per_sec());
+ } else if (tb_env->flags & PPC_TIMER_BOOKE) {
+ decr = 0;
+ } else {
+ decr = -muldiv64(-diff, tb_env->decr_freq, get_ticks_per_sec());
+ }
+ LOG_TB("%s: %08" PRIx32 "\n", __func__, decr);
+
+ return decr;
+}
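+
+/* Example (illustrative): if the next decrementer event is 10 ms of virtual
+ * time away and decr_freq = 16.6 MHz, the guest reads back
+ * muldiv64(10000000, 16600000, 1000000000) = 166000.
+ */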
+
+uint32_t cpu_ppc_load_decr (CPUPPCState *env)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+
+ if (kvm_enabled()) {
+ return env->spr[SPR_DECR];
+ }
+
+ return _cpu_ppc_load_decr(env, tb_env->decr_next);
+}
+
+uint32_t cpu_ppc_load_hdecr (CPUPPCState *env)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+
+ return _cpu_ppc_load_decr(env, tb_env->hdecr_next);
+}
+
+uint64_t cpu_ppc_load_purr (CPUPPCState *env)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+ uint64_t diff;
+
+ diff = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - tb_env->purr_start;
+
+ return tb_env->purr_load + muldiv64(diff, tb_env->tb_freq, get_ticks_per_sec());
+}
+
+/* When the decrementer expires,
+ * all we need to do is generate or queue a CPU exception.
+ */
+static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
+{
+ /* Raise it */
+ LOG_TB("raise decrementer exception\n");
+ ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
+}
+
+static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
+{
+ ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
+}
+
+static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
+{
+ /* Raise it */
+ LOG_TB("raise decrementer exception\n");
+ ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
+}
+
+static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
+{
+ ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
+}
+
+static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
+ QEMUTimer *timer,
+ void (*raise_excp)(void *),
+ void (*lower_excp)(PowerPCCPU *),
+ uint32_t decr, uint32_t value)
+{
+ CPUPPCState *env = &cpu->env;
+ ppc_tb_t *tb_env = env->tb_env;
+ uint64_t now, next;
+
+ LOG_TB("%s: %08" PRIx32 " => %08" PRIx32 "\n", __func__,
+ decr, value);
+
+ if (kvm_enabled()) {
+ /* KVM handles decrementer exceptions, we don't need our own timer */
+ return;
+ }
+
+ /*
+ * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
+ * interrupt.
+ *
+ * If we get a really small DEC value, we can assume that by the time we
+ * handled it we should inject an interrupt already.
+ *
+ * On MSB level based DEC implementations the MSB always means the interrupt
+ * is pending, so raise it on those.
+ *
+ * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
+ * an edge interrupt, so raise it here too.
+ */
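+    /*
+     * Worked example (values for illustration only): on a Book3S CPU
+     * (PPC_DECR_UNDERFLOW_LEVEL set), storing value = 0x80000000 raises the
+     * exception immediately because the MSB is set, whereas storing
+     * value = 0x10000000 with decr_freq = 512 MHz arms the timer about
+     * 0x10000000 / 512e6 ~= 0.52 s of virtual time in the future.
+     */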
+ if ((value < 3) ||
+ ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && (value & 0x80000000)) ||
+ ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && (value & 0x80000000)
+ && !(decr & 0x80000000))) {
+ (*raise_excp)(cpu);
+ return;
+ }
+
+ /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
+ if (!(value & 0x80000000) && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
+ (*lower_excp)(cpu);
+ }
+
+ /* Calculate the next timer event */
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ next = now + muldiv64(value, get_ticks_per_sec(), tb_env->decr_freq);
+ *nextp = next;
+
+ /* Adjust timer */
+ timer_mod(timer, next);
+}
+
+static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, uint32_t decr,
+ uint32_t value)
+{
+ ppc_tb_t *tb_env = cpu->env.tb_env;
+
+ __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
+ tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
+ value);
+}
+
+void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value);
+}
+
+static void cpu_ppc_decr_cb(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ cpu_ppc_decr_excp(cpu);
+}
+
+static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, uint32_t hdecr,
+ uint32_t value)
+{
+ ppc_tb_t *tb_env = cpu->env.tb_env;
+
+ if (tb_env->hdecr_timer != NULL) {
+ __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
+ tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
+ hdecr, value);
+ }
+}
+
+void cpu_ppc_store_hdecr (CPUPPCState *env, uint32_t value)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value);
+}
+
+static void cpu_ppc_hdecr_cb(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ cpu_ppc_hdecr_excp(cpu);
+}
+
+static void cpu_ppc_store_purr(PowerPCCPU *cpu, uint64_t value)
+{
+ ppc_tb_t *tb_env = cpu->env.tb_env;
+
+ tb_env->purr_load = value;
+ tb_env->purr_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+}
+
+static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
+{
+ CPUPPCState *env = opaque;
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ ppc_tb_t *tb_env = env->tb_env;
+
+ tb_env->tb_freq = freq;
+ tb_env->decr_freq = freq;
+ /* There is a bug in Linux 2.4 kernels:
+     * if a decrementer exception is pending when the kernel enables MSR[EE]
+     * at startup, it's not ready to handle it...
+ */
+ _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF);
+ _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF);
+ cpu_ppc_store_purr(cpu, 0x0000000000000000ULL);
+}
+
+static void timebase_pre_save(void *opaque)
+{
+ PPCTimebase *tb = opaque;
+ uint64_t ticks = cpu_get_host_ticks();
+ PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
+
+ if (!first_ppc_cpu->env.tb_env) {
+ error_report("No timebase object");
+ return;
+ }
+
+ tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
+ /*
+ * tb_offset is only expected to be changed by migration so
+ * there is no need to update it from KVM here
+ */
+ tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
+}
+
+static int timebase_post_load(void *opaque, int version_id)
+{
+ PPCTimebase *tb_remote = opaque;
+ CPUState *cpu;
+ PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
+ int64_t tb_off_adj, tb_off, ns_diff;
+ int64_t migration_duration_ns, migration_duration_tb, guest_tb, host_ns;
+ unsigned long freq;
+
+ if (!first_ppc_cpu->env.tb_env) {
+ error_report("No timebase object");
+ return -1;
+ }
+
+ freq = first_ppc_cpu->env.tb_env->tb_freq;
+ /*
+ * Calculate timebase on the destination side of migration.
+     * The destination timebase must not be less than the source timebase.
+     * We try to adjust the timebase by the migration downtime if the host
+     * clocks are not too much out of sync (1 second for now).
+ */
+ host_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
+ ns_diff = MAX(0, host_ns - tb_remote->time_of_the_day_ns);
+ migration_duration_ns = MIN(NANOSECONDS_PER_SECOND, ns_diff);
+ migration_duration_tb = muldiv64(migration_duration_ns, freq,
+ NANOSECONDS_PER_SECOND);
+    guest_tb = tb_remote->guest_timebase + migration_duration_tb;
+
+ tb_off_adj = guest_tb - cpu_get_host_ticks();
+
+ tb_off = first_ppc_cpu->env.tb_env->tb_offset;
+ trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
+ (tb_off_adj - tb_off) / freq);
+
+ /* Set new offset to all CPUs */
+ CPU_FOREACH(cpu) {
+ PowerPCCPU *pcpu = POWERPC_CPU(cpu);
+ pcpu->env.tb_env->tb_offset = tb_off_adj;
+ }
+
+ return 0;
+}
+
+const VMStateDescription vmstate_ppc_timebase = {
+ .name = "timebase",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .pre_save = timebase_pre_save,
+ .post_load = timebase_post_load,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT64(guest_timebase, PPCTimebase),
+ VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+/* Set up (once) timebase frequency (in Hz) */
+clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ ppc_tb_t *tb_env;
+
+ tb_env = g_malloc0(sizeof(ppc_tb_t));
+ env->tb_env = tb_env;
+ tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
+ if (env->insns_flags & PPC_SEGMENT_64B) {
+ /* All Book3S 64bit CPUs implement level based DEC logic */
+ tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
+ }
+ /* Create new timer */
+ tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
+ if (0) {
+ /* XXX: find a suitable condition to enable the hypervisor decrementer
+ */
+ tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
+ cpu);
+ } else {
+ tb_env->hdecr_timer = NULL;
+ }
+ cpu_ppc_set_tb_clk(env, freq);
+
+ return &cpu_ppc_set_tb_clk;
+}
+
+/* Specific helpers for POWER & PowerPC 601 RTC */
+#if 0
+static clk_setup_cb cpu_ppc601_rtc_init (CPUPPCState *env)
+{
+ return cpu_ppc_tb_init(env, 7812500);
+}
+#endif
+
+void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value)
+{
+ _cpu_ppc_store_tbu(env, value);
+}
+
+uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env)
+{
+ return _cpu_ppc_load_tbu(env);
+}
+
+void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value)
+{
+ cpu_ppc_store_tbl(env, value & 0x3FFFFF80);
+}
+
+uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env)
+{
+ return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
+}
+
+/*****************************************************************************/
+/* PowerPC 40x timers */
+
+/* PIT, FIT & WDT */
+typedef struct ppc40x_timer_t ppc40x_timer_t;
+struct ppc40x_timer_t {
+ uint64_t pit_reload; /* PIT auto-reload value */
+ uint64_t fit_next; /* Tick for next FIT interrupt */
+ QEMUTimer *fit_timer;
+ uint64_t wdt_next; /* Tick for next WDT interrupt */
+ QEMUTimer *wdt_timer;
+
+    /* The 405 has a PIT, the 440 has a DECR. */
+ unsigned int decr_excp;
+};
+
+/* Fixed interval timer */
+static void cpu_4xx_fit_cb (void *opaque)
+{
+ PowerPCCPU *cpu;
+ CPUPPCState *env;
+ ppc_tb_t *tb_env;
+ ppc40x_timer_t *ppc40x_timer;
+ uint64_t now, next;
+
+ env = opaque;
+ cpu = ppc_env_get_cpu(env);
+ tb_env = env->tb_env;
+ ppc40x_timer = tb_env->opaque;
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
+ case 0:
+ next = 1 << 9;
+ break;
+ case 1:
+ next = 1 << 13;
+ break;
+ case 2:
+ next = 1 << 17;
+ break;
+ case 3:
+ next = 1 << 21;
+ break;
+ default:
+ /* Cannot occur, but makes gcc happy */
+ return;
+ }
+ next = now + muldiv64(next, get_ticks_per_sec(), tb_env->tb_freq);
+ if (next == now)
+ next++;
+ timer_mod(ppc40x_timer->fit_timer, next);
+ env->spr[SPR_40x_TSR] |= 1 << 26;
+ if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
+ ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
+ }
+ LOG_TB("%s: ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
+ (int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
+ env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
+}
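+
+/* Example (illustrative): with the FIT period select field in TCR set to 3,
+ * the fixed-interval timer fires every 2^21 time-base ticks, i.e. roughly
+ * every 21 ms at a 100 MHz time base.
+ */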
+
+/* Programmable interval timer */
+static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
+{
+ ppc40x_timer_t *ppc40x_timer;
+ uint64_t now, next;
+
+ ppc40x_timer = tb_env->opaque;
+ if (ppc40x_timer->pit_reload <= 1 ||
+ !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
+ (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
+ /* Stop PIT */
+ LOG_TB("%s: stop PIT\n", __func__);
+ timer_del(tb_env->decr_timer);
+ } else {
+ LOG_TB("%s: start PIT %016" PRIx64 "\n",
+ __func__, ppc40x_timer->pit_reload);
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ next = now + muldiv64(ppc40x_timer->pit_reload,
+ get_ticks_per_sec(), tb_env->decr_freq);
+ if (is_excp)
+ next += tb_env->decr_next - now;
+ if (next == now)
+ next++;
+ timer_mod(tb_env->decr_timer, next);
+ tb_env->decr_next = next;
+ }
+}
+
+static void cpu_4xx_pit_cb (void *opaque)
+{
+ PowerPCCPU *cpu;
+ CPUPPCState *env;
+ ppc_tb_t *tb_env;
+ ppc40x_timer_t *ppc40x_timer;
+
+ env = opaque;
+ cpu = ppc_env_get_cpu(env);
+ tb_env = env->tb_env;
+ ppc40x_timer = tb_env->opaque;
+ env->spr[SPR_40x_TSR] |= 1 << 27;
+ if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
+ ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
+ }
+ start_stop_pit(env, tb_env, 1);
+ LOG_TB("%s: ar %d ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " "
+ "%016" PRIx64 "\n", __func__,
+ (int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
+ (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
+ env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
+ ppc40x_timer->pit_reload);
+}
+
+/* Watchdog timer */
+static void cpu_4xx_wdt_cb (void *opaque)
+{
+ PowerPCCPU *cpu;
+ CPUPPCState *env;
+ ppc_tb_t *tb_env;
+ ppc40x_timer_t *ppc40x_timer;
+ uint64_t now, next;
+
+ env = opaque;
+ cpu = ppc_env_get_cpu(env);
+ tb_env = env->tb_env;
+ ppc40x_timer = tb_env->opaque;
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
+ case 0:
+ next = 1 << 17;
+ break;
+ case 1:
+ next = 1 << 21;
+ break;
+ case 2:
+ next = 1 << 25;
+ break;
+ case 3:
+ next = 1 << 29;
+ break;
+ default:
+ /* Cannot occur, but makes gcc happy */
+ return;
+ }
+ next = now + muldiv64(next, get_ticks_per_sec(), tb_env->decr_freq);
+ if (next == now)
+ next++;
+ LOG_TB("%s: TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
+ env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
+ switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
+ case 0x0:
+ case 0x1:
+ timer_mod(ppc40x_timer->wdt_timer, next);
+ ppc40x_timer->wdt_next = next;
+ env->spr[SPR_40x_TSR] |= 1U << 31;
+ break;
+ case 0x2:
+ timer_mod(ppc40x_timer->wdt_timer, next);
+ ppc40x_timer->wdt_next = next;
+ env->spr[SPR_40x_TSR] |= 1 << 30;
+ if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
+ ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
+ }
+ break;
+ case 0x3:
+ env->spr[SPR_40x_TSR] &= ~0x30000000;
+ env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
+ switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
+ case 0x0:
+ /* No reset */
+ break;
+ case 0x1: /* Core reset */
+ ppc40x_core_reset(cpu);
+ break;
+ case 0x2: /* Chip reset */
+ ppc40x_chip_reset(cpu);
+ break;
+ case 0x3: /* System reset */
+ ppc40x_system_reset(cpu);
+ break;
+ }
+ }
+}
+
+void store_40x_pit (CPUPPCState *env, target_ulong val)
+{
+ ppc_tb_t *tb_env;
+ ppc40x_timer_t *ppc40x_timer;
+
+ tb_env = env->tb_env;
+ ppc40x_timer = tb_env->opaque;
+ LOG_TB("%s val" TARGET_FMT_lx "\n", __func__, val);
+ ppc40x_timer->pit_reload = val;
+ start_stop_pit(env, tb_env, 0);
+}
+
+target_ulong load_40x_pit (CPUPPCState *env)
+{
+ return cpu_ppc_load_decr(env);
+}
+
+static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
+{
+ CPUPPCState *env = opaque;
+ ppc_tb_t *tb_env = env->tb_env;
+
+ LOG_TB("%s set new frequency to %" PRIu32 "\n", __func__,
+ freq);
+ tb_env->tb_freq = freq;
+ tb_env->decr_freq = freq;
+ /* XXX: we should also update all timers */
+}
+
+clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
+ unsigned int decr_excp)
+{
+ ppc_tb_t *tb_env;
+ ppc40x_timer_t *ppc40x_timer;
+
+ tb_env = g_malloc0(sizeof(ppc_tb_t));
+ env->tb_env = tb_env;
+ tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
+ ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t));
+ tb_env->tb_freq = freq;
+ tb_env->decr_freq = freq;
+ tb_env->opaque = ppc40x_timer;
+ LOG_TB("%s freq %" PRIu32 "\n", __func__, freq);
+ if (ppc40x_timer != NULL) {
+ /* We use decr timer for PIT */
+ tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env);
+ ppc40x_timer->fit_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env);
+ ppc40x_timer->wdt_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env);
+ ppc40x_timer->decr_excp = decr_excp;
+ }
+
+ return &ppc_40x_set_tb_clk;
+}
+
+/*****************************************************************************/
+/* Embedded PowerPC Device Control Registers */
+typedef struct ppc_dcrn_t ppc_dcrn_t;
+struct ppc_dcrn_t {
+ dcr_read_cb dcr_read;
+ dcr_write_cb dcr_write;
+ void *opaque;
+};
+
+/* XXX: on 460, DCR addresses are 32 bits wide,
+ * using DCRIPR to get the 22 upper bits of the DCR address
+ */
+#define DCRN_NB 1024
+struct ppc_dcr_t {
+ ppc_dcrn_t dcrn[DCRN_NB];
+ int (*read_error)(int dcrn);
+ int (*write_error)(int dcrn);
+};
+
+int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
+{
+ ppc_dcrn_t *dcr;
+
+ if (dcrn < 0 || dcrn >= DCRN_NB)
+ goto error;
+ dcr = &dcr_env->dcrn[dcrn];
+ if (dcr->dcr_read == NULL)
+ goto error;
+ *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);
+
+ return 0;
+
+ error:
+ if (dcr_env->read_error != NULL)
+ return (*dcr_env->read_error)(dcrn);
+
+ return -1;
+}
+
+int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
+{
+ ppc_dcrn_t *dcr;
+
+ if (dcrn < 0 || dcrn >= DCRN_NB)
+ goto error;
+ dcr = &dcr_env->dcrn[dcrn];
+ if (dcr->dcr_write == NULL)
+ goto error;
+ (*dcr->dcr_write)(dcr->opaque, dcrn, val);
+
+ return 0;
+
+ error:
+ if (dcr_env->write_error != NULL)
+ return (*dcr_env->write_error)(dcrn);
+
+ return -1;
+}
+
+int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
+ dcr_read_cb dcr_read, dcr_write_cb dcr_write)
+{
+ ppc_dcr_t *dcr_env;
+ ppc_dcrn_t *dcr;
+
+ dcr_env = env->dcr_env;
+ if (dcr_env == NULL)
+ return -1;
+ if (dcrn < 0 || dcrn >= DCRN_NB)
+ return -1;
+ dcr = &dcr_env->dcrn[dcrn];
+ if (dcr->opaque != NULL ||
+ dcr->dcr_read != NULL ||
+ dcr->dcr_write != NULL)
+ return -1;
+ dcr->opaque = opaque;
+ dcr->dcr_read = dcr_read;
+ dcr->dcr_write = dcr_write;
+
+ return 0;
+}
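+
+/*
+ * Usage sketch (hypothetical device, for illustration only): a 4xx
+ * peripheral model registers its DCRs once at init time, e.g.
+ *
+ *     ppc_dcr_register(env, MY_DCR_BASE + 0, dev,
+ *                      &my_dcr_read, &my_dcr_write);
+ *
+ * after which guest mfdcr/mtdcr accesses to that DCR number reach the
+ * callbacks via ppc_dcr_read()/ppc_dcr_write() above.
+ */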
+
+int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
+ int (*write_error)(int dcrn))
+{
+ ppc_dcr_t *dcr_env;
+
+ dcr_env = g_malloc0(sizeof(ppc_dcr_t));
+ dcr_env->read_error = read_error;
+ dcr_env->write_error = write_error;
+ env->dcr_env = dcr_env;
+
+ return 0;
+}
+
+/*****************************************************************************/
+/* Debug port */
+void PPC_debug_write (void *opaque, uint32_t addr, uint32_t val)
+{
+ addr &= 0xF;
+ switch (addr) {
+ case 0:
+ printf("%c", val);
+ break;
+ case 1:
+ printf("\n");
+ fflush(stdout);
+ break;
+ case 2:
+ printf("Set loglevel to %04" PRIx32 "\n", val);
+ qemu_set_log(val | 0x100);
+ break;
+ }
+}
+
+/* CPU device-tree ID helpers */
+int ppc_get_vcpu_dt_id(PowerPCCPU *cpu)
+{
+ return cpu->cpu_dt_id;
+}
+
+PowerPCCPU *ppc_get_vcpu_by_dt_id(int cpu_dt_id)
+{
+ CPUState *cs;
+
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+ if (cpu->cpu_dt_id == cpu_dt_id) {
+ return cpu;
+ }
+ }
+
+ return NULL;
+}
diff --git a/src/hw/ppc/ppc405.h b/src/hw/ppc/ppc405.h
new file mode 100644
index 0000000..1c5f04f
--- /dev/null
+++ b/src/hw/ppc/ppc405.h
@@ -0,0 +1,81 @@
+/*
+ * QEMU PowerPC 405 shared definitions
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#if !defined(PPC_405_H)
+#define PPC_405_H
+
+#include "hw/ppc/ppc4xx.h"
+
+/* Bootinfo as set-up by u-boot */
+typedef struct ppc4xx_bd_info_t ppc4xx_bd_info_t;
+struct ppc4xx_bd_info_t {
+ uint32_t bi_memstart;
+ uint32_t bi_memsize;
+ uint32_t bi_flashstart;
+ uint32_t bi_flashsize;
+ uint32_t bi_flashoffset; /* 0x10 */
+ uint32_t bi_sramstart;
+ uint32_t bi_sramsize;
+ uint32_t bi_bootflags;
+ uint32_t bi_ipaddr; /* 0x20 */
+ uint8_t bi_enetaddr[6];
+ uint16_t bi_ethspeed;
+ uint32_t bi_intfreq;
+ uint32_t bi_busfreq; /* 0x30 */
+ uint32_t bi_baudrate;
+ uint8_t bi_s_version[4];
+ uint8_t bi_r_version[32];
+ uint32_t bi_procfreq;
+ uint32_t bi_plb_busfreq;
+ uint32_t bi_pci_busfreq;
+ uint8_t bi_pci_enetaddr[6];
+ uint32_t bi_pci_enetaddr2[6];
+ uint32_t bi_opbfreq;
+ uint32_t bi_iic_fast[2];
+};
+
+/* PowerPC 405 core */
+ram_addr_t ppc405_set_bootinfo (CPUPPCState *env, ppc4xx_bd_info_t *bd,
+ uint32_t flags);
+
+CPUPPCState *ppc405cr_init(MemoryRegion *address_space_mem,
+ MemoryRegion ram_memories[4],
+ hwaddr ram_bases[4],
+ hwaddr ram_sizes[4],
+ uint32_t sysclk, qemu_irq **picp,
+ int do_init);
+CPUPPCState *ppc405ep_init(MemoryRegion *address_space_mem,
+ MemoryRegion ram_memories[2],
+ hwaddr ram_bases[2],
+ hwaddr ram_sizes[2],
+ uint32_t sysclk, qemu_irq **picp,
+ int do_init);
+/* IBM STBxxx microcontrollers */
+CPUPPCState *ppc_stb025_init (MemoryRegion ram_memories[2],
+ hwaddr ram_bases[2],
+ hwaddr ram_sizes[2],
+ uint32_t sysclk, qemu_irq **picp,
+ ram_addr_t *offsetp);
+
+#endif /* !defined(PPC_405_H) */
diff --git a/src/hw/ppc/ppc405_boards.c b/src/hw/ppc/ppc405_boards.c
new file mode 100644
index 0000000..31bc186
--- /dev/null
+++ b/src/hw/ppc/ppc405_boards.c
@@ -0,0 +1,660 @@
+/*
+ * QEMU PowerPC 405 evaluation boards emulation
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/ppc/ppc.h"
+#include "ppc405.h"
+#include "hw/timer/m48t59.h"
+#include "hw/block/flash.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/qtest.h"
+#include "sysemu/block-backend.h"
+#include "hw/boards.h"
+#include "qemu/log.h"
+#include "qemu/error-report.h"
+#include "hw/loader.h"
+#include "sysemu/block-backend.h"
+#include "sysemu/blockdev.h"
+#include "exec/address-spaces.h"
+
+#define BIOS_FILENAME "ppc405_rom.bin"
+#define BIOS_SIZE (2048 * 1024)
+
+#define KERNEL_LOAD_ADDR 0x00000000
+#define INITRD_LOAD_ADDR 0x01800000
+
+#define USE_FLASH_BIOS
+
+//#define DEBUG_BOARD_INIT
+
+/*****************************************************************************/
+/* PPC405EP reference board (IBM) */
+/* Standalone board with:
+ * - PowerPC 405EP CPU
+ * - SDRAM (0x00000000)
+ * - Flash (0xFFF80000)
+ * - SRAM (0xFFF00000)
+ * - NVRAM (0xF0000000)
+ * - FPGA (0xF0300000)
+ */
+typedef struct ref405ep_fpga_t ref405ep_fpga_t;
+struct ref405ep_fpga_t {
+ uint8_t reg0;
+ uint8_t reg1;
+};
+
+static uint32_t ref405ep_fpga_readb (void *opaque, hwaddr addr)
+{
+ ref405ep_fpga_t *fpga;
+ uint32_t ret;
+
+ fpga = opaque;
+ switch (addr) {
+ case 0x0:
+ ret = fpga->reg0;
+ break;
+ case 0x1:
+ ret = fpga->reg1;
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static void ref405ep_fpga_writeb (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+ ref405ep_fpga_t *fpga;
+
+ fpga = opaque;
+ switch (addr) {
+ case 0x0:
+ /* Read only */
+ break;
+ case 0x1:
+ fpga->reg1 = value;
+ break;
+ default:
+ break;
+ }
+}
+
+static uint32_t ref405ep_fpga_readw (void *opaque, hwaddr addr)
+{
+ uint32_t ret;
+
+ ret = ref405ep_fpga_readb(opaque, addr) << 8;
+ ret |= ref405ep_fpga_readb(opaque, addr + 1);
+
+ return ret;
+}
+
+static void ref405ep_fpga_writew (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+ ref405ep_fpga_writeb(opaque, addr, (value >> 8) & 0xFF);
+ ref405ep_fpga_writeb(opaque, addr + 1, value & 0xFF);
+}
+
+static uint32_t ref405ep_fpga_readl (void *opaque, hwaddr addr)
+{
+ uint32_t ret;
+
+ ret = ref405ep_fpga_readb(opaque, addr) << 24;
+ ret |= ref405ep_fpga_readb(opaque, addr + 1) << 16;
+ ret |= ref405ep_fpga_readb(opaque, addr + 2) << 8;
+ ret |= ref405ep_fpga_readb(opaque, addr + 3);
+
+ return ret;
+}
+
+static void ref405ep_fpga_writel (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+ ref405ep_fpga_writeb(opaque, addr, (value >> 24) & 0xFF);
+ ref405ep_fpga_writeb(opaque, addr + 1, (value >> 16) & 0xFF);
+ ref405ep_fpga_writeb(opaque, addr + 2, (value >> 8) & 0xFF);
+ ref405ep_fpga_writeb(opaque, addr + 3, value & 0xFF);
+}
+
+static const MemoryRegionOps ref405ep_fpga_ops = {
+ .old_mmio = {
+ .read = {
+ ref405ep_fpga_readb, ref405ep_fpga_readw, ref405ep_fpga_readl,
+ },
+ .write = {
+ ref405ep_fpga_writeb, ref405ep_fpga_writew, ref405ep_fpga_writel,
+ },
+ },
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void ref405ep_fpga_reset (void *opaque)
+{
+ ref405ep_fpga_t *fpga;
+
+ fpga = opaque;
+ fpga->reg0 = 0x00;
+ fpga->reg1 = 0x0F;
+}
+
+static void ref405ep_fpga_init(MemoryRegion *sysmem, uint32_t base)
+{
+ ref405ep_fpga_t *fpga;
+ MemoryRegion *fpga_memory = g_new(MemoryRegion, 1);
+
+ fpga = g_malloc0(sizeof(ref405ep_fpga_t));
+ memory_region_init_io(fpga_memory, NULL, &ref405ep_fpga_ops, fpga,
+ "fpga", 0x00000100);
+ memory_region_add_subregion(sysmem, base, fpga_memory);
+ qemu_register_reset(&ref405ep_fpga_reset, fpga);
+}
+
+static void ref405ep_init(MachineState *machine)
+{
+ ram_addr_t ram_size = machine->ram_size;
+ const char *kernel_filename = machine->kernel_filename;
+ const char *kernel_cmdline = machine->kernel_cmdline;
+ const char *initrd_filename = machine->initrd_filename;
+ char *filename;
+ ppc4xx_bd_info_t bd;
+ CPUPPCState *env;
+ qemu_irq *pic;
+ MemoryRegion *bios;
+ MemoryRegion *sram = g_new(MemoryRegion, 1);
+ ram_addr_t bdloc;
+ MemoryRegion *ram_memories = g_malloc(2 * sizeof(*ram_memories));
+ hwaddr ram_bases[2], ram_sizes[2];
+ target_ulong sram_size;
+ long bios_size;
+ //int phy_addr = 0;
+ //static int phy_addr = 1;
+ target_ulong kernel_base, initrd_base;
+ long kernel_size, initrd_size;
+ int linux_boot;
+ int fl_idx, fl_sectors, len;
+ DriveInfo *dinfo;
+ MemoryRegion *sysmem = get_system_memory();
+
+ /* XXX: fix this */
+ memory_region_allocate_system_memory(&ram_memories[0], NULL, "ef405ep.ram",
+ 0x08000000);
+ ram_bases[0] = 0;
+ ram_sizes[0] = 0x08000000;
+ memory_region_init(&ram_memories[1], NULL, "ef405ep.ram1", 0);
+ ram_bases[1] = 0x00000000;
+ ram_sizes[1] = 0x00000000;
+ ram_size = 128 * 1024 * 1024;
+#ifdef DEBUG_BOARD_INIT
+ printf("%s: register cpu\n", __func__);
+#endif
+ env = ppc405ep_init(sysmem, ram_memories, ram_bases, ram_sizes,
+ 33333333, &pic, kernel_filename == NULL ? 0 : 1);
+ /* allocate SRAM */
+ sram_size = 512 * 1024;
+ memory_region_init_ram(sram, NULL, "ef405ep.sram", sram_size,
+ &error_fatal);
+ vmstate_register_ram_global(sram);
+ memory_region_add_subregion(sysmem, 0xFFF00000, sram);
+ /* allocate and load BIOS */
+#ifdef DEBUG_BOARD_INIT
+ printf("%s: register BIOS\n", __func__);
+#endif
+ fl_idx = 0;
+#ifdef USE_FLASH_BIOS
+ dinfo = drive_get(IF_PFLASH, 0, fl_idx);
+ if (dinfo) {
+ BlockBackend *blk = blk_by_legacy_dinfo(dinfo);
+
+ bios_size = blk_getlength(blk);
+ fl_sectors = (bios_size + 65535) >> 16;
+#ifdef DEBUG_BOARD_INIT
+ printf("Register parallel flash %d size %lx"
+ " at addr %lx '%s' %d\n",
+ fl_idx, bios_size, -bios_size,
+ blk_name(blk), fl_sectors);
+#endif
+ pflash_cfi02_register((uint32_t)(-bios_size),
+ NULL, "ef405ep.bios", bios_size,
+ blk, 65536, fl_sectors, 1,
+ 2, 0x0001, 0x22DA, 0x0000, 0x0000, 0x555, 0x2AA,
+ 1);
+ fl_idx++;
+ } else
+#endif
+ {
+#ifdef DEBUG_BOARD_INIT
+ printf("Load BIOS from file\n");
+#endif
+ bios = g_new(MemoryRegion, 1);
+ memory_region_init_ram(bios, NULL, "ef405ep.bios", BIOS_SIZE,
+ &error_fatal);
+ vmstate_register_ram_global(bios);
+
+ if (bios_name == NULL)
+ bios_name = BIOS_FILENAME;
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+ if (filename) {
+ bios_size = load_image(filename, memory_region_get_ram_ptr(bios));
+ g_free(filename);
+ if (bios_size < 0 || bios_size > BIOS_SIZE) {
+ error_report("Could not load PowerPC BIOS '%s'", bios_name);
+ exit(1);
+ }
+ bios_size = (bios_size + 0xfff) & ~0xfff;
+ memory_region_add_subregion(sysmem, (uint32_t)(-bios_size), bios);
+ } else if (!qtest_enabled() || kernel_filename != NULL) {
+ error_report("Could not load PowerPC BIOS '%s'", bios_name);
+ exit(1);
+ } else {
+ /* Avoid an uninitialized variable warning */
+ bios_size = -1;
+ }
+ memory_region_set_readonly(bios, true);
+ }
+ /* Register FPGA */
+#ifdef DEBUG_BOARD_INIT
+ printf("%s: register FPGA\n", __func__);
+#endif
+ ref405ep_fpga_init(sysmem, 0xF0300000);
+ /* Register NVRAM */
+#ifdef DEBUG_BOARD_INIT
+ printf("%s: register NVRAM\n", __func__);
+#endif
+ m48t59_init(NULL, 0xF0000000, 0, 8192, 1968, 8);
+ /* Load kernel */
+ linux_boot = (kernel_filename != NULL);
+ if (linux_boot) {
+#ifdef DEBUG_BOARD_INIT
+ printf("%s: load kernel\n", __func__);
+#endif
+ memset(&bd, 0, sizeof(bd));
+ bd.bi_memstart = 0x00000000;
+ bd.bi_memsize = ram_size;
+ bd.bi_flashstart = -bios_size;
+ bd.bi_flashsize = -bios_size;
+ bd.bi_flashoffset = 0;
+ bd.bi_sramstart = 0xFFF00000;
+ bd.bi_sramsize = sram_size;
+ bd.bi_bootflags = 0;
+ bd.bi_intfreq = 133333333;
+ bd.bi_busfreq = 33333333;
+ bd.bi_baudrate = 115200;
+ bd.bi_s_version[0] = 'Q';
+ bd.bi_s_version[1] = 'M';
+ bd.bi_s_version[2] = 'U';
+ bd.bi_s_version[3] = '\0';
+ bd.bi_r_version[0] = 'Q';
+ bd.bi_r_version[1] = 'E';
+ bd.bi_r_version[2] = 'M';
+ bd.bi_r_version[3] = 'U';
+ bd.bi_r_version[4] = '\0';
+ bd.bi_procfreq = 133333333;
+ bd.bi_plb_busfreq = 33333333;
+ bd.bi_pci_busfreq = 33333333;
+ bd.bi_opbfreq = 33333333;
+ bdloc = ppc405_set_bootinfo(env, &bd, 0x00000001);
+ env->gpr[3] = bdloc;
+ kernel_base = KERNEL_LOAD_ADDR;
+ /* now we can load the kernel */
+ kernel_size = load_image_targphys(kernel_filename, kernel_base,
+ ram_size - kernel_base);
+ if (kernel_size < 0) {
+ fprintf(stderr, "qemu: could not load kernel '%s'\n",
+ kernel_filename);
+ exit(1);
+ }
+ printf("Load kernel size %ld at " TARGET_FMT_lx,
+ kernel_size, kernel_base);
+ /* load initrd */
+ if (initrd_filename) {
+ initrd_base = INITRD_LOAD_ADDR;
+ initrd_size = load_image_targphys(initrd_filename, initrd_base,
+ ram_size - initrd_base);
+ if (initrd_size < 0) {
+ fprintf(stderr, "qemu: could not load initial ram disk '%s'\n",
+ initrd_filename);
+ exit(1);
+ }
+ } else {
+ initrd_base = 0;
+ initrd_size = 0;
+ }
+ env->gpr[4] = initrd_base;
+ env->gpr[5] = initrd_size;
+ if (kernel_cmdline != NULL) {
+ len = strlen(kernel_cmdline);
+ bdloc -= ((len + 255) & ~255);
+ cpu_physical_memory_write(bdloc, kernel_cmdline, len + 1);
+ env->gpr[6] = bdloc;
+ env->gpr[7] = bdloc + len;
+ } else {
+ env->gpr[6] = 0;
+ env->gpr[7] = 0;
+ }
+ env->nip = KERNEL_LOAD_ADDR;
+ } else {
+ kernel_base = 0;
+ kernel_size = 0;
+ initrd_base = 0;
+ initrd_size = 0;
+ bdloc = 0;
+ }
+#ifdef DEBUG_BOARD_INIT
+ printf("bdloc " RAM_ADDR_FMT "\n", bdloc);
+ printf("%s: Done\n", __func__);
+#endif
+}
+
+static void ref405ep_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->desc = "ref405ep";
+ mc->init = ref405ep_init;
+}
+
+static const TypeInfo ref405ep_type = {
+ .name = MACHINE_TYPE_NAME("ref405ep"),
+ .parent = TYPE_MACHINE,
+ .class_init = ref405ep_class_init,
+};
+
+/*****************************************************************************/
+/* AMCC Taihu evaluation board */
+/* - PowerPC 405EP processor
+ * - SDRAM 128 MB at 0x00000000
+ * - Boot flash 2 MB at 0xFFE00000
+ * - Application flash 32 MB at 0xFC000000
+ * - 2 serial ports
+ * - 2 ethernet PHY
+ * - 1 USB 1.1 device 0x50000000
+ * - 1 LCD display 0x50100000
+ * - 1 CPLD 0x50100000
+ * - 1 I2C EEPROM
+ * - 1 I2C thermal sensor
+ * - a set of LEDs
+ * - bit-bang SPI port using GPIOs
+ * - 1 EBC interface connector 0 0x50200000
+ * - 1 cardbus controller + expansion slot.
+ * - 1 PCI expansion slot.
+ */
+typedef struct taihu_cpld_t taihu_cpld_t;
+struct taihu_cpld_t {
+ uint8_t reg0;
+ uint8_t reg1;
+};
+
+static uint64_t taihu_cpld_read(void *opaque, hwaddr addr, unsigned size)
+{
+ taihu_cpld_t *cpld;
+ uint32_t ret;
+
+ cpld = opaque;
+ switch (addr) {
+ case 0x0:
+ ret = cpld->reg0;
+ break;
+ case 0x1:
+ ret = cpld->reg1;
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static void taihu_cpld_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ taihu_cpld_t *cpld;
+
+ cpld = opaque;
+ switch (addr) {
+ case 0x0:
+ /* Read only */
+ break;
+ case 0x1:
+ cpld->reg1 = value;
+ break;
+ default:
+ break;
+ }
+}
+
+static const MemoryRegionOps taihu_cpld_ops = {
+ .read = taihu_cpld_read,
+ .write = taihu_cpld_write,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 1,
+ },
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void taihu_cpld_reset (void *opaque)
+{
+ taihu_cpld_t *cpld;
+
+ cpld = opaque;
+ cpld->reg0 = 0x01;
+ cpld->reg1 = 0x80;
+}
+
+static void taihu_cpld_init(MemoryRegion *sysmem, uint32_t base)
+{
+ taihu_cpld_t *cpld;
+ MemoryRegion *cpld_memory = g_new(MemoryRegion, 1);
+
+ cpld = g_malloc0(sizeof(taihu_cpld_t));
+ memory_region_init_io(cpld_memory, NULL, &taihu_cpld_ops, cpld, "cpld", 0x100);
+ memory_region_add_subregion(sysmem, base, cpld_memory);
+ qemu_register_reset(&taihu_cpld_reset, cpld);
+}
+
+static void taihu_405ep_init(MachineState *machine)
+{
+ ram_addr_t ram_size = machine->ram_size;
+ const char *kernel_filename = machine->kernel_filename;
+ const char *initrd_filename = machine->initrd_filename;
+ char *filename;
+ qemu_irq *pic;
+ MemoryRegion *sysmem = get_system_memory();
+ MemoryRegion *bios;
+ MemoryRegion *ram_memories = g_malloc(2 * sizeof(*ram_memories));
+ MemoryRegion *ram = g_malloc0(sizeof(*ram));
+ hwaddr ram_bases[2], ram_sizes[2];
+ long bios_size;
+ target_ulong kernel_base, initrd_base;
+ long kernel_size, initrd_size;
+ int linux_boot;
+ int fl_idx, fl_sectors;
+ DriveInfo *dinfo;
+
+ /* RAM is soldered to the board so the size cannot be changed */
+ ram_size = 0x08000000;
+ memory_region_allocate_system_memory(ram, NULL, "taihu_405ep.ram",
+ ram_size);
+
+ ram_bases[0] = 0;
+ ram_sizes[0] = 0x04000000;
+ memory_region_init_alias(&ram_memories[0], NULL,
+ "taihu_405ep.ram-0", ram, ram_bases[0],
+ ram_sizes[0]);
+ ram_bases[1] = 0x04000000;
+ ram_sizes[1] = 0x04000000;
+ memory_region_init_alias(&ram_memories[1], NULL,
+ "taihu_405ep.ram-1", ram, ram_bases[1],
+ ram_sizes[1]);
+#ifdef DEBUG_BOARD_INIT
+ printf("%s: register cpu\n", __func__);
+#endif
+ ppc405ep_init(sysmem, ram_memories, ram_bases, ram_sizes,
+ 33333333, &pic, kernel_filename == NULL ? 0 : 1);
+ /* allocate and load BIOS */
+#ifdef DEBUG_BOARD_INIT
+ printf("%s: register BIOS\n", __func__);
+#endif
+ fl_idx = 0;
+#if defined(USE_FLASH_BIOS)
+ dinfo = drive_get(IF_PFLASH, 0, fl_idx);
+ if (dinfo) {
+ BlockBackend *blk = blk_by_legacy_dinfo(dinfo);
+
+ bios_size = blk_getlength(blk);
+ /* XXX: should check that size is 2MB */
+ // bios_size = 2 * 1024 * 1024;
+ fl_sectors = (bios_size + 65535) >> 16;
+#ifdef DEBUG_BOARD_INIT
+ printf("Register parallel flash %d size %lx"
+ " at addr %lx '%s' %d\n",
+ fl_idx, bios_size, -bios_size,
+ blk_name(blk), fl_sectors);
+#endif
+ pflash_cfi02_register((uint32_t)(-bios_size),
+ NULL, "taihu_405ep.bios", bios_size,
+ blk, 65536, fl_sectors, 1,
+ 4, 0x0001, 0x22DA, 0x0000, 0x0000, 0x555, 0x2AA,
+ 1);
+ fl_idx++;
+ } else
+#endif
+ {
+#ifdef DEBUG_BOARD_INIT
+ printf("Load BIOS from file\n");
+#endif
+ if (bios_name == NULL)
+ bios_name = BIOS_FILENAME;
+ bios = g_new(MemoryRegion, 1);
+ memory_region_init_ram(bios, NULL, "taihu_405ep.bios", BIOS_SIZE,
+ &error_fatal);
+ vmstate_register_ram_global(bios);
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+ if (filename) {
+ bios_size = load_image(filename, memory_region_get_ram_ptr(bios));
+ g_free(filename);
+ if (bios_size < 0 || bios_size > BIOS_SIZE) {
+ error_report("Could not load PowerPC BIOS '%s'", bios_name);
+ exit(1);
+ }
+ bios_size = (bios_size + 0xfff) & ~0xfff;
+ memory_region_add_subregion(sysmem, (uint32_t)(-bios_size), bios);
+ } else if (!qtest_enabled()) {
+ error_report("Could not load PowerPC BIOS '%s'", bios_name);
+ exit(1);
+ }
+ memory_region_set_readonly(bios, true);
+ }
+ /* Register Linux flash */
+ dinfo = drive_get(IF_PFLASH, 0, fl_idx);
+ if (dinfo) {
+ BlockBackend *blk = blk_by_legacy_dinfo(dinfo);
+
+ bios_size = blk_getlength(blk);
+ /* XXX: should check that size is 32MB */
+ bios_size = 32 * 1024 * 1024;
+ fl_sectors = (bios_size + 65535) >> 16;
+#ifdef DEBUG_BOARD_INIT
+ printf("Register parallel flash %d size %lx"
+ " at addr " TARGET_FMT_lx " '%s'\n",
+ fl_idx, bios_size, (target_ulong)0xfc000000,
+ blk_name(blk));
+#endif
+ pflash_cfi02_register(0xfc000000, NULL, "taihu_405ep.flash", bios_size,
+ blk, 65536, fl_sectors, 1,
+ 4, 0x0001, 0x22DA, 0x0000, 0x0000, 0x555, 0x2AA,
+ 1);
+ fl_idx++;
+ }
+    /* Register CPLD & LCD display */
+#ifdef DEBUG_BOARD_INIT
+ printf("%s: register CPLD\n", __func__);
+#endif
+ taihu_cpld_init(sysmem, 0x50100000);
+ /* Load kernel */
+ linux_boot = (kernel_filename != NULL);
+ if (linux_boot) {
+#ifdef DEBUG_BOARD_INIT
+ printf("%s: load kernel\n", __func__);
+#endif
+ kernel_base = KERNEL_LOAD_ADDR;
+ /* now we can load the kernel */
+ kernel_size = load_image_targphys(kernel_filename, kernel_base,
+ ram_size - kernel_base);
+ if (kernel_size < 0) {
+ fprintf(stderr, "qemu: could not load kernel '%s'\n",
+ kernel_filename);
+ exit(1);
+ }
+ /* load initrd */
+ if (initrd_filename) {
+ initrd_base = INITRD_LOAD_ADDR;
+ initrd_size = load_image_targphys(initrd_filename, initrd_base,
+ ram_size - initrd_base);
+ if (initrd_size < 0) {
+ fprintf(stderr,
+ "qemu: could not load initial ram disk '%s'\n",
+ initrd_filename);
+ exit(1);
+ }
+ } else {
+ initrd_base = 0;
+ initrd_size = 0;
+ }
+ } else {
+ kernel_base = 0;
+ kernel_size = 0;
+ initrd_base = 0;
+ initrd_size = 0;
+ }
+#ifdef DEBUG_BOARD_INIT
+ printf("%s: Done\n", __func__);
+#endif
+}
+
+static void taihu_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->desc = "taihu";
+ mc->init = taihu_405ep_init;
+}
+
+static const TypeInfo taihu_type = {
+ .name = MACHINE_TYPE_NAME("taihu"),
+ .parent = TYPE_MACHINE,
+ .class_init = taihu_class_init,
+};
+
+static void ppc405_machine_init(void)
+{
+ type_register_static(&ref405ep_type);
+ type_register_static(&taihu_type);
+}
+
+machine_init(ppc405_machine_init)
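+
+/*
+ * Usage note: once registered above, the boards are selected with the
+ * machine option, e.g. "-M ref405ep" or "-M taihu" (a raw kernel image
+ * for -kernel is assumed, since it is loaded directly at address 0).
+ */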
diff --git a/src/hw/ppc/ppc405_uc.c b/src/hw/ppc/ppc405_uc.c
new file mode 100644
index 0000000..10f5dda
--- /dev/null
+++ b/src/hw/ppc/ppc405_uc.c
@@ -0,0 +1,2551 @@
+/*
+ * QEMU PowerPC 405 embedded processors emulation
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/ppc/ppc.h"
+#include "hw/boards.h"
+#include "ppc405.h"
+#include "hw/char/serial.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+#include "qemu/log.h"
+#include "exec/address-spaces.h"
+
+//#define DEBUG_OPBA
+//#define DEBUG_SDRAM
+//#define DEBUG_GPIO
+//#define DEBUG_SERIAL
+//#define DEBUG_OCM
+//#define DEBUG_I2C
+//#define DEBUG_GPT
+//#define DEBUG_MAL
+//#define DEBUG_CLOCKS
+//#define DEBUG_CLOCKS_LL
+
+ram_addr_t ppc405_set_bootinfo (CPUPPCState *env, ppc4xx_bd_info_t *bd,
+ uint32_t flags)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+ ram_addr_t bdloc;
+ int i, n;
+
+    /* We put the bd structure at the top of memory, capped at the 16 MB mark */
+ if (bd->bi_memsize >= 0x01000000UL)
+ bdloc = 0x01000000UL - sizeof(struct ppc4xx_bd_info_t);
+ else
+ bdloc = bd->bi_memsize - sizeof(struct ppc4xx_bd_info_t);
+ stl_be_phys(cs->as, bdloc + 0x00, bd->bi_memstart);
+ stl_be_phys(cs->as, bdloc + 0x04, bd->bi_memsize);
+ stl_be_phys(cs->as, bdloc + 0x08, bd->bi_flashstart);
+ stl_be_phys(cs->as, bdloc + 0x0C, bd->bi_flashsize);
+ stl_be_phys(cs->as, bdloc + 0x10, bd->bi_flashoffset);
+ stl_be_phys(cs->as, bdloc + 0x14, bd->bi_sramstart);
+ stl_be_phys(cs->as, bdloc + 0x18, bd->bi_sramsize);
+ stl_be_phys(cs->as, bdloc + 0x1C, bd->bi_bootflags);
+ stl_be_phys(cs->as, bdloc + 0x20, bd->bi_ipaddr);
+ for (i = 0; i < 6; i++) {
+ stb_phys(cs->as, bdloc + 0x24 + i, bd->bi_enetaddr[i]);
+ }
+ stw_be_phys(cs->as, bdloc + 0x2A, bd->bi_ethspeed);
+ stl_be_phys(cs->as, bdloc + 0x2C, bd->bi_intfreq);
+ stl_be_phys(cs->as, bdloc + 0x30, bd->bi_busfreq);
+ stl_be_phys(cs->as, bdloc + 0x34, bd->bi_baudrate);
+ for (i = 0; i < 4; i++) {
+ stb_phys(cs->as, bdloc + 0x38 + i, bd->bi_s_version[i]);
+ }
+ for (i = 0; i < 32; i++) {
+ stb_phys(cs->as, bdloc + 0x3C + i, bd->bi_r_version[i]);
+ }
+ stl_be_phys(cs->as, bdloc + 0x5C, bd->bi_plb_busfreq);
+ stl_be_phys(cs->as, bdloc + 0x60, bd->bi_pci_busfreq);
+ for (i = 0; i < 6; i++) {
+ stb_phys(cs->as, bdloc + 0x64 + i, bd->bi_pci_enetaddr[i]);
+ }
+ n = 0x6A;
+ if (flags & 0x00000001) {
+ for (i = 0; i < 6; i++)
+ stb_phys(cs->as, bdloc + n++, bd->bi_pci_enetaddr2[i]);
+ }
+ stl_be_phys(cs->as, bdloc + n, bd->bi_opbfreq);
+ n += 4;
+ for (i = 0; i < 2; i++) {
+ stl_be_phys(cs->as, bdloc + n, bd->bi_iic_fast[i]);
+ n += 4;
+ }
+
+ return bdloc;
+}
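+
+/*
+ * Worked example, using the values ref405ep_init() in ppc405_boards.c
+ * passes in: with bi_memsize >= 16 MB the structure lands just below the
+ * 16 MB boundary, bdloc = 0x01000000 - sizeof(ppc4xx_bd_info_t), and the
+ * board code then hands it over U-Boot style:
+ *
+ *     bdloc = ppc405_set_bootinfo(env, &bd, 0x00000001);
+ *     env->gpr[3] = bdloc;                    // r3 = board info pointer
+ *     env->gpr[4] = initrd_base;              // r4/r5 = initrd
+ *     env->gpr[5] = initrd_size;
+ */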
+
+/*****************************************************************************/
+/* Shared peripherals */
+
+/*****************************************************************************/
+/* Peripheral local bus arbiter */
+enum {
+ PLB0_BESR = 0x084,
+ PLB0_BEAR = 0x086,
+ PLB0_ACR = 0x087,
+};
+
+typedef struct ppc4xx_plb_t ppc4xx_plb_t;
+struct ppc4xx_plb_t {
+ uint32_t acr;
+ uint32_t bear;
+ uint32_t besr;
+};
+
+static uint32_t dcr_read_plb (void *opaque, int dcrn)
+{
+ ppc4xx_plb_t *plb;
+ uint32_t ret;
+
+ plb = opaque;
+ switch (dcrn) {
+ case PLB0_ACR:
+ ret = plb->acr;
+ break;
+ case PLB0_BEAR:
+ ret = plb->bear;
+ break;
+ case PLB0_BESR:
+ ret = plb->besr;
+ break;
+ default:
+ /* Avoid gcc warning */
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static void dcr_write_plb (void *opaque, int dcrn, uint32_t val)
+{
+ ppc4xx_plb_t *plb;
+
+ plb = opaque;
+ switch (dcrn) {
+ case PLB0_ACR:
+ /* We don't care about the actual parameters written as
+ * we don't manage any priorities on the bus
+ */
+ plb->acr = val & 0xF8000000;
+ break;
+ case PLB0_BEAR:
+ /* Read only */
+ break;
+ case PLB0_BESR:
+ /* Write-clear */
+ plb->besr &= ~val;
+ break;
+ }
+}
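+
+/*
+ * Note on the "write-clear" convention used by the error registers in
+ * these models (PLB0_BESR here, POB0_BESR0/1, MAL0_ESR, ...): the guest
+ * clears an error bit by writing a 1 to it. Illustrative trace: if
+ * plb->besr is 0x0000000C and the guest executes mtdcr PLB0_BESR with
+ * 0x00000008, only that bit is cleared and besr becomes 0x00000004.
+ */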
+
+static void ppc4xx_plb_reset (void *opaque)
+{
+ ppc4xx_plb_t *plb;
+
+ plb = opaque;
+ plb->acr = 0x00000000;
+ plb->bear = 0x00000000;
+ plb->besr = 0x00000000;
+}
+
+static void ppc4xx_plb_init(CPUPPCState *env)
+{
+ ppc4xx_plb_t *plb;
+
+ plb = g_malloc0(sizeof(ppc4xx_plb_t));
+ ppc_dcr_register(env, PLB0_ACR, plb, &dcr_read_plb, &dcr_write_plb);
+ ppc_dcr_register(env, PLB0_BEAR, plb, &dcr_read_plb, &dcr_write_plb);
+ ppc_dcr_register(env, PLB0_BESR, plb, &dcr_read_plb, &dcr_write_plb);
+ qemu_register_reset(ppc4xx_plb_reset, plb);
+}
+
+/*****************************************************************************/
+/* PLB to OPB bridge */
+enum {
+ POB0_BESR0 = 0x0A0,
+ POB0_BESR1 = 0x0A2,
+ POB0_BEAR = 0x0A4,
+};
+
+typedef struct ppc4xx_pob_t ppc4xx_pob_t;
+struct ppc4xx_pob_t {
+ uint32_t bear;
+ uint32_t besr0;
+ uint32_t besr1;
+};
+
+static uint32_t dcr_read_pob (void *opaque, int dcrn)
+{
+ ppc4xx_pob_t *pob;
+ uint32_t ret;
+
+ pob = opaque;
+ switch (dcrn) {
+ case POB0_BEAR:
+ ret = pob->bear;
+ break;
+ case POB0_BESR0:
+ ret = pob->besr0;
+ break;
+ case POB0_BESR1:
+ ret = pob->besr1;
+ break;
+ default:
+ /* Avoid gcc warning */
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static void dcr_write_pob (void *opaque, int dcrn, uint32_t val)
+{
+ ppc4xx_pob_t *pob;
+
+ pob = opaque;
+ switch (dcrn) {
+ case POB0_BEAR:
+ /* Read only */
+ break;
+ case POB0_BESR0:
+ /* Write-clear */
+ pob->besr0 &= ~val;
+ break;
+ case POB0_BESR1:
+ /* Write-clear */
+ pob->besr1 &= ~val;
+ break;
+ }
+}
+
+static void ppc4xx_pob_reset (void *opaque)
+{
+ ppc4xx_pob_t *pob;
+
+ pob = opaque;
+ /* No error */
+ pob->bear = 0x00000000;
+ pob->besr0 = 0x0000000;
+ pob->besr1 = 0x0000000;
+}
+
+static void ppc4xx_pob_init(CPUPPCState *env)
+{
+ ppc4xx_pob_t *pob;
+
+ pob = g_malloc0(sizeof(ppc4xx_pob_t));
+ ppc_dcr_register(env, POB0_BEAR, pob, &dcr_read_pob, &dcr_write_pob);
+ ppc_dcr_register(env, POB0_BESR0, pob, &dcr_read_pob, &dcr_write_pob);
+ ppc_dcr_register(env, POB0_BESR1, pob, &dcr_read_pob, &dcr_write_pob);
+ qemu_register_reset(ppc4xx_pob_reset, pob);
+}
+
+/*****************************************************************************/
+/* OPB arbiter */
+typedef struct ppc4xx_opba_t ppc4xx_opba_t;
+struct ppc4xx_opba_t {
+ MemoryRegion io;
+ uint8_t cr;
+ uint8_t pr;
+};
+
+static uint32_t opba_readb (void *opaque, hwaddr addr)
+{
+ ppc4xx_opba_t *opba;
+ uint32_t ret;
+
+#ifdef DEBUG_OPBA
+ printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
+#endif
+ opba = opaque;
+ switch (addr) {
+ case 0x00:
+ ret = opba->cr;
+ break;
+ case 0x01:
+ ret = opba->pr;
+ break;
+ default:
+ ret = 0x00;
+ break;
+ }
+
+ return ret;
+}
+
+static void opba_writeb (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+ ppc4xx_opba_t *opba;
+
+#ifdef DEBUG_OPBA
+ printf("%s: addr " TARGET_FMT_plx " val %08" PRIx32 "\n", __func__, addr,
+ value);
+#endif
+ opba = opaque;
+ switch (addr) {
+ case 0x00:
+ opba->cr = value & 0xF8;
+ break;
+ case 0x01:
+ opba->pr = value & 0xFF;
+ break;
+ default:
+ break;
+ }
+}
+
+static uint32_t opba_readw (void *opaque, hwaddr addr)
+{
+ uint32_t ret;
+
+#ifdef DEBUG_OPBA
+ printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
+#endif
+ ret = opba_readb(opaque, addr) << 8;
+ ret |= opba_readb(opaque, addr + 1);
+
+ return ret;
+}
+
+static void opba_writew (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+#ifdef DEBUG_OPBA
+ printf("%s: addr " TARGET_FMT_plx " val %08" PRIx32 "\n", __func__, addr,
+ value);
+#endif
+ opba_writeb(opaque, addr, value >> 8);
+ opba_writeb(opaque, addr + 1, value);
+}
+
+static uint32_t opba_readl (void *opaque, hwaddr addr)
+{
+ uint32_t ret;
+
+#ifdef DEBUG_OPBA
+ printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
+#endif
+ ret = opba_readb(opaque, addr) << 24;
+ ret |= opba_readb(opaque, addr + 1) << 16;
+
+ return ret;
+}
+
+static void opba_writel (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+#ifdef DEBUG_OPBA
+ printf("%s: addr " TARGET_FMT_plx " val %08" PRIx32 "\n", __func__, addr,
+ value);
+#endif
+ opba_writeb(opaque, addr, value >> 24);
+ opba_writeb(opaque, addr + 1, value >> 16);
+}
+
+static const MemoryRegionOps opba_ops = {
+ .old_mmio = {
+ .read = { opba_readb, opba_readw, opba_readl, },
+ .write = { opba_writeb, opba_writew, opba_writel, },
+ },
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void ppc4xx_opba_reset (void *opaque)
+{
+ ppc4xx_opba_t *opba;
+
+ opba = opaque;
+ opba->cr = 0x00; /* No dynamic priorities - park disabled */
+ opba->pr = 0x11;
+}
+
+static void ppc4xx_opba_init(hwaddr base)
+{
+ ppc4xx_opba_t *opba;
+
+ opba = g_malloc0(sizeof(ppc4xx_opba_t));
+#ifdef DEBUG_OPBA
+ printf("%s: offset " TARGET_FMT_plx "\n", __func__, base);
+#endif
+ memory_region_init_io(&opba->io, NULL, &opba_ops, opba, "opba", 0x002);
+ memory_region_add_subregion(get_system_memory(), base, &opba->io);
+ qemu_register_reset(ppc4xx_opba_reset, opba);
+}
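+
+/*
+ * Access note: the wider accessors above are synthesized from byte reads,
+ * so e.g. a 32-bit guest read at offset 0x00 returns (cr << 24) | (pr << 16);
+ * the region itself is only two bytes long.
+ */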
+
+/*****************************************************************************/
+/* Code decompression controller */
+/* XXX: TODO */
+
+/*****************************************************************************/
+/* Peripheral controller */
+typedef struct ppc4xx_ebc_t ppc4xx_ebc_t;
+struct ppc4xx_ebc_t {
+ uint32_t addr;
+ uint32_t bcr[8];
+ uint32_t bap[8];
+ uint32_t bear;
+ uint32_t besr0;
+ uint32_t besr1;
+ uint32_t cfg;
+};
+
+enum {
+ EBC0_CFGADDR = 0x012,
+ EBC0_CFGDATA = 0x013,
+};
+
+static uint32_t dcr_read_ebc (void *opaque, int dcrn)
+{
+ ppc4xx_ebc_t *ebc;
+ uint32_t ret;
+
+ ebc = opaque;
+ switch (dcrn) {
+ case EBC0_CFGADDR:
+ ret = ebc->addr;
+ break;
+ case EBC0_CFGDATA:
+ switch (ebc->addr) {
+ case 0x00: /* B0CR */
+ ret = ebc->bcr[0];
+ break;
+ case 0x01: /* B1CR */
+ ret = ebc->bcr[1];
+ break;
+ case 0x02: /* B2CR */
+ ret = ebc->bcr[2];
+ break;
+ case 0x03: /* B3CR */
+ ret = ebc->bcr[3];
+ break;
+ case 0x04: /* B4CR */
+ ret = ebc->bcr[4];
+ break;
+ case 0x05: /* B5CR */
+ ret = ebc->bcr[5];
+ break;
+ case 0x06: /* B6CR */
+ ret = ebc->bcr[6];
+ break;
+ case 0x07: /* B7CR */
+ ret = ebc->bcr[7];
+ break;
+ case 0x10: /* B0AP */
+ ret = ebc->bap[0];
+ break;
+ case 0x11: /* B1AP */
+ ret = ebc->bap[1];
+ break;
+ case 0x12: /* B2AP */
+ ret = ebc->bap[2];
+ break;
+ case 0x13: /* B3AP */
+ ret = ebc->bap[3];
+ break;
+ case 0x14: /* B4AP */
+ ret = ebc->bap[4];
+ break;
+ case 0x15: /* B5AP */
+ ret = ebc->bap[5];
+ break;
+ case 0x16: /* B6AP */
+ ret = ebc->bap[6];
+ break;
+ case 0x17: /* B7AP */
+ ret = ebc->bap[7];
+ break;
+ case 0x20: /* BEAR */
+ ret = ebc->bear;
+ break;
+ case 0x21: /* BESR0 */
+ ret = ebc->besr0;
+ break;
+ case 0x22: /* BESR1 */
+ ret = ebc->besr1;
+ break;
+ case 0x23: /* CFG */
+ ret = ebc->cfg;
+ break;
+ default:
+ ret = 0x00000000;
+ break;
+ }
+ break;
+ default:
+ ret = 0x00000000;
+ break;
+ }
+
+ return ret;
+}
+
+static void dcr_write_ebc (void *opaque, int dcrn, uint32_t val)
+{
+ ppc4xx_ebc_t *ebc;
+
+ ebc = opaque;
+ switch (dcrn) {
+ case EBC0_CFGADDR:
+ ebc->addr = val;
+ break;
+ case EBC0_CFGDATA:
+ switch (ebc->addr) {
+ case 0x00: /* B0CR */
+ break;
+ case 0x01: /* B1CR */
+ break;
+ case 0x02: /* B2CR */
+ break;
+ case 0x03: /* B3CR */
+ break;
+ case 0x04: /* B4CR */
+ break;
+ case 0x05: /* B5CR */
+ break;
+ case 0x06: /* B6CR */
+ break;
+ case 0x07: /* B7CR */
+ break;
+ case 0x10: /* B0AP */
+ break;
+ case 0x11: /* B1AP */
+ break;
+ case 0x12: /* B2AP */
+ break;
+ case 0x13: /* B3AP */
+ break;
+ case 0x14: /* B4AP */
+ break;
+ case 0x15: /* B5AP */
+ break;
+ case 0x16: /* B6AP */
+ break;
+ case 0x17: /* B7AP */
+ break;
+ case 0x20: /* BEAR */
+ break;
+ case 0x21: /* BESR0 */
+ break;
+ case 0x22: /* BESR1 */
+ break;
+ case 0x23: /* CFG */
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void ebc_reset (void *opaque)
+{
+ ppc4xx_ebc_t *ebc;
+ int i;
+
+ ebc = opaque;
+ ebc->addr = 0x00000000;
+    for (i = 0; i < 8; i++) {
+        ebc->bap[i] = 0x00000000;
+        ebc->bcr[i] = 0x00000000;
+    }
+    /* bank 0 has non-zero reset values */
+    ebc->bap[0] = 0x7F8FFE80;
+    ebc->bcr[0] = 0xFFE28000;
+ ebc->besr0 = 0x00000000;
+ ebc->besr1 = 0x00000000;
+ ebc->cfg = 0x80400000;
+}
+
+static void ppc405_ebc_init(CPUPPCState *env)
+{
+ ppc4xx_ebc_t *ebc;
+
+ ebc = g_malloc0(sizeof(ppc4xx_ebc_t));
+ qemu_register_reset(&ebc_reset, ebc);
+ ppc_dcr_register(env, EBC0_CFGADDR,
+ ebc, &dcr_read_ebc, &dcr_write_ebc);
+ ppc_dcr_register(env, EBC0_CFGDATA,
+ ebc, &dcr_read_ebc, &dcr_write_ebc);
+}
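+
+/*
+ * Access sketch: the EBC is indirectly addressed - software writes the
+ * register index to EBC0_CFGADDR and then accesses EBC0_CFGDATA. In terms
+ * of the DCR helpers from ppc.c (register index 0x23 = CFG, chosen here
+ * just as an example):
+ *
+ *     uint32_t cfg;
+ *     ppc_dcr_write(env->dcr_env, EBC0_CFGADDR, 0x23);    // select CFG
+ *     ppc_dcr_read(env->dcr_env, EBC0_CFGDATA, &cfg);     // read it back
+ */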
+
+/*****************************************************************************/
+/* DMA controller */
+enum {
+ DMA0_CR0 = 0x100,
+ DMA0_CT0 = 0x101,
+ DMA0_DA0 = 0x102,
+ DMA0_SA0 = 0x103,
+ DMA0_SG0 = 0x104,
+ DMA0_CR1 = 0x108,
+ DMA0_CT1 = 0x109,
+ DMA0_DA1 = 0x10A,
+ DMA0_SA1 = 0x10B,
+ DMA0_SG1 = 0x10C,
+ DMA0_CR2 = 0x110,
+ DMA0_CT2 = 0x111,
+ DMA0_DA2 = 0x112,
+ DMA0_SA2 = 0x113,
+ DMA0_SG2 = 0x114,
+ DMA0_CR3 = 0x118,
+ DMA0_CT3 = 0x119,
+ DMA0_DA3 = 0x11A,
+ DMA0_SA3 = 0x11B,
+ DMA0_SG3 = 0x11C,
+ DMA0_SR = 0x120,
+ DMA0_SGC = 0x123,
+ DMA0_SLP = 0x125,
+ DMA0_POL = 0x126,
+};
+
+typedef struct ppc405_dma_t ppc405_dma_t;
+struct ppc405_dma_t {
+ qemu_irq irqs[4];
+ uint32_t cr[4];
+ uint32_t ct[4];
+ uint32_t da[4];
+ uint32_t sa[4];
+ uint32_t sg[4];
+ uint32_t sr;
+ uint32_t sgc;
+ uint32_t slp;
+ uint32_t pol;
+};
+
+static uint32_t dcr_read_dma (void *opaque, int dcrn)
+{
+ return 0;
+}
+
+static void dcr_write_dma (void *opaque, int dcrn, uint32_t val)
+{
+}
+
+static void ppc405_dma_reset (void *opaque)
+{
+ ppc405_dma_t *dma;
+ int i;
+
+ dma = opaque;
+ for (i = 0; i < 4; i++) {
+ dma->cr[i] = 0x00000000;
+ dma->ct[i] = 0x00000000;
+ dma->da[i] = 0x00000000;
+ dma->sa[i] = 0x00000000;
+ dma->sg[i] = 0x00000000;
+ }
+ dma->sr = 0x00000000;
+ dma->sgc = 0x00000000;
+ dma->slp = 0x7C000000;
+ dma->pol = 0x00000000;
+}
+
+static void ppc405_dma_init(CPUPPCState *env, qemu_irq irqs[4])
+{
+ ppc405_dma_t *dma;
+
+ dma = g_malloc0(sizeof(ppc405_dma_t));
+ memcpy(dma->irqs, irqs, 4 * sizeof(qemu_irq));
+ qemu_register_reset(&ppc405_dma_reset, dma);
+ ppc_dcr_register(env, DMA0_CR0,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_CT0,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_DA0,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_SA0,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_SG0,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_CR1,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_CT1,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_DA1,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_SA1,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_SG1,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_CR2,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_CT2,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_DA2,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_SA2,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_SG2,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_CR3,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_CT3,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_DA3,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_SA3,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_SG3,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_SR,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_SGC,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_SLP,
+ dma, &dcr_read_dma, &dcr_write_dma);
+ ppc_dcr_register(env, DMA0_POL,
+ dma, &dcr_read_dma, &dcr_write_dma);
+}
+
+/*****************************************************************************/
+/* GPIO */
+typedef struct ppc405_gpio_t ppc405_gpio_t;
+struct ppc405_gpio_t {
+ MemoryRegion io;
+ uint32_t or;
+ uint32_t tcr;
+ uint32_t osrh;
+ uint32_t osrl;
+ uint32_t tsrh;
+ uint32_t tsrl;
+ uint32_t odr;
+ uint32_t ir;
+ uint32_t rr1;
+ uint32_t isr1h;
+ uint32_t isr1l;
+};
+
+static uint32_t ppc405_gpio_readb (void *opaque, hwaddr addr)
+{
+#ifdef DEBUG_GPIO
+ printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
+#endif
+
+ return 0;
+}
+
+static void ppc405_gpio_writeb (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+#ifdef DEBUG_GPIO
+ printf("%s: addr " TARGET_FMT_plx " val %08" PRIx32 "\n", __func__, addr,
+ value);
+#endif
+}
+
+static uint32_t ppc405_gpio_readw (void *opaque, hwaddr addr)
+{
+#ifdef DEBUG_GPIO
+ printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
+#endif
+
+ return 0;
+}
+
+static void ppc405_gpio_writew (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+#ifdef DEBUG_GPIO
+ printf("%s: addr " TARGET_FMT_plx " val %08" PRIx32 "\n", __func__, addr,
+ value);
+#endif
+}
+
+static uint32_t ppc405_gpio_readl (void *opaque, hwaddr addr)
+{
+#ifdef DEBUG_GPIO
+ printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
+#endif
+
+ return 0;
+}
+
+static void ppc405_gpio_writel (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+#ifdef DEBUG_GPIO
+ printf("%s: addr " TARGET_FMT_plx " val %08" PRIx32 "\n", __func__, addr,
+ value);
+#endif
+}
+
+static const MemoryRegionOps ppc405_gpio_ops = {
+ .old_mmio = {
+ .read = { ppc405_gpio_readb, ppc405_gpio_readw, ppc405_gpio_readl, },
+ .write = { ppc405_gpio_writeb, ppc405_gpio_writew, ppc405_gpio_writel, },
+ },
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void ppc405_gpio_reset (void *opaque)
+{
+}
+
+static void ppc405_gpio_init(hwaddr base)
+{
+ ppc405_gpio_t *gpio;
+
+ gpio = g_malloc0(sizeof(ppc405_gpio_t));
+#ifdef DEBUG_GPIO
+ printf("%s: offset " TARGET_FMT_plx "\n", __func__, base);
+#endif
+    memory_region_init_io(&gpio->io, NULL, &ppc405_gpio_ops, gpio, "gpio", 0x038);
+ memory_region_add_subregion(get_system_memory(), base, &gpio->io);
+ qemu_register_reset(&ppc405_gpio_reset, gpio);
+}
+
+/*****************************************************************************/
+/* On Chip Memory */
+enum {
+ OCM0_ISARC = 0x018,
+ OCM0_ISACNTL = 0x019,
+ OCM0_DSARC = 0x01A,
+ OCM0_DSACNTL = 0x01B,
+};
+
+typedef struct ppc405_ocm_t ppc405_ocm_t;
+struct ppc405_ocm_t {
+ MemoryRegion ram;
+ MemoryRegion isarc_ram;
+ MemoryRegion dsarc_ram;
+ uint32_t isarc;
+ uint32_t isacntl;
+ uint32_t dsarc;
+ uint32_t dsacntl;
+};
+
+static void ocm_update_mappings (ppc405_ocm_t *ocm,
+ uint32_t isarc, uint32_t isacntl,
+ uint32_t dsarc, uint32_t dsacntl)
+{
+#ifdef DEBUG_OCM
+ printf("OCM update ISA %08" PRIx32 " %08" PRIx32 " (%08" PRIx32
+ " %08" PRIx32 ") DSA %08" PRIx32 " %08" PRIx32
+ " (%08" PRIx32 " %08" PRIx32 ")\n",
+ isarc, isacntl, dsarc, dsacntl,
+ ocm->isarc, ocm->isacntl, ocm->dsarc, ocm->dsacntl);
+#endif
+ if (ocm->isarc != isarc ||
+ (ocm->isacntl & 0x80000000) != (isacntl & 0x80000000)) {
+ if (ocm->isacntl & 0x80000000) {
+ /* Unmap previously assigned memory region */
+ printf("OCM unmap ISA %08" PRIx32 "\n", ocm->isarc);
+ memory_region_del_subregion(get_system_memory(), &ocm->isarc_ram);
+ }
+ if (isacntl & 0x80000000) {
+ /* Map new instruction memory region */
+#ifdef DEBUG_OCM
+ printf("OCM map ISA %08" PRIx32 "\n", isarc);
+#endif
+ memory_region_add_subregion(get_system_memory(), isarc,
+ &ocm->isarc_ram);
+ }
+ }
+ if (ocm->dsarc != dsarc ||
+ (ocm->dsacntl & 0x80000000) != (dsacntl & 0x80000000)) {
+ if (ocm->dsacntl & 0x80000000) {
+ /* Beware not to unmap the region we just mapped */
+ if (!(isacntl & 0x80000000) || ocm->dsarc != isarc) {
+ /* Unmap previously assigned memory region */
+#ifdef DEBUG_OCM
+ printf("OCM unmap DSA %08" PRIx32 "\n", ocm->dsarc);
+#endif
+ memory_region_del_subregion(get_system_memory(),
+ &ocm->dsarc_ram);
+ }
+ }
+ if (dsacntl & 0x80000000) {
+ /* Beware not to remap the region we just mapped */
+ if (!(isacntl & 0x80000000) || dsarc != isarc) {
+ /* Map new data memory region */
+#ifdef DEBUG_OCM
+ printf("OCM map DSA %08" PRIx32 "\n", dsarc);
+#endif
+ memory_region_add_subregion(get_system_memory(), dsarc,
+ &ocm->dsarc_ram);
+ }
+ }
+ }
+}
+
+static uint32_t dcr_read_ocm (void *opaque, int dcrn)
+{
+ ppc405_ocm_t *ocm;
+ uint32_t ret;
+
+ ocm = opaque;
+ switch (dcrn) {
+ case OCM0_ISARC:
+ ret = ocm->isarc;
+ break;
+ case OCM0_ISACNTL:
+ ret = ocm->isacntl;
+ break;
+ case OCM0_DSARC:
+ ret = ocm->dsarc;
+ break;
+ case OCM0_DSACNTL:
+ ret = ocm->dsacntl;
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static void dcr_write_ocm (void *opaque, int dcrn, uint32_t val)
+{
+ ppc405_ocm_t *ocm;
+ uint32_t isarc, dsarc, isacntl, dsacntl;
+
+ ocm = opaque;
+ isarc = ocm->isarc;
+ dsarc = ocm->dsarc;
+ isacntl = ocm->isacntl;
+ dsacntl = ocm->dsacntl;
+ switch (dcrn) {
+ case OCM0_ISARC:
+ isarc = val & 0xFC000000;
+ break;
+ case OCM0_ISACNTL:
+ isacntl = val & 0xC0000000;
+ break;
+    case OCM0_DSARC:
+        dsarc = val & 0xFC000000;
+        break;
+    case OCM0_DSACNTL:
+        dsacntl = val & 0xC0000000;
+        break;
+ }
+ ocm_update_mappings(ocm, isarc, isacntl, dsarc, dsacntl);
+ ocm->isarc = isarc;
+ ocm->dsarc = dsarc;
+ ocm->isacntl = isacntl;
+ ocm->dsacntl = dsacntl;
+}
+
+static void ocm_reset (void *opaque)
+{
+ ppc405_ocm_t *ocm;
+ uint32_t isarc, dsarc, isacntl, dsacntl;
+
+ ocm = opaque;
+ isarc = 0x00000000;
+ isacntl = 0x00000000;
+ dsarc = 0x00000000;
+ dsacntl = 0x00000000;
+ ocm_update_mappings(ocm, isarc, isacntl, dsarc, dsacntl);
+ ocm->isarc = isarc;
+ ocm->dsarc = dsarc;
+ ocm->isacntl = isacntl;
+ ocm->dsacntl = dsacntl;
+}
+
+static void ppc405_ocm_init(CPUPPCState *env)
+{
+ ppc405_ocm_t *ocm;
+
+ ocm = g_malloc0(sizeof(ppc405_ocm_t));
+ /* XXX: Size is 4096 or 0x04000000 */
+ memory_region_init_ram(&ocm->isarc_ram, NULL, "ppc405.ocm", 4096,
+ &error_fatal);
+ vmstate_register_ram_global(&ocm->isarc_ram);
+ memory_region_init_alias(&ocm->dsarc_ram, NULL, "ppc405.dsarc", &ocm->isarc_ram,
+ 0, 4096);
+ qemu_register_reset(&ocm_reset, ocm);
+ ppc_dcr_register(env, OCM0_ISARC,
+ ocm, &dcr_read_ocm, &dcr_write_ocm);
+ ppc_dcr_register(env, OCM0_ISACNTL,
+ ocm, &dcr_read_ocm, &dcr_write_ocm);
+ ppc_dcr_register(env, OCM0_DSARC,
+ ocm, &dcr_read_ocm, &dcr_write_ocm);
+ ppc_dcr_register(env, OCM0_DSACNTL,
+ ocm, &dcr_read_ocm, &dcr_write_ocm);
+}
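+
+/*
+ * Mapping sketch: the OCM only appears in the guest physical address space
+ * once the enable bit (0x80000000) of ISACNTL/DSACNTL is set. An
+ * instruction-side enable sequence, with an illustrative base address,
+ * looks like
+ *
+ *     ppc_dcr_write(env->dcr_env, OCM0_ISARC, 0xF8000000);    // base
+ *     ppc_dcr_write(env->dcr_env, OCM0_ISACNTL, 0x80000000);  // enable
+ *
+ * after which ocm_update_mappings() maps isarc_ram at 0xF8000000.
+ */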
+
+/*****************************************************************************/
+/* I2C controller */
+typedef struct ppc4xx_i2c_t ppc4xx_i2c_t;
+struct ppc4xx_i2c_t {
+ qemu_irq irq;
+ MemoryRegion iomem;
+ uint8_t mdata;
+ uint8_t lmadr;
+ uint8_t hmadr;
+ uint8_t cntl;
+ uint8_t mdcntl;
+ uint8_t sts;
+ uint8_t extsts;
+ uint8_t sdata;
+ uint8_t lsadr;
+ uint8_t hsadr;
+ uint8_t clkdiv;
+ uint8_t intrmsk;
+ uint8_t xfrcnt;
+ uint8_t xtcntlss;
+ uint8_t directcntl;
+};
+
+static uint32_t ppc4xx_i2c_readb (void *opaque, hwaddr addr)
+{
+ ppc4xx_i2c_t *i2c;
+ uint32_t ret;
+
+#ifdef DEBUG_I2C
+ printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
+#endif
+ i2c = opaque;
+ switch (addr) {
+ case 0x00:
+ // i2c_readbyte(&i2c->mdata);
+ ret = i2c->mdata;
+ break;
+ case 0x02:
+ ret = i2c->sdata;
+ break;
+ case 0x04:
+ ret = i2c->lmadr;
+ break;
+ case 0x05:
+ ret = i2c->hmadr;
+ break;
+ case 0x06:
+ ret = i2c->cntl;
+ break;
+ case 0x07:
+ ret = i2c->mdcntl;
+ break;
+ case 0x08:
+ ret = i2c->sts;
+ break;
+ case 0x09:
+ ret = i2c->extsts;
+ break;
+ case 0x0A:
+ ret = i2c->lsadr;
+ break;
+ case 0x0B:
+ ret = i2c->hsadr;
+ break;
+ case 0x0C:
+ ret = i2c->clkdiv;
+ break;
+ case 0x0D:
+ ret = i2c->intrmsk;
+ break;
+ case 0x0E:
+ ret = i2c->xfrcnt;
+ break;
+ case 0x0F:
+ ret = i2c->xtcntlss;
+ break;
+ case 0x10:
+ ret = i2c->directcntl;
+ break;
+ default:
+ ret = 0x00;
+ break;
+ }
+#ifdef DEBUG_I2C
+ printf("%s: addr " TARGET_FMT_plx " %02" PRIx32 "\n", __func__, addr, ret);
+#endif
+
+ return ret;
+}
+
+static void ppc4xx_i2c_writeb (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+ ppc4xx_i2c_t *i2c;
+
+#ifdef DEBUG_I2C
+ printf("%s: addr " TARGET_FMT_plx " val %08" PRIx32 "\n", __func__, addr,
+ value);
+#endif
+ i2c = opaque;
+ switch (addr) {
+ case 0x00:
+ i2c->mdata = value;
+ // i2c_sendbyte(&i2c->mdata);
+ break;
+ case 0x02:
+ i2c->sdata = value;
+ break;
+ case 0x04:
+ i2c->lmadr = value;
+ break;
+ case 0x05:
+ i2c->hmadr = value;
+ break;
+ case 0x06:
+ i2c->cntl = value;
+ break;
+ case 0x07:
+ i2c->mdcntl = value & 0xDF;
+ break;
+ case 0x08:
+ i2c->sts &= ~(value & 0x0A);
+ break;
+ case 0x09:
+ i2c->extsts &= ~(value & 0x8F);
+ break;
+ case 0x0A:
+ i2c->lsadr = value;
+ break;
+ case 0x0B:
+ i2c->hsadr = value;
+ break;
+ case 0x0C:
+ i2c->clkdiv = value;
+ break;
+ case 0x0D:
+ i2c->intrmsk = value;
+ break;
+ case 0x0E:
+ i2c->xfrcnt = value & 0x77;
+ break;
+ case 0x0F:
+ i2c->xtcntlss = value;
+ break;
+ case 0x10:
+ i2c->directcntl = value & 0x7;
+ break;
+ }
+}
+
+static uint32_t ppc4xx_i2c_readw (void *opaque, hwaddr addr)
+{
+ uint32_t ret;
+
+#ifdef DEBUG_I2C
+ printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
+#endif
+ ret = ppc4xx_i2c_readb(opaque, addr) << 8;
+ ret |= ppc4xx_i2c_readb(opaque, addr + 1);
+
+ return ret;
+}
+
+static void ppc4xx_i2c_writew (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+#ifdef DEBUG_I2C
+ printf("%s: addr " TARGET_FMT_plx " val %08" PRIx32 "\n", __func__, addr,
+ value);
+#endif
+ ppc4xx_i2c_writeb(opaque, addr, value >> 8);
+ ppc4xx_i2c_writeb(opaque, addr + 1, value);
+}
+
+static uint32_t ppc4xx_i2c_readl (void *opaque, hwaddr addr)
+{
+ uint32_t ret;
+
+#ifdef DEBUG_I2C
+ printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
+#endif
+ ret = ppc4xx_i2c_readb(opaque, addr) << 24;
+ ret |= ppc4xx_i2c_readb(opaque, addr + 1) << 16;
+ ret |= ppc4xx_i2c_readb(opaque, addr + 2) << 8;
+ ret |= ppc4xx_i2c_readb(opaque, addr + 3);
+
+ return ret;
+}
+
+static void ppc4xx_i2c_writel (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+#ifdef DEBUG_I2C
+ printf("%s: addr " TARGET_FMT_plx " val %08" PRIx32 "\n", __func__, addr,
+ value);
+#endif
+ ppc4xx_i2c_writeb(opaque, addr, value >> 24);
+ ppc4xx_i2c_writeb(opaque, addr + 1, value >> 16);
+ ppc4xx_i2c_writeb(opaque, addr + 2, value >> 8);
+ ppc4xx_i2c_writeb(opaque, addr + 3, value);
+}
+
+static const MemoryRegionOps i2c_ops = {
+ .old_mmio = {
+ .read = { ppc4xx_i2c_readb, ppc4xx_i2c_readw, ppc4xx_i2c_readl, },
+ .write = { ppc4xx_i2c_writeb, ppc4xx_i2c_writew, ppc4xx_i2c_writel, },
+ },
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void ppc4xx_i2c_reset (void *opaque)
+{
+ ppc4xx_i2c_t *i2c;
+
+ i2c = opaque;
+ i2c->mdata = 0x00;
+ i2c->sdata = 0x00;
+ i2c->cntl = 0x00;
+ i2c->mdcntl = 0x00;
+ i2c->sts = 0x00;
+ i2c->extsts = 0x00;
+ i2c->clkdiv = 0x00;
+ i2c->xfrcnt = 0x00;
+ i2c->directcntl = 0x0F;
+}
+
+static void ppc405_i2c_init(hwaddr base, qemu_irq irq)
+{
+ ppc4xx_i2c_t *i2c;
+
+ i2c = g_malloc0(sizeof(ppc4xx_i2c_t));
+ i2c->irq = irq;
+#ifdef DEBUG_I2C
+ printf("%s: offset " TARGET_FMT_plx "\n", __func__, base);
+#endif
+ memory_region_init_io(&i2c->iomem, NULL, &i2c_ops, i2c, "i2c", 0x011);
+ memory_region_add_subregion(get_system_memory(), base, &i2c->iomem);
+ qemu_register_reset(ppc4xx_i2c_reset, i2c);
+}
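+
+/*
+ * Note: this model only latches register values (the i2c_sendbyte /
+ * i2c_readbyte hooks are commented out above), so guest accesses see the
+ * last written values but no real I2C transfer is performed.
+ */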
+
+/*****************************************************************************/
+/* General purpose timers */
+typedef struct ppc4xx_gpt_t ppc4xx_gpt_t;
+struct ppc4xx_gpt_t {
+ MemoryRegion iomem;
+ int64_t tb_offset;
+ uint32_t tb_freq;
+ QEMUTimer *timer;
+ qemu_irq irqs[5];
+ uint32_t oe;
+ uint32_t ol;
+ uint32_t im;
+ uint32_t is;
+ uint32_t ie;
+ uint32_t comp[5];
+ uint32_t mask[5];
+};
+
+static uint32_t ppc4xx_gpt_readb (void *opaque, hwaddr addr)
+{
+#ifdef DEBUG_GPT
+ printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
+#endif
+ /* XXX: generate a bus fault */
+ return -1;
+}
+
+static void ppc4xx_gpt_writeb (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+#ifdef DEBUG_GPT
+ printf("%s: addr " TARGET_FMT_plx " val %08" PRIx32 "\n", __func__, addr,
+ value);
+#endif
+ /* XXX: generate a bus fault */
+}
+
+static uint32_t ppc4xx_gpt_readw (void *opaque, hwaddr addr)
+{
+#ifdef DEBUG_GPT
+ printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
+#endif
+ /* XXX: generate a bus fault */
+ return -1;
+}
+
+static void ppc4xx_gpt_writew (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+#ifdef DEBUG_GPT
+ printf("%s: addr " TARGET_FMT_plx " val %08" PRIx32 "\n", __func__, addr,
+ value);
+#endif
+ /* XXX: generate a bus fault */
+}
+
+static int ppc4xx_gpt_compare (ppc4xx_gpt_t *gpt, int n)
+{
+ /* XXX: TODO */
+ return 0;
+}
+
+static void ppc4xx_gpt_set_output (ppc4xx_gpt_t *gpt, int n, int level)
+{
+ /* XXX: TODO */
+}
+
+static void ppc4xx_gpt_set_outputs (ppc4xx_gpt_t *gpt)
+{
+ uint32_t mask;
+ int i;
+
+ mask = 0x80000000;
+ for (i = 0; i < 5; i++) {
+ if (gpt->oe & mask) {
+ /* Output is enabled */
+ if (ppc4xx_gpt_compare(gpt, i)) {
+                /* Comparison succeeded */
+ ppc4xx_gpt_set_output(gpt, i, gpt->ol & mask);
+ } else {
+                /* Comparison failed */
+ ppc4xx_gpt_set_output(gpt, i, gpt->ol & mask ? 0 : 1);
+ }
+ }
+ mask = mask >> 1;
+ }
+}
+
+static void ppc4xx_gpt_set_irqs (ppc4xx_gpt_t *gpt)
+{
+ uint32_t mask;
+ int i;
+
+ mask = 0x00008000;
+ for (i = 0; i < 5; i++) {
+ if (gpt->is & gpt->im & mask)
+ qemu_irq_raise(gpt->irqs[i]);
+ else
+ qemu_irq_lower(gpt->irqs[i]);
+ mask = mask >> 1;
+ }
+}
+
+static void ppc4xx_gpt_compute_timer (ppc4xx_gpt_t *gpt)
+{
+ /* XXX: TODO */
+}
+
+static uint32_t ppc4xx_gpt_readl (void *opaque, hwaddr addr)
+{
+ ppc4xx_gpt_t *gpt;
+ uint32_t ret;
+ int idx;
+
+#ifdef DEBUG_GPT
+ printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
+#endif
+ gpt = opaque;
+ switch (addr) {
+ case 0x00:
+ /* Time base counter */
+ ret = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + gpt->tb_offset,
+ gpt->tb_freq, get_ticks_per_sec());
+ break;
+ case 0x10:
+ /* Output enable */
+ ret = gpt->oe;
+ break;
+ case 0x14:
+ /* Output level */
+ ret = gpt->ol;
+ break;
+ case 0x18:
+ /* Interrupt mask */
+ ret = gpt->im;
+ break;
+ case 0x1C:
+ case 0x20:
+ /* Interrupt status */
+ ret = gpt->is;
+ break;
+ case 0x24:
+ /* Interrupt enable */
+ ret = gpt->ie;
+ break;
+ case 0x80 ... 0x90:
+ /* Compare timer */
+ idx = (addr - 0x80) >> 2;
+ ret = gpt->comp[idx];
+ break;
+ case 0xC0 ... 0xD0:
+ /* Compare mask */
+ idx = (addr - 0xC0) >> 2;
+ ret = gpt->mask[idx];
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
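+
+/*
+ * Conversion note: the time base at offset 0x00 is derived from the
+ * virtual clock as ticks = muldiv64(ns + tb_offset, tb_freq,
+ * get_ticks_per_sec()), and the write path below stores the inverse into
+ * tb_offset. Worked example with an illustrative tb_freq of 1 MHz: a
+ * virtual-clock value of 2000000000 ns (tb_offset = 0) reads back as
+ * 2000000 ticks.
+ */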
+
+static void ppc4xx_gpt_writel (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+ ppc4xx_gpt_t *gpt;
+ int idx;
+
+#ifdef DEBUG_GPT
+ printf("%s: addr " TARGET_FMT_plx " val %08" PRIx32 "\n", __func__, addr,
+ value);
+#endif
+ gpt = opaque;
+ switch (addr) {
+ case 0x00:
+ /* Time base counter */
+ gpt->tb_offset = muldiv64(value, get_ticks_per_sec(), gpt->tb_freq)
+ - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ ppc4xx_gpt_compute_timer(gpt);
+ break;
+ case 0x10:
+ /* Output enable */
+ gpt->oe = value & 0xF8000000;
+ ppc4xx_gpt_set_outputs(gpt);
+ break;
+ case 0x14:
+ /* Output level */
+ gpt->ol = value & 0xF8000000;
+ ppc4xx_gpt_set_outputs(gpt);
+ break;
+ case 0x18:
+ /* Interrupt mask */
+ gpt->im = value & 0x0000F800;
+ break;
+ case 0x1C:
+ /* Interrupt status set */
+ gpt->is |= value & 0x0000F800;
+ ppc4xx_gpt_set_irqs(gpt);
+ break;
+ case 0x20:
+ /* Interrupt status clear */
+ gpt->is &= ~(value & 0x0000F800);
+ ppc4xx_gpt_set_irqs(gpt);
+ break;
+ case 0x24:
+ /* Interrupt enable */
+ gpt->ie = value & 0x0000F800;
+ ppc4xx_gpt_set_irqs(gpt);
+ break;
+ case 0x80 ... 0x90:
+ /* Compare timer */
+ idx = (addr - 0x80) >> 2;
+ gpt->comp[idx] = value & 0xF8000000;
+ ppc4xx_gpt_compute_timer(gpt);
+ break;
+ case 0xC0 ... 0xD0:
+ /* Compare mask */
+ idx = (addr - 0xC0) >> 2;
+ gpt->mask[idx] = value & 0xF8000000;
+ ppc4xx_gpt_compute_timer(gpt);
+ break;
+ }
+}
+
+static const MemoryRegionOps gpt_ops = {
+ .old_mmio = {
+ .read = { ppc4xx_gpt_readb, ppc4xx_gpt_readw, ppc4xx_gpt_readl, },
+ .write = { ppc4xx_gpt_writeb, ppc4xx_gpt_writew, ppc4xx_gpt_writel, },
+ },
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void ppc4xx_gpt_cb (void *opaque)
+{
+ ppc4xx_gpt_t *gpt;
+
+ gpt = opaque;
+ ppc4xx_gpt_set_irqs(gpt);
+ ppc4xx_gpt_set_outputs(gpt);
+ ppc4xx_gpt_compute_timer(gpt);
+}
+
+static void ppc4xx_gpt_reset (void *opaque)
+{
+ ppc4xx_gpt_t *gpt;
+ int i;
+
+ gpt = opaque;
+ timer_del(gpt->timer);
+ gpt->oe = 0x00000000;
+ gpt->ol = 0x00000000;
+ gpt->im = 0x00000000;
+ gpt->is = 0x00000000;
+ gpt->ie = 0x00000000;
+ for (i = 0; i < 5; i++) {
+ gpt->comp[i] = 0x00000000;
+ gpt->mask[i] = 0x00000000;
+ }
+}
+
+static void ppc4xx_gpt_init(hwaddr base, qemu_irq irqs[5])
+{
+ ppc4xx_gpt_t *gpt;
+ int i;
+
+ gpt = g_malloc0(sizeof(ppc4xx_gpt_t));
+ for (i = 0; i < 5; i++) {
+ gpt->irqs[i] = irqs[i];
+ }
+ gpt->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &ppc4xx_gpt_cb, gpt);
+#ifdef DEBUG_GPT
+ printf("%s: offset " TARGET_FMT_plx "\n", __func__, base);
+#endif
+ memory_region_init_io(&gpt->iomem, NULL, &gpt_ops, gpt, "gpt", 0x0d4);
+ memory_region_add_subregion(get_system_memory(), base, &gpt->iomem);
+ qemu_register_reset(ppc4xx_gpt_reset, gpt);
+}
+
+/*****************************************************************************/
+/* MAL */
+enum {
+ MAL0_CFG = 0x180,
+ MAL0_ESR = 0x181,
+ MAL0_IER = 0x182,
+ MAL0_TXCASR = 0x184,
+ MAL0_TXCARR = 0x185,
+ MAL0_TXEOBISR = 0x186,
+ MAL0_TXDEIR = 0x187,
+ MAL0_RXCASR = 0x190,
+ MAL0_RXCARR = 0x191,
+ MAL0_RXEOBISR = 0x192,
+ MAL0_RXDEIR = 0x193,
+ MAL0_TXCTP0R = 0x1A0,
+ MAL0_TXCTP1R = 0x1A1,
+ MAL0_TXCTP2R = 0x1A2,
+ MAL0_TXCTP3R = 0x1A3,
+ MAL0_RXCTP0R = 0x1C0,
+ MAL0_RXCTP1R = 0x1C1,
+ MAL0_RCBS0 = 0x1E0,
+ MAL0_RCBS1 = 0x1E1,
+};
+
+typedef struct ppc40x_mal_t ppc40x_mal_t;
+struct ppc40x_mal_t {
+ qemu_irq irqs[4];
+ uint32_t cfg;
+ uint32_t esr;
+ uint32_t ier;
+ uint32_t txcasr;
+ uint32_t txcarr;
+ uint32_t txeobisr;
+ uint32_t txdeir;
+ uint32_t rxcasr;
+ uint32_t rxcarr;
+ uint32_t rxeobisr;
+ uint32_t rxdeir;
+ uint32_t txctpr[4];
+ uint32_t rxctpr[2];
+ uint32_t rcbs[2];
+};
+
+static void ppc40x_mal_reset (void *opaque);
+
+static uint32_t dcr_read_mal (void *opaque, int dcrn)
+{
+ ppc40x_mal_t *mal;
+ uint32_t ret;
+
+ mal = opaque;
+ switch (dcrn) {
+ case MAL0_CFG:
+ ret = mal->cfg;
+ break;
+ case MAL0_ESR:
+ ret = mal->esr;
+ break;
+ case MAL0_IER:
+ ret = mal->ier;
+ break;
+ case MAL0_TXCASR:
+ ret = mal->txcasr;
+ break;
+ case MAL0_TXCARR:
+ ret = mal->txcarr;
+ break;
+ case MAL0_TXEOBISR:
+ ret = mal->txeobisr;
+ break;
+ case MAL0_TXDEIR:
+ ret = mal->txdeir;
+ break;
+ case MAL0_RXCASR:
+ ret = mal->rxcasr;
+ break;
+ case MAL0_RXCARR:
+ ret = mal->rxcarr;
+ break;
+ case MAL0_RXEOBISR:
+ ret = mal->rxeobisr;
+ break;
+ case MAL0_RXDEIR:
+ ret = mal->rxdeir;
+ break;
+ case MAL0_TXCTP0R:
+ ret = mal->txctpr[0];
+ break;
+ case MAL0_TXCTP1R:
+ ret = mal->txctpr[1];
+ break;
+ case MAL0_TXCTP2R:
+ ret = mal->txctpr[2];
+ break;
+ case MAL0_TXCTP3R:
+ ret = mal->txctpr[3];
+ break;
+ case MAL0_RXCTP0R:
+ ret = mal->rxctpr[0];
+ break;
+ case MAL0_RXCTP1R:
+ ret = mal->rxctpr[1];
+ break;
+ case MAL0_RCBS0:
+ ret = mal->rcbs[0];
+ break;
+ case MAL0_RCBS1:
+ ret = mal->rcbs[1];
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static void dcr_write_mal (void *opaque, int dcrn, uint32_t val)
+{
+ ppc40x_mal_t *mal;
+ int idx;
+
+ mal = opaque;
+ switch (dcrn) {
+ case MAL0_CFG:
+ if (val & 0x80000000)
+ ppc40x_mal_reset(mal);
+ mal->cfg = val & 0x00FFC087;
+ break;
+ case MAL0_ESR:
+ /* Read/clear */
+ mal->esr &= ~val;
+ break;
+ case MAL0_IER:
+ mal->ier = val & 0x0000001F;
+ break;
+ case MAL0_TXCASR:
+ mal->txcasr = val & 0xF0000000;
+ break;
+ case MAL0_TXCARR:
+ mal->txcarr = val & 0xF0000000;
+ break;
+ case MAL0_TXEOBISR:
+ /* Read/clear */
+ mal->txeobisr &= ~val;
+ break;
+ case MAL0_TXDEIR:
+ /* Read/clear */
+ mal->txdeir &= ~val;
+ break;
+ case MAL0_RXCASR:
+ mal->rxcasr = val & 0xC0000000;
+ break;
+ case MAL0_RXCARR:
+ mal->rxcarr = val & 0xC0000000;
+ break;
+ case MAL0_RXEOBISR:
+ /* Read/clear */
+ mal->rxeobisr &= ~val;
+ break;
+ case MAL0_RXDEIR:
+ /* Read/clear */
+ mal->rxdeir &= ~val;
+ break;
+ case MAL0_TXCTP0R:
+ idx = 0;
+ goto update_tx_ptr;
+ case MAL0_TXCTP1R:
+ idx = 1;
+ goto update_tx_ptr;
+ case MAL0_TXCTP2R:
+ idx = 2;
+ goto update_tx_ptr;
+ case MAL0_TXCTP3R:
+ idx = 3;
+ update_tx_ptr:
+ mal->txctpr[idx] = val;
+ break;
+ case MAL0_RXCTP0R:
+ idx = 0;
+ goto update_rx_ptr;
+ case MAL0_RXCTP1R:
+ idx = 1;
+ update_rx_ptr:
+ mal->rxctpr[idx] = val;
+ break;
+ case MAL0_RCBS0:
+ idx = 0;
+ goto update_rx_size;
+ case MAL0_RCBS1:
+ idx = 1;
+ update_rx_size:
+ mal->rcbs[idx] = val & 0x000000FF;
+ break;
+ }
+}
+
+static void ppc40x_mal_reset (void *opaque)
+{
+ ppc40x_mal_t *mal;
+
+ mal = opaque;
+ mal->cfg = 0x0007C000;
+ mal->esr = 0x00000000;
+ mal->ier = 0x00000000;
+ mal->rxcasr = 0x00000000;
+ mal->rxdeir = 0x00000000;
+ mal->rxeobisr = 0x00000000;
+ mal->txcasr = 0x00000000;
+ mal->txdeir = 0x00000000;
+ mal->txeobisr = 0x00000000;
+}
+
+static void ppc405_mal_init(CPUPPCState *env, qemu_irq irqs[4])
+{
+ ppc40x_mal_t *mal;
+ int i;
+
+ mal = g_malloc0(sizeof(ppc40x_mal_t));
+ for (i = 0; i < 4; i++)
+ mal->irqs[i] = irqs[i];
+ qemu_register_reset(&ppc40x_mal_reset, mal);
+ ppc_dcr_register(env, MAL0_CFG,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_ESR,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_IER,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_TXCASR,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_TXCARR,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_TXEOBISR,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_TXDEIR,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_RXCASR,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_RXCARR,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_RXEOBISR,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_RXDEIR,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_TXCTP0R,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_TXCTP1R,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_TXCTP2R,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_TXCTP3R,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_RXCTP0R,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_RXCTP1R,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_RCBS0,
+ mal, &dcr_read_mal, &dcr_write_mal);
+ ppc_dcr_register(env, MAL0_RCBS1,
+ mal, &dcr_read_mal, &dcr_write_mal);
+}
+
+/*****************************************************************************/
+/* SPR */
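+/* Debug-register initiated resets: the values written to DBSR below record
+ * the reset type in its most-recent-reset field (0x100 after a core reset,
+ * 0x200 after a chip reset). */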
+void ppc40x_core_reset(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong dbsr;
+
+ printf("Reset PowerPC core\n");
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
+ dbsr = env->spr[SPR_40x_DBSR];
+ dbsr &= ~0x00000300;
+ dbsr |= 0x00000100;
+ env->spr[SPR_40x_DBSR] = dbsr;
+}
+
+void ppc40x_chip_reset(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong dbsr;
+
+ printf("Reset PowerPC chip\n");
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
+ /* XXX: TODO reset all internal peripherals */
+ dbsr = env->spr[SPR_40x_DBSR];
+ dbsr &= ~0x00000300;
+ dbsr |= 0x00000200;
+ env->spr[SPR_40x_DBSR] = dbsr;
+}
+
+void ppc40x_system_reset(PowerPCCPU *cpu)
+{
+ printf("Reset PowerPC system\n");
+ qemu_system_reset_request();
+}
+
+void store_40x_dbcr0 (CPUPPCState *env, uint32_t val)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ switch ((val >> 28) & 0x3) {
+ case 0x0:
+ /* No action */
+ break;
+ case 0x1:
+ /* Core reset */
+ ppc40x_core_reset(cpu);
+ break;
+ case 0x2:
+ /* Chip reset */
+ ppc40x_chip_reset(cpu);
+ break;
+ case 0x3:
+ /* System reset */
+ ppc40x_system_reset(cpu);
+ break;
+ }
+}
+
+/*****************************************************************************/
+/* PowerPC 405CR */
+enum {
+ PPC405CR_CPC0_PLLMR = 0x0B0,
+ PPC405CR_CPC0_CR0 = 0x0B1,
+ PPC405CR_CPC0_CR1 = 0x0B2,
+ PPC405CR_CPC0_PSR = 0x0B4,
+ PPC405CR_CPC0_JTAGID = 0x0B5,
+ PPC405CR_CPC0_ER = 0x0B9,
+ PPC405CR_CPC0_FR = 0x0BA,
+ PPC405CR_CPC0_SR = 0x0BB,
+};
+
+enum {
+ PPC405CR_CPU_CLK = 0,
+ PPC405CR_TMR_CLK = 1,
+ PPC405CR_PLB_CLK = 2,
+ PPC405CR_SDRAM_CLK = 3,
+ PPC405CR_OPB_CLK = 4,
+ PPC405CR_EXT_CLK = 5,
+ PPC405CR_UART_CLK = 6,
+ PPC405CR_CLK_NB = 7,
+};
+
+typedef struct ppc405cr_cpc_t ppc405cr_cpc_t;
+struct ppc405cr_cpc_t {
+ clk_setup_t clk_setup[PPC405CR_CLK_NB];
+ uint32_t sysclk;
+ uint32_t psr;
+ uint32_t cr0;
+ uint32_t cr1;
+ uint32_t jtagid;
+ uint32_t pllmr;
+ uint32_t er;
+ uint32_t fr;
+};
+
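+/* Derive the SoC clocks from SYSCLK and the PLLMR divider fields decoded
+ * below (CBDV, FBDV, FWDVA, ...).  Purely illustrative numbers: with
+ * sysclk = 33 MHz and decoded dividers CBDV = 1, FBDV = 4, FWDVA = 4,
+ * M = 1 * 4 * 4 = 16, so VCO = 528 MHz (inside the 400-800 MHz lock range)
+ * and PLL_out = VCO / FWDVA = 132 MHz, which becomes the CPU clock. */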
+static void ppc405cr_clk_setup (ppc405cr_cpc_t *cpc)
+{
+ uint64_t VCO_out, PLL_out;
+ uint32_t CPU_clk, TMR_clk, SDRAM_clk, PLB_clk, OPB_clk, EXT_clk, UART_clk;
+ int M, D0, D1, D2;
+
+ D0 = ((cpc->pllmr >> 26) & 0x3) + 1; /* CBDV */
+ if (cpc->pllmr & 0x80000000) {
+ D1 = (((cpc->pllmr >> 20) - 1) & 0xF) + 1; /* FBDV */
+ D2 = 8 - ((cpc->pllmr >> 16) & 0x7); /* FWDVA */
+ M = D0 * D1 * D2;
+ VCO_out = cpc->sysclk * M;
+ if (VCO_out < 400000000 || VCO_out > 800000000) {
+ /* PLL cannot lock */
+ cpc->pllmr &= ~0x80000000;
+ goto bypass_pll;
+ }
+ PLL_out = VCO_out / D2;
+ } else {
+ /* Bypass PLL */
+ bypass_pll:
+ M = D0;
+ PLL_out = cpc->sysclk * M;
+ }
+ CPU_clk = PLL_out;
+ if (cpc->cr1 & 0x00800000)
+ TMR_clk = cpc->sysclk; /* Should have a separate clock */
+ else
+ TMR_clk = CPU_clk;
+ PLB_clk = CPU_clk / D0;
+ SDRAM_clk = PLB_clk;
+ D0 = ((cpc->pllmr >> 10) & 0x3) + 1;
+ OPB_clk = PLB_clk / D0;
+ D0 = ((cpc->pllmr >> 24) & 0x3) + 2;
+ EXT_clk = PLB_clk / D0;
+ D0 = ((cpc->cr0 >> 1) & 0x1F) + 1;
+ UART_clk = CPU_clk / D0;
+ /* Setup CPU clocks */
+ clk_setup(&cpc->clk_setup[PPC405CR_CPU_CLK], CPU_clk);
+ /* Setup time-base clock */
+ clk_setup(&cpc->clk_setup[PPC405CR_TMR_CLK], TMR_clk);
+ /* Setup PLB clock */
+ clk_setup(&cpc->clk_setup[PPC405CR_PLB_CLK], PLB_clk);
+ /* Setup SDRAM clock */
+ clk_setup(&cpc->clk_setup[PPC405CR_SDRAM_CLK], SDRAM_clk);
+ /* Setup OPB clock */
+ clk_setup(&cpc->clk_setup[PPC405CR_OPB_CLK], OPB_clk);
+ /* Setup external clock */
+ clk_setup(&cpc->clk_setup[PPC405CR_EXT_CLK], EXT_clk);
+ /* Setup UART clock */
+ clk_setup(&cpc->clk_setup[PPC405CR_UART_CLK], UART_clk);
+}
+
+static uint32_t dcr_read_crcpc (void *opaque, int dcrn)
+{
+ ppc405cr_cpc_t *cpc;
+ uint32_t ret;
+
+ cpc = opaque;
+ switch (dcrn) {
+ case PPC405CR_CPC0_PLLMR:
+ ret = cpc->pllmr;
+ break;
+ case PPC405CR_CPC0_CR0:
+ ret = cpc->cr0;
+ break;
+ case PPC405CR_CPC0_CR1:
+ ret = cpc->cr1;
+ break;
+ case PPC405CR_CPC0_PSR:
+ ret = cpc->psr;
+ break;
+ case PPC405CR_CPC0_JTAGID:
+ ret = cpc->jtagid;
+ break;
+ case PPC405CR_CPC0_ER:
+ ret = cpc->er;
+ break;
+ case PPC405CR_CPC0_FR:
+ ret = cpc->fr;
+ break;
+ case PPC405CR_CPC0_SR:
+ ret = ~(cpc->er | cpc->fr) & 0xFFFF0000;
+ break;
+ default:
+ /* Avoid gcc warning */
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static void dcr_write_crcpc (void *opaque, int dcrn, uint32_t val)
+{
+ ppc405cr_cpc_t *cpc;
+
+ cpc = opaque;
+ switch (dcrn) {
+ case PPC405CR_CPC0_PLLMR:
+ cpc->pllmr = val & 0xFFF77C3F;
+ break;
+ case PPC405CR_CPC0_CR0:
+ cpc->cr0 = val & 0x0FFFFFFE;
+ break;
+ case PPC405CR_CPC0_CR1:
+ cpc->cr1 = val & 0x00800000;
+ break;
+ case PPC405CR_CPC0_PSR:
+ /* Read-only */
+ break;
+ case PPC405CR_CPC0_JTAGID:
+ /* Read-only */
+ break;
+ case PPC405CR_CPC0_ER:
+ cpc->er = val & 0xBFFC0000;
+ break;
+ case PPC405CR_CPC0_FR:
+ cpc->fr = val & 0xBFFC0000;
+ break;
+ case PPC405CR_CPC0_SR:
+ /* Read-only */
+ break;
+ }
+}
+
+static void ppc405cr_cpc_reset (void *opaque)
+{
+ ppc405cr_cpc_t *cpc;
+ int D;
+
+ cpc = opaque;
+ /* Compute PLLMR value from PSR settings */
+ cpc->pllmr = 0x80000000;
+ /* PFWD */
+ switch ((cpc->psr >> 30) & 3) {
+ case 0:
+ /* Bypass */
+ cpc->pllmr &= ~0x80000000;
+ break;
+ case 1:
+ /* Divide by 3 */
+ cpc->pllmr |= 5 << 16;
+ break;
+ case 2:
+ /* Divide by 4 */
+ cpc->pllmr |= 4 << 16;
+ break;
+ case 3:
+ /* Divide by 6 */
+ cpc->pllmr |= 2 << 16;
+ break;
+ }
+ /* PFBD */
+ D = (cpc->psr >> 28) & 3;
+ cpc->pllmr |= (D + 1) << 20;
+ /* PT */
+ D = (cpc->psr >> 25) & 7;
+ switch (D) {
+ case 0x2:
+ cpc->pllmr |= 0x13;
+ break;
+ case 0x4:
+ cpc->pllmr |= 0x15;
+ break;
+ case 0x5:
+ cpc->pllmr |= 0x16;
+ break;
+ default:
+ break;
+ }
+ /* PDC */
+ D = (cpc->psr >> 23) & 3;
+ cpc->pllmr |= D << 26;
+ /* ODP */
+ D = (cpc->psr >> 21) & 3;
+ cpc->pllmr |= D << 10;
+ /* EBPD */
+ D = (cpc->psr >> 17) & 3;
+ cpc->pllmr |= D << 24;
+ cpc->cr0 = 0x0000003C;
+ cpc->cr1 = 0x2B0D8800;
+ cpc->er = 0x00000000;
+ cpc->fr = 0x00000000;
+ ppc405cr_clk_setup(cpc);
+}
+
+static void ppc405cr_clk_init (ppc405cr_cpc_t *cpc)
+{
+ int D;
+
+ /* XXX: this should be read from IO pins */
+ cpc->psr = 0x00000000; /* 8-bit ROM */
+ /* PFWD */
+ D = 0x2; /* Divide by 4 */
+ cpc->psr |= D << 30;
+ /* PFBD */
+ D = 0x1; /* Divide by 2 */
+ cpc->psr |= D << 28;
+ /* PDC */
+ D = 0x1; /* Divide by 2 */
+ cpc->psr |= D << 23;
+ /* PT */
+ D = 0x5; /* M = 16 */
+ cpc->psr |= D << 25;
+ /* ODP */
+ D = 0x1; /* Divide by 2 */
+ cpc->psr |= D << 21;
+ /* EBDP */
+ D = 0x2; /* Divide by 4 */
+ cpc->psr |= D << 17;
+}
+
+static void ppc405cr_cpc_init (CPUPPCState *env, clk_setup_t clk_setup[7],
+ uint32_t sysclk)
+{
+ ppc405cr_cpc_t *cpc;
+
+ cpc = g_malloc0(sizeof(ppc405cr_cpc_t));
+ memcpy(cpc->clk_setup, clk_setup,
+ PPC405CR_CLK_NB * sizeof(clk_setup_t));
+ cpc->sysclk = sysclk;
+ cpc->jtagid = 0x42051049;
+ ppc_dcr_register(env, PPC405CR_CPC0_PSR, cpc,
+ &dcr_read_crcpc, &dcr_write_crcpc);
+ ppc_dcr_register(env, PPC405CR_CPC0_CR0, cpc,
+ &dcr_read_crcpc, &dcr_write_crcpc);
+ ppc_dcr_register(env, PPC405CR_CPC0_CR1, cpc,
+ &dcr_read_crcpc, &dcr_write_crcpc);
+ ppc_dcr_register(env, PPC405CR_CPC0_JTAGID, cpc,
+ &dcr_read_crcpc, &dcr_write_crcpc);
+ ppc_dcr_register(env, PPC405CR_CPC0_PLLMR, cpc,
+ &dcr_read_crcpc, &dcr_write_crcpc);
+ ppc_dcr_register(env, PPC405CR_CPC0_ER, cpc,
+ &dcr_read_crcpc, &dcr_write_crcpc);
+ ppc_dcr_register(env, PPC405CR_CPC0_FR, cpc,
+ &dcr_read_crcpc, &dcr_write_crcpc);
+ ppc_dcr_register(env, PPC405CR_CPC0_SR, cpc,
+ &dcr_read_crcpc, &dcr_write_crcpc);
+ ppc405cr_clk_init(cpc);
+ qemu_register_reset(ppc405cr_cpc_reset, cpc);
+}
+
+CPUPPCState *ppc405cr_init(MemoryRegion *address_space_mem,
+ MemoryRegion ram_memories[4],
+ hwaddr ram_bases[4],
+ hwaddr ram_sizes[4],
+ uint32_t sysclk, qemu_irq **picp,
+ int do_init)
+{
+ clk_setup_t clk_setup[PPC405CR_CLK_NB];
+ qemu_irq dma_irqs[4];
+ PowerPCCPU *cpu;
+ CPUPPCState *env;
+ qemu_irq *pic, *irqs;
+
+ memset(clk_setup, 0, sizeof(clk_setup));
+ cpu = ppc4xx_init("405cr", &clk_setup[PPC405CR_CPU_CLK],
+ &clk_setup[PPC405CR_TMR_CLK], sysclk);
+ env = &cpu->env;
+ /* Memory mapped devices registers */
+ /* PLB arbiter */
+ ppc4xx_plb_init(env);
+ /* PLB to OPB bridge */
+ ppc4xx_pob_init(env);
+ /* OPB arbiter */
+ ppc4xx_opba_init(0xef600600);
+ /* Universal interrupt controller */
+ irqs = g_malloc0(sizeof(qemu_irq) * PPCUIC_OUTPUT_NB);
+ irqs[PPCUIC_OUTPUT_INT] =
+ ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT];
+ irqs[PPCUIC_OUTPUT_CINT] =
+ ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT];
+ pic = ppcuic_init(env, irqs, 0x0C0, 0, 1);
+ *picp = pic;
+ /* SDRAM controller */
+ ppc4xx_sdram_init(env, pic[14], 1, ram_memories,
+ ram_bases, ram_sizes, do_init);
+ /* External bus controller */
+ ppc405_ebc_init(env);
+ /* DMA controller */
+ dma_irqs[0] = pic[26];
+ dma_irqs[1] = pic[25];
+ dma_irqs[2] = pic[24];
+ dma_irqs[3] = pic[23];
+ ppc405_dma_init(env, dma_irqs);
+ /* Serial ports */
+ if (serial_hds[0] != NULL) {
+ serial_mm_init(address_space_mem, 0xef600300, 0, pic[0],
+ PPC_SERIAL_MM_BAUDBASE, serial_hds[0],
+ DEVICE_BIG_ENDIAN);
+ }
+ if (serial_hds[1] != NULL) {
+ serial_mm_init(address_space_mem, 0xef600400, 0, pic[1],
+ PPC_SERIAL_MM_BAUDBASE, serial_hds[1],
+ DEVICE_BIG_ENDIAN);
+ }
+ /* IIC controller */
+ ppc405_i2c_init(0xef600500, pic[2]);
+ /* GPIO */
+ ppc405_gpio_init(0xef600700);
+ /* CPU control */
+ ppc405cr_cpc_init(env, clk_setup, sysclk);
+
+ return env;
+}
+
+/*****************************************************************************/
+/* PowerPC 405EP */
+/* CPU control */
+enum {
+ PPC405EP_CPC0_PLLMR0 = 0x0F0,
+ PPC405EP_CPC0_BOOT = 0x0F1,
+ PPC405EP_CPC0_EPCTL = 0x0F3,
+ PPC405EP_CPC0_PLLMR1 = 0x0F4,
+ PPC405EP_CPC0_UCR = 0x0F5,
+ PPC405EP_CPC0_SRR = 0x0F6,
+ PPC405EP_CPC0_JTAGID = 0x0F7,
+ PPC405EP_CPC0_PCI = 0x0F9,
+#if 0
+ PPC405EP_CPC0_ER = xxx,
+ PPC405EP_CPC0_FR = xxx,
+ PPC405EP_CPC0_SR = xxx,
+#endif
+};
+
+enum {
+ PPC405EP_CPU_CLK = 0,
+ PPC405EP_PLB_CLK = 1,
+ PPC405EP_OPB_CLK = 2,
+ PPC405EP_EBC_CLK = 3,
+ PPC405EP_MAL_CLK = 4,
+ PPC405EP_PCI_CLK = 5,
+ PPC405EP_UART0_CLK = 6,
+ PPC405EP_UART1_CLK = 7,
+ PPC405EP_CLK_NB = 8,
+};
+
+typedef struct ppc405ep_cpc_t ppc405ep_cpc_t;
+struct ppc405ep_cpc_t {
+ uint32_t sysclk;
+ clk_setup_t clk_setup[PPC405EP_CLK_NB];
+ uint32_t boot;
+ uint32_t epctl;
+ uint32_t pllmr[2];
+ uint32_t ucr;
+ uint32_t srr;
+ uint32_t jtagid;
+ uint32_t pci;
+ /* Clock and power management */
+ uint32_t er;
+ uint32_t fr;
+ uint32_t sr;
+};
+
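+/* Recompute all derived clocks from PLLMR0/PLLMR1 and UCR.  Purely
+ * illustrative numbers: with sysclk = 33 MHz, FBMUL = 8 and FWDA = 2,
+ * VCO = 33 MHz * 8 * 2 = 528 MHz (inside the 500-1000 MHz range) and
+ * PLL_out = VCO / FWDA = 264 MHz; the CPU, PLB, OPB, ... clocks are then
+ * obtained by successively dividing PLL_out by the CCDV, CBDV, OPDV, ...
+ * fields of PLLMR0. */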
+static void ppc405ep_compute_clocks (ppc405ep_cpc_t *cpc)
+{
+ uint32_t CPU_clk, PLB_clk, OPB_clk, EBC_clk, MAL_clk, PCI_clk;
+ uint32_t UART0_clk, UART1_clk;
+ uint64_t VCO_out, PLL_out;
+ int M, D;
+
+ VCO_out = 0;
+ if ((cpc->pllmr[1] & 0x80000000) && !(cpc->pllmr[1] & 0x40000000)) {
+ M = (((cpc->pllmr[1] >> 20) - 1) & 0xF) + 1; /* FBMUL */
+#ifdef DEBUG_CLOCKS_LL
+ printf("FBMUL %01" PRIx32 " %d\n", (cpc->pllmr[1] >> 20) & 0xF, M);
+#endif
+ D = 8 - ((cpc->pllmr[1] >> 16) & 0x7); /* FWDA */
+#ifdef DEBUG_CLOCKS_LL
+ printf("FWDA %01" PRIx32 " %d\n", (cpc->pllmr[1] >> 16) & 0x7, D);
+#endif
+ VCO_out = cpc->sysclk * M * D;
+ if (VCO_out < 500000000UL || VCO_out > 1000000000UL) {
+ /* Error - unlock the PLL */
+ printf("VCO out of range %" PRIu64 "\n", VCO_out);
+#if 0
+ cpc->pllmr[1] &= ~0x80000000;
+ goto pll_bypass;
+#endif
+ }
+ PLL_out = VCO_out / D;
+ /* Pretend the PLL is locked */
+ cpc->boot |= 0x00000001;
+ } else {
+#if 0
+ pll_bypass:
+#endif
+ PLL_out = cpc->sysclk;
+ if (cpc->pllmr[1] & 0x40000000) {
+ /* Pretend the PLL is not locked */
+ cpc->boot &= ~0x00000001;
+ }
+ }
+ /* Now, compute all other clocks */
+ D = ((cpc->pllmr[0] >> 20) & 0x3) + 1; /* CCDV */
+#ifdef DEBUG_CLOCKS_LL
+ printf("CCDV %01" PRIx32 " %d\n", (cpc->pllmr[0] >> 20) & 0x3, D);
+#endif
+ CPU_clk = PLL_out / D;
+ D = ((cpc->pllmr[0] >> 16) & 0x3) + 1; /* CBDV */
+#ifdef DEBUG_CLOCKS_LL
+ printf("CBDV %01" PRIx32 " %d\n", (cpc->pllmr[0] >> 16) & 0x3, D);
+#endif
+ PLB_clk = CPU_clk / D;
+ D = ((cpc->pllmr[0] >> 12) & 0x3) + 1; /* OPDV */
+#ifdef DEBUG_CLOCKS_LL
+ printf("OPDV %01" PRIx32 " %d\n", (cpc->pllmr[0] >> 12) & 0x3, D);
+#endif
+ OPB_clk = PLB_clk / D;
+ D = ((cpc->pllmr[0] >> 8) & 0x3) + 2; /* EPDV */
+#ifdef DEBUG_CLOCKS_LL
+ printf("EPDV %01" PRIx32 " %d\n", (cpc->pllmr[0] >> 8) & 0x3, D);
+#endif
+ EBC_clk = PLB_clk / D;
+ D = ((cpc->pllmr[0] >> 4) & 0x3) + 1; /* MPDV */
+#ifdef DEBUG_CLOCKS_LL
+ printf("MPDV %01" PRIx32 " %d\n", (cpc->pllmr[0] >> 4) & 0x3, D);
+#endif
+ MAL_clk = PLB_clk / D;
+ D = (cpc->pllmr[0] & 0x3) + 1; /* PPDV */
+#ifdef DEBUG_CLOCKS_LL
+ printf("PPDV %01" PRIx32 " %d\n", cpc->pllmr[0] & 0x3, D);
+#endif
+ PCI_clk = PLB_clk / D;
+ D = ((cpc->ucr - 1) & 0x7F) + 1; /* U0DIV */
+#ifdef DEBUG_CLOCKS_LL
+ printf("U0DIV %01" PRIx32 " %d\n", cpc->ucr & 0x7F, D);
+#endif
+ UART0_clk = PLL_out / D;
+ D = (((cpc->ucr >> 8) - 1) & 0x7F) + 1; /* U1DIV */
+#ifdef DEBUG_CLOCKS_LL
+ printf("U1DIV %01" PRIx32 " %d\n", (cpc->ucr >> 8) & 0x7F, D);
+#endif
+ UART1_clk = PLL_out / D;
+#ifdef DEBUG_CLOCKS
+ printf("Setup PPC405EP clocks - sysclk %" PRIu32 " VCO %" PRIu64
+ " PLL out %" PRIu64 " Hz\n", cpc->sysclk, VCO_out, PLL_out);
+ printf("CPU %" PRIu32 " PLB %" PRIu32 " OPB %" PRIu32 " EBC %" PRIu32
+ " MAL %" PRIu32 " PCI %" PRIu32 " UART0 %" PRIu32
+ " UART1 %" PRIu32 "\n",
+ CPU_clk, PLB_clk, OPB_clk, EBC_clk, MAL_clk, PCI_clk,
+ UART0_clk, UART1_clk);
+#endif
+ /* Setup CPU clocks */
+ clk_setup(&cpc->clk_setup[PPC405EP_CPU_CLK], CPU_clk);
+ /* Setup PLB clock */
+ clk_setup(&cpc->clk_setup[PPC405EP_PLB_CLK], PLB_clk);
+ /* Setup OPB clock */
+ clk_setup(&cpc->clk_setup[PPC405EP_OPB_CLK], OPB_clk);
+ /* Setup external clock */
+ clk_setup(&cpc->clk_setup[PPC405EP_EBC_CLK], EBC_clk);
+ /* Setup MAL clock */
+ clk_setup(&cpc->clk_setup[PPC405EP_MAL_CLK], MAL_clk);
+ /* Setup PCI clock */
+ clk_setup(&cpc->clk_setup[PPC405EP_PCI_CLK], PCI_clk);
+ /* Setup UART0 clock */
+ clk_setup(&cpc->clk_setup[PPC405EP_UART0_CLK], UART0_clk);
+ /* Setup UART1 clock */
+ clk_setup(&cpc->clk_setup[PPC405EP_UART1_CLK], UART1_clk);
+}
+
+static uint32_t dcr_read_epcpc (void *opaque, int dcrn)
+{
+ ppc405ep_cpc_t *cpc;
+ uint32_t ret;
+
+ cpc = opaque;
+ switch (dcrn) {
+ case PPC405EP_CPC0_BOOT:
+ ret = cpc->boot;
+ break;
+ case PPC405EP_CPC0_EPCTL:
+ ret = cpc->epctl;
+ break;
+ case PPC405EP_CPC0_PLLMR0:
+ ret = cpc->pllmr[0];
+ break;
+ case PPC405EP_CPC0_PLLMR1:
+ ret = cpc->pllmr[1];
+ break;
+ case PPC405EP_CPC0_UCR:
+ ret = cpc->ucr;
+ break;
+ case PPC405EP_CPC0_SRR:
+ ret = cpc->srr;
+ break;
+ case PPC405EP_CPC0_JTAGID:
+ ret = cpc->jtagid;
+ break;
+ case PPC405EP_CPC0_PCI:
+ ret = cpc->pci;
+ break;
+ default:
+ /* Avoid gcc warning */
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+static void dcr_write_epcpc (void *opaque, int dcrn, uint32_t val)
+{
+ ppc405ep_cpc_t *cpc;
+
+ cpc = opaque;
+ switch (dcrn) {
+ case PPC405EP_CPC0_BOOT:
+ /* Read-only register */
+ break;
+ case PPC405EP_CPC0_EPCTL:
+ /* Don't care for now */
+ cpc->epctl = val & 0xC00000F3;
+ break;
+ case PPC405EP_CPC0_PLLMR0:
+ cpc->pllmr[0] = val & 0x00633333;
+ ppc405ep_compute_clocks(cpc);
+ break;
+ case PPC405EP_CPC0_PLLMR1:
+ cpc->pllmr[1] = val & 0xC0F73FFF;
+ ppc405ep_compute_clocks(cpc);
+ break;
+ case PPC405EP_CPC0_UCR:
+ /* UART control - don't care for now */
+ cpc->ucr = val & 0x003F7F7F;
+ break;
+ case PPC405EP_CPC0_SRR:
+ cpc->srr = val;
+ break;
+ case PPC405EP_CPC0_JTAGID:
+ /* Read-only */
+ break;
+ case PPC405EP_CPC0_PCI:
+ cpc->pci = val;
+ break;
+ }
+}
+
+static void ppc405ep_cpc_reset (void *opaque)
+{
+ ppc405ep_cpc_t *cpc = opaque;
+
+ cpc->boot = 0x00000010; /* Boot from PCI - IIC EEPROM disabled */
+ cpc->epctl = 0x00000000;
+ cpc->pllmr[0] = 0x00011010;
+ cpc->pllmr[1] = 0x40000000;
+ cpc->ucr = 0x00000000;
+ cpc->srr = 0x00040000;
+ cpc->pci = 0x00000000;
+ cpc->er = 0x00000000;
+ cpc->fr = 0x00000000;
+ cpc->sr = 0x00000000;
+ ppc405ep_compute_clocks(cpc);
+}
+
+/* XXX: sysclk should be between 25 and 100 MHz */
+static void ppc405ep_cpc_init (CPUPPCState *env, clk_setup_t clk_setup[8],
+ uint32_t sysclk)
+{
+ ppc405ep_cpc_t *cpc;
+
+ cpc = g_malloc0(sizeof(ppc405ep_cpc_t));
+ memcpy(cpc->clk_setup, clk_setup,
+ PPC405EP_CLK_NB * sizeof(clk_setup_t));
+ cpc->jtagid = 0x20267049;
+ cpc->sysclk = sysclk;
+ qemu_register_reset(&ppc405ep_cpc_reset, cpc);
+ ppc_dcr_register(env, PPC405EP_CPC0_BOOT, cpc,
+ &dcr_read_epcpc, &dcr_write_epcpc);
+ ppc_dcr_register(env, PPC405EP_CPC0_EPCTL, cpc,
+ &dcr_read_epcpc, &dcr_write_epcpc);
+ ppc_dcr_register(env, PPC405EP_CPC0_PLLMR0, cpc,
+ &dcr_read_epcpc, &dcr_write_epcpc);
+ ppc_dcr_register(env, PPC405EP_CPC0_PLLMR1, cpc,
+ &dcr_read_epcpc, &dcr_write_epcpc);
+ ppc_dcr_register(env, PPC405EP_CPC0_UCR, cpc,
+ &dcr_read_epcpc, &dcr_write_epcpc);
+ ppc_dcr_register(env, PPC405EP_CPC0_SRR, cpc,
+ &dcr_read_epcpc, &dcr_write_epcpc);
+ ppc_dcr_register(env, PPC405EP_CPC0_JTAGID, cpc,
+ &dcr_read_epcpc, &dcr_write_epcpc);
+ ppc_dcr_register(env, PPC405EP_CPC0_PCI, cpc,
+ &dcr_read_epcpc, &dcr_write_epcpc);
+#if 0
+ ppc_dcr_register(env, PPC405EP_CPC0_ER, cpc,
+ &dcr_read_epcpc, &dcr_write_epcpc);
+ ppc_dcr_register(env, PPC405EP_CPC0_FR, cpc,
+ &dcr_read_epcpc, &dcr_write_epcpc);
+ ppc_dcr_register(env, PPC405EP_CPC0_SR, cpc,
+ &dcr_read_epcpc, &dcr_write_epcpc);
+#endif
+}
+
+CPUPPCState *ppc405ep_init(MemoryRegion *address_space_mem,
+ MemoryRegion ram_memories[2],
+ hwaddr ram_bases[2],
+ hwaddr ram_sizes[2],
+ uint32_t sysclk, qemu_irq **picp,
+ int do_init)
+{
+ clk_setup_t clk_setup[PPC405EP_CLK_NB], tlb_clk_setup;
+ qemu_irq dma_irqs[4], gpt_irqs[5], mal_irqs[4];
+ PowerPCCPU *cpu;
+ CPUPPCState *env;
+ qemu_irq *pic, *irqs;
+
+ memset(clk_setup, 0, sizeof(clk_setup));
+ /* init CPUs */
+ cpu = ppc4xx_init("405ep", &clk_setup[PPC405EP_CPU_CLK],
+ &tlb_clk_setup, sysclk);
+ env = &cpu->env;
+ clk_setup[PPC405EP_CPU_CLK].cb = tlb_clk_setup.cb;
+ clk_setup[PPC405EP_CPU_CLK].opaque = tlb_clk_setup.opaque;
+ /* Internal devices init */
+ /* Memory mapped devices registers */
+ /* PLB arbiter */
+ ppc4xx_plb_init(env);
+ /* PLB to OPB bridge */
+ ppc4xx_pob_init(env);
+ /* OPB arbiter */
+ ppc4xx_opba_init(0xef600600);
+ /* Initialize timers */
+ ppc_booke_timers_init(cpu, sysclk, 0);
+ /* Universal interrupt controller */
+ irqs = g_malloc0(sizeof(qemu_irq) * PPCUIC_OUTPUT_NB);
+ irqs[PPCUIC_OUTPUT_INT] =
+ ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT];
+ irqs[PPCUIC_OUTPUT_CINT] =
+ ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT];
+ pic = ppcuic_init(env, irqs, 0x0C0, 0, 1);
+ *picp = pic;
+ /* SDRAM controller */
+ /* XXX 405EP has no ECC interrupt */
+ ppc4xx_sdram_init(env, pic[17], 2, ram_memories,
+ ram_bases, ram_sizes, do_init);
+ /* External bus controller */
+ ppc405_ebc_init(env);
+ /* DMA controller */
+ dma_irqs[0] = pic[5];
+ dma_irqs[1] = pic[6];
+ dma_irqs[2] = pic[7];
+ dma_irqs[3] = pic[8];
+ ppc405_dma_init(env, dma_irqs);
+ /* IIC controller */
+ ppc405_i2c_init(0xef600500, pic[2]);
+ /* GPIO */
+ ppc405_gpio_init(0xef600700);
+ /* Serial ports */
+ if (serial_hds[0] != NULL) {
+ serial_mm_init(address_space_mem, 0xef600300, 0, pic[0],
+ PPC_SERIAL_MM_BAUDBASE, serial_hds[0],
+ DEVICE_BIG_ENDIAN);
+ }
+ if (serial_hds[1] != NULL) {
+ serial_mm_init(address_space_mem, 0xef600400, 0, pic[1],
+ PPC_SERIAL_MM_BAUDBASE, serial_hds[1],
+ DEVICE_BIG_ENDIAN);
+ }
+ /* OCM */
+ ppc405_ocm_init(env);
+ /* GPT */
+ gpt_irqs[0] = pic[19];
+ gpt_irqs[1] = pic[20];
+ gpt_irqs[2] = pic[21];
+ gpt_irqs[3] = pic[22];
+ gpt_irqs[4] = pic[23];
+ ppc4xx_gpt_init(0xef600000, gpt_irqs);
+ /* PCI */
+ /* Uses pic[3], pic[16], pic[18] */
+ /* MAL */
+ mal_irqs[0] = pic[11];
+ mal_irqs[1] = pic[12];
+ mal_irqs[2] = pic[13];
+ mal_irqs[3] = pic[14];
+ ppc405_mal_init(env, mal_irqs);
+ /* Ethernet */
+ /* Uses pic[9], pic[15], pic[17] */
+ /* CPU control */
+ ppc405ep_cpc_init(env, clk_setup, sysclk);
+
+ return env;
+}
diff --git a/src/hw/ppc/ppc440_bamboo.c b/src/hw/ppc/ppc440_bamboo.c
new file mode 100644
index 0000000..b66c113
--- /dev/null
+++ b/src/hw/ppc/ppc440_bamboo.c
@@ -0,0 +1,299 @@
+/*
+ * QEMU PowerPC 440 Bamboo board emulation
+ *
+ * Copyright 2007 IBM Corporation.
+ * Authors:
+ * Jerone Young <jyoung5@us.ibm.com>
+ * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ * Hollis Blanchard <hollisb@us.ibm.com>
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ */
+
+#include "config.h"
+#include "qemu-common.h"
+#include "net/net.h"
+#include "hw/hw.h"
+#include "hw/pci/pci.h"
+#include "hw/boards.h"
+#include "sysemu/kvm.h"
+#include "kvm_ppc.h"
+#include "sysemu/device_tree.h"
+#include "hw/loader.h"
+#include "elf.h"
+#include "exec/address-spaces.h"
+#include "hw/char/serial.h"
+#include "hw/ppc/ppc.h"
+#include "ppc405.h"
+#include "sysemu/sysemu.h"
+#include "hw/sysbus.h"
+
+#define BINARY_DEVICE_TREE_FILE "bamboo.dtb"
+
+/* from U-Boot */
+#define KERNEL_ADDR 0x1000000
+#define FDT_ADDR 0x1800000
+#define RAMDISK_ADDR 0x1900000
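+/* These load addresses follow the layout U-Boot uses on Bamboo; the reset
+ * hook below hands FDT_ADDR to the kernel in GPR3. */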
+
+#define PPC440EP_PCI_CONFIG 0xeec00000
+#define PPC440EP_PCI_INTACK 0xeed00000
+#define PPC440EP_PCI_SPECIAL 0xeed00000
+#define PPC440EP_PCI_REGS 0xef400000
+#define PPC440EP_PCI_IO 0xe8000000
+#define PPC440EP_PCI_IOLEN 0x00010000
+
+#define PPC440EP_SDRAM_NR_BANKS 4
+
+static const unsigned int ppc440ep_sdram_bank_sizes[] = {
+ 256<<20, 128<<20, 64<<20, 32<<20, 16<<20, 8<<20, 0
+};
+
+static hwaddr entry;
+
+static int bamboo_load_device_tree(hwaddr addr,
+ uint32_t ramsize,
+ hwaddr initrd_base,
+ hwaddr initrd_size,
+ const char *kernel_cmdline)
+{
+ int ret = -1;
+ uint32_t mem_reg_property[] = { 0, 0, cpu_to_be32(ramsize) };
+ char *filename;
+ int fdt_size;
+ void *fdt;
+ uint32_t tb_freq = 400000000;
+ uint32_t clock_freq = 400000000;
+
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE);
+ if (!filename) {
+ goto out;
+ }
+ fdt = load_device_tree(filename, &fdt_size);
+ g_free(filename);
+ if (fdt == NULL) {
+ goto out;
+ }
+
+ /* Manipulate device tree in memory. */
+
+ ret = qemu_fdt_setprop(fdt, "/memory", "reg", mem_reg_property,
+ sizeof(mem_reg_property));
+ if (ret < 0)
+ fprintf(stderr, "couldn't set /memory/reg\n");
+
+ ret = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start",
+ initrd_base);
+ if (ret < 0)
+ fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n");
+
+ ret = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end",
+ (initrd_base + initrd_size));
+ if (ret < 0)
+ fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n");
+
+ ret = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs",
+ kernel_cmdline);
+ if (ret < 0)
+ fprintf(stderr, "couldn't set /chosen/bootargs\n");
+
+ /* Copy data from the host device tree into the guest. Since the guest can
+ * directly access the timebase without host involvement, we must expose
+ * the correct frequencies. */
+ if (kvm_enabled()) {
+ tb_freq = kvmppc_get_tbfreq();
+ clock_freq = kvmppc_get_clockfreq();
+ }
+
+ qemu_fdt_setprop_cell(fdt, "/cpus/cpu@0", "clock-frequency",
+ clock_freq);
+ qemu_fdt_setprop_cell(fdt, "/cpus/cpu@0", "timebase-frequency",
+ tb_freq);
+
+ rom_add_blob_fixed(BINARY_DEVICE_TREE_FILE, fdt, fdt_size, addr);
+ g_free(fdt);
+ return 0;
+
+out:
+
+ return ret;
+}
+
+/* Create reset TLB entries for BookE, spanning the 32-bit address space. */
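+/* Two 2 GiB entries (at 0x00000000 and 0x80000000) give the freshly reset
+ * CPU a 1:1 view of the whole 4 GiB physical space until the kernel
+ * installs its own TLB entries. */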
+static void mmubooke_create_initial_mapping(CPUPPCState *env,
+ target_ulong va,
+ hwaddr pa)
+{
+ ppcemb_tlb_t *tlb = &env->tlb.tlbe[0];
+
+ tlb->attr = 0;
+ tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
+ tlb->size = 1U << 31; /* up to 0x80000000 */
+ tlb->EPN = va & TARGET_PAGE_MASK;
+ tlb->RPN = pa & TARGET_PAGE_MASK;
+ tlb->PID = 0;
+
+ tlb = &env->tlb.tlbe[1];
+ tlb->attr = 0;
+ tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
+ tlb->size = 1U << 31; /* up to 0xffffffff */
+ tlb->EPN = 0x80000000 & TARGET_PAGE_MASK;
+ tlb->RPN = 0x80000000 & TARGET_PAGE_MASK;
+ tlb->PID = 0;
+}
+
+static void main_cpu_reset(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+
+ cpu_reset(CPU(cpu));
+ env->gpr[1] = (16<<20) - 8;
+ env->gpr[3] = FDT_ADDR;
+ env->nip = entry;
+
+ /* Create a mapping for the kernel. */
+ mmubooke_create_initial_mapping(env, 0, 0);
+}
+
+static void bamboo_init(MachineState *machine)
+{
+ ram_addr_t ram_size = machine->ram_size;
+ const char *kernel_filename = machine->kernel_filename;
+ const char *kernel_cmdline = machine->kernel_cmdline;
+ const char *initrd_filename = machine->initrd_filename;
+ unsigned int pci_irq_nrs[4] = { 28, 27, 26, 25 };
+ MemoryRegion *address_space_mem = get_system_memory();
+ MemoryRegion *isa = g_new(MemoryRegion, 1);
+ MemoryRegion *ram_memories
+ = g_malloc(PPC440EP_SDRAM_NR_BANKS * sizeof(*ram_memories));
+ hwaddr ram_bases[PPC440EP_SDRAM_NR_BANKS];
+ hwaddr ram_sizes[PPC440EP_SDRAM_NR_BANKS];
+ qemu_irq *pic;
+ qemu_irq *irqs;
+ PCIBus *pcibus;
+ PowerPCCPU *cpu;
+ CPUPPCState *env;
+ uint64_t elf_entry;
+ uint64_t elf_lowaddr;
+ hwaddr loadaddr = 0;
+ target_long initrd_size = 0;
+ DeviceState *dev;
+ int success;
+ int i;
+
+ /* Setup CPU. */
+ if (machine->cpu_model == NULL) {
+ machine->cpu_model = "440EP";
+ }
+ cpu = cpu_ppc_init(machine->cpu_model);
+ if (cpu == NULL) {
+ fprintf(stderr, "Unable to initialize CPU!\n");
+ exit(1);
+ }
+ env = &cpu->env;
+
+ qemu_register_reset(main_cpu_reset, cpu);
+ ppc_booke_timers_init(cpu, 400000000, 0);
+ ppc_dcr_init(env, NULL, NULL);
+
+ /* interrupt controller */
+ irqs = g_malloc0(sizeof(qemu_irq) * PPCUIC_OUTPUT_NB);
+ irqs[PPCUIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT];
+ irqs[PPCUIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT];
+ pic = ppcuic_init(env, irqs, 0x0C0, 0, 1);
+
+ /* SDRAM controller */
+ memset(ram_bases, 0, sizeof(ram_bases));
+ memset(ram_sizes, 0, sizeof(ram_sizes));
+ ram_size = ppc4xx_sdram_adjust(ram_size, PPC440EP_SDRAM_NR_BANKS,
+ ram_memories,
+ ram_bases, ram_sizes,
+ ppc440ep_sdram_bank_sizes);
+ /* XXX 440EP's ECC interrupts are on UIC1, but we've only created UIC0. */
+ ppc4xx_sdram_init(env, pic[14], PPC440EP_SDRAM_NR_BANKS, ram_memories,
+ ram_bases, ram_sizes, 1);
+
+ /* PCI */
+ dev = sysbus_create_varargs(TYPE_PPC4xx_PCI_HOST_BRIDGE,
+ PPC440EP_PCI_CONFIG,
+ pic[pci_irq_nrs[0]], pic[pci_irq_nrs[1]],
+ pic[pci_irq_nrs[2]], pic[pci_irq_nrs[3]],
+ NULL);
+ pcibus = (PCIBus *)qdev_get_child_bus(dev, "pci.0");
+ if (!pcibus) {
+ fprintf(stderr, "couldn't create PCI controller!\n");
+ exit(1);
+ }
+
+ memory_region_init_alias(isa, NULL, "isa_mmio",
+ get_system_io(), 0, PPC440EP_PCI_IOLEN);
+ memory_region_add_subregion(get_system_memory(), PPC440EP_PCI_IO, isa);
+
+ if (serial_hds[0] != NULL) {
+ serial_mm_init(address_space_mem, 0xef600300, 0, pic[0],
+ PPC_SERIAL_MM_BAUDBASE, serial_hds[0],
+ DEVICE_BIG_ENDIAN);
+ }
+ if (serial_hds[1] != NULL) {
+ serial_mm_init(address_space_mem, 0xef600400, 0, pic[1],
+ PPC_SERIAL_MM_BAUDBASE, serial_hds[1],
+ DEVICE_BIG_ENDIAN);
+ }
+
+ if (pcibus) {
+ /* Register network interfaces. */
+ for (i = 0; i < nb_nics; i++) {
+ /* There are no PCI NICs on the Bamboo board, but there are
+ * PCI slots, so we can pick whatever default model we want. */
+ pci_nic_init_nofail(&nd_table[i], pcibus, "e1000", NULL);
+ }
+ }
+
+ /* Load kernel. */
+ if (kernel_filename) {
+ success = load_uimage(kernel_filename, &entry, &loadaddr, NULL,
+ NULL, NULL);
+ if (success < 0) {
+ success = load_elf(kernel_filename, NULL, NULL, &elf_entry,
+ &elf_lowaddr, NULL, 1, PPC_ELF_MACHINE, 0);
+ entry = elf_entry;
+ loadaddr = elf_lowaddr;
+ }
+ /* XXX try again as binary */
+ if (success < 0) {
+ fprintf(stderr, "qemu: could not load kernel '%s'\n",
+ kernel_filename);
+ exit(1);
+ }
+ }
+
+ /* Load initrd. */
+ if (initrd_filename) {
+ initrd_size = load_image_targphys(initrd_filename, RAMDISK_ADDR,
+ ram_size - RAMDISK_ADDR);
+
+ if (initrd_size < 0) {
+ fprintf(stderr, "qemu: could not load ram disk '%s' at %x\n",
+ initrd_filename, RAMDISK_ADDR);
+ exit(1);
+ }
+ }
+
+ /* If we're loading a kernel directly, we must load the device tree too. */
+ if (kernel_filename) {
+ if (bamboo_load_device_tree(FDT_ADDR, ram_size, RAMDISK_ADDR,
+ initrd_size, kernel_cmdline) < 0) {
+ fprintf(stderr, "couldn't load device tree\n");
+ exit(1);
+ }
+ }
+}
+
+static void bamboo_machine_init(MachineClass *mc)
+{
+ mc->desc = "bamboo";
+ mc->init = bamboo_init;
+}
+
+DEFINE_MACHINE("bamboo", bamboo_machine_init)
diff --git a/src/hw/ppc/ppc4xx_devs.c b/src/hw/ppc/ppc4xx_devs.c
new file mode 100644
index 0000000..2f38ff7
--- /dev/null
+++ b/src/hw/ppc/ppc4xx_devs.c
@@ -0,0 +1,734 @@
+/*
+ * QEMU PowerPC 4xx embedded processors shared devices emulation
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/ppc/ppc.h"
+#include "hw/ppc/ppc4xx.h"
+#include "hw/boards.h"
+#include "qemu/log.h"
+#include "exec/address-spaces.h"
+
+#define DEBUG_UIC
+
+
+#ifdef DEBUG_UIC
+# define LOG_UIC(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
+#else
+# define LOG_UIC(...) do { } while (0)
+#endif
+
+static void ppc4xx_reset(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ cpu_reset(CPU(cpu));
+}
+
+/*****************************************************************************/
+/* Generic PowerPC 4xx processor instantiation */
+PowerPCCPU *ppc4xx_init(const char *cpu_model,
+ clk_setup_t *cpu_clk, clk_setup_t *tb_clk,
+ uint32_t sysclk)
+{
+ PowerPCCPU *cpu;
+ CPUPPCState *env;
+
+ /* init CPUs */
+ cpu = cpu_ppc_init(cpu_model);
+ if (cpu == NULL) {
+ fprintf(stderr, "Unable to find PowerPC %s CPU definition\n",
+ cpu_model);
+ exit(1);
+ }
+ env = &cpu->env;
+
+ cpu_clk->cb = NULL; /* We don't care about CPU clock frequency changes */
+ cpu_clk->opaque = env;
+ /* Set time-base frequency to sysclk */
+ tb_clk->cb = ppc_40x_timers_init(env, sysclk, PPC_INTERRUPT_PIT);
+ tb_clk->opaque = env;
+ ppc_dcr_init(env, NULL, NULL);
+ /* Register qemu callbacks */
+ qemu_register_reset(ppc4xx_reset, cpu);
+
+ return cpu;
+}
+
+/*****************************************************************************/
+/* "Universal" Interrupt controller */
+enum {
+ DCR_UICSR = 0x000,
+ DCR_UICSRS = 0x001,
+ DCR_UICER = 0x002,
+ DCR_UICCR = 0x003,
+ DCR_UICPR = 0x004,
+ DCR_UICTR = 0x005,
+ DCR_UICMSR = 0x006,
+ DCR_UICVR = 0x007,
+ DCR_UICVCR = 0x008,
+ DCR_UICMAX = 0x009,
+};
+
+#define UIC_MAX_IRQ 32
+typedef struct ppcuic_t ppcuic_t;
+struct ppcuic_t {
+ uint32_t dcr_base;
+ int use_vectors;
+ uint32_t level; /* Remembers the state of level-triggered interrupts. */
+ uint32_t uicsr; /* Status register */
+ uint32_t uicer; /* Enable register */
+ uint32_t uiccr; /* Critical register */
+ uint32_t uicpr; /* Polarity register */
+ uint32_t uictr; /* Triggering register */
+ uint32_t uicvcr; /* Vector configuration register */
+ uint32_t uicvr;
+ qemu_irq *irqs;
+};
+
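+/* Recompute the INT and CINT outputs: sources that are pending (UICSR) and
+ * enabled (UICER) go to the critical output when the matching UICCR bit is
+ * set, to the normal output otherwise.  With vectors enabled, UICVR is the
+ * base taken from UICVCR (low two bits cleared) plus a 512-byte offset per
+ * step away from the scan origin, which is bit 0 or bit 31 depending on
+ * UICVCR bit 0. */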
+static void ppcuic_trigger_irq (ppcuic_t *uic)
+{
+ uint32_t ir, cr;
+ int start, end, inc, i;
+
+ /* Trigger interrupt if any is pending */
+ ir = uic->uicsr & uic->uicer & (~uic->uiccr);
+ cr = uic->uicsr & uic->uicer & uic->uiccr;
+ LOG_UIC("%s: uicsr %08" PRIx32 " uicer %08" PRIx32
+ " uiccr %08" PRIx32 "\n"
+ " %08" PRIx32 " ir %08" PRIx32 " cr %08" PRIx32 "\n",
+ __func__, uic->uicsr, uic->uicer, uic->uiccr,
+ uic->uicsr & uic->uicer, ir, cr);
+ if (ir != 0x00000000) {
+ LOG_UIC("Raise UIC interrupt\n");
+ qemu_irq_raise(uic->irqs[PPCUIC_OUTPUT_INT]);
+ } else {
+ LOG_UIC("Lower UIC interrupt\n");
+ qemu_irq_lower(uic->irqs[PPCUIC_OUTPUT_INT]);
+ }
+ /* Trigger critical interrupt if any is pending and update vector */
+ if (cr != 0x00000000) {
+ qemu_irq_raise(uic->irqs[PPCUIC_OUTPUT_CINT]);
+ if (uic->use_vectors) {
+ /* Compute critical IRQ vector */
+ if (uic->uicvcr & 1) {
+ start = 31;
+ end = 0;
+ inc = -1;
+ } else {
+ start = 0;
+ end = 31;
+ inc = 1;
+ }
+ uic->uicvr = uic->uicvcr & 0xFFFFFFFC;
+ for (i = start; i != end + inc; i += inc) {
+ if (cr & (1 << i)) {
+ uic->uicvr += (i - start) * 512 * inc;
+ break;
+ }
+ }
+ }
+ LOG_UIC("Raise UIC critical interrupt - "
+ "vector %08" PRIx32 "\n", uic->uicvr);
+ } else {
+ LOG_UIC("Lower UIC critical interrupt\n");
+ qemu_irq_lower(uic->irqs[PPCUIC_OUTPUT_CINT]);
+ uic->uicvr = 0x00000000;
+ }
+}
+
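+/* Latch one input line into UICSR.  Edge-triggered sources (UICTR bit set)
+ * are only captured on a rising edge; level-triggered sources are mirrored
+ * in 'level' as well, so that an acknowledge write to UICSR cannot clear a
+ * source that is still asserted (see the DCR_UICSR case in
+ * dcr_write_uic()). */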
+static void ppcuic_set_irq (void *opaque, int irq_num, int level)
+{
+ ppcuic_t *uic;
+ uint32_t mask, sr;
+
+ uic = opaque;
+ mask = 1U << (31-irq_num);
+ LOG_UIC("%s: irq %d level %d uicsr %08" PRIx32
+ " mask %08" PRIx32 " => %08" PRIx32 " %08" PRIx32 "\n",
+ __func__, irq_num, level,
+ uic->uicsr, mask, uic->uicsr & mask, level << irq_num);
+ if (irq_num < 0 || irq_num > 31)
+ return;
+ sr = uic->uicsr;
+
+ /* Update status register */
+ if (uic->uictr & mask) {
+ /* Edge sensitive interrupt */
+ if (level == 1)
+ uic->uicsr |= mask;
+ } else {
+ /* Level sensitive interrupt */
+ if (level == 1) {
+ uic->uicsr |= mask;
+ uic->level |= mask;
+ } else {
+ uic->uicsr &= ~mask;
+ uic->level &= ~mask;
+ }
+ }
+ LOG_UIC("%s: irq %d level %d sr %" PRIx32 " => "
+ "%08" PRIx32 "\n", __func__, irq_num, level, uic->uicsr, sr);
+ if (sr != uic->uicsr)
+ ppcuic_trigger_irq(uic);
+}
+
+static uint32_t dcr_read_uic (void *opaque, int dcrn)
+{
+ ppcuic_t *uic;
+ uint32_t ret;
+
+ uic = opaque;
+ dcrn -= uic->dcr_base;
+ switch (dcrn) {
+ case DCR_UICSR:
+ case DCR_UICSRS:
+ ret = uic->uicsr;
+ break;
+ case DCR_UICER:
+ ret = uic->uicer;
+ break;
+ case DCR_UICCR:
+ ret = uic->uiccr;
+ break;
+ case DCR_UICPR:
+ ret = uic->uicpr;
+ break;
+ case DCR_UICTR:
+ ret = uic->uictr;
+ break;
+ case DCR_UICMSR:
+ ret = uic->uicsr & uic->uicer;
+ break;
+ case DCR_UICVR:
+ if (!uic->use_vectors)
+ goto no_read;
+ ret = uic->uicvr;
+ break;
+ case DCR_UICVCR:
+ if (!uic->use_vectors)
+ goto no_read;
+ ret = uic->uicvcr;
+ break;
+ default:
+ no_read:
+ ret = 0x00000000;
+ break;
+ }
+
+ return ret;
+}
+
+static void dcr_write_uic (void *opaque, int dcrn, uint32_t val)
+{
+ ppcuic_t *uic;
+
+ uic = opaque;
+ dcrn -= uic->dcr_base;
+ LOG_UIC("%s: dcr %d val 0x%x\n", __func__, dcrn, val);
+ switch (dcrn) {
+ case DCR_UICSR:
+ uic->uicsr &= ~val;
+ uic->uicsr |= uic->level;
+ ppcuic_trigger_irq(uic);
+ break;
+ case DCR_UICSRS:
+ uic->uicsr |= val;
+ ppcuic_trigger_irq(uic);
+ break;
+ case DCR_UICER:
+ uic->uicer = val;
+ ppcuic_trigger_irq(uic);
+ break;
+ case DCR_UICCR:
+ uic->uiccr = val;
+ ppcuic_trigger_irq(uic);
+ break;
+ case DCR_UICPR:
+ uic->uicpr = val;
+ break;
+ case DCR_UICTR:
+ uic->uictr = val;
+ ppcuic_trigger_irq(uic);
+ break;
+ case DCR_UICMSR:
+ break;
+ case DCR_UICVR:
+ break;
+ case DCR_UICVCR:
+ uic->uicvcr = val & 0xFFFFFFFD;
+ ppcuic_trigger_irq(uic);
+ break;
+ }
+}
+
+static void ppcuic_reset (void *opaque)
+{
+ ppcuic_t *uic;
+
+ uic = opaque;
+ uic->uiccr = 0x00000000;
+ uic->uicer = 0x00000000;
+ uic->uicpr = 0x00000000;
+ uic->uicsr = 0x00000000;
+ uic->uictr = 0x00000000;
+ if (uic->use_vectors) {
+ uic->uicvcr = 0x00000000;
+ uic->uicvr = 0x00000000;
+ }
+}
+
+qemu_irq *ppcuic_init (CPUPPCState *env, qemu_irq *irqs,
+ uint32_t dcr_base, int has_ssr, int has_vr)
+{
+ ppcuic_t *uic;
+ int i;
+
+ uic = g_malloc0(sizeof(ppcuic_t));
+ uic->dcr_base = dcr_base;
+ uic->irqs = irqs;
+ if (has_vr)
+ uic->use_vectors = 1;
+ for (i = 0; i < DCR_UICMAX; i++) {
+ ppc_dcr_register(env, dcr_base + i, uic,
+ &dcr_read_uic, &dcr_write_uic);
+ }
+ qemu_register_reset(ppcuic_reset, uic);
+
+ return qemu_allocate_irqs(&ppcuic_set_irq, uic, UIC_MAX_IRQ);
+}
+
+/*****************************************************************************/
+/* SDRAM controller */
+typedef struct ppc4xx_sdram_t ppc4xx_sdram_t;
+struct ppc4xx_sdram_t {
+ uint32_t addr;
+ int nbanks;
+ MemoryRegion containers[4]; /* used for clipping */
+ MemoryRegion *ram_memories;
+ hwaddr ram_bases[4];
+ hwaddr ram_sizes[4];
+ uint32_t besr0;
+ uint32_t besr1;
+ uint32_t bear;
+ uint32_t cfg;
+ uint32_t status;
+ uint32_t rtr;
+ uint32_t pmit;
+ uint32_t bcr[4];
+ uint32_t tr;
+ uint32_t ecccfg;
+ uint32_t eccesr;
+ qemu_irq irq;
+};
+
+enum {
+ SDRAM0_CFGADDR = 0x010,
+ SDRAM0_CFGDATA = 0x011,
+};
+
+/* XXX: TOFIX: successive patches have left this code with type
+ * inconsistencies, mixing hwaddr, target_ulong and uint32_t
+ */
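+/* As modelled here, a bank control register packs the bank base address in
+ * its top bits (bcr & 0xFF800000), the bank size in bits 17-19 as
+ * 4 MiB << n, and the bank-enable flag in bit 0. */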
+static uint32_t sdram_bcr (hwaddr ram_base,
+ hwaddr ram_size)
+{
+ uint32_t bcr;
+
+ switch (ram_size) {
+ case (4 * 1024 * 1024):
+ bcr = 0x00000000;
+ break;
+ case (8 * 1024 * 1024):
+ bcr = 0x00020000;
+ break;
+ case (16 * 1024 * 1024):
+ bcr = 0x00040000;
+ break;
+ case (32 * 1024 * 1024):
+ bcr = 0x00060000;
+ break;
+ case (64 * 1024 * 1024):
+ bcr = 0x00080000;
+ break;
+ case (128 * 1024 * 1024):
+ bcr = 0x000A0000;
+ break;
+ case (256 * 1024 * 1024):
+ bcr = 0x000C0000;
+ break;
+ default:
+ printf("%s: invalid RAM size " TARGET_FMT_plx "\n", __func__,
+ ram_size);
+ return 0x00000000;
+ }
+ bcr |= ram_base & 0xFF800000;
+ bcr |= 1;
+
+ return bcr;
+}
+
+static inline hwaddr sdram_base(uint32_t bcr)
+{
+ return bcr & 0xFF800000;
+}
+
+static target_ulong sdram_size (uint32_t bcr)
+{
+ target_ulong size;
+ int sh;
+
+ sh = (bcr >> 17) & 0x7;
+ if (sh == 7)
+ size = -1;
+ else
+ size = (4 * 1024 * 1024) << sh;
+
+ return size;
+}
+
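+/* (Re)program one bank: any previous mapping is torn down, then a container
+ * region sized from the new BCR is mapped at the BCR base address with the
+ * pre-allocated RAM alias inside it, so the RAM visible to the guest is
+ * clipped to whatever size it programmed. */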
+static void sdram_set_bcr(ppc4xx_sdram_t *sdram,
+ uint32_t *bcrp, uint32_t bcr, int enabled)
+{
+ unsigned n = bcrp - sdram->bcr;
+
+ if (*bcrp & 0x00000001) {
+ /* Unmap RAM */
+#ifdef DEBUG_SDRAM
+ printf("%s: unmap RAM area " TARGET_FMT_plx " " TARGET_FMT_lx "\n",
+ __func__, sdram_base(*bcrp), sdram_size(*bcrp));
+#endif
+ memory_region_del_subregion(get_system_memory(),
+ &sdram->containers[n]);
+ memory_region_del_subregion(&sdram->containers[n],
+ &sdram->ram_memories[n]);
+ object_unparent(OBJECT(&sdram->containers[n]));
+ }
+ *bcrp = bcr & 0xFFDEE001;
+ if (enabled && (bcr & 0x00000001)) {
+#ifdef DEBUG_SDRAM
+ printf("%s: Map RAM area " TARGET_FMT_plx " " TARGET_FMT_lx "\n",
+ __func__, sdram_base(bcr), sdram_size(bcr));
+#endif
+ memory_region_init(&sdram->containers[n], NULL, "sdram-containers",
+ sdram_size(bcr));
+ memory_region_add_subregion(&sdram->containers[n], 0,
+ &sdram->ram_memories[n]);
+ memory_region_add_subregion(get_system_memory(),
+ sdram_base(bcr),
+ &sdram->containers[n]);
+ }
+}
+
+static void sdram_map_bcr (ppc4xx_sdram_t *sdram)
+{
+ int i;
+
+ for (i = 0; i < sdram->nbanks; i++) {
+ if (sdram->ram_sizes[i] != 0) {
+ sdram_set_bcr(sdram,
+ &sdram->bcr[i],
+ sdram_bcr(sdram->ram_bases[i], sdram->ram_sizes[i]),
+ 1);
+ } else {
+ sdram_set_bcr(sdram, &sdram->bcr[i], 0x00000000, 0);
+ }
+ }
+}
+
+static void sdram_unmap_bcr (ppc4xx_sdram_t *sdram)
+{
+ int i;
+
+ for (i = 0; i < sdram->nbanks; i++) {
+#ifdef DEBUG_SDRAM
+ printf("%s: Unmap RAM area " TARGET_FMT_plx " " TARGET_FMT_lx "\n",
+ __func__, sdram_base(sdram->bcr[i]), sdram_size(sdram->bcr[i]));
+#endif
+ memory_region_del_subregion(get_system_memory(),
+ &sdram->ram_memories[i]);
+ }
+}
+
+static uint32_t dcr_read_sdram (void *opaque, int dcrn)
+{
+ ppc4xx_sdram_t *sdram;
+ uint32_t ret;
+
+ sdram = opaque;
+ switch (dcrn) {
+ case SDRAM0_CFGADDR:
+ ret = sdram->addr;
+ break;
+ case SDRAM0_CFGDATA:
+ switch (sdram->addr) {
+ case 0x00: /* SDRAM_BESR0 */
+ ret = sdram->besr0;
+ break;
+ case 0x08: /* SDRAM_BESR1 */
+ ret = sdram->besr1;
+ break;
+ case 0x10: /* SDRAM_BEAR */
+ ret = sdram->bear;
+ break;
+ case 0x20: /* SDRAM_CFG */
+ ret = sdram->cfg;
+ break;
+ case 0x24: /* SDRAM_STATUS */
+ ret = sdram->status;
+ break;
+ case 0x30: /* SDRAM_RTR */
+ ret = sdram->rtr;
+ break;
+ case 0x34: /* SDRAM_PMIT */
+ ret = sdram->pmit;
+ break;
+ case 0x40: /* SDRAM_B0CR */
+ ret = sdram->bcr[0];
+ break;
+ case 0x44: /* SDRAM_B1CR */
+ ret = sdram->bcr[1];
+ break;
+ case 0x48: /* SDRAM_B2CR */
+ ret = sdram->bcr[2];
+ break;
+ case 0x4C: /* SDRAM_B3CR */
+ ret = sdram->bcr[3];
+ break;
+ case 0x80: /* SDRAM_TR */
+ ret = -1; /* ? */
+ break;
+ case 0x94: /* SDRAM_ECCCFG */
+ ret = sdram->ecccfg;
+ break;
+ case 0x98: /* SDRAM_ECCESR */
+ ret = sdram->eccesr;
+ break;
+ default: /* Error */
+ ret = -1;
+ break;
+ }
+ break;
+ default:
+ /* Avoid gcc warning */
+ ret = 0x00000000;
+ break;
+ }
+
+ return ret;
+}
+
+static void dcr_write_sdram (void *opaque, int dcrn, uint32_t val)
+{
+ ppc4xx_sdram_t *sdram;
+
+ sdram = opaque;
+ switch (dcrn) {
+ case SDRAM0_CFGADDR:
+ sdram->addr = val;
+ break;
+ case SDRAM0_CFGDATA:
+ switch (sdram->addr) {
+ case 0x00: /* SDRAM_BESR0 */
+ sdram->besr0 &= ~val;
+ break;
+ case 0x08: /* SDRAM_BESR1 */
+ sdram->besr1 &= ~val;
+ break;
+ case 0x10: /* SDRAM_BEAR */
+ sdram->bear = val;
+ break;
+ case 0x20: /* SDRAM_CFG */
+ val &= 0xFFE00000;
+ if (!(sdram->cfg & 0x80000000) && (val & 0x80000000)) {
+#ifdef DEBUG_SDRAM
+ printf("%s: enable SDRAM controller\n", __func__);
+#endif
+ /* validate all RAM mappings */
+ sdram_map_bcr(sdram);
+ sdram->status &= ~0x80000000;
+ } else if ((sdram->cfg & 0x80000000) && !(val & 0x80000000)) {
+#ifdef DEBUG_SDRAM
+ printf("%s: disable SDRAM controller\n", __func__);
+#endif
+ /* invalidate all RAM mappings */
+ sdram_unmap_bcr(sdram);
+ sdram->status |= 0x80000000;
+ }
+ if (!(sdram->cfg & 0x40000000) && (val & 0x40000000))
+ sdram->status |= 0x40000000;
+ else if ((sdram->cfg & 0x40000000) && !(val & 0x40000000))
+ sdram->status &= ~0x40000000;
+ sdram->cfg = val;
+ break;
+ case 0x24: /* SDRAM_STATUS */
+ /* Read-only register */
+ break;
+ case 0x30: /* SDRAM_RTR */
+ sdram->rtr = val & 0x3FF80000;
+ break;
+ case 0x34: /* SDRAM_PMIT */
+ sdram->pmit = (val & 0xF8000000) | 0x07C00000;
+ break;
+ case 0x40: /* SDRAM_B0CR */
+ sdram_set_bcr(sdram, &sdram->bcr[0], val, sdram->cfg & 0x80000000);
+ break;
+ case 0x44: /* SDRAM_B1CR */
+ sdram_set_bcr(sdram, &sdram->bcr[1], val, sdram->cfg & 0x80000000);
+ break;
+ case 0x48: /* SDRAM_B2CR */
+ sdram_set_bcr(sdram, &sdram->bcr[2], val, sdram->cfg & 0x80000000);
+ break;
+ case 0x4C: /* SDRAM_B3CR */
+ sdram_set_bcr(sdram, &sdram->bcr[3], val, sdram->cfg & 0x80000000);
+ break;
+ case 0x80: /* SDRAM_TR */
+ sdram->tr = val & 0x018FC01F;
+ break;
+ case 0x94: /* SDRAM_ECCCFG */
+ sdram->ecccfg = val & 0x00F00000;
+ break;
+ case 0x98: /* SDRAM_ECCESR */
+ val &= 0xFFF0F000;
+ if (sdram->eccesr == 0 && val != 0)
+ qemu_irq_raise(sdram->irq);
+ else if (sdram->eccesr != 0 && val == 0)
+ qemu_irq_lower(sdram->irq);
+ sdram->eccesr = val;
+ break;
+ default: /* Error */
+ break;
+ }
+ break;
+ }
+}
+
+static void sdram_reset (void *opaque)
+{
+ ppc4xx_sdram_t *sdram;
+
+ sdram = opaque;
+ sdram->addr = 0x00000000;
+ sdram->bear = 0x00000000;
+ sdram->besr0 = 0x00000000; /* No error */
+ sdram->besr1 = 0x00000000; /* No error */
+ sdram->cfg = 0x00000000;
+ sdram->ecccfg = 0x00000000; /* No ECC */
+ sdram->eccesr = 0x00000000; /* No error */
+ sdram->pmit = 0x07C00000;
+ sdram->rtr = 0x05F00000;
+ sdram->tr = 0x00854009;
+ /* We pre-initialize RAM banks */
+ sdram->status = 0x00000000;
+ sdram->cfg = 0x00800000;
+}
+
+void ppc4xx_sdram_init (CPUPPCState *env, qemu_irq irq, int nbanks,
+ MemoryRegion *ram_memories,
+ hwaddr *ram_bases,
+ hwaddr *ram_sizes,
+ int do_init)
+{
+ ppc4xx_sdram_t *sdram;
+
+ sdram = g_malloc0(sizeof(ppc4xx_sdram_t));
+ sdram->irq = irq;
+ sdram->nbanks = nbanks;
+ sdram->ram_memories = ram_memories;
+ memset(sdram->ram_bases, 0, 4 * sizeof(hwaddr));
+ memcpy(sdram->ram_bases, ram_bases,
+ nbanks * sizeof(hwaddr));
+ memset(sdram->ram_sizes, 0, 4 * sizeof(hwaddr));
+ memcpy(sdram->ram_sizes, ram_sizes,
+ nbanks * sizeof(hwaddr));
+ qemu_register_reset(&sdram_reset, sdram);
+ ppc_dcr_register(env, SDRAM0_CFGADDR,
+ sdram, &dcr_read_sdram, &dcr_write_sdram);
+ ppc_dcr_register(env, SDRAM0_CFGDATA,
+ sdram, &dcr_read_sdram, &dcr_write_sdram);
+ if (do_init)
+ sdram_map_bcr(sdram);
+}
+
+/* Fill in consecutive SDRAM banks with 'ram_size' bytes of memory.
+ *
+ * sdram_bank_sizes[] must be 0-terminated.
+ *
+ * The 4xx SDRAM controller supports a small number of banks, and each bank
+ * must be one of a small set of sizes. The number of banks and the supported
+ * sizes varies by SoC. */
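+/* Purely illustrative: asking for 144 MiB from a controller with four banks
+ * and supported sizes {256, 128, 64, 32, 16, 8} MiB ends up as one 128 MiB
+ * bank plus one 16 MiB bank; an amount that cannot be decomposed this way
+ * is truncated and reported below. */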
+ram_addr_t ppc4xx_sdram_adjust(ram_addr_t ram_size, int nr_banks,
+ MemoryRegion ram_memories[],
+ hwaddr ram_bases[],
+ hwaddr ram_sizes[],
+ const unsigned int sdram_bank_sizes[])
+{
+ MemoryRegion *ram = g_malloc0(sizeof(*ram));
+ ram_addr_t size_left = ram_size;
+ ram_addr_t base = 0;
+ unsigned int bank_size;
+ int i;
+ int j;
+
+ for (i = 0; i < nr_banks; i++) {
+ for (j = 0; sdram_bank_sizes[j] != 0; j++) {
+ bank_size = sdram_bank_sizes[j];
+ if (bank_size <= size_left) {
+ size_left -= bank_size;
+ break;
+ }
+ }
+ if (!size_left) {
+ /* No need to use the remaining banks. */
+ break;
+ }
+ }
+
+ ram_size -= size_left;
+ if (size_left) {
+ printf("Truncating memory to %d MiB to fit SDRAM controller limits.\n",
+ (int)(ram_size >> 20));
+ }
+
+ memory_region_allocate_system_memory(ram, NULL, "ppc4xx.sdram", ram_size);
+
+ size_left = ram_size;
+ for (i = 0; i < nr_banks && size_left; i++) {
+ for (j = 0; sdram_bank_sizes[j] != 0; j++) {
+ bank_size = sdram_bank_sizes[j];
+
+ if (bank_size <= size_left) {
+ char name[32];
+ snprintf(name, sizeof(name), "ppc4xx.sdram%d", i);
+ memory_region_init_alias(&ram_memories[i], NULL, name, ram,
+ base, bank_size);
+ ram_bases[i] = base;
+ ram_sizes[i] = bank_size;
+ base += bank_size;
+ size_left -= bank_size;
+ break;
+ }
+ }
+ }
+
+ return ram_size;
+}
diff --git a/src/hw/ppc/ppc4xx_pci.c b/src/hw/ppc/ppc4xx_pci.c
new file mode 100644
index 0000000..0bb3cdb
--- /dev/null
+++ b/src/hw/ppc/ppc4xx_pci.c
@@ -0,0 +1,392 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+/* This file implements emulation of the 32-bit PCI controller found in some
+ * 4xx SoCs, such as the 440EP. */
+
+#include "hw/hw.h"
+#include "hw/ppc/ppc.h"
+#include "hw/ppc/ppc4xx.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/pci_host.h"
+#include "exec/address-spaces.h"
+
+#undef DEBUG
+#ifdef DEBUG
+#define DPRINTF(fmt, ...) do { printf(fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...)
+#endif /* DEBUG */
+
+struct PCIMasterMap {
+ uint32_t la;
+ uint32_t ma;
+ uint32_t pcila;
+ uint32_t pciha;
+};
+
+struct PCITargetMap {
+ uint32_t ms;
+ uint32_t la;
+};
+
+#define PPC4xx_PCI_HOST_BRIDGE(obj) \
+ OBJECT_CHECK(PPC4xxPCIState, (obj), TYPE_PPC4xx_PCI_HOST_BRIDGE)
+
+#define PPC4xx_PCI_NR_PMMS 3
+#define PPC4xx_PCI_NR_PTMS 2
+
+struct PPC4xxPCIState {
+ PCIHostState parent_obj;
+
+ struct PCIMasterMap pmm[PPC4xx_PCI_NR_PMMS];
+ struct PCITargetMap ptm[PPC4xx_PCI_NR_PTMS];
+ qemu_irq irq[4];
+
+ MemoryRegion container;
+ MemoryRegion iomem;
+};
+typedef struct PPC4xxPCIState PPC4xxPCIState;
+
+#define PCIC0_CFGADDR 0x0
+#define PCIC0_CFGDATA 0x4
+
+/* PLB Memory Map (PMM) registers specify which PLB addresses are translated to
+ * PCI accesses. */
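+/* Each PMM window is described by a PLB local address (LA), a mask/attribute
+ * register (MA) and the low/high halves of the translated PCI address
+ * (PCILA/PCIHA); this model only latches the values, see the note in
+ * ppc4xx_pci_reg_write4(). */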
+#define PCIL0_PMM0LA 0x0
+#define PCIL0_PMM0MA 0x4
+#define PCIL0_PMM0PCILA 0x8
+#define PCIL0_PMM0PCIHA 0xc
+#define PCIL0_PMM1LA 0x10
+#define PCIL0_PMM1MA 0x14
+#define PCIL0_PMM1PCILA 0x18
+#define PCIL0_PMM1PCIHA 0x1c
+#define PCIL0_PMM2LA 0x20
+#define PCIL0_PMM2MA 0x24
+#define PCIL0_PMM2PCILA 0x28
+#define PCIL0_PMM2PCIHA 0x2c
+
+/* PCI Target Map (PTM) registers specify which PCI addresses are translated to
+ * PLB accesses. */
+#define PCIL0_PTM1MS 0x30
+#define PCIL0_PTM1LA 0x34
+#define PCIL0_PTM2MS 0x38
+#define PCIL0_PTM2LA 0x3c
+#define PCI_REG_BASE 0x800000
+#define PCI_REG_SIZE 0x40
+
+#define PCI_ALL_SIZE (PCI_REG_BASE + PCI_REG_SIZE)
+
+static void ppc4xx_pci_reg_write4(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ struct PPC4xxPCIState *pci = opaque;
+
+ /* We ignore all target attempts at PCI configuration, effectively
+ * assuming a bidirectional 1:1 mapping of PLB and PCI space. */
+
+ switch (offset) {
+ case PCIL0_PMM0LA:
+ pci->pmm[0].la = value;
+ break;
+ case PCIL0_PMM0MA:
+ pci->pmm[0].ma = value;
+ break;
+ case PCIL0_PMM0PCIHA:
+ pci->pmm[0].pciha = value;
+ break;
+ case PCIL0_PMM0PCILA:
+ pci->pmm[0].pcila = value;
+ break;
+
+ case PCIL0_PMM1LA:
+ pci->pmm[1].la = value;
+ break;
+ case PCIL0_PMM1MA:
+ pci->pmm[1].ma = value;
+ break;
+ case PCIL0_PMM1PCIHA:
+ pci->pmm[1].pciha = value;
+ break;
+ case PCIL0_PMM1PCILA:
+ pci->pmm[1].pcila = value;
+ break;
+
+ case PCIL0_PMM2LA:
+ pci->pmm[2].la = value;
+ break;
+ case PCIL0_PMM2MA:
+ pci->pmm[2].ma = value;
+ break;
+ case PCIL0_PMM2PCIHA:
+ pci->pmm[2].pciha = value;
+ break;
+ case PCIL0_PMM2PCILA:
+ pci->pmm[2].pcila = value;
+ break;
+
+ case PCIL0_PTM1MS:
+ pci->ptm[0].ms = value;
+ break;
+ case PCIL0_PTM1LA:
+ pci->ptm[0].la = value;
+ break;
+ case PCIL0_PTM2MS:
+ pci->ptm[1].ms = value;
+ break;
+ case PCIL0_PTM2LA:
+ pci->ptm[1].la = value;
+ break;
+
+ default:
+ printf("%s: unhandled PCI internal register 0x%lx\n", __func__,
+ (unsigned long)offset);
+ break;
+ }
+}
+
+static uint64_t ppc4xx_pci_reg_read4(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ struct PPC4xxPCIState *pci = opaque;
+ uint32_t value;
+
+ switch (offset) {
+ case PCIL0_PMM0LA:
+ value = pci->pmm[0].la;
+ break;
+ case PCIL0_PMM0MA:
+ value = pci->pmm[0].ma;
+ break;
+ case PCIL0_PMM0PCIHA:
+ value = pci->pmm[0].pciha;
+ break;
+ case PCIL0_PMM0PCILA:
+ value = pci->pmm[0].pcila;
+ break;
+
+ case PCIL0_PMM1LA:
+ value = pci->pmm[1].la;
+ break;
+ case PCIL0_PMM1MA:
+ value = pci->pmm[1].ma;
+ break;
+ case PCIL0_PMM1PCIHA:
+ value = pci->pmm[1].pciha;
+ break;
+ case PCIL0_PMM1PCILA:
+ value = pci->pmm[1].pcila;
+ break;
+
+ case PCIL0_PMM2LA:
+ value = pci->pmm[2].la;
+ break;
+ case PCIL0_PMM2MA:
+ value = pci->pmm[2].ma;
+ break;
+ case PCIL0_PMM2PCIHA:
+ value = pci->pmm[2].pciha;
+ break;
+ case PCIL0_PMM2PCILA:
+ value = pci->pmm[2].pcila;
+ break;
+
+ case PCIL0_PTM1MS:
+ value = pci->ptm[0].ms;
+ break;
+ case PCIL0_PTM1LA:
+ value = pci->ptm[0].la;
+ break;
+ case PCIL0_PTM2MS:
+ value = pci->ptm[1].ms;
+ break;
+ case PCIL0_PTM2LA:
+ value = pci->ptm[1].la;
+ break;
+
+ default:
+ printf("%s: invalid PCI internal register 0x%lx\n", __func__,
+ (unsigned long)offset);
+ value = 0;
+ }
+
+ return value;
+}
+
+static const MemoryRegionOps pci_reg_ops = {
+ .read = ppc4xx_pci_reg_read4,
+ .write = ppc4xx_pci_reg_write4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void ppc4xx_pci_reset(void *opaque)
+{
+ struct PPC4xxPCIState *pci = opaque;
+
+ memset(pci->pmm, 0, sizeof(pci->pmm));
+ memset(pci->ptm, 0, sizeof(pci->ptm));
+}
+
+/* On Bamboo, all pins from each slot are tied to a single board IRQ. This
+ * may need further refactoring for other boards. */
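+/* The slot number is devfn >> 3, so a device in slot 1 maps to board IRQ
+ * input 0, slot 2 to input 1, and so on ("slot - 1" below). */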
+static int ppc4xx_pci_map_irq(PCIDevice *pci_dev, int irq_num)
+{
+ int slot = pci_dev->devfn >> 3;
+
+ DPRINTF("%s: devfn %x irq %d -> %d\n", __func__,
+ pci_dev->devfn, irq_num, slot);
+
+ return slot - 1;
+}
+
+static void ppc4xx_pci_set_irq(void *opaque, int irq_num, int level)
+{
+ qemu_irq *pci_irqs = opaque;
+
+ DPRINTF("%s: PCI irq %d\n", __func__, irq_num);
+ if (irq_num < 0) {
+ fprintf(stderr, "%s: PCI irq %d\n", __func__, irq_num);
+ return;
+ }
+ qemu_set_irq(pci_irqs[irq_num], level);
+}
+
+static const VMStateDescription vmstate_pci_master_map = {
+ .name = "pci_master_map",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(la, struct PCIMasterMap),
+ VMSTATE_UINT32(ma, struct PCIMasterMap),
+ VMSTATE_UINT32(pcila, struct PCIMasterMap),
+ VMSTATE_UINT32(pciha, struct PCIMasterMap),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_pci_target_map = {
+ .name = "pci_target_map",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(ms, struct PCITargetMap),
+ VMSTATE_UINT32(la, struct PCITargetMap),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_ppc4xx_pci = {
+ .name = "ppc4xx_pci",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_ARRAY(pmm, PPC4xxPCIState, PPC4xx_PCI_NR_PMMS, 1,
+ vmstate_pci_master_map,
+ struct PCIMasterMap),
+ VMSTATE_STRUCT_ARRAY(ptm, PPC4xxPCIState, PPC4xx_PCI_NR_PTMS, 1,
+ vmstate_pci_target_map,
+ struct PCITargetMap),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/* XXX Interrupt acknowledge cycles not supported. */
+static int ppc4xx_pcihost_initfn(SysBusDevice *dev)
+{
+ PPC4xxPCIState *s;
+ PCIHostState *h;
+ PCIBus *b;
+ int i;
+
+ h = PCI_HOST_BRIDGE(dev);
+ s = PPC4xx_PCI_HOST_BRIDGE(dev);
+
+ for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
+ sysbus_init_irq(dev, &s->irq[i]);
+ }
+
+ b = pci_register_bus(DEVICE(dev), NULL, ppc4xx_pci_set_irq,
+ ppc4xx_pci_map_irq, s->irq, get_system_memory(),
+ get_system_io(), 0, 4, TYPE_PCI_BUS);
+ h->bus = b;
+
+ pci_create_simple(b, 0, "ppc4xx-host-bridge");
+
+ /* XXX split into 2 memory regions, one for config space, one for regs */
+ memory_region_init(&s->container, OBJECT(s), "pci-container", PCI_ALL_SIZE);
+ memory_region_init_io(&h->conf_mem, OBJECT(s), &pci_host_conf_le_ops, h,
+ "pci-conf-idx", 4);
+ memory_region_init_io(&h->data_mem, OBJECT(s), &pci_host_data_le_ops, h,
+ "pci-conf-data", 4);
+ memory_region_init_io(&s->iomem, OBJECT(s), &pci_reg_ops, s,
+ "pci.reg", PCI_REG_SIZE);
+ memory_region_add_subregion(&s->container, PCIC0_CFGADDR, &h->conf_mem);
+ memory_region_add_subregion(&s->container, PCIC0_CFGDATA, &h->data_mem);
+ memory_region_add_subregion(&s->container, PCI_REG_BASE, &s->iomem);
+ sysbus_init_mmio(dev, &s->container);
+ qemu_register_reset(ppc4xx_pci_reset, s);
+
+ return 0;
+}
+
+static void ppc4xx_host_bridge_class_init(ObjectClass *klass, void *data)
+{
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "Host bridge";
+ k->vendor_id = PCI_VENDOR_ID_IBM;
+ k->device_id = PCI_DEVICE_ID_IBM_440GX;
+ k->class_id = PCI_CLASS_BRIDGE_OTHER;
+ /*
+ * PCI-facing part of the host bridge, not usable without the
+ * host-facing part, which can't be device_add'ed, yet.
+ */
+ dc->cannot_instantiate_with_device_add_yet = true;
+}
+
+static const TypeInfo ppc4xx_host_bridge_info = {
+ .name = "ppc4xx-host-bridge",
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(PCIDevice),
+ .class_init = ppc4xx_host_bridge_class_init,
+};
+
+static void ppc4xx_pcihost_class_init(ObjectClass *klass, void *data)
+{
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ k->init = ppc4xx_pcihost_initfn;
+ dc->vmsd = &vmstate_ppc4xx_pci;
+}
+
+static const TypeInfo ppc4xx_pcihost_info = {
+ .name = TYPE_PPC4xx_PCI_HOST_BRIDGE,
+ .parent = TYPE_PCI_HOST_BRIDGE,
+ .instance_size = sizeof(PPC4xxPCIState),
+ .class_init = ppc4xx_pcihost_class_init,
+};
+
+static void ppc4xx_pci_register_types(void)
+{
+ type_register_static(&ppc4xx_pcihost_info);
+ type_register_static(&ppc4xx_host_bridge_info);
+}
+
+type_init(ppc4xx_pci_register_types)
diff --git a/src/hw/ppc/ppc_booke.c b/src/hw/ppc/ppc_booke.c
new file mode 100644
index 0000000..8b94da6
--- /dev/null
+++ b/src/hw/ppc/ppc_booke.c
@@ -0,0 +1,365 @@
+/*
+ * QEMU PowerPC Booke hardware System Emulator
+ *
+ * Copyright (c) 2011 AdaCore
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/ppc/ppc.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+#include "hw/timer/m48t59.h"
+#include "qemu/log.h"
+#include "hw/loader.h"
+#include "kvm_ppc.h"
+
+
+/* Timer Control Register */
+
+#define TCR_WP_SHIFT 30 /* Watchdog Timer Period */
+#define TCR_WP_MASK (0x3U << TCR_WP_SHIFT)
+#define TCR_WRC_SHIFT 28 /* Watchdog Timer Reset Control */
+#define TCR_WRC_MASK (0x3U << TCR_WRC_SHIFT)
+#define TCR_WIE (1U << 27) /* Watchdog Timer Interrupt Enable */
+#define TCR_DIE (1U << 26) /* Decrementer Interrupt Enable */
+#define TCR_FP_SHIFT 24 /* Fixed-Interval Timer Period */
+#define TCR_FP_MASK (0x3U << TCR_FP_SHIFT)
+#define TCR_FIE (1U << 23) /* Fixed-Interval Timer Interrupt Enable */
+#define TCR_ARE (1U << 22) /* Auto-Reload Enable */
+
+/* Timer Control Register (e500 specific fields) */
+
+#define TCR_E500_FPEXT_SHIFT 13 /* Fixed-Interval Timer Period Extension */
+#define TCR_E500_FPEXT_MASK (0xf << TCR_E500_FPEXT_SHIFT)
+#define TCR_E500_WPEXT_SHIFT 17 /* Watchdog Timer Period Extension */
+#define TCR_E500_WPEXT_MASK (0xf << TCR_E500_WPEXT_SHIFT)
+
+/* Timer Status Register */
+
+#define TSR_FIS (1U << 26) /* Fixed-Interval Timer Interrupt Status */
+#define TSR_DIS (1U << 27) /* Decrementer Interrupt Status */
+#define TSR_WRS_SHIFT 28 /* Watchdog Timer Reset Status */
+#define TSR_WRS_MASK (0x3U << TSR_WRS_SHIFT)
+#define TSR_WIS (1U << 30) /* Watchdog Timer Interrupt Status */
+#define TSR_ENW (1U << 31) /* Enable Next Watchdog Timer */
+
+typedef struct booke_timer_t booke_timer_t;
+struct booke_timer_t {
+
+ uint64_t fit_next;
+ QEMUTimer *fit_timer;
+
+ uint64_t wdt_next;
+ QEMUTimer *wdt_timer;
+
+ uint32_t flags;
+};
+
+static void booke_update_irq(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+
+ ppc_set_irq(cpu, PPC_INTERRUPT_DECR,
+ (env->spr[SPR_BOOKE_TSR] & TSR_DIS
+ && env->spr[SPR_BOOKE_TCR] & TCR_DIE));
+
+ ppc_set_irq(cpu, PPC_INTERRUPT_WDT,
+ (env->spr[SPR_BOOKE_TSR] & TSR_WIS
+ && env->spr[SPR_BOOKE_TCR] & TCR_WIE));
+
+ ppc_set_irq(cpu, PPC_INTERRUPT_FIT,
+ (env->spr[SPR_BOOKE_TSR] & TSR_FIS
+ && env->spr[SPR_BOOKE_TCR] & TCR_FIE));
+}
+
+/* Return the location of the bit of time base at which the FIT will raise an
+ interrupt */
+static uint8_t booke_get_fit_target(CPUPPCState *env, ppc_tb_t *tb_env)
+{
+ uint8_t fp = (env->spr[SPR_BOOKE_TCR] & TCR_FP_MASK) >> TCR_FP_SHIFT;
+
+ if (tb_env->flags & PPC_TIMER_E500) {
+ /* e500 Fixed-interval timer period extension */
+ uint32_t fpext = (env->spr[SPR_BOOKE_TCR] & TCR_E500_FPEXT_MASK)
+ >> TCR_E500_FPEXT_SHIFT;
+ fp = 63 - (fp | fpext << 2);
+ } else {
+ fp = env->fit_period[fp];
+ }
+
+ return fp;
+}
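/*
 * A minimal standalone sketch, not part of the QEMU code above: on e500 the
 * FIT period selector is the 6-bit value {FPEXT,FP}, and booke_get_fit_target()
 * turns it into a time-base bit number counted from the MSB (selector 0 ->
 * bit 63, the slowest period; selector 63 -> bit 0).  The shifts below reuse
 * the TCR field layout defined earlier in this file.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned e500_fit_target_bit(uint32_t tcr)
{
    unsigned fp    = (tcr >> 24) & 0x3;   /* TCR[FP] */
    unsigned fpext = (tcr >> 13) & 0xf;   /* TCR[FPEXT], e500 only */

    return 63 - (fp | (fpext << 2));
}

int main(void)
{
    /* FP = 3, FPEXT = 15 -> selector 63 -> bit 0, i.e. a period of 2 TB ticks */
    uint32_t tcr = (3u << 24) | (15u << 13);

    printf("FIT fires when TB bit %u toggles 0->1\n", e500_fit_target_bit(tcr));
    return 0;
}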
+
+/* Return the location of the bit of time base at which the WDT will raise an
+ interrupt */
+static uint8_t booke_get_wdt_target(CPUPPCState *env, ppc_tb_t *tb_env)
+{
+ uint8_t wp = (env->spr[SPR_BOOKE_TCR] & TCR_WP_MASK) >> TCR_WP_SHIFT;
+
+ if (tb_env->flags & PPC_TIMER_E500) {
+ /* e500 Watchdog timer period extension */
+ uint32_t wpext = (env->spr[SPR_BOOKE_TCR] & TCR_E500_WPEXT_MASK)
+ >> TCR_E500_WPEXT_SHIFT;
+ wp = 63 - (wp | wpext << 2);
+ } else {
+ wp = env->wdt_period[wp];
+ }
+
+ return wp;
+}
+
+static void booke_update_fixed_timer(CPUPPCState *env,
+ uint8_t target_bit,
+ uint64_t *next,
+ QEMUTimer *timer,
+ int tsr_bit)
+{
+ ppc_tb_t *tb_env = env->tb_env;
+ uint64_t delta_tick, ticks = 0;
+ uint64_t tb;
+ uint64_t period;
+ uint64_t now;
+
+ if (!(env->spr[SPR_BOOKE_TSR] & tsr_bit)) {
+ /*
+ * Don't arm the timer again when the guest has the current
+ * interrupt still pending. Wait for it to ack it.
+ */
+ return;
+ }
+
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ tb = cpu_ppc_get_tb(tb_env, now, tb_env->tb_offset);
+ period = 1ULL << target_bit;
+ delta_tick = period - (tb & (period - 1));
+
+ /* the timer triggers only when the selected bit toggles from 0 to 1 */
+ if (tb & period) {
+ ticks = period;
+ }
+
+ if (ticks + delta_tick < ticks) {
+ /* Overflow, so assume the biggest number we can express. */
+ ticks = UINT64_MAX;
+ } else {
+ ticks += delta_tick;
+ }
+
+ *next = now + muldiv64(ticks, get_ticks_per_sec(), tb_env->tb_freq);
+ if ((*next < now) || (*next > INT64_MAX)) {
+ /* Overflow, so assume the biggest number the qemu timer supports. */
+ *next = INT64_MAX;
+ }
+
+    /* XXX: If the expire time is now, we can't run the callback because we
+     * don't have access to it. So we just set the timer one nanosecond later.
+     */
+
+ if (*next == now) {
+ (*next)++;
+ } else {
+        /*
+         * There's no point in faking any granularity finer than milliseconds;
+         * anything beyond that just overloads the system.
+         */
+ *next = MAX(*next, now + SCALE_MS);
+ }
+
+ /* Fire the next timer */
+ timer_mod(timer, *next);
+}
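/*
 * A minimal standalone sketch, not part of the QEMU code above: the core of
 * booke_update_fixed_timer() is "how many TB ticks until the selected bit
 * next toggles from 0 to 1".  The same arithmetic, isolated and spot-checked.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t ticks_to_next_toggle(uint64_t tb, unsigned target_bit)
{
    uint64_t period = 1ULL << target_bit;
    uint64_t ticks = (tb & period) ? period : 0; /* bit already set: wait a full extra period */

    return ticks + (period - (tb & (period - 1)));
}

int main(void)
{
    /* target_bit = 3 (period 8): at tb = 5 the bit is clear and sets at tb = 8,
     * i.e. 3 ticks away; at tb = 13 the bit is set, clears at 16 and sets again
     * at 24, i.e. 11 ticks away. */
    printf("%llu %llu\n",
           (unsigned long long)ticks_to_next_toggle(5, 3),
           (unsigned long long)ticks_to_next_toggle(13, 3));
    return 0;
}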
+
+static void booke_decr_cb(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+
+ env->spr[SPR_BOOKE_TSR] |= TSR_DIS;
+ booke_update_irq(cpu);
+
+ if (env->spr[SPR_BOOKE_TCR] & TCR_ARE) {
+ /* Auto Reload */
+ cpu_ppc_store_decr(env, env->spr[SPR_BOOKE_DECAR]);
+ }
+}
+
+static void booke_fit_cb(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ ppc_tb_t *tb_env;
+ booke_timer_t *booke_timer;
+
+ tb_env = env->tb_env;
+ booke_timer = tb_env->opaque;
+ env->spr[SPR_BOOKE_TSR] |= TSR_FIS;
+
+ booke_update_irq(cpu);
+
+ booke_update_fixed_timer(env,
+ booke_get_fit_target(env, tb_env),
+ &booke_timer->fit_next,
+ booke_timer->fit_timer,
+ TSR_FIS);
+}
+
+static void booke_wdt_cb(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ ppc_tb_t *tb_env;
+ booke_timer_t *booke_timer;
+
+ tb_env = env->tb_env;
+ booke_timer = tb_env->opaque;
+
+ /* TODO: There's lots of complicated stuff to do here */
+
+ booke_update_irq(cpu);
+
+ booke_update_fixed_timer(env,
+ booke_get_wdt_target(env, tb_env),
+ &booke_timer->wdt_next,
+ booke_timer->wdt_timer,
+ TSR_WIS);
+}
+
+void store_booke_tsr(CPUPPCState *env, target_ulong val)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ ppc_tb_t *tb_env = env->tb_env;
+ booke_timer_t *booke_timer = tb_env->opaque;
+
+ env->spr[SPR_BOOKE_TSR] &= ~val;
+ kvmppc_clear_tsr_bits(cpu, val);
+
+ if (val & TSR_FIS) {
+ booke_update_fixed_timer(env,
+ booke_get_fit_target(env, tb_env),
+ &booke_timer->fit_next,
+ booke_timer->fit_timer,
+ TSR_FIS);
+ }
+
+ if (val & TSR_WIS) {
+ booke_update_fixed_timer(env,
+ booke_get_wdt_target(env, tb_env),
+ &booke_timer->wdt_next,
+ booke_timer->wdt_timer,
+ TSR_WIS);
+ }
+
+ booke_update_irq(cpu);
+}
+
+void store_booke_tcr(CPUPPCState *env, target_ulong val)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ ppc_tb_t *tb_env = env->tb_env;
+ booke_timer_t *booke_timer = tb_env->opaque;
+
+ env->spr[SPR_BOOKE_TCR] = val;
+ kvmppc_set_tcr(cpu);
+
+ booke_update_irq(cpu);
+
+ booke_update_fixed_timer(env,
+ booke_get_fit_target(env, tb_env),
+ &booke_timer->fit_next,
+ booke_timer->fit_timer,
+ TSR_FIS);
+
+ booke_update_fixed_timer(env,
+ booke_get_wdt_target(env, tb_env),
+ &booke_timer->wdt_next,
+ booke_timer->wdt_timer,
+ TSR_WIS);
+}
+
+static void ppc_booke_timer_reset_handle(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+
+ store_booke_tcr(env, 0);
+ store_booke_tsr(env, -1);
+}
+
+/*
+ * This function is called whenever the CPU state changes.
+ * The CPU states are defined in "typedef enum RunState".
+ * When the CPU resumes running after a debug halt or a similarly long pause,
+ * the final watchdog expiry may already have passed in the meantime. That
+ * would cause an exit to QEMU and the configured watchdog action would be
+ * taken. To avoid this, we always clear the watchdog state when the state
+ * changes to running.
+ */
+static void cpu_state_change_handler(void *opaque, int running, RunState state)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+
+ if (!running) {
+ return;
+ }
+
+ /*
+ * Clear watchdog interrupt condition by clearing TSR.
+ */
+ store_booke_tsr(env, TSR_ENW | TSR_WIS | TSR_WRS_MASK);
+}
+
+void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags)
+{
+ ppc_tb_t *tb_env;
+ booke_timer_t *booke_timer;
+ int ret = 0;
+
+ tb_env = g_malloc0(sizeof(ppc_tb_t));
+ booke_timer = g_malloc0(sizeof(booke_timer_t));
+
+ cpu->env.tb_env = tb_env;
+ tb_env->flags = flags | PPC_TIMER_BOOKE | PPC_DECR_ZERO_TRIGGERED;
+
+ tb_env->tb_freq = freq;
+ tb_env->decr_freq = freq;
+ tb_env->opaque = booke_timer;
+ tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &booke_decr_cb, cpu);
+
+ booke_timer->fit_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, &booke_fit_cb, cpu);
+ booke_timer->wdt_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, &booke_wdt_cb, cpu);
+
+ ret = kvmppc_booke_watchdog_enable(cpu);
+
+ if (ret) {
+        /* TODO: Start the QEMU emulated watchdog if not running on KVM.
+         * Also start the QEMU emulated watchdog if KVM does not support
+         * an emulated watchdog or it is somehow not enabled (supported but
+         * not enabled would indicate a bug and requires debugging).
+         */
+ }
+
+ qemu_add_vm_change_state_handler(cpu_state_change_handler, cpu);
+
+ qemu_register_reset(ppc_booke_timer_reset_handle, cpu);
+}
diff --git a/src/hw/ppc/ppce500_spin.c b/src/hw/ppc/ppce500_spin.c
new file mode 100644
index 0000000..a99f7b0
--- /dev/null
+++ b/src/hw/ppc/ppce500_spin.c
@@ -0,0 +1,224 @@
+/*
+ * QEMU PowerPC e500v2 ePAPR spinning code
+ *
+ * Copyright (C) 2011 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Alexander Graf, <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * This code is not really a device, but models an interface that usually
+ * firmware takes care of. It's used when QEMU plays the role of firmware.
+ *
+ * Specification:
+ *
+ * https://www.power.org/resources/downloads/Power_ePAPR_APPROVED_v1.1.pdf
+ *
+ */
+
+#include "hw/hw.h"
+#include "sysemu/sysemu.h"
+#include "hw/sysbus.h"
+#include "sysemu/kvm.h"
+
+#define MAX_CPUS 32
+
+typedef struct spin_info {
+ uint64_t addr;
+ uint64_t r3;
+ uint32_t resv;
+ uint32_t pir;
+ uint64_t reserved;
+} QEMU_PACKED SpinInfo;
+
+#define TYPE_E500_SPIN "e500-spin"
+#define E500_SPIN(obj) OBJECT_CHECK(SpinState, (obj), TYPE_E500_SPIN)
+
+typedef struct SpinState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ SpinInfo spin[MAX_CPUS];
+} SpinState;
+
+typedef struct spin_kick {
+ PowerPCCPU *cpu;
+ SpinInfo *spin;
+} SpinKick;
+
+static void spin_reset(void *opaque)
+{
+ SpinState *s = opaque;
+ int i;
+
+ for (i = 0; i < MAX_CPUS; i++) {
+ SpinInfo *info = &s->spin[i];
+
+ stl_p(&info->pir, i);
+ stq_p(&info->r3, i);
+ stq_p(&info->addr, 1);
+ }
+}
+
+/* Create -kernel TLB entries for BookE, linearly spanning 256MB. */
+static inline hwaddr booke206_page_size_to_tlb(uint64_t size)
+{
+ return ctz32(size >> 10) >> 1;
+}
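/*
 * A minimal standalone sketch, not part of the QEMU code above: the TSIZE
 * value computed by booke206_page_size_to_tlb() encodes the page size as
 * 4^TSIZE KiB, which is why the conversion is ctz(size_in_KiB) / 2.
 */
#include <assert.h>
#include <stdint.h>

static unsigned page_size_to_tsize(uint64_t size)
{
    return __builtin_ctzll(size >> 10) >> 1;
}

int main(void)
{
    assert(page_size_to_tsize(64ULL << 20) == 8);    /* 64 MiB  = 4^8 KiB */
    assert(page_size_to_tsize(256ULL << 20) == 9);   /* 256 MiB = 4^9 KiB */
    return 0;
}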
+
+static void mmubooke_create_initial_mapping(CPUPPCState *env,
+ target_ulong va,
+ hwaddr pa,
+ hwaddr len)
+{
+ ppcmas_tlb_t *tlb = booke206_get_tlbm(env, 1, 0, 1);
+ hwaddr size;
+
+ size = (booke206_page_size_to_tlb(len) << MAS1_TSIZE_SHIFT);
+ tlb->mas1 = MAS1_VALID | size;
+ tlb->mas2 = (va & TARGET_PAGE_MASK) | MAS2_M;
+ tlb->mas7_3 = pa & TARGET_PAGE_MASK;
+ tlb->mas7_3 |= MAS3_UR | MAS3_UW | MAS3_UX | MAS3_SR | MAS3_SW | MAS3_SX;
+ env->tlb_dirty = true;
+}
+
+static void spin_kick(void *data)
+{
+ SpinKick *kick = data;
+ CPUState *cpu = CPU(kick->cpu);
+ CPUPPCState *env = &kick->cpu->env;
+ SpinInfo *curspin = kick->spin;
+ hwaddr map_size = 64 * 1024 * 1024;
+ hwaddr map_start;
+
+ cpu_synchronize_state(cpu);
+ stl_p(&curspin->pir, env->spr[SPR_PIR]);
+ env->nip = ldq_p(&curspin->addr) & (map_size - 1);
+ env->gpr[3] = ldq_p(&curspin->r3);
+ env->gpr[4] = 0;
+ env->gpr[5] = 0;
+ env->gpr[6] = 0;
+ env->gpr[7] = map_size;
+ env->gpr[8] = 0;
+ env->gpr[9] = 0;
+
+ map_start = ldq_p(&curspin->addr) & ~(map_size - 1);
+ mmubooke_create_initial_mapping(env, 0, map_start, map_size);
+
+ cpu->halted = 0;
+ cpu->exception_index = -1;
+ cpu->stopped = false;
+ qemu_cpu_kick(cpu);
+}
+
+static void spin_write(void *opaque, hwaddr addr, uint64_t value,
+ unsigned len)
+{
+ SpinState *s = opaque;
+ int env_idx = addr / sizeof(SpinInfo);
+ CPUState *cpu;
+ SpinInfo *curspin = &s->spin[env_idx];
+ uint8_t *curspin_p = (uint8_t*)curspin;
+
+ cpu = qemu_get_cpu(env_idx);
+ if (cpu == NULL) {
+ /* Unknown CPU */
+ return;
+ }
+
+ if (cpu->cpu_index == 0) {
+ /* primary CPU doesn't spin */
+ return;
+ }
+
+ curspin_p = &curspin_p[addr % sizeof(SpinInfo)];
+ switch (len) {
+ case 1:
+ stb_p(curspin_p, value);
+ break;
+ case 2:
+ stw_p(curspin_p, value);
+ break;
+ case 4:
+ stl_p(curspin_p, value);
+ break;
+ }
+
+ if (!(ldq_p(&curspin->addr) & 1)) {
+ /* run CPU */
+ SpinKick kick = {
+ .cpu = POWERPC_CPU(cpu),
+ .spin = curspin,
+ };
+
+ run_on_cpu(cpu, spin_kick, &kick);
+ }
+}
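/*
 * A minimal standalone sketch, not part of the QEMU code above: each table
 * entry follows the ePAPR spin-table layout modelled by SpinInfo.  A released
 * CPU starts at `addr` with `r3` in GPR3, and keeps spinning as long as bit 0
 * of `addr` is set -- which is why spin_reset() initialises addr to 1 and
 * spin_write() only kicks the CPU once it sees bit 0 cleared.  The struct
 * below re-derives the field offsets that layout implies.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct spin_entry {
    uint64_t addr;      /* entry point; bit 0 set means "keep spinning" */
    uint64_t r3;        /* value handed to the released CPU in GPR3 */
    uint32_t resv;
    uint32_t pir;       /* processor id, filled in by spin_reset()/spin_kick() */
    uint64_t reserved;
} __attribute__((packed));

int main(void)
{
    assert(offsetof(struct spin_entry, r3)  == 8);
    assert(offsetof(struct spin_entry, pir) == 20);
    assert(sizeof(struct spin_entry)        == 32);
    return 0;
}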
+
+static uint64_t spin_read(void *opaque, hwaddr addr, unsigned len)
+{
+ SpinState *s = opaque;
+ uint8_t *spin_p = &((uint8_t*)s->spin)[addr];
+
+ switch (len) {
+ case 1:
+ return ldub_p(spin_p);
+ case 2:
+ return lduw_p(spin_p);
+ case 4:
+ return ldl_p(spin_p);
+ default:
+ hw_error("ppce500: unexpected %s with len = %u", __func__, len);
+ }
+}
+
+static const MemoryRegionOps spin_rw_ops = {
+ .read = spin_read,
+ .write = spin_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static int ppce500_spin_initfn(SysBusDevice *dev)
+{
+ SpinState *s = E500_SPIN(dev);
+
+ memory_region_init_io(&s->iomem, OBJECT(s), &spin_rw_ops, s,
+ "e500 spin pv device", sizeof(SpinInfo) * MAX_CPUS);
+ sysbus_init_mmio(dev, &s->iomem);
+
+ qemu_register_reset(spin_reset, s);
+
+ return 0;
+}
+
+static void ppce500_spin_class_init(ObjectClass *klass, void *data)
+{
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+ k->init = ppce500_spin_initfn;
+}
+
+static const TypeInfo ppce500_spin_info = {
+ .name = TYPE_E500_SPIN,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SpinState),
+ .class_init = ppce500_spin_class_init,
+};
+
+static void ppce500_spin_register_types(void)
+{
+ type_register_static(&ppce500_spin_info);
+}
+
+type_init(ppce500_spin_register_types)
diff --git a/src/hw/ppc/prep.c b/src/hw/ppc/prep.c
new file mode 100644
index 0000000..5ad28f7
--- /dev/null
+++ b/src/hw/ppc/prep.c
@@ -0,0 +1,674 @@
+/*
+ * QEMU PPC PREP hardware System Emulator
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/timer/m48t59.h"
+#include "hw/i386/pc.h"
+#include "hw/char/serial.h"
+#include "hw/block/fdc.h"
+#include "net/net.h"
+#include "sysemu/sysemu.h"
+#include "hw/isa/isa.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/pci_host.h"
+#include "hw/ppc/ppc.h"
+#include "hw/boards.h"
+#include "qemu/log.h"
+#include "hw/ide.h"
+#include "hw/loader.h"
+#include "hw/timer/mc146818rtc.h"
+#include "hw/isa/pc87312.h"
+#include "sysemu/block-backend.h"
+#include "sysemu/arch_init.h"
+#include "sysemu/qtest.h"
+#include "exec/address-spaces.h"
+#include "trace.h"
+#include "elf.h"
+
+/* SMP is not enabled, for now */
+#define MAX_CPUS 1
+
+#define MAX_IDE_BUS 2
+
+#define BIOS_SIZE (1024 * 1024)
+#define BIOS_FILENAME "ppc_rom.bin"
+#define KERNEL_LOAD_ADDR 0x01000000
+#define INITRD_LOAD_ADDR 0x01800000
+
+/* Constants for devices init */
+static const int ide_iobase[2] = { 0x1f0, 0x170 };
+static const int ide_iobase2[2] = { 0x3f6, 0x376 };
+static const int ide_irq[2] = { 13, 13 };
+
+#define NE2000_NB_MAX 6
+
+static uint32_t ne2000_io[NE2000_NB_MAX] = { 0x300, 0x320, 0x340, 0x360, 0x280, 0x380 };
+static int ne2000_irq[NE2000_NB_MAX] = { 9, 10, 11, 3, 4, 5 };
+
+/* ISA IO ports bridge */
+#define PPC_IO_BASE 0x80000000
+
+/* PowerPC control and status registers */
+#if 0 // Not used
+static struct {
+ /* IDs */
+ uint32_t veni_devi;
+ uint32_t revi;
+ /* Control and status */
+ uint32_t gcsr;
+ uint32_t xcfr;
+ uint32_t ct32;
+ uint32_t mcsr;
+ /* General purpose registers */
+ uint32_t gprg[6];
+ /* Exceptions */
+ uint32_t feen;
+ uint32_t fest;
+ uint32_t fema;
+ uint32_t fecl;
+ uint32_t eeen;
+ uint32_t eest;
+ uint32_t eecl;
+ uint32_t eeint;
+ uint32_t eemck0;
+ uint32_t eemck1;
+ /* Error diagnostic */
+} XCSR;
+
+static void PPC_XCSR_writeb (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+ printf("%s: 0x" TARGET_FMT_plx " => 0x%08" PRIx32 "\n", __func__, addr,
+ value);
+}
+
+static void PPC_XCSR_writew (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+ printf("%s: 0x" TARGET_FMT_plx " => 0x%08" PRIx32 "\n", __func__, addr,
+ value);
+}
+
+static void PPC_XCSR_writel (void *opaque,
+ hwaddr addr, uint32_t value)
+{
+ printf("%s: 0x" TARGET_FMT_plx " => 0x%08" PRIx32 "\n", __func__, addr,
+ value);
+}
+
+static uint32_t PPC_XCSR_readb (void *opaque, hwaddr addr)
+{
+ uint32_t retval = 0;
+
+ printf("%s: 0x" TARGET_FMT_plx " <= %08" PRIx32 "\n", __func__, addr,
+ retval);
+
+ return retval;
+}
+
+static uint32_t PPC_XCSR_readw (void *opaque, hwaddr addr)
+{
+ uint32_t retval = 0;
+
+ printf("%s: 0x" TARGET_FMT_plx " <= %08" PRIx32 "\n", __func__, addr,
+ retval);
+
+ return retval;
+}
+
+static uint32_t PPC_XCSR_readl (void *opaque, hwaddr addr)
+{
+ uint32_t retval = 0;
+
+ printf("%s: 0x" TARGET_FMT_plx " <= %08" PRIx32 "\n", __func__, addr,
+ retval);
+
+ return retval;
+}
+
+static const MemoryRegionOps PPC_XCSR_ops = {
+ .old_mmio = {
+ .read = { PPC_XCSR_readb, PPC_XCSR_readw, PPC_XCSR_readl, },
+ .write = { PPC_XCSR_writeb, PPC_XCSR_writew, PPC_XCSR_writel, },
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+#endif
+
+/* Fake super-io ports for PREP platform (Intel 82378ZB) */
+typedef struct sysctrl_t {
+ qemu_irq reset_irq;
+ Nvram *nvram;
+ uint8_t state;
+ uint8_t syscontrol;
+ int contiguous_map;
+ qemu_irq contiguous_map_irq;
+ int endian;
+} sysctrl_t;
+
+enum {
+ STATE_HARDFILE = 0x01,
+};
+
+static sysctrl_t *sysctrl;
+
+static void PREP_io_800_writeb (void *opaque, uint32_t addr, uint32_t val)
+{
+ sysctrl_t *sysctrl = opaque;
+
+ trace_prep_io_800_writeb(addr - PPC_IO_BASE, val);
+ switch (addr) {
+ case 0x0092:
+ /* Special port 92 */
+ /* Check soft reset asked */
+ if (val & 0x01) {
+ qemu_irq_raise(sysctrl->reset_irq);
+ } else {
+ qemu_irq_lower(sysctrl->reset_irq);
+ }
+ /* Check LE mode */
+ if (val & 0x02) {
+ sysctrl->endian = 1;
+ } else {
+ sysctrl->endian = 0;
+ }
+ break;
+ case 0x0800:
+ /* Motorola CPU configuration register : read-only */
+ break;
+ case 0x0802:
+ /* Motorola base module feature register : read-only */
+ break;
+ case 0x0803:
+ /* Motorola base module status register : read-only */
+ break;
+ case 0x0808:
+ /* Hardfile light register */
+ if (val & 1)
+ sysctrl->state |= STATE_HARDFILE;
+ else
+ sysctrl->state &= ~STATE_HARDFILE;
+ break;
+ case 0x0810:
+ /* Password protect 1 register */
+ if (sysctrl->nvram != NULL) {
+ NvramClass *k = NVRAM_GET_CLASS(sysctrl->nvram);
+ (k->toggle_lock)(sysctrl->nvram, 1);
+ }
+ break;
+ case 0x0812:
+ /* Password protect 2 register */
+ if (sysctrl->nvram != NULL) {
+ NvramClass *k = NVRAM_GET_CLASS(sysctrl->nvram);
+ (k->toggle_lock)(sysctrl->nvram, 2);
+ }
+ break;
+ case 0x0814:
+ /* L2 invalidate register */
+ // tlb_flush(first_cpu, 1);
+ break;
+ case 0x081C:
+ /* system control register */
+ sysctrl->syscontrol = val & 0x0F;
+ break;
+ case 0x0850:
+ /* I/O map type register */
+ sysctrl->contiguous_map = val & 0x01;
+ qemu_set_irq(sysctrl->contiguous_map_irq, sysctrl->contiguous_map);
+ break;
+ default:
+ printf("ERROR: unaffected IO port write: %04" PRIx32
+ " => %02" PRIx32"\n", addr, val);
+ break;
+ }
+}
+
+static uint32_t PREP_io_800_readb (void *opaque, uint32_t addr)
+{
+ sysctrl_t *sysctrl = opaque;
+ uint32_t retval = 0xFF;
+
+ switch (addr) {
+ case 0x0092:
+ /* Special port 92 */
+ retval = sysctrl->endian << 1;
+ break;
+ case 0x0800:
+ /* Motorola CPU configuration register */
+ retval = 0xEF; /* MPC750 */
+ break;
+ case 0x0802:
+ /* Motorola Base module feature register */
+        retval = 0xAD; /* No ESCC, no PMC slot, no ethernet */
+ break;
+ case 0x0803:
+ /* Motorola base module status register */
+ retval = 0xE0; /* Standard MPC750 */
+ break;
+ case 0x080C:
+ /* Equipment present register:
+ * no L2 cache
+ * no upgrade processor
+ * no cards in PCI slots
+ * SCSI fuse is bad
+ */
+ retval = 0x3C;
+ break;
+ case 0x0810:
+ /* Motorola base module extended feature register */
+ retval = 0x39; /* No USB, CF and PCI bridge. NVRAM present */
+ break;
+ case 0x0814:
+ /* L2 invalidate: don't care */
+ break;
+ case 0x0818:
+ /* Keylock */
+ retval = 0x00;
+ break;
+ case 0x081C:
+ /* system control register
+ * 7 - 6 / 1 - 0: L2 cache enable
+ */
+ retval = sysctrl->syscontrol;
+ break;
+ case 0x0823:
+ /* */
+ retval = 0x03; /* no L2 cache */
+ break;
+ case 0x0850:
+ /* I/O map type register */
+ retval = sysctrl->contiguous_map;
+ break;
+ default:
+ printf("ERROR: unaffected IO port: %04" PRIx32 " read\n", addr);
+ break;
+ }
+ trace_prep_io_800_readb(addr - PPC_IO_BASE, retval);
+
+ return retval;
+}
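/*
 * A minimal standalone sketch, not part of the QEMU code above: port 0x0092
 * as handled by the two functions above -- writing bit 0 pulls the soft-reset
 * line, bit 1 selects little-endian mode, and reads return the endian flag
 * shifted back into bit 1.  A tiny model of that register (reset handling
 * omitted):
 */
#include <assert.h>
#include <stdint.h>

static int endian_flag;

static void port92_write(uint8_t val)
{
    endian_flag = (val >> 1) & 1;   /* bit 0 would toggle the reset IRQ */
}

static uint8_t port92_read(void)
{
    return endian_flag << 1;
}

int main(void)
{
    port92_write(0x02);
    assert(port92_read() == 0x02);
    port92_write(0x00);
    assert(port92_read() == 0x00);
    return 0;
}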
+
+
+#define NVRAM_SIZE 0x2000
+
+static void ppc_prep_reset(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ cpu_reset(CPU(cpu));
+}
+
+static const MemoryRegionPortio prep_portio_list[] = {
+ /* System control ports */
+ { 0x0092, 1, 1, .read = PREP_io_800_readb, .write = PREP_io_800_writeb, },
+ { 0x0800, 0x52, 1,
+ .read = PREP_io_800_readb, .write = PREP_io_800_writeb, },
+ /* Special port to get debug messages from Open-Firmware */
+ { 0x0F00, 4, 1, .write = PPC_debug_write, },
+ PORTIO_END_OF_LIST(),
+};
+
+static PortioList prep_port_list;
+
+/*****************************************************************************/
+/* NVRAM helpers */
+static inline uint32_t nvram_read(Nvram *nvram, uint32_t addr)
+{
+ NvramClass *k = NVRAM_GET_CLASS(sysctrl->nvram);
+ return (k->read)(nvram, addr);
+}
+
+static inline void nvram_write(Nvram *nvram, uint32_t addr, uint32_t val)
+{
+ NvramClass *k = NVRAM_GET_CLASS(sysctrl->nvram);
+ (k->write)(nvram, addr, val);
+}
+
+static void NVRAM_set_byte(Nvram *nvram, uint32_t addr, uint8_t value)
+{
+ nvram_write(nvram, addr, value);
+}
+
+static uint8_t NVRAM_get_byte(Nvram *nvram, uint32_t addr)
+{
+ return nvram_read(nvram, addr);
+}
+
+static void NVRAM_set_word(Nvram *nvram, uint32_t addr, uint16_t value)
+{
+ nvram_write(nvram, addr, value >> 8);
+ nvram_write(nvram, addr + 1, value & 0xFF);
+}
+
+static uint16_t NVRAM_get_word(Nvram *nvram, uint32_t addr)
+{
+ uint16_t tmp;
+
+ tmp = nvram_read(nvram, addr) << 8;
+ tmp |= nvram_read(nvram, addr + 1);
+
+ return tmp;
+}
+
+static void NVRAM_set_lword(Nvram *nvram, uint32_t addr, uint32_t value)
+{
+ nvram_write(nvram, addr, value >> 24);
+ nvram_write(nvram, addr + 1, (value >> 16) & 0xFF);
+ nvram_write(nvram, addr + 2, (value >> 8) & 0xFF);
+ nvram_write(nvram, addr + 3, value & 0xFF);
+}
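/*
 * A minimal standalone sketch, not part of the QEMU code above: the helpers
 * above store multi-byte values big-endian, one byte per NVRAM cell, e.g.
 * NVRAM_set_word(nvram, 4, 0x1234) leaves 0x12 at offset 4 and 0x34 at
 * offset 5.  The same convention modelled over a plain byte array:
 */
#include <assert.h>
#include <stdint.h>

static uint8_t nv[16];

static void set_word(uint32_t addr, uint16_t value)
{
    nv[addr]     = value >> 8;
    nv[addr + 1] = value & 0xFF;
}

static uint16_t get_word(uint32_t addr)
{
    return (nv[addr] << 8) | nv[addr + 1];
}

int main(void)
{
    set_word(4, 0x1234);
    assert(nv[4] == 0x12 && nv[5] == 0x34);
    assert(get_word(4) == 0x1234);
    return 0;
}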
+
+static void NVRAM_set_string(Nvram *nvram, uint32_t addr, const char *str,
+ uint32_t max)
+{
+ int i;
+
+ for (i = 0; i < max && str[i] != '\0'; i++) {
+ nvram_write(nvram, addr + i, str[i]);
+ }
+ nvram_write(nvram, addr + i, str[i]);
+ nvram_write(nvram, addr + max - 1, '\0');
+}
+
+static uint16_t NVRAM_crc_update (uint16_t prev, uint16_t value)
+{
+ uint16_t tmp;
+ uint16_t pd, pd1, pd2;
+
+ tmp = prev >> 8;
+ pd = prev ^ value;
+ pd1 = pd & 0x000F;
+ pd2 = ((pd >> 4) & 0x000F) ^ pd1;
+ tmp ^= (pd1 << 3) | (pd1 << 8);
+ tmp ^= pd2 | (pd2 << 7) | (pd2 << 12);
+
+ return tmp;
+}
+
+static uint16_t NVRAM_compute_crc (Nvram *nvram, uint32_t start, uint32_t count)
+{
+ uint32_t i;
+ uint16_t crc = 0xFFFF;
+ int odd;
+
+ odd = count & 1;
+ count &= ~1;
+ for (i = 0; i != count; i++) {
+ crc = NVRAM_crc_update(crc, NVRAM_get_word(nvram, start + i));
+ }
+ if (odd) {
+ crc = NVRAM_crc_update(crc, NVRAM_get_byte(nvram, start + i) << 8);
+ }
+
+ return crc;
+}
+
+#define CMDLINE_ADDR 0x017ff000
+
+static int PPC_NVRAM_set_params (Nvram *nvram, uint16_t NVRAM_size,
+ const char *arch,
+ uint32_t RAM_size, int boot_device,
+ uint32_t kernel_image, uint32_t kernel_size,
+ const char *cmdline,
+ uint32_t initrd_image, uint32_t initrd_size,
+ uint32_t NVRAM_image,
+ int width, int height, int depth)
+{
+ uint16_t crc;
+
+ /* Set parameters for Open Hack'Ware BIOS */
+ NVRAM_set_string(nvram, 0x00, "QEMU_BIOS", 16);
+ NVRAM_set_lword(nvram, 0x10, 0x00000002); /* structure v2 */
+ NVRAM_set_word(nvram, 0x14, NVRAM_size);
+ NVRAM_set_string(nvram, 0x20, arch, 16);
+ NVRAM_set_lword(nvram, 0x30, RAM_size);
+ NVRAM_set_byte(nvram, 0x34, boot_device);
+ NVRAM_set_lword(nvram, 0x38, kernel_image);
+ NVRAM_set_lword(nvram, 0x3C, kernel_size);
+ if (cmdline) {
+ /* XXX: put the cmdline in NVRAM too ? */
+ pstrcpy_targphys("cmdline", CMDLINE_ADDR, RAM_size - CMDLINE_ADDR,
+ cmdline);
+ NVRAM_set_lword(nvram, 0x40, CMDLINE_ADDR);
+ NVRAM_set_lword(nvram, 0x44, strlen(cmdline));
+ } else {
+ NVRAM_set_lword(nvram, 0x40, 0);
+ NVRAM_set_lword(nvram, 0x44, 0);
+ }
+ NVRAM_set_lword(nvram, 0x48, initrd_image);
+ NVRAM_set_lword(nvram, 0x4C, initrd_size);
+ NVRAM_set_lword(nvram, 0x50, NVRAM_image);
+
+ NVRAM_set_word(nvram, 0x54, width);
+ NVRAM_set_word(nvram, 0x56, height);
+ NVRAM_set_word(nvram, 0x58, depth);
+ crc = NVRAM_compute_crc(nvram, 0x00, 0xF8);
+ NVRAM_set_word(nvram, 0xFC, crc);
+
+ return 0;
+}
+
+/* PowerPC PREP hardware initialisation */
+static void ppc_prep_init(MachineState *machine)
+{
+ ram_addr_t ram_size = machine->ram_size;
+ const char *kernel_filename = machine->kernel_filename;
+ const char *kernel_cmdline = machine->kernel_cmdline;
+ const char *initrd_filename = machine->initrd_filename;
+ const char *boot_device = machine->boot_order;
+ MemoryRegion *sysmem = get_system_memory();
+ PowerPCCPU *cpu = NULL;
+ CPUPPCState *env = NULL;
+ Nvram *m48t59;
+#if 0
+ MemoryRegion *xcsr = g_new(MemoryRegion, 1);
+#endif
+ int linux_boot, i, nb_nics1;
+ MemoryRegion *ram = g_new(MemoryRegion, 1);
+ uint32_t kernel_base, initrd_base;
+ long kernel_size, initrd_size;
+ DeviceState *dev;
+ PCIHostState *pcihost;
+ PCIBus *pci_bus;
+ PCIDevice *pci;
+ ISABus *isa_bus;
+ ISADevice *isa;
+ int ppc_boot_device;
+ DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS];
+
+ sysctrl = g_malloc0(sizeof(sysctrl_t));
+
+ linux_boot = (kernel_filename != NULL);
+
+ /* init CPUs */
+ if (machine->cpu_model == NULL)
+ machine->cpu_model = "602";
+ for (i = 0; i < smp_cpus; i++) {
+ cpu = cpu_ppc_init(machine->cpu_model);
+ if (cpu == NULL) {
+ fprintf(stderr, "Unable to find PowerPC CPU definition\n");
+ exit(1);
+ }
+ env = &cpu->env;
+
+ if (env->flags & POWERPC_FLAG_RTC_CLK) {
+ /* POWER / PowerPC 601 RTC clock frequency is 7.8125 MHz */
+ cpu_ppc_tb_init(env, 7812500UL);
+ } else {
+ /* Set time-base frequency to 100 Mhz */
+ cpu_ppc_tb_init(env, 100UL * 1000UL * 1000UL);
+ }
+ qemu_register_reset(ppc_prep_reset, cpu);
+ }
+
+ /* allocate RAM */
+ memory_region_allocate_system_memory(ram, NULL, "ppc_prep.ram", ram_size);
+ memory_region_add_subregion(sysmem, 0, ram);
+
+ if (linux_boot) {
+ kernel_base = KERNEL_LOAD_ADDR;
+ /* now we can load the kernel */
+ kernel_size = load_image_targphys(kernel_filename, kernel_base,
+ ram_size - kernel_base);
+ if (kernel_size < 0) {
+ hw_error("qemu: could not load kernel '%s'\n", kernel_filename);
+ exit(1);
+ }
+ /* load initrd */
+ if (initrd_filename) {
+ initrd_base = INITRD_LOAD_ADDR;
+ initrd_size = load_image_targphys(initrd_filename, initrd_base,
+ ram_size - initrd_base);
+ if (initrd_size < 0) {
+ hw_error("qemu: could not load initial ram disk '%s'\n",
+ initrd_filename);
+ }
+ } else {
+ initrd_base = 0;
+ initrd_size = 0;
+ }
+ ppc_boot_device = 'm';
+ } else {
+ kernel_base = 0;
+ kernel_size = 0;
+ initrd_base = 0;
+ initrd_size = 0;
+ ppc_boot_device = '\0';
+ /* For now, OHW cannot boot from the network. */
+ for (i = 0; boot_device[i] != '\0'; i++) {
+ if (boot_device[i] >= 'a' && boot_device[i] <= 'f') {
+ ppc_boot_device = boot_device[i];
+ break;
+ }
+ }
+ if (ppc_boot_device == '\0') {
+ fprintf(stderr, "No valid boot device for Mac99 machine\n");
+ exit(1);
+ }
+ }
+
+ if (PPC_INPUT(env) != PPC_FLAGS_INPUT_6xx) {
+ hw_error("Only 6xx bus is supported on PREP machine\n");
+ }
+
+ dev = qdev_create(NULL, "raven-pcihost");
+ if (bios_name == NULL) {
+ bios_name = BIOS_FILENAME;
+ }
+ qdev_prop_set_string(dev, "bios-name", bios_name);
+ qdev_prop_set_uint32(dev, "elf-machine", PPC_ELF_MACHINE);
+ pcihost = PCI_HOST_BRIDGE(dev);
+ object_property_add_child(qdev_get_machine(), "raven", OBJECT(dev), NULL);
+ qdev_init_nofail(dev);
+ pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci.0");
+ if (pci_bus == NULL) {
+ fprintf(stderr, "Couldn't create PCI host controller.\n");
+ exit(1);
+ }
+ sysctrl->contiguous_map_irq = qdev_get_gpio_in(dev, 0);
+
+ /* PCI -> ISA bridge */
+ pci = pci_create_simple(pci_bus, PCI_DEVFN(1, 0), "i82378");
+ cpu = POWERPC_CPU(first_cpu);
+ qdev_connect_gpio_out(&pci->qdev, 0,
+ cpu->env.irq_inputs[PPC6xx_INPUT_INT]);
+ sysbus_connect_irq(&pcihost->busdev, 0, qdev_get_gpio_in(&pci->qdev, 9));
+ sysbus_connect_irq(&pcihost->busdev, 1, qdev_get_gpio_in(&pci->qdev, 11));
+ sysbus_connect_irq(&pcihost->busdev, 2, qdev_get_gpio_in(&pci->qdev, 9));
+ sysbus_connect_irq(&pcihost->busdev, 3, qdev_get_gpio_in(&pci->qdev, 11));
+ isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(pci), "isa.0"));
+
+ /* Super I/O (parallel + serial ports) */
+ isa = isa_create(isa_bus, TYPE_PC87312);
+ dev = DEVICE(isa);
+ qdev_prop_set_uint8(dev, "config", 13); /* fdc, ser0, ser1, par0 */
+ qdev_init_nofail(dev);
+
+ /* init basic PC hardware */
+ pci_vga_init(pci_bus);
+
+ nb_nics1 = nb_nics;
+ if (nb_nics1 > NE2000_NB_MAX)
+ nb_nics1 = NE2000_NB_MAX;
+ for(i = 0; i < nb_nics1; i++) {
+ if (nd_table[i].model == NULL) {
+ nd_table[i].model = g_strdup("ne2k_isa");
+ }
+ if (strcmp(nd_table[i].model, "ne2k_isa") == 0) {
+ isa_ne2000_init(isa_bus, ne2000_io[i], ne2000_irq[i],
+ &nd_table[i]);
+ } else {
+ pci_nic_init_nofail(&nd_table[i], pci_bus, "ne2k_pci", NULL);
+ }
+ }
+
+ ide_drive_get(hd, ARRAY_SIZE(hd));
+ for(i = 0; i < MAX_IDE_BUS; i++) {
+ isa_ide_init(isa_bus, ide_iobase[i], ide_iobase2[i], ide_irq[i],
+ hd[2 * i],
+ hd[2 * i + 1]);
+ }
+ isa_create_simple(isa_bus, "i8042");
+
+ cpu = POWERPC_CPU(first_cpu);
+ sysctrl->reset_irq = cpu->env.irq_inputs[PPC6xx_INPUT_HRESET];
+
+ portio_list_init(&prep_port_list, NULL, prep_portio_list, sysctrl, "prep");
+ portio_list_add(&prep_port_list, isa_address_space_io(isa), 0x0);
+
+ /* PowerPC control and status register group */
+#if 0
+ memory_region_init_io(xcsr, NULL, &PPC_XCSR_ops, NULL, "ppc-xcsr", 0x1000);
+ memory_region_add_subregion(sysmem, 0xFEFF0000, xcsr);
+#endif
+
+ if (usb_enabled()) {
+ pci_create_simple(pci_bus, -1, "pci-ohci");
+ }
+
+ m48t59 = m48t59_init_isa(isa_bus, 0x0074, NVRAM_SIZE, 2000, 59);
+ if (m48t59 == NULL)
+ return;
+ sysctrl->nvram = m48t59;
+
+ /* Initialise NVRAM */
+ PPC_NVRAM_set_params(m48t59, NVRAM_SIZE, "PREP", ram_size,
+ ppc_boot_device,
+ kernel_base, kernel_size,
+ kernel_cmdline,
+ initrd_base, initrd_size,
+ /* XXX: need an option to load a NVRAM image */
+ 0,
+ graphic_width, graphic_height, graphic_depth);
+}
+
+static void prep_machine_init(MachineClass *mc)
+{
+ mc->desc = "PowerPC PREP platform";
+ mc->init = ppc_prep_init;
+ mc->max_cpus = MAX_CPUS;
+ mc->default_boot_order = "cad";
+}
+
+DEFINE_MACHINE("prep", prep_machine_init)
diff --git a/src/hw/ppc/spapr.c b/src/hw/ppc/spapr.c
new file mode 100644
index 0000000..ff1537a
--- /dev/null
+++ b/src/hw/ppc/spapr.c
@@ -0,0 +1,2464 @@
+/*
+ * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
+ *
+ * Copyright (c) 2004-2007 Fabrice Bellard
+ * Copyright (c) 2007 Jocelyn Mayer
+ * Copyright (c) 2010 David Gibson, IBM Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+#include "sysemu/sysemu.h"
+#include "sysemu/numa.h"
+#include "hw/hw.h"
+#include "hw/fw-path-provider.h"
+#include "elf.h"
+#include "net/net.h"
+#include "sysemu/device_tree.h"
+#include "sysemu/block-backend.h"
+#include "sysemu/cpus.h"
+#include "sysemu/kvm.h"
+#include "sysemu/device_tree.h"
+#include "kvm_ppc.h"
+#include "migration/migration.h"
+#include "mmu-hash64.h"
+#include "qom/cpu.h"
+
+#include "hw/boards.h"
+#include "hw/ppc/ppc.h"
+#include "hw/loader.h"
+
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_vio.h"
+#include "hw/pci-host/spapr.h"
+#include "hw/ppc/xics.h"
+#include "hw/pci/msi.h"
+
+#include "hw/pci/pci.h"
+#include "hw/scsi/scsi.h"
+#include "hw/virtio/virtio-scsi.h"
+
+#include "exec/address-spaces.h"
+#include "hw/usb.h"
+#include "qemu/config-file.h"
+#include "qemu/error-report.h"
+#include "trace.h"
+#include "hw/nmi.h"
+
+#include "hw/compat.h"
+#include "qemu-common.h"
+
+#include <libfdt.h>
+
+/* SLOF memory layout:
+ *
+ * SLOF raw image loaded at 0, copies its romfs right below the flat
+ * device-tree, then positions SLOF itself 31M below that
+ *
+ * So we set FW_OVERHEAD to 40MB which should account for all of that
+ * and more
+ *
+ * We load our kernel at 4M, leaving space for SLOF initial image
+ */
+#define FDT_MAX_SIZE 0x100000
+#define RTAS_MAX_SIZE 0x10000
+#define RTAS_MAX_ADDR 0x80000000 /* RTAS must stay below that */
+#define FW_MAX_SIZE 0x400000
+#define FW_FILE_NAME "slof.bin"
+#define FW_OVERHEAD 0x2800000
+#define KERNEL_LOAD_ADDR FW_MAX_SIZE
+
+#define MIN_RMA_SLOF 128UL
+
+#define TIMEBASE_FREQ 512000000ULL
+
+#define PHANDLE_XICP 0x00001111
+
+#define HTAB_SIZE(spapr) (1ULL << ((spapr)->htab_shift))
+
+static XICSState *try_create_xics(const char *type, int nr_servers,
+ int nr_irqs, Error **errp)
+{
+ Error *err = NULL;
+ DeviceState *dev;
+
+ dev = qdev_create(NULL, type);
+ qdev_prop_set_uint32(dev, "nr_servers", nr_servers);
+ qdev_prop_set_uint32(dev, "nr_irqs", nr_irqs);
+ object_property_set_bool(OBJECT(dev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ object_unparent(OBJECT(dev));
+ return NULL;
+ }
+ return XICS_COMMON(dev);
+}
+
+static XICSState *xics_system_init(MachineState *machine,
+ int nr_servers, int nr_irqs)
+{
+ XICSState *icp = NULL;
+
+ if (kvm_enabled()) {
+ Error *err = NULL;
+
+ if (machine_kernel_irqchip_allowed(machine)) {
+ icp = try_create_xics(TYPE_KVM_XICS, nr_servers, nr_irqs, &err);
+ }
+ if (machine_kernel_irqchip_required(machine) && !icp) {
+ error_report("kernel_irqchip requested but unavailable: %s",
+ error_get_pretty(err));
+ }
+ error_free(err);
+ }
+
+ if (!icp) {
+ icp = try_create_xics(TYPE_XICS, nr_servers, nr_irqs, &error_abort);
+ }
+
+ return icp;
+}
+
+static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
+ int smt_threads)
+{
+ int i, ret = 0;
+ uint32_t servers_prop[smt_threads];
+ uint32_t gservers_prop[smt_threads * 2];
+ int index = ppc_get_vcpu_dt_id(cpu);
+
+ if (cpu->cpu_version) {
+ ret = fdt_setprop_cell(fdt, offset, "cpu-version", cpu->cpu_version);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ /* Build interrupt servers and gservers properties */
+ for (i = 0; i < smt_threads; i++) {
+ servers_prop[i] = cpu_to_be32(index + i);
+ /* Hack, direct the group queues back to cpu 0 */
+ gservers_prop[i*2] = cpu_to_be32(index + i);
+ gservers_prop[i*2 + 1] = 0;
+ }
+ ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-server#s",
+ servers_prop, sizeof(servers_prop));
+ if (ret < 0) {
+ return ret;
+ }
+ ret = fdt_setprop(fdt, offset, "ibm,ppc-interrupt-gserver#s",
+ gservers_prop, sizeof(gservers_prop));
+
+ return ret;
+}
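/*
 * A minimal standalone sketch, not part of the QEMU code above: for a core
 * whose first thread has device-tree id 8 and which exposes 4 SMT threads,
 * the loop above produces (shown host-endian; the real properties are
 * big-endian cells):
 *   ibm,ppc-interrupt-server#s  = <8 9 10 11>
 *   ibm,ppc-interrupt-gserver#s = <8 0 9 0 10 0 11 0>
 * i.e. one server per thread, with every group queue pointed back at 0.
 */
#include <stdio.h>

int main(void)
{
    int index = 8, smt_threads = 4;

    for (int i = 0; i < smt_threads; i++) {
        printf("server %d, gserver pair (%d, 0)\n", index + i, index + i);
    }
    return 0;
}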
+
+static int spapr_fixup_cpu_numa_dt(void *fdt, int offset, CPUState *cs)
+{
+ int ret = 0;
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ int index = ppc_get_vcpu_dt_id(cpu);
+ uint32_t associativity[] = {cpu_to_be32(0x5),
+ cpu_to_be32(0x0),
+ cpu_to_be32(0x0),
+ cpu_to_be32(0x0),
+ cpu_to_be32(cs->numa_node),
+ cpu_to_be32(index)};
+
+ /* Advertise NUMA via ibm,associativity */
+ if (nb_numa_nodes > 1) {
+ ret = fdt_setprop(fdt, offset, "ibm,associativity", associativity,
+ sizeof(associativity));
+ }
+
+ return ret;
+}
+
+static int spapr_fixup_cpu_dt(void *fdt, sPAPRMachineState *spapr)
+{
+ int ret = 0, offset, cpus_offset;
+ CPUState *cs;
+ char cpu_model[32];
+ int smt = kvmppc_smt_threads();
+ uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
+
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ DeviceClass *dc = DEVICE_GET_CLASS(cs);
+ int index = ppc_get_vcpu_dt_id(cpu);
+
+ if ((index % smt) != 0) {
+ continue;
+ }
+
+ snprintf(cpu_model, 32, "%s@%x", dc->fw_name, index);
+
+ cpus_offset = fdt_path_offset(fdt, "/cpus");
+ if (cpus_offset < 0) {
+ cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"),
+ "cpus");
+ if (cpus_offset < 0) {
+ return cpus_offset;
+ }
+ }
+ offset = fdt_subnode_offset(fdt, cpus_offset, cpu_model);
+ if (offset < 0) {
+ offset = fdt_add_subnode(fdt, cpus_offset, cpu_model);
+ if (offset < 0) {
+ return offset;
+ }
+ }
+
+ ret = fdt_setprop(fdt, offset, "ibm,pft-size",
+ pft_size_prop, sizeof(pft_size_prop));
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = spapr_fixup_cpu_numa_dt(fdt, offset, cs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = spapr_fixup_cpu_smt_dt(fdt, offset, cpu,
+ ppc_get_compat_smt_threads(cpu));
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ return ret;
+}
+
+
+static size_t create_page_sizes_prop(CPUPPCState *env, uint32_t *prop,
+ size_t maxsize)
+{
+ size_t maxcells = maxsize / sizeof(uint32_t);
+ int i, j, count;
+ uint32_t *p = prop;
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
+
+ if (!sps->page_shift) {
+ break;
+ }
+ for (count = 0; count < PPC_PAGE_SIZES_MAX_SZ; count++) {
+ if (sps->enc[count].page_shift == 0) {
+ break;
+ }
+ }
+ if ((p - prop) >= (maxcells - 3 - count * 2)) {
+ break;
+ }
+ *(p++) = cpu_to_be32(sps->page_shift);
+ *(p++) = cpu_to_be32(sps->slb_enc);
+ *(p++) = cpu_to_be32(count);
+ for (j = 0; j < count; j++) {
+ *(p++) = cpu_to_be32(sps->enc[j].page_shift);
+ *(p++) = cpu_to_be32(sps->enc[j].pte_enc);
+ }
+ }
+
+ return (p - prop) * sizeof(uint32_t);
+}
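/*
 * A minimal standalone sketch, not part of the QEMU code above: the property
 * built by create_page_sizes_prop() is a flat list of cells, one record per
 * base page size: { base_page_shift, slb_enc, n, { page_shift, pte_enc } * n }.
 * Below is a decoder for that layout, fed with made-up example values and
 * using host-endian cells for readability (the real property is big-endian).
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void dump_segment_page_sizes(const uint32_t *p, size_t ncells)
{
    size_t i = 0;

    while (i + 3 <= ncells) {
        uint32_t shift = p[i], slb_enc = p[i + 1], n = p[i + 2];

        printf("base 2^%u, slb_enc 0x%x, %u encoding(s)\n", shift, slb_enc, n);
        for (uint32_t j = 0; j < n; j++) {
            printf("  page 2^%u -> pte_enc 0x%x\n",
                   p[i + 3 + 2 * j], p[i + 3 + 2 * j + 1]);
        }
        i += 3 + 2 * n;
    }
}

int main(void)
{
    /* hypothetical record: a 4K base segment size with a single 4K encoding */
    uint32_t prop[] = { 12, 0x0, 1, 12, 0x0 };

    dump_segment_page_sizes(prop, sizeof(prop) / sizeof(prop[0]));
    return 0;
}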
+
+static hwaddr spapr_node0_size(void)
+{
+ MachineState *machine = MACHINE(qdev_get_machine());
+
+ if (nb_numa_nodes) {
+ int i;
+ for (i = 0; i < nb_numa_nodes; ++i) {
+ if (numa_info[i].node_mem) {
+ return MIN(pow2floor(numa_info[i].node_mem),
+ machine->ram_size);
+ }
+ }
+ }
+ return machine->ram_size;
+}
+
+#define _FDT(exp) \
+ do { \
+ int ret = (exp); \
+ if (ret < 0) { \
+ fprintf(stderr, "qemu: error creating device tree: %s: %s\n", \
+ #exp, fdt_strerror(ret)); \
+ exit(1); \
+ } \
+ } while (0)
+
+static void add_str(GString *s, const gchar *s1)
+{
+ g_string_append_len(s, s1, strlen(s1) + 1);
+}
+
+static void *spapr_create_fdt_skel(hwaddr initrd_base,
+ hwaddr initrd_size,
+ hwaddr kernel_size,
+ bool little_endian,
+ const char *kernel_cmdline,
+ uint32_t epow_irq)
+{
+ void *fdt;
+ uint32_t start_prop = cpu_to_be32(initrd_base);
+ uint32_t end_prop = cpu_to_be32(initrd_base + initrd_size);
+ GString *hypertas = g_string_sized_new(256);
+ GString *qemu_hypertas = g_string_sized_new(256);
+ uint32_t refpoints[] = {cpu_to_be32(0x4), cpu_to_be32(0x4)};
+ uint32_t interrupt_server_ranges_prop[] = {0, cpu_to_be32(max_cpus)};
+ unsigned char vec5[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x80};
+ char *buf;
+
+ add_str(hypertas, "hcall-pft");
+ add_str(hypertas, "hcall-term");
+ add_str(hypertas, "hcall-dabr");
+ add_str(hypertas, "hcall-interrupt");
+ add_str(hypertas, "hcall-tce");
+ add_str(hypertas, "hcall-vio");
+ add_str(hypertas, "hcall-splpar");
+ add_str(hypertas, "hcall-bulk");
+ add_str(hypertas, "hcall-set-mode");
+ add_str(qemu_hypertas, "hcall-memop1");
+
+ fdt = g_malloc0(FDT_MAX_SIZE);
+ _FDT((fdt_create(fdt, FDT_MAX_SIZE)));
+
+ if (kernel_size) {
+ _FDT((fdt_add_reservemap_entry(fdt, KERNEL_LOAD_ADDR, kernel_size)));
+ }
+ if (initrd_size) {
+ _FDT((fdt_add_reservemap_entry(fdt, initrd_base, initrd_size)));
+ }
+ _FDT((fdt_finish_reservemap(fdt)));
+
+ /* Root node */
+ _FDT((fdt_begin_node(fdt, "")));
+ _FDT((fdt_property_string(fdt, "device_type", "chrp")));
+ _FDT((fdt_property_string(fdt, "model", "IBM pSeries (emulated by qemu)")));
+ _FDT((fdt_property_string(fdt, "compatible", "qemu,pseries")));
+
+    /*
+     * Add info so the guest can identify which host it is being run on
+     * and what the uuid of the guest is
+     */
+ if (kvmppc_get_host_model(&buf)) {
+ _FDT((fdt_property_string(fdt, "host-model", buf)));
+ g_free(buf);
+ }
+ if (kvmppc_get_host_serial(&buf)) {
+ _FDT((fdt_property_string(fdt, "host-serial", buf)));
+ g_free(buf);
+ }
+
+ buf = g_strdup_printf(UUID_FMT, qemu_uuid[0], qemu_uuid[1],
+ qemu_uuid[2], qemu_uuid[3], qemu_uuid[4],
+ qemu_uuid[5], qemu_uuid[6], qemu_uuid[7],
+ qemu_uuid[8], qemu_uuid[9], qemu_uuid[10],
+ qemu_uuid[11], qemu_uuid[12], qemu_uuid[13],
+ qemu_uuid[14], qemu_uuid[15]);
+
+ _FDT((fdt_property_string(fdt, "vm,uuid", buf)));
+ g_free(buf);
+
+ if (qemu_get_vm_name()) {
+ _FDT((fdt_property_string(fdt, "ibm,partition-name",
+ qemu_get_vm_name())));
+ }
+
+ _FDT((fdt_property_cell(fdt, "#address-cells", 0x2)));
+ _FDT((fdt_property_cell(fdt, "#size-cells", 0x2)));
+
+ /* /chosen */
+ _FDT((fdt_begin_node(fdt, "chosen")));
+
+ /* Set Form1_affinity */
+ _FDT((fdt_property(fdt, "ibm,architecture-vec-5", vec5, sizeof(vec5))));
+
+ _FDT((fdt_property_string(fdt, "bootargs", kernel_cmdline)));
+ _FDT((fdt_property(fdt, "linux,initrd-start",
+ &start_prop, sizeof(start_prop))));
+ _FDT((fdt_property(fdt, "linux,initrd-end",
+ &end_prop, sizeof(end_prop))));
+ if (kernel_size) {
+ uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR),
+ cpu_to_be64(kernel_size) };
+
+ _FDT((fdt_property(fdt, "qemu,boot-kernel", &kprop, sizeof(kprop))));
+ if (little_endian) {
+ _FDT((fdt_property(fdt, "qemu,boot-kernel-le", NULL, 0)));
+ }
+ }
+ if (boot_menu) {
+ _FDT((fdt_property_cell(fdt, "qemu,boot-menu", boot_menu)));
+ }
+ _FDT((fdt_property_cell(fdt, "qemu,graphic-width", graphic_width)));
+ _FDT((fdt_property_cell(fdt, "qemu,graphic-height", graphic_height)));
+ _FDT((fdt_property_cell(fdt, "qemu,graphic-depth", graphic_depth)));
+
+ _FDT((fdt_end_node(fdt)));
+
+ /* RTAS */
+ _FDT((fdt_begin_node(fdt, "rtas")));
+
+ if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
+ add_str(hypertas, "hcall-multi-tce");
+ }
+ _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas->str,
+ hypertas->len)));
+ g_string_free(hypertas, TRUE);
+ _FDT((fdt_property(fdt, "qemu,hypertas-functions", qemu_hypertas->str,
+ qemu_hypertas->len)));
+ g_string_free(qemu_hypertas, TRUE);
+
+ _FDT((fdt_property(fdt, "ibm,associativity-reference-points",
+ refpoints, sizeof(refpoints))));
+
+ _FDT((fdt_property_cell(fdt, "rtas-error-log-max", RTAS_ERROR_LOG_MAX)));
+ _FDT((fdt_property_cell(fdt, "rtas-event-scan-rate",
+ RTAS_EVENT_SCAN_RATE)));
+
+ if (msi_supported) {
+ _FDT((fdt_property(fdt, "ibm,change-msix-capable", NULL, 0)));
+ }
+
+ /*
+ * According to PAPR, rtas ibm,os-term does not guarantee a return
+ * back to the guest cpu.
+ *
+     * The additional ibm,extended-os-term property indicates that the
+     * rtas call will always return. Set this property.
+ */
+ _FDT((fdt_property(fdt, "ibm,extended-os-term", NULL, 0)));
+
+ _FDT((fdt_end_node(fdt)));
+
+ /* interrupt controller */
+ _FDT((fdt_begin_node(fdt, "interrupt-controller")));
+
+ _FDT((fdt_property_string(fdt, "device_type",
+ "PowerPC-External-Interrupt-Presentation")));
+ _FDT((fdt_property_string(fdt, "compatible", "IBM,ppc-xicp")));
+ _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));
+ _FDT((fdt_property(fdt, "ibm,interrupt-server-ranges",
+ interrupt_server_ranges_prop,
+ sizeof(interrupt_server_ranges_prop))));
+ _FDT((fdt_property_cell(fdt, "#interrupt-cells", 2)));
+ _FDT((fdt_property_cell(fdt, "linux,phandle", PHANDLE_XICP)));
+ _FDT((fdt_property_cell(fdt, "phandle", PHANDLE_XICP)));
+
+ _FDT((fdt_end_node(fdt)));
+
+ /* vdevice */
+ _FDT((fdt_begin_node(fdt, "vdevice")));
+
+ _FDT((fdt_property_string(fdt, "device_type", "vdevice")));
+ _FDT((fdt_property_string(fdt, "compatible", "IBM,vdevice")));
+ _FDT((fdt_property_cell(fdt, "#address-cells", 0x1)));
+ _FDT((fdt_property_cell(fdt, "#size-cells", 0x0)));
+ _FDT((fdt_property_cell(fdt, "#interrupt-cells", 0x2)));
+ _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));
+
+ _FDT((fdt_end_node(fdt)));
+
+ /* event-sources */
+ spapr_events_fdt_skel(fdt, epow_irq);
+
+ /* /hypervisor node */
+ if (kvm_enabled()) {
+ uint8_t hypercall[16];
+
+ /* indicate KVM hypercall interface */
+ _FDT((fdt_begin_node(fdt, "hypervisor")));
+ _FDT((fdt_property_string(fdt, "compatible", "linux,kvm")));
+ if (kvmppc_has_cap_fixup_hcalls()) {
+ /*
+ * Older KVM versions with older guest kernels were broken with the
+ * magic page, don't allow the guest to map it.
+ */
+ kvmppc_get_hypercall(first_cpu->env_ptr, hypercall,
+ sizeof(hypercall));
+ _FDT((fdt_property(fdt, "hcall-instructions", hypercall,
+ sizeof(hypercall))));
+ }
+ _FDT((fdt_end_node(fdt)));
+ }
+
+ _FDT((fdt_end_node(fdt))); /* close root node */
+ _FDT((fdt_finish(fdt)));
+
+ return fdt;
+}
+
+static int spapr_populate_memory_node(void *fdt, int nodeid, hwaddr start,
+ hwaddr size)
+{
+ uint32_t associativity[] = {
+ cpu_to_be32(0x4), /* length */
+ cpu_to_be32(0x0), cpu_to_be32(0x0),
+ cpu_to_be32(0x0), cpu_to_be32(nodeid)
+ };
+ char mem_name[32];
+ uint64_t mem_reg_property[2];
+ int off;
+
+ mem_reg_property[0] = cpu_to_be64(start);
+ mem_reg_property[1] = cpu_to_be64(size);
+
+ sprintf(mem_name, "memory@" TARGET_FMT_lx, start);
+ off = fdt_add_subnode(fdt, 0, mem_name);
+ _FDT(off);
+ _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
+ _FDT((fdt_setprop(fdt, off, "reg", mem_reg_property,
+ sizeof(mem_reg_property))));
+ _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
+ sizeof(associativity))));
+ return off;
+}
+
+static int spapr_populate_memory(sPAPRMachineState *spapr, void *fdt)
+{
+ MachineState *machine = MACHINE(spapr);
+ hwaddr mem_start, node_size;
+ int i, nb_nodes = nb_numa_nodes;
+ NodeInfo *nodes = numa_info;
+ NodeInfo ramnode;
+
+ /* No NUMA nodes, assume there is just one node with whole RAM */
+ if (!nb_numa_nodes) {
+ nb_nodes = 1;
+ ramnode.node_mem = machine->ram_size;
+ nodes = &ramnode;
+ }
+
+ for (i = 0, mem_start = 0; i < nb_nodes; ++i) {
+ if (!nodes[i].node_mem) {
+ continue;
+ }
+ if (mem_start >= machine->ram_size) {
+ node_size = 0;
+ } else {
+ node_size = nodes[i].node_mem;
+ if (node_size > machine->ram_size - mem_start) {
+ node_size = machine->ram_size - mem_start;
+ }
+ }
+ if (!mem_start) {
+ /* ppc_spapr_init() checks for rma_size <= node0_size already */
+ spapr_populate_memory_node(fdt, i, 0, spapr->rma_size);
+ mem_start += spapr->rma_size;
+ node_size -= spapr->rma_size;
+ }
+ for ( ; node_size; ) {
+ hwaddr sizetmp = pow2floor(node_size);
+
+ /* mem_start != 0 here */
+ if (ctzl(mem_start) < ctzl(sizetmp)) {
+ sizetmp = 1ULL << ctzl(mem_start);
+ }
+
+ spapr_populate_memory_node(fdt, i, mem_start, sizetmp);
+ node_size -= sizetmp;
+ mem_start += sizetmp;
+ }
+ }
+
+ return 0;
+}
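/*
 * A minimal standalone sketch, not part of the QEMU code above: the inner
 * loop of spapr_populate_memory() carves each NUMA node into power-of-two
 * chunks no larger than the alignment of their start address, so every
 * memory@ node it emits gets a naturally aligned, power-of-two "reg".
 * The same rule, isolated:
 */
#include <stdint.h>
#include <stdio.h>

static void split_node(uint64_t start, uint64_t size)
{
    while (size) {
        uint64_t chunk = 1ULL << (63 - __builtin_clzll(size));   /* pow2floor */

        /* start is never 0 in the loop above; the guard keeps this sketch safe */
        if (start && __builtin_ctzll(start) < __builtin_ctzll(chunk)) {
            chunk = 1ULL << __builtin_ctzll(start);               /* keep it aligned */
        }
        printf("memory@%llx, size 0x%llx\n",
               (unsigned long long)start, (unsigned long long)chunk);
        start += chunk;
        size -= chunk;
    }
}

int main(void)
{
    /* a 3 GiB node starting at 1 GiB splits into a 1 GiB and a 2 GiB chunk */
    split_node(1ULL << 30, 3ULL << 30);
    return 0;
}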
+
+static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset,
+ sPAPRMachineState *spapr)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
+ int index = ppc_get_vcpu_dt_id(cpu);
+ uint32_t segs[] = {cpu_to_be32(28), cpu_to_be32(40),
+ 0xffffffff, 0xffffffff};
+ uint32_t tbfreq = kvm_enabled() ? kvmppc_get_tbfreq() : TIMEBASE_FREQ;
+ uint32_t cpufreq = kvm_enabled() ? kvmppc_get_clockfreq() : 1000000000;
+ uint32_t page_sizes_prop[64];
+ size_t page_sizes_prop_size;
+ uint32_t vcpus_per_socket = smp_threads * smp_cores;
+ uint32_t pft_size_prop[] = {0, cpu_to_be32(spapr->htab_shift)};
+
+ /* Note: we keep CI large pages off for now because a 64K capable guest
+ * provisioned with large pages might otherwise try to map a qemu
+ * framebuffer (or other kind of memory mapped PCI BAR) using 64K pages
+ * even if that qemu runs on a 4k host.
+ *
+ * We can later add this bit back when we are confident this is not
+ * an issue (!HV KVM or 64K host)
+ */
+ uint8_t pa_features_206[] = { 6, 0,
+ 0xf6, 0x1f, 0xc7, 0x00, 0x80, 0xc0 };
+ uint8_t pa_features_207[] = { 24, 0,
+ 0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
+ 0x80, 0x00, 0x80, 0x00, 0x80, 0x00 };
+ uint8_t *pa_features;
+ size_t pa_size;
+
+ _FDT((fdt_setprop_cell(fdt, offset, "reg", index)));
+ _FDT((fdt_setprop_string(fdt, offset, "device_type", "cpu")));
+
+ _FDT((fdt_setprop_cell(fdt, offset, "cpu-version", env->spr[SPR_PVR])));
+ _FDT((fdt_setprop_cell(fdt, offset, "d-cache-block-size",
+ env->dcache_line_size)));
+ _FDT((fdt_setprop_cell(fdt, offset, "d-cache-line-size",
+ env->dcache_line_size)));
+ _FDT((fdt_setprop_cell(fdt, offset, "i-cache-block-size",
+ env->icache_line_size)));
+ _FDT((fdt_setprop_cell(fdt, offset, "i-cache-line-size",
+ env->icache_line_size)));
+
+ if (pcc->l1_dcache_size) {
+ _FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
+ pcc->l1_dcache_size)));
+ } else {
+ fprintf(stderr, "Warning: Unknown L1 dcache size for cpu\n");
+ }
+ if (pcc->l1_icache_size) {
+ _FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
+ pcc->l1_icache_size)));
+ } else {
+ fprintf(stderr, "Warning: Unknown L1 icache size for cpu\n");
+ }
+
+ _FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
+ _FDT((fdt_setprop_cell(fdt, offset, "clock-frequency", cpufreq)));
+ _FDT((fdt_setprop_cell(fdt, offset, "slb-size", env->slb_nr)));
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,slb-size", env->slb_nr)));
+ _FDT((fdt_setprop_string(fdt, offset, "status", "okay")));
+ _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0)));
+
+ if (env->spr_cb[SPR_PURR].oea_read) {
+ _FDT((fdt_setprop(fdt, offset, "ibm,purr", NULL, 0)));
+ }
+
+ if (env->mmu_model & POWERPC_MMU_1TSEG) {
+ _FDT((fdt_setprop(fdt, offset, "ibm,processor-segment-sizes",
+ segs, sizeof(segs))));
+ }
+
+ /* Advertise VMX/VSX (vector extensions) if available
+ * 0 / no property == no vector extensions
+ * 1 == VMX / Altivec available
+ * 2 == VSX available */
+ if (env->insns_flags & PPC_ALTIVEC) {
+ uint32_t vmx = (env->insns_flags2 & PPC2_VSX) ? 2 : 1;
+
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,vmx", vmx)));
+ }
+
+ /* Advertise DFP (Decimal Floating Point) if available
+ * 0 / no property == no DFP
+ * 1 == DFP available */
+ if (env->insns_flags2 & PPC2_DFP) {
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,dfp", 1)));
+ }
+
+ page_sizes_prop_size = create_page_sizes_prop(env, page_sizes_prop,
+ sizeof(page_sizes_prop));
+ if (page_sizes_prop_size) {
+ _FDT((fdt_setprop(fdt, offset, "ibm,segment-page-sizes",
+ page_sizes_prop, page_sizes_prop_size)));
+ }
+
+ /* Do the ibm,pa-features property, adjust it for ci-large-pages */
+ if (env->mmu_model == POWERPC_MMU_2_06) {
+ pa_features = pa_features_206;
+ pa_size = sizeof(pa_features_206);
+ } else /* env->mmu_model == POWERPC_MMU_2_07 */ {
+ pa_features = pa_features_207;
+ pa_size = sizeof(pa_features_207);
+ }
+ if (env->ci_large_pages) {
+ pa_features[3] |= 0x20;
+ }
+ _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size)));
+
+ _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id",
+ cs->cpu_index / vcpus_per_socket)));
+
+ _FDT((fdt_setprop(fdt, offset, "ibm,pft-size",
+ pft_size_prop, sizeof(pft_size_prop))));
+
+ _FDT(spapr_fixup_cpu_numa_dt(fdt, offset, cs));
+
+ _FDT(spapr_fixup_cpu_smt_dt(fdt, offset, cpu,
+ ppc_get_compat_smt_threads(cpu)));
+}
+
+static void spapr_populate_cpus_dt_node(void *fdt, sPAPRMachineState *spapr)
+{
+ CPUState *cs;
+ int cpus_offset;
+ char *nodename;
+ int smt = kvmppc_smt_threads();
+
+ cpus_offset = fdt_add_subnode(fdt, 0, "cpus");
+ _FDT(cpus_offset);
+ _FDT((fdt_setprop_cell(fdt, cpus_offset, "#address-cells", 0x1)));
+ _FDT((fdt_setprop_cell(fdt, cpus_offset, "#size-cells", 0x0)));
+
+ /*
+ * We walk the CPUs in reverse order to ensure that CPU DT nodes
+ * created by fdt_add_subnode() end up in the right order in FDT
+     * for the guest kernel to enumerate the CPUs correctly.
+ */
+ CPU_FOREACH_REVERSE(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ int index = ppc_get_vcpu_dt_id(cpu);
+ DeviceClass *dc = DEVICE_GET_CLASS(cs);
+ int offset;
+
+ if ((index % smt) != 0) {
+ continue;
+ }
+
+ nodename = g_strdup_printf("%s@%x", dc->fw_name, index);
+ offset = fdt_add_subnode(fdt, cpus_offset, nodename);
+ g_free(nodename);
+ _FDT(offset);
+ spapr_populate_cpu_dt(cs, fdt, offset, spapr);
+ }
+}
+
+/*
+ * Adds ibm,dynamic-reconfiguration-memory node.
+ * Refer to docs/specs/ppc-spapr-hotplug.txt for the documentation
+ * of this device tree node.
+ */
+static int spapr_populate_drconf_memory(sPAPRMachineState *spapr, void *fdt)
+{
+ MachineState *machine = MACHINE(spapr);
+ int ret, i, offset;
+ uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
+ uint32_t prop_lmb_size[] = {0, cpu_to_be32(lmb_size)};
+ uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size;
+ uint32_t *int_buf, *cur_index, buf_len;
+ int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
+
+ /*
+ * Allocate enough buffer size to fit in ibm,dynamic-memory
+ * or ibm,associativity-lookup-arrays
+ */
+ buf_len = MAX(nr_lmbs * SPAPR_DR_LMB_LIST_ENTRY_SIZE + 1, nr_nodes * 4 + 2)
+ * sizeof(uint32_t);
+ cur_index = int_buf = g_malloc0(buf_len);
+
+ offset = fdt_add_subnode(fdt, 0, "ibm,dynamic-reconfiguration-memory");
+
+ ret = fdt_setprop(fdt, offset, "ibm,lmb-size", prop_lmb_size,
+ sizeof(prop_lmb_size));
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = fdt_setprop_cell(fdt, offset, "ibm,memory-flags-mask", 0xff);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = fdt_setprop_cell(fdt, offset, "ibm,memory-preservation-time", 0x0);
+ if (ret < 0) {
+ goto out;
+ }
+
+ /* ibm,dynamic-memory */
+ int_buf[0] = cpu_to_be32(nr_lmbs);
+ cur_index++;
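+    /*
+     * Each LMB gets one SPAPR_DR_LMB_LIST_ENTRY_SIZE (six cell) entry:
+     * 64-bit base address (two cells), DRC index, a reserved cell, the
+     * associativity list index (NUMA node) and a flags cell.
+     */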
+ for (i = 0; i < nr_lmbs; i++) {
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+        uint64_t addr = i * lmb_size + spapr->hotplug_memory.base;
+ uint32_t *dynamic_memory = cur_index;
+
+ drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
+ addr/lmb_size);
+ g_assert(drc);
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ dynamic_memory[0] = cpu_to_be32(addr >> 32);
+ dynamic_memory[1] = cpu_to_be32(addr & 0xffffffff);
+ dynamic_memory[2] = cpu_to_be32(drck->get_index(drc));
+ dynamic_memory[3] = cpu_to_be32(0); /* reserved */
+ dynamic_memory[4] = cpu_to_be32(numa_get_node(addr, NULL));
+ if (addr < machine->ram_size ||
+ memory_region_present(get_system_memory(), addr)) {
+ dynamic_memory[5] = cpu_to_be32(SPAPR_LMB_FLAGS_ASSIGNED);
+ } else {
+ dynamic_memory[5] = cpu_to_be32(0);
+ }
+
+ cur_index += SPAPR_DR_LMB_LIST_ENTRY_SIZE;
+ }
+ ret = fdt_setprop(fdt, offset, "ibm,dynamic-memory", int_buf, buf_len);
+ if (ret < 0) {
+ goto out;
+ }
+
+ /* ibm,associativity-lookup-arrays */
+ cur_index = int_buf;
+ int_buf[0] = cpu_to_be32(nr_nodes);
+ int_buf[1] = cpu_to_be32(4); /* Number of entries per associativity list */
+ cur_index += 2;
+ for (i = 0; i < nr_nodes; i++) {
+ uint32_t associativity[] = {
+ cpu_to_be32(0x0),
+ cpu_to_be32(0x0),
+ cpu_to_be32(0x0),
+ cpu_to_be32(i)
+ };
+ memcpy(cur_index, associativity, sizeof(associativity));
+ cur_index += 4;
+ }
+ ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
+ (cur_index - int_buf) * sizeof(uint32_t));
+out:
+ g_free(int_buf);
+ return ret;
+}
+
+int spapr_h_cas_compose_response(sPAPRMachineState *spapr,
+ target_ulong addr, target_ulong size,
+ bool cpu_update, bool memory_update)
+{
+ void *fdt, *fdt_skel;
+ sPAPRDeviceTreeUpdateHeader hdr = { .version_id = 1 };
+ sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(qdev_get_machine());
+
+ size -= sizeof(hdr);
+
+    /* Create skeleton */
+ fdt_skel = g_malloc0(size);
+ _FDT((fdt_create(fdt_skel, size)));
+ _FDT((fdt_begin_node(fdt_skel, "")));
+ _FDT((fdt_end_node(fdt_skel)));
+ _FDT((fdt_finish(fdt_skel)));
+ fdt = g_malloc0(size);
+ _FDT((fdt_open_into(fdt_skel, fdt, size)));
+ g_free(fdt_skel);
+
+ /* Fixup cpu nodes */
+ if (cpu_update) {
+ _FDT((spapr_fixup_cpu_dt(fdt, spapr)));
+ }
+
+ /* Generate memory nodes or ibm,dynamic-reconfiguration-memory node */
+ if (memory_update && smc->dr_lmb_enabled) {
+ _FDT((spapr_populate_drconf_memory(spapr, fdt)));
+ }
+
+ /* Pack resulting tree */
+ _FDT((fdt_pack(fdt)));
+
+ if (fdt_totalsize(fdt) + sizeof(hdr) > size) {
+ trace_spapr_cas_failed(size);
+ return -1;
+ }
+
+ cpu_physical_memory_write(addr, &hdr, sizeof(hdr));
+ cpu_physical_memory_write(addr + sizeof(hdr), fdt, fdt_totalsize(fdt));
+ trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr));
+ g_free(fdt);
+
+ return 0;
+}
+
+static void spapr_finalize_fdt(sPAPRMachineState *spapr,
+ hwaddr fdt_addr,
+ hwaddr rtas_addr,
+ hwaddr rtas_size)
+{
+ MachineState *machine = MACHINE(qdev_get_machine());
+ sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
+ const char *boot_device = machine->boot_order;
+ int ret, i;
+ size_t cb = 0;
+ char *bootlist;
+ void *fdt;
+ sPAPRPHBState *phb;
+
+ fdt = g_malloc(FDT_MAX_SIZE);
+
+ /* open out the base tree into a temp buffer for the final tweaks */
+ _FDT((fdt_open_into(spapr->fdt_skel, fdt, FDT_MAX_SIZE)));
+
+ ret = spapr_populate_memory(spapr, fdt);
+ if (ret < 0) {
+ fprintf(stderr, "couldn't setup memory nodes in fdt\n");
+ exit(1);
+ }
+
+ ret = spapr_populate_vdevice(spapr->vio_bus, fdt);
+ if (ret < 0) {
+ fprintf(stderr, "couldn't setup vio devices in fdt\n");
+ exit(1);
+ }
+
+ if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL)) {
+ ret = spapr_rng_populate_dt(fdt);
+ if (ret < 0) {
+ fprintf(stderr, "could not set up rng device in the fdt\n");
+ exit(1);
+ }
+ }
+
+ QLIST_FOREACH(phb, &spapr->phbs, list) {
+ ret = spapr_populate_pci_dt(phb, PHANDLE_XICP, fdt);
+ }
+
+ if (ret < 0) {
+ fprintf(stderr, "couldn't setup PCI devices in fdt\n");
+ exit(1);
+ }
+
+ /* RTAS */
+ ret = spapr_rtas_device_tree_setup(fdt, rtas_addr, rtas_size);
+ if (ret < 0) {
+ fprintf(stderr, "Couldn't set up RTAS device tree properties\n");
+ }
+
+ /* cpus */
+ spapr_populate_cpus_dt_node(fdt, spapr);
+
+ bootlist = get_boot_devices_list(&cb, true);
+ if (cb && bootlist) {
+ int offset = fdt_path_offset(fdt, "/chosen");
+ if (offset < 0) {
+ exit(1);
+ }
+ for (i = 0; i < cb; i++) {
+ if (bootlist[i] == '\n') {
+ bootlist[i] = ' ';
+ }
+
+ }
+ ret = fdt_setprop_string(fdt, offset, "qemu,boot-list", bootlist);
+ }
+
+ if (boot_device && strlen(boot_device)) {
+ int offset = fdt_path_offset(fdt, "/chosen");
+
+ if (offset < 0) {
+ exit(1);
+ }
+ fdt_setprop_string(fdt, offset, "qemu,boot-device", boot_device);
+ }
+
+ if (!spapr->has_graphics) {
+ spapr_populate_chosen_stdout(fdt, spapr->vio_bus);
+ }
+
+ if (smc->dr_lmb_enabled) {
+ _FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
+ }
+
+ _FDT((fdt_pack(fdt)));
+
+ if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
+ error_report("FDT too big ! 0x%x bytes (max is 0x%x)",
+ fdt_totalsize(fdt), FDT_MAX_SIZE);
+ exit(1);
+ }
+
+ qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt));
+ cpu_physical_memory_write(fdt_addr, fdt, fdt_totalsize(fdt));
+
+ g_free(bootlist);
+ g_free(fdt);
+}
+
+static uint64_t translate_kernel_address(void *opaque, uint64_t addr)
+{
+ return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR;
+}
+
+static void emulate_spapr_hypercall(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+
+ if (msr_pr) {
+ hcall_dprintf("Hypercall made with MSR[PR]=1\n");
+ env->gpr[3] = H_PRIVILEGE;
+ } else {
+ env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
+ }
+}
+
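+/*
+ * Each hash PTE is two 64-bit doublewords; HPTE() returns a pointer to the
+ * i-th entry of the table.  HPTE64_V_HPTE_DIRTY is a software bit in the
+ * first doubleword that the migration code below uses to track which
+ * entries still need to be sent.
+ */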
+#define HPTE(_table, _i) (void *)(((uint64_t *)(_table)) + ((_i) * 2))
+#define HPTE_VALID(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
+#define HPTE_DIRTY(_hpte) (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
+#define CLEAN_HPTE(_hpte) ((*(uint64_t *)(_hpte)) &= tswap64(~HPTE64_V_HPTE_DIRTY))
+#define DIRTY_HPTE(_hpte) ((*(uint64_t *)(_hpte)) |= tswap64(HPTE64_V_HPTE_DIRTY))
+
+static void spapr_alloc_htab(sPAPRMachineState *spapr)
+{
+ long shift;
+ int index;
+
+    /* Allocate the hash page table.  The size (spapr->htab_shift) has
+     * already been chosen by the machine init code based on the maximum
+     * guest RAM size. */
+
+ shift = kvmppc_reset_htab(spapr->htab_shift);
+ if (shift < 0) {
+ /*
+ * For HV KVM, host kernel will return -ENOMEM when requested
+ * HTAB size can't be allocated.
+ */
+ error_setg(&error_abort, "Failed to allocate HTAB of requested size, try with smaller maxmem");
+ } else if (shift > 0) {
+ /*
+ * Kernel handles htab, we don't need to allocate one
+ *
+ * Older kernels can fall back to lower HTAB shift values,
+ * but we don't allow booting of such guests.
+ */
+ if (shift != spapr->htab_shift) {
+ error_setg(&error_abort, "Failed to allocate HTAB of requested size, try with smaller maxmem");
+ }
+
+ spapr->htab_shift = shift;
+ kvmppc_kern_htab = true;
+ } else {
+ /* Allocate htab */
+ spapr->htab = qemu_memalign(HTAB_SIZE(spapr), HTAB_SIZE(spapr));
+
+ /* And clear it */
+ memset(spapr->htab, 0, HTAB_SIZE(spapr));
+
+ for (index = 0; index < HTAB_SIZE(spapr) / HASH_PTE_SIZE_64; index++) {
+ DIRTY_HPTE(HPTE(spapr->htab, index));
+ }
+ }
+}
+
+/*
+ * Clear HTAB entries during reset.
+ *
+ * If host kernel has allocated HTAB, KVM_PPC_ALLOCATE_HTAB ioctl is
+ * used to clear HTAB. Otherwise QEMU-allocated HTAB is cleared manually.
+ */
+static void spapr_reset_htab(sPAPRMachineState *spapr)
+{
+ long shift;
+ int index;
+
+ shift = kvmppc_reset_htab(spapr->htab_shift);
+ if (shift < 0) {
+ error_setg(&error_abort, "Failed to reset HTAB");
+ } else if (shift > 0) {
+ if (shift != spapr->htab_shift) {
+ error_setg(&error_abort, "Requested HTAB allocation failed during reset");
+ }
+
+ /* Tell readers to update their file descriptor */
+ if (spapr->htab_fd >= 0) {
+ spapr->htab_fd_stale = true;
+ }
+ } else {
+ memset(spapr->htab, 0, HTAB_SIZE(spapr));
+
+ for (index = 0; index < HTAB_SIZE(spapr) / HASH_PTE_SIZE_64; index++) {
+ DIRTY_HPTE(HPTE(spapr->htab, index));
+ }
+ }
+
+ /* Update the RMA size if necessary */
+ if (spapr->vrma_adjust) {
+ spapr->rma_size = kvmppc_rma_size(spapr_node0_size(),
+ spapr->htab_shift);
+ }
+}
+
+static int find_unknown_sysbus_device(SysBusDevice *sbdev, void *opaque)
+{
+ bool matched = false;
+
+ if (object_dynamic_cast(OBJECT(sbdev), TYPE_SPAPR_PCI_HOST_BRIDGE)) {
+ matched = true;
+ }
+
+ if (!matched) {
+ error_report("Device %s is not supported by this machine yet.",
+ qdev_fw_name(DEVICE(sbdev)));
+ exit(1);
+ }
+
+ return 0;
+}
+
+/*
+ * A guest reset will cause spapr->htab_fd to become stale if being used.
+ * Reopen the file descriptor to make sure the whole HTAB is properly read.
+ */
+static int spapr_check_htab_fd(sPAPRMachineState *spapr)
+{
+ int rc = 0;
+
+ if (spapr->htab_fd_stale) {
+ close(spapr->htab_fd);
+ spapr->htab_fd = kvmppc_get_htab_fd(false);
+ if (spapr->htab_fd < 0) {
+ error_report("Unable to open fd for reading hash table from KVM: "
+ "%s", strerror(errno));
+ rc = -1;
+ }
+ spapr->htab_fd_stale = false;
+ }
+
+ return rc;
+}
+
+static void ppc_spapr_reset(void)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+ PowerPCCPU *first_ppc_cpu;
+ uint32_t rtas_limit;
+
+ /* Check for unknown sysbus devices */
+ foreach_dynamic_sysbus_device(find_unknown_sysbus_device, NULL);
+
+ /* Reset the hash table & recalc the RMA */
+ spapr_reset_htab(spapr);
+
+ qemu_devices_reset();
+
+ /*
+ * We place the device tree and RTAS just below either the top of the RMA,
+     * or just below 2GB, whichever is lower, so that it can be
+ * processed with 32-bit real mode code if necessary
+ */
+ rtas_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR);
+ spapr->rtas_addr = rtas_limit - RTAS_MAX_SIZE;
+ spapr->fdt_addr = spapr->rtas_addr - FDT_MAX_SIZE;
+
+ /* Load the fdt */
+ spapr_finalize_fdt(spapr, spapr->fdt_addr, spapr->rtas_addr,
+ spapr->rtas_size);
+
+ /* Copy RTAS over */
+ cpu_physical_memory_write(spapr->rtas_addr, spapr->rtas_blob,
+ spapr->rtas_size);
+
+ /* Set up the entry state */
+ first_ppc_cpu = POWERPC_CPU(first_cpu);
+ first_ppc_cpu->env.gpr[3] = spapr->fdt_addr;
+ first_ppc_cpu->env.gpr[5] = 0;
+ first_cpu->halted = 0;
+ first_ppc_cpu->env.nip = SPAPR_ENTRY_POINT;
+}
+
+static void spapr_cpu_reset(void *opaque)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+ PowerPCCPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ cpu_reset(cs);
+
+ /* All CPUs start halted. CPU0 is unhalted from the machine level
+ * reset code and the rest are explicitly started up by the guest
+ * using an RTAS call */
+ cs->halted = 1;
+
+ env->spr[SPR_HIOR] = 0;
+
+ env->external_htab = (uint8_t *)spapr->htab;
+ if (kvm_enabled() && !env->external_htab) {
+ /*
+ * HV KVM, set external_htab to 1 so our ppc_hash64_load_hpte*
+ * functions do the right thing.
+ */
+ env->external_htab = (void *)1;
+ }
+ env->htab_base = -1;
+ /*
+ * htab_mask is the mask used to normalize hash value to PTEG index.
+ * htab_shift is log2 of hash table size.
+     * We have 8 HPTEs per group, and each HPTE is 16 bytes,
+     * i.e. 128 bytes per PTEG.
+ */
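+    /*
+     * For example, a 16 MiB HTAB (htab_shift = 24) gives
+     * htab_mask = 2^17 - 1 (131072 PTEGs) and an SDR1 HTABSIZE field
+     * of 24 - 18 = 6.
+     */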
+ env->htab_mask = (1ULL << (spapr->htab_shift - 7)) - 1;
+ env->spr[SPR_SDR1] = (target_ulong)(uintptr_t)spapr->htab |
+ (spapr->htab_shift - 18);
+}
+
+static void spapr_create_nvram(sPAPRMachineState *spapr)
+{
+ DeviceState *dev = qdev_create(&spapr->vio_bus->bus, "spapr-nvram");
+ DriveInfo *dinfo = drive_get(IF_PFLASH, 0, 0);
+
+ if (dinfo) {
+ qdev_prop_set_drive_nofail(dev, "drive", blk_by_legacy_dinfo(dinfo));
+ }
+
+ qdev_init_nofail(dev);
+
+ spapr->nvram = (struct sPAPRNVRAM *)dev;
+}
+
+static void spapr_rtc_create(sPAPRMachineState *spapr)
+{
+ DeviceState *dev = qdev_create(NULL, TYPE_SPAPR_RTC);
+
+ qdev_init_nofail(dev);
+ spapr->rtc = dev;
+
+ object_property_add_alias(qdev_get_machine(), "rtc-time",
+ OBJECT(spapr->rtc), "date", NULL);
+}
+
+/* Returns whether we want to use VGA or not */
+static int spapr_vga_init(PCIBus *pci_bus)
+{
+ switch (vga_interface_type) {
+ case VGA_NONE:
+ return false;
+ case VGA_DEVICE:
+ return true;
+ case VGA_STD:
+ case VGA_VIRTIO:
+ return pci_vga_init(pci_bus) != NULL;
+ default:
+        fprintf(stderr, "This vga model is not supported,"
+                " currently it only supports -vga std\n");
+        exit(1);
+ }
+}
+
+static int spapr_post_load(void *opaque, int version_id)
+{
+ sPAPRMachineState *spapr = (sPAPRMachineState *)opaque;
+ int err = 0;
+
+ /* In earlier versions, there was no separate qdev for the PAPR
+ * RTC, so the RTC offset was stored directly in sPAPREnvironment.
+ * So when migrating from those versions, poke the incoming offset
+ * value into the RTC device */
+ if (version_id < 3) {
+ err = spapr_rtc_import_offset(spapr->rtc, spapr->rtc_offset);
+ }
+
+ return err;
+}
+
+static bool version_before_3(void *opaque, int version_id)
+{
+ return version_id < 3;
+}
+
+static const VMStateDescription vmstate_spapr = {
+ .name = "spapr",
+ .version_id = 3,
+ .minimum_version_id = 1,
+ .post_load = spapr_post_load,
+ .fields = (VMStateField[]) {
+ /* used to be @next_irq */
+ VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4),
+
+ /* RTC offset */
+ VMSTATE_UINT64_TEST(rtc_offset, sPAPRMachineState, version_before_3),
+
+ VMSTATE_PPC_TIMEBASE_V(tb, sPAPRMachineState, 2),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static int htab_save_setup(QEMUFile *f, void *opaque)
+{
+ sPAPRMachineState *spapr = opaque;
+
+ /* "Iteration" header */
+ qemu_put_be32(f, spapr->htab_shift);
+
+ if (spapr->htab) {
+ spapr->htab_save_index = 0;
+ spapr->htab_first_pass = true;
+ } else {
+ assert(kvm_enabled());
+
+ spapr->htab_fd = kvmppc_get_htab_fd(false);
+ spapr->htab_fd_stale = false;
+ if (spapr->htab_fd < 0) {
+ fprintf(stderr, "Unable to open fd for reading hash table from KVM: %s\n",
+ strerror(errno));
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
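+/*
+ * The htab migration stream is a series of sections, each starting with a
+ * be32 header (the htab_shift in the setup section, 0 afterwards), followed
+ * by chunks of (be32 index, be16 n_valid, be16 n_invalid, then
+ * n_valid * HASH_PTE_SIZE_64 bytes of HPTE data) and terminated by an
+ * all-zero chunk.  htab_load() on the destination parses exactly this
+ * layout.
+ */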
+static void htab_save_first_pass(QEMUFile *f, sPAPRMachineState *spapr,
+ int64_t max_ns)
+{
+ int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
+ int index = spapr->htab_save_index;
+ int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+
+ assert(spapr->htab_first_pass);
+
+ do {
+ int chunkstart;
+
+ /* Consume invalid HPTEs */
+ while ((index < htabslots)
+ && !HPTE_VALID(HPTE(spapr->htab, index))) {
+ index++;
+ CLEAN_HPTE(HPTE(spapr->htab, index));
+ }
+
+ /* Consume valid HPTEs */
+ chunkstart = index;
+ while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
+ && HPTE_VALID(HPTE(spapr->htab, index))) {
+ index++;
+ CLEAN_HPTE(HPTE(spapr->htab, index));
+ }
+
+ if (index > chunkstart) {
+ int n_valid = index - chunkstart;
+
+ qemu_put_be32(f, chunkstart);
+ qemu_put_be16(f, n_valid);
+ qemu_put_be16(f, 0);
+ qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
+ HASH_PTE_SIZE_64 * n_valid);
+
+ if ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
+ break;
+ }
+ }
+ } while ((index < htabslots) && !qemu_file_rate_limit(f));
+
+ if (index >= htabslots) {
+ assert(index == htabslots);
+ index = 0;
+ spapr->htab_first_pass = false;
+ }
+ spapr->htab_save_index = index;
+}
+
+static int htab_save_later_pass(QEMUFile *f, sPAPRMachineState *spapr,
+ int64_t max_ns)
+{
+ bool final = max_ns < 0;
+ int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
+ int examined = 0, sent = 0;
+ int index = spapr->htab_save_index;
+ int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+
+ assert(!spapr->htab_first_pass);
+
+ do {
+ int chunkstart, invalidstart;
+
+ /* Consume non-dirty HPTEs */
+ while ((index < htabslots)
+ && !HPTE_DIRTY(HPTE(spapr->htab, index))) {
+ index++;
+ examined++;
+ }
+
+ chunkstart = index;
+ /* Consume valid dirty HPTEs */
+ while ((index < htabslots) && (index - chunkstart < USHRT_MAX)
+ && HPTE_DIRTY(HPTE(spapr->htab, index))
+ && HPTE_VALID(HPTE(spapr->htab, index))) {
+ CLEAN_HPTE(HPTE(spapr->htab, index));
+ index++;
+ examined++;
+ }
+
+ invalidstart = index;
+ /* Consume invalid dirty HPTEs */
+ while ((index < htabslots) && (index - invalidstart < USHRT_MAX)
+ && HPTE_DIRTY(HPTE(spapr->htab, index))
+ && !HPTE_VALID(HPTE(spapr->htab, index))) {
+ CLEAN_HPTE(HPTE(spapr->htab, index));
+ index++;
+ examined++;
+ }
+
+ if (index > chunkstart) {
+ int n_valid = invalidstart - chunkstart;
+ int n_invalid = index - invalidstart;
+
+ qemu_put_be32(f, chunkstart);
+ qemu_put_be16(f, n_valid);
+ qemu_put_be16(f, n_invalid);
+ qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
+ HASH_PTE_SIZE_64 * n_valid);
+ sent += index - chunkstart;
+
+ if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
+ break;
+ }
+ }
+
+ if (examined >= htabslots) {
+ break;
+ }
+
+ if (index >= htabslots) {
+ assert(index == htabslots);
+ index = 0;
+ }
+ } while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final));
+
+ if (index >= htabslots) {
+ assert(index == htabslots);
+ index = 0;
+ }
+
+ spapr->htab_save_index = index;
+
+ return (examined >= htabslots) && (sent == 0) ? 1 : 0;
+}
+
+#define MAX_ITERATION_NS 5000000 /* 5 ms */
+#define MAX_KVM_BUF_SIZE 2048
+
+static int htab_save_iterate(QEMUFile *f, void *opaque)
+{
+ sPAPRMachineState *spapr = opaque;
+ int rc = 0;
+
+ /* Iteration header */
+ qemu_put_be32(f, 0);
+
+ if (!spapr->htab) {
+ assert(kvm_enabled());
+
+ rc = spapr_check_htab_fd(spapr);
+ if (rc < 0) {
+ return rc;
+ }
+
+ rc = kvmppc_save_htab(f, spapr->htab_fd,
+ MAX_KVM_BUF_SIZE, MAX_ITERATION_NS);
+ if (rc < 0) {
+ return rc;
+ }
+ } else if (spapr->htab_first_pass) {
+ htab_save_first_pass(f, spapr, MAX_ITERATION_NS);
+ } else {
+ rc = htab_save_later_pass(f, spapr, MAX_ITERATION_NS);
+ }
+
+ /* End marker */
+ qemu_put_be32(f, 0);
+ qemu_put_be16(f, 0);
+ qemu_put_be16(f, 0);
+
+ return rc;
+}
+
+static int htab_save_complete(QEMUFile *f, void *opaque)
+{
+ sPAPRMachineState *spapr = opaque;
+
+ /* Iteration header */
+ qemu_put_be32(f, 0);
+
+ if (!spapr->htab) {
+ int rc;
+
+ assert(kvm_enabled());
+
+ rc = spapr_check_htab_fd(spapr);
+ if (rc < 0) {
+ return rc;
+ }
+
+ rc = kvmppc_save_htab(f, spapr->htab_fd, MAX_KVM_BUF_SIZE, -1);
+ if (rc < 0) {
+ return rc;
+ }
+ close(spapr->htab_fd);
+ spapr->htab_fd = -1;
+ } else {
+ htab_save_later_pass(f, spapr, -1);
+ }
+
+ /* End marker */
+ qemu_put_be32(f, 0);
+ qemu_put_be16(f, 0);
+ qemu_put_be16(f, 0);
+
+ return 0;
+}
+
+static int htab_load(QEMUFile *f, void *opaque, int version_id)
+{
+ sPAPRMachineState *spapr = opaque;
+ uint32_t section_hdr;
+ int fd = -1;
+
+ if (version_id < 1 || version_id > 1) {
+ fprintf(stderr, "htab_load() bad version\n");
+ return -EINVAL;
+ }
+
+ section_hdr = qemu_get_be32(f);
+
+ if (section_hdr) {
+ /* First section, just the hash shift */
+ if (spapr->htab_shift != section_hdr) {
+ error_report("htab_shift mismatch: source %d target %d",
+ section_hdr, spapr->htab_shift);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ if (!spapr->htab) {
+ assert(kvm_enabled());
+
+ fd = kvmppc_get_htab_fd(true);
+ if (fd < 0) {
+ fprintf(stderr, "Unable to open fd to restore KVM hash table: %s\n",
+ strerror(errno));
+ }
+ }
+
+ while (true) {
+ uint32_t index;
+ uint16_t n_valid, n_invalid;
+
+ index = qemu_get_be32(f);
+ n_valid = qemu_get_be16(f);
+ n_invalid = qemu_get_be16(f);
+
+ if ((index == 0) && (n_valid == 0) && (n_invalid == 0)) {
+ /* End of Stream */
+ break;
+ }
+
+ if ((index + n_valid + n_invalid) >
+ (HTAB_SIZE(spapr) / HASH_PTE_SIZE_64)) {
+ /* Bad index in stream */
+ fprintf(stderr, "htab_load() bad index %d (%hd+%hd entries) "
+ "in htab stream (htab_shift=%d)\n", index, n_valid, n_invalid,
+ spapr->htab_shift);
+ return -EINVAL;
+ }
+
+ if (spapr->htab) {
+ if (n_valid) {
+ qemu_get_buffer(f, HPTE(spapr->htab, index),
+ HASH_PTE_SIZE_64 * n_valid);
+ }
+ if (n_invalid) {
+ memset(HPTE(spapr->htab, index + n_valid), 0,
+ HASH_PTE_SIZE_64 * n_invalid);
+ }
+ } else {
+ int rc;
+
+ assert(fd >= 0);
+
+ rc = kvmppc_load_htab_chunk(f, fd, index, n_valid, n_invalid);
+ if (rc < 0) {
+ return rc;
+ }
+ }
+ }
+
+ if (!spapr->htab) {
+ assert(fd >= 0);
+ close(fd);
+ }
+
+ return 0;
+}
+
+static SaveVMHandlers savevm_htab_handlers = {
+ .save_live_setup = htab_save_setup,
+ .save_live_iterate = htab_save_iterate,
+ .save_live_complete_precopy = htab_save_complete,
+ .load_state = htab_load,
+};
+
+static void spapr_boot_set(void *opaque, const char *boot_device,
+ Error **errp)
+{
+ MachineState *machine = MACHINE(qdev_get_machine());
+ machine->boot_order = g_strdup(boot_device);
+}
+
+static void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+
+ /* Set time-base frequency to 512 MHz */
+ cpu_ppc_tb_init(env, TIMEBASE_FREQ);
+
+ /* PAPR always has exception vectors in RAM not ROM. To ensure this,
+ * MSR[IP] should never be set.
+ */
+ env->msr_mask &= ~(1 << 6);
+
+ /* Tell KVM that we're in PAPR mode */
+ if (kvm_enabled()) {
+ kvmppc_set_papr(cpu);
+ }
+
+ if (cpu->max_compat) {
+ if (ppc_set_compat(cpu, cpu->max_compat) < 0) {
+ exit(1);
+ }
+ }
+
+ xics_cpu_setup(spapr->icp, cpu);
+
+ qemu_register_reset(spapr_cpu_reset, cpu);
+}
+
+/*
+ * Reset routine for LMB DR devices.
+ *
+ * Unlike PCI DR devices, LMB DR devices explicitly register this reset
+ * routine. Reset for PCI DR devices will be handled by PHB reset routine
+ * when it walks all its children devices. LMB devices reset occurs
+ * as part of spapr_ppc_reset().
+ */
+static void spapr_drc_reset(void *opaque)
+{
+ sPAPRDRConnector *drc = opaque;
+ DeviceState *d = DEVICE(drc);
+
+ if (d) {
+ device_reset(d);
+ }
+}
+
+static void spapr_create_lmb_dr_connectors(sPAPRMachineState *spapr)
+{
+ MachineState *machine = MACHINE(spapr);
+ uint64_t lmb_size = SPAPR_MEMORY_BLOCK_SIZE;
+ uint32_t nr_lmbs = (machine->maxram_size - machine->ram_size)/lmb_size;
+ int i;
+
+ for (i = 0; i < nr_lmbs; i++) {
+ sPAPRDRConnector *drc;
+ uint64_t addr;
+
+ addr = i * lmb_size + spapr->hotplug_memory.base;
+ drc = spapr_dr_connector_new(OBJECT(spapr), SPAPR_DR_CONNECTOR_TYPE_LMB,
+ addr/lmb_size);
+ qemu_register_reset(spapr_drc_reset, drc);
+ }
+}
+
+/*
+ * If RAM size, maxmem size and individual node mem sizes aren't aligned
+ * to SPAPR_MEMORY_BLOCK_SIZE(256MB), then refuse to start the guest
+ * since we can't support such unaligned sizes with DRCONF_MEMORY.
+ */
+static void spapr_validate_node_memory(MachineState *machine)
+{
+ int i;
+
+ if (machine->maxram_size % SPAPR_MEMORY_BLOCK_SIZE ||
+ machine->ram_size % SPAPR_MEMORY_BLOCK_SIZE) {
+ error_report("Can't support memory configuration where RAM size "
+ "0x" RAM_ADDR_FMT " or maxmem size "
+ "0x" RAM_ADDR_FMT " isn't aligned to %llu MB",
+ machine->ram_size, machine->maxram_size,
+ SPAPR_MEMORY_BLOCK_SIZE/M_BYTE);
+ exit(EXIT_FAILURE);
+ }
+
+ for (i = 0; i < nb_numa_nodes; i++) {
+ if (numa_info[i].node_mem % SPAPR_MEMORY_BLOCK_SIZE) {
+ error_report("Can't support memory configuration where memory size"
+ " %" PRIx64 " of node %d isn't aligned to %llu MB",
+ numa_info[i].node_mem, i,
+ SPAPR_MEMORY_BLOCK_SIZE/M_BYTE);
+ exit(EXIT_FAILURE);
+ }
+ }
+}
+
+/* pSeries LPAR / sPAPR hardware init */
+static void ppc_spapr_init(MachineState *machine)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
+ sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
+ const char *kernel_filename = machine->kernel_filename;
+ const char *kernel_cmdline = machine->kernel_cmdline;
+ const char *initrd_filename = machine->initrd_filename;
+ PowerPCCPU *cpu;
+ PCIHostState *phb;
+ int i;
+ MemoryRegion *sysmem = get_system_memory();
+ MemoryRegion *ram = g_new(MemoryRegion, 1);
+ MemoryRegion *rma_region;
+ void *rma = NULL;
+ hwaddr rma_alloc_size;
+ hwaddr node0_size = spapr_node0_size();
+ uint32_t initrd_base = 0;
+ long kernel_size = 0, initrd_size = 0;
+ long load_limit, fw_size;
+ bool kernel_le = false;
+ char *filename;
+
+ msi_supported = true;
+
+ QLIST_INIT(&spapr->phbs);
+
+ cpu_ppc_hypercall = emulate_spapr_hypercall;
+
+ /* Allocate RMA if necessary */
+ rma_alloc_size = kvmppc_alloc_rma(&rma);
+
+ if (rma_alloc_size == -1) {
+ error_report("Unable to create RMA");
+ exit(1);
+ }
+
+ if (rma_alloc_size && (rma_alloc_size < node0_size)) {
+ spapr->rma_size = rma_alloc_size;
+ } else {
+ spapr->rma_size = node0_size;
+
+ /* With KVM, we don't actually know whether KVM supports an
+ * unbounded RMA (PR KVM) or is limited by the hash table size
+ * (HV KVM using VRMA), so we always assume the latter
+ *
+ * In that case, we also limit the initial allocations for RTAS
+ * etc... to 256M since we have no way to know what the VRMA size
+         * is going to be, as it depends on the size of the hash table,
+         * which isn't determined yet.
+ */
+ if (kvm_enabled()) {
+ spapr->vrma_adjust = 1;
+ spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
+ }
+ }
+
+ if (spapr->rma_size > node0_size) {
+ fprintf(stderr, "Error: Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")\n",
+ spapr->rma_size);
+ exit(1);
+ }
+
+ /* Setup a load limit for the ramdisk leaving room for SLOF and FDT */
+ load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD;
+
+ /* We aim for a hash table of size 1/128 the size of RAM. The
+ * normal rule of thumb is 1/64 the size of RAM, but that's much
+ * more than needed for the Linux guests we support. */
+ spapr->htab_shift = 18; /* Minimum architected size */
+ while (spapr->htab_shift <= 46) {
+ if ((1ULL << (spapr->htab_shift + 7)) >= machine->maxram_size) {
+ break;
+ }
+ spapr->htab_shift++;
+ }
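+    /* e.g. maxram_size = 4 GiB stops the loop at htab_shift = 25,
+     * i.e. a 32 MiB hash table (4 GiB / 128) */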
+ spapr_alloc_htab(spapr);
+
+ /* Set up Interrupt Controller before we create the VCPUs */
+ spapr->icp = xics_system_init(machine,
+ DIV_ROUND_UP(max_cpus * kvmppc_smt_threads(),
+ smp_threads),
+ XICS_IRQS);
+
+ if (smc->dr_lmb_enabled) {
+ spapr_validate_node_memory(machine);
+ }
+
+ /* init CPUs */
+ if (machine->cpu_model == NULL) {
+ machine->cpu_model = kvm_enabled() ? "host" : "POWER7";
+ }
+ for (i = 0; i < smp_cpus; i++) {
+ cpu = cpu_ppc_init(machine->cpu_model);
+ if (cpu == NULL) {
+ fprintf(stderr, "Unable to find PowerPC CPU definition\n");
+ exit(1);
+ }
+ spapr_cpu_init(spapr, cpu);
+ }
+
+ if (kvm_enabled()) {
+ /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
+ kvmppc_enable_logical_ci_hcalls();
+ kvmppc_enable_set_mode_hcall();
+ }
+
+ /* allocate RAM */
+ memory_region_allocate_system_memory(ram, NULL, "ppc_spapr.ram",
+ machine->ram_size);
+ memory_region_add_subregion(sysmem, 0, ram);
+
+ if (rma_alloc_size && rma) {
+ rma_region = g_new(MemoryRegion, 1);
+ memory_region_init_ram_ptr(rma_region, NULL, "ppc_spapr.rma",
+ rma_alloc_size, rma);
+ vmstate_register_ram_global(rma_region);
+ memory_region_add_subregion(sysmem, 0, rma_region);
+ }
+
+ /* initialize hotplug memory address space */
+ if (machine->ram_size < machine->maxram_size) {
+ ram_addr_t hotplug_mem_size = machine->maxram_size - machine->ram_size;
+
+ if (machine->ram_slots > SPAPR_MAX_RAM_SLOTS) {
+            error_report("Specified number of memory slots %"PRIu64" exceeds max supported %d",
+ machine->ram_slots, SPAPR_MAX_RAM_SLOTS);
+ exit(EXIT_FAILURE);
+ }
+
+ spapr->hotplug_memory.base = ROUND_UP(machine->ram_size,
+ SPAPR_HOTPLUG_MEM_ALIGN);
+ memory_region_init(&spapr->hotplug_memory.mr, OBJECT(spapr),
+ "hotplug-memory", hotplug_mem_size);
+ memory_region_add_subregion(sysmem, spapr->hotplug_memory.base,
+ &spapr->hotplug_memory.mr);
+ }
+
+ if (smc->dr_lmb_enabled) {
+ spapr_create_lmb_dr_connectors(spapr);
+ }
+
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
+ if (!filename) {
+ error_report("Could not find LPAR rtas '%s'", "spapr-rtas.bin");
+ exit(1);
+ }
+ spapr->rtas_size = get_image_size(filename);
+ spapr->rtas_blob = g_malloc(spapr->rtas_size);
+ if (load_image_size(filename, spapr->rtas_blob, spapr->rtas_size) < 0) {
+ error_report("Could not load LPAR rtas '%s'", filename);
+ exit(1);
+ }
+ if (spapr->rtas_size > RTAS_MAX_SIZE) {
+ error_report("RTAS too big ! 0x%zx bytes (max is 0x%x)",
+ (size_t)spapr->rtas_size, RTAS_MAX_SIZE);
+ exit(1);
+ }
+ g_free(filename);
+
+ /* Set up EPOW events infrastructure */
+ spapr_events_init(spapr);
+
+ /* Set up the RTC RTAS interfaces */
+ spapr_rtc_create(spapr);
+
+ /* Set up VIO bus */
+ spapr->vio_bus = spapr_vio_bus_init();
+
+ for (i = 0; i < MAX_SERIAL_PORTS; i++) {
+ if (serial_hds[i]) {
+ spapr_vty_create(spapr->vio_bus, serial_hds[i]);
+ }
+ }
+
+ /* We always have at least the nvram device on VIO */
+ spapr_create_nvram(spapr);
+
+ /* Set up PCI */
+ spapr_pci_rtas_init();
+
+ phb = spapr_create_phb(spapr, 0);
+
+ for (i = 0; i < nb_nics; i++) {
+ NICInfo *nd = &nd_table[i];
+
+ if (!nd->model) {
+ nd->model = g_strdup("ibmveth");
+ }
+
+ if (strcmp(nd->model, "ibmveth") == 0) {
+ spapr_vlan_create(spapr->vio_bus, nd);
+ } else {
+ pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL);
+ }
+ }
+
+ for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
+ spapr_vscsi_create(spapr->vio_bus);
+ }
+
+ /* Graphics */
+ if (spapr_vga_init(phb->bus)) {
+ spapr->has_graphics = true;
+ machine->usb |= defaults_enabled() && !machine->usb_disabled;
+ }
+
+ if (machine->usb) {
+ pci_create_simple(phb->bus, -1, "pci-ohci");
+
+ if (spapr->has_graphics) {
+ USBBus *usb_bus = usb_bus_find(-1);
+
+ usb_create_simple(usb_bus, "usb-kbd");
+ usb_create_simple(usb_bus, "usb-mouse");
+ }
+ }
+
+ if (spapr->rma_size < (MIN_RMA_SLOF << 20)) {
+ fprintf(stderr, "qemu: pSeries SLOF firmware requires >= "
+ "%ldM guest RMA (Real Mode Area memory)\n", MIN_RMA_SLOF);
+ exit(1);
+ }
+
+ if (kernel_filename) {
+ uint64_t lowaddr = 0;
+
+ kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL,
+ NULL, &lowaddr, NULL, 1, PPC_ELF_MACHINE, 0);
+ if (kernel_size == ELF_LOAD_WRONG_ENDIAN) {
+ kernel_size = load_elf(kernel_filename,
+ translate_kernel_address, NULL,
+ NULL, &lowaddr, NULL, 0, PPC_ELF_MACHINE, 0);
+ kernel_le = kernel_size > 0;
+ }
+ if (kernel_size < 0) {
+ fprintf(stderr, "qemu: error loading %s: %s\n",
+ kernel_filename, load_elf_strerror(kernel_size));
+ exit(1);
+ }
+
+ /* load initrd */
+ if (initrd_filename) {
+ /* Try to locate the initrd in the gap between the kernel
+ * and the firmware. Add a bit of space just in case
+ */
+ initrd_base = (KERNEL_LOAD_ADDR + kernel_size + 0x1ffff) & ~0xffff;
+ initrd_size = load_image_targphys(initrd_filename, initrd_base,
+ load_limit - initrd_base);
+ if (initrd_size < 0) {
+ fprintf(stderr, "qemu: could not load initial ram disk '%s'\n",
+ initrd_filename);
+ exit(1);
+ }
+ } else {
+ initrd_base = 0;
+ initrd_size = 0;
+ }
+ }
+
+ if (bios_name == NULL) {
+ bios_name = FW_FILE_NAME;
+ }
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+ if (!filename) {
+ error_report("Could not find LPAR firmware '%s'", bios_name);
+ exit(1);
+ }
+ fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
+ if (fw_size <= 0) {
+ error_report("Could not load LPAR firmware '%s'", filename);
+ exit(1);
+ }
+ g_free(filename);
+
+ /* FIXME: Should register things through the MachineState's qdev
+ * interface, this is a legacy from the sPAPREnvironment structure
+ * which predated MachineState but had a similar function */
+ vmstate_register(NULL, 0, &vmstate_spapr, spapr);
+ register_savevm_live(NULL, "spapr/htab", -1, 1,
+ &savevm_htab_handlers, spapr);
+
+ /* Prepare the device tree */
+ spapr->fdt_skel = spapr_create_fdt_skel(initrd_base, initrd_size,
+ kernel_size, kernel_le,
+ kernel_cmdline,
+ spapr->check_exception_irq);
+ assert(spapr->fdt_skel != NULL);
+
+ /* used by RTAS */
+ QTAILQ_INIT(&spapr->ccs_list);
+ qemu_register_reset(spapr_ccs_reset_hook, spapr);
+
+ qemu_register_boot_set(spapr_boot_set, spapr);
+}
+
+static int spapr_kvm_type(const char *vm_type)
+{
+ if (!vm_type) {
+ return 0;
+ }
+
+ if (!strcmp(vm_type, "HV")) {
+ return 1;
+ }
+
+ if (!strcmp(vm_type, "PR")) {
+ return 2;
+ }
+
+ error_report("Unknown kvm-type specified '%s'", vm_type);
+ exit(1);
+}
+
+/*
+ * Implementation of an interface to adjust firmware path
+ * for the bootindex property handling.
+ */
+static char *spapr_get_fw_dev_path(FWPathProvider *p, BusState *bus,
+ DeviceState *dev)
+{
+#define CAST(type, obj, name) \
+ ((type *)object_dynamic_cast(OBJECT(obj), (name)))
+ SCSIDevice *d = CAST(SCSIDevice, dev, TYPE_SCSI_DEVICE);
+ sPAPRPHBState *phb = CAST(sPAPRPHBState, dev, TYPE_SPAPR_PCI_HOST_BRIDGE);
+
+ if (d) {
+ void *spapr = CAST(void, bus->parent, "spapr-vscsi");
+ VirtIOSCSI *virtio = CAST(VirtIOSCSI, bus->parent, TYPE_VIRTIO_SCSI);
+ USBDevice *usb = CAST(USBDevice, bus->parent, TYPE_USB_DEVICE);
+
+ if (spapr) {
+ /*
+ * Replace "channel@0/disk@0,0" with "disk@8000000000000000":
+ * We use SRP luns of the form 8000 | (bus << 8) | (id << 5) | lun
+ * in the top 16 bits of the 64-bit LUN
+ */
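+            /*
+             * For example, a vscsi disk at id 1, lun 0 gets
+             * id = 0x8000 | (1 << 8) | 0 = 0x8100 and hence a path such
+             * as "disk@8100000000000000".
+             */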
+ unsigned id = 0x8000 | (d->id << 8) | d->lun;
+ return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
+ (uint64_t)id << 48);
+ } else if (virtio) {
+ /*
+ * We use SRP luns of the form 01000000 | (target << 8) | lun
+ * in the top 32 bits of the 64-bit LUN
+ * Note: the quote above is from SLOF and it is wrong,
+ * the actual binding is:
+ * swap 0100 or 10 << or 20 << ( target lun-id -- srplun )
+ */
+ unsigned id = 0x1000000 | (d->id << 16) | d->lun;
+ return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
+ (uint64_t)id << 32);
+ } else if (usb) {
+ /*
+ * We use SRP luns of the form 01000000 | (usb-port << 16) | lun
+ * in the top 32 bits of the 64-bit LUN
+ */
+ unsigned usb_port = atoi(usb->port->path);
+ unsigned id = 0x1000000 | (usb_port << 16) | d->lun;
+ return g_strdup_printf("%s@%"PRIX64, qdev_fw_name(dev),
+ (uint64_t)id << 32);
+ }
+ }
+
+ if (phb) {
+ /* Replace "pci" with "pci@800000020000000" */
+ return g_strdup_printf("pci@%"PRIX64, phb->buid);
+ }
+
+ return NULL;
+}
+
+static char *spapr_get_kvm_type(Object *obj, Error **errp)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
+
+ return g_strdup(spapr->kvm_type);
+}
+
+static void spapr_set_kvm_type(Object *obj, const char *value, Error **errp)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
+
+ g_free(spapr->kvm_type);
+ spapr->kvm_type = g_strdup(value);
+}
+
+static void spapr_machine_initfn(Object *obj)
+{
+ object_property_add_str(obj, "kvm-type",
+ spapr_get_kvm_type, spapr_set_kvm_type, NULL);
+ object_property_set_description(obj, "kvm-type",
+ "Specifies the KVM virtualization mode (HV, PR)",
+ NULL);
+}
+
+static void ppc_cpu_do_nmi_on_cpu(void *arg)
+{
+ CPUState *cs = arg;
+
+ cpu_synchronize_state(cs);
+ ppc_cpu_do_system_reset(cs);
+}
+
+static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
+{
+ CPUState *cs;
+
+ CPU_FOREACH(cs) {
+ async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, cs);
+ }
+}
+
+static void spapr_add_lmbs(DeviceState *dev, uint64_t addr, uint64_t size,
+ uint32_t node, Error **errp)
+{
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+ uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE;
+ int i, fdt_offset, fdt_size;
+ void *fdt;
+
+ /*
+ * Check for DRC connectors and send hotplug notification to the
+ * guest only in case of hotplugged memory. This allows cold plugged
+ * memory to be specified at boot time.
+ */
+ if (!dev->hotplugged) {
+ return;
+ }
+
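+    /*
+     * For example, a 1 GiB DIMM built from 256 MiB LMBs is split into four
+     * LMBs here: each gets its own DRC attach with a per-LMB memory node,
+     * and a single hotplug notification covering all four is queued at the
+     * end.
+     */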
+ for (i = 0; i < nr_lmbs; i++) {
+ drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_LMB,
+ addr/SPAPR_MEMORY_BLOCK_SIZE);
+ g_assert(drc);
+
+ fdt = create_device_tree(&fdt_size);
+ fdt_offset = spapr_populate_memory_node(fdt, node, addr,
+ SPAPR_MEMORY_BLOCK_SIZE);
+
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ drck->attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, errp);
+ addr += SPAPR_MEMORY_BLOCK_SIZE;
+ }
+ spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB, nr_lmbs);
+}
+
+static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
+ uint32_t node, Error **errp)
+{
+ Error *local_err = NULL;
+ sPAPRMachineState *ms = SPAPR_MACHINE(hotplug_dev);
+ PCDIMMDevice *dimm = PC_DIMM(dev);
+ PCDIMMDeviceClass *ddc = PC_DIMM_GET_CLASS(dimm);
+ MemoryRegion *mr = ddc->get_memory_region(dimm);
+ uint64_t align = memory_region_get_alignment(mr);
+ uint64_t size = memory_region_size(mr);
+ uint64_t addr;
+
+ if (size % SPAPR_MEMORY_BLOCK_SIZE) {
+ error_setg(&local_err, "Hotplugged memory size must be a multiple of "
+ "%lld MB", SPAPR_MEMORY_BLOCK_SIZE/M_BYTE);
+ goto out;
+ }
+
+ pc_dimm_memory_plug(dev, &ms->hotplug_memory, mr, align, &local_err);
+ if (local_err) {
+ goto out;
+ }
+
+ addr = object_property_get_int(OBJECT(dimm), PC_DIMM_ADDR_PROP, &local_err);
+ if (local_err) {
+ pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
+ goto out;
+ }
+
+ spapr_add_lmbs(dev, addr, size, node, &error_abort);
+
+out:
+ error_propagate(errp, local_err);
+}
+
+static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(qdev_get_machine());
+
+ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+ int node;
+
+ if (!smc->dr_lmb_enabled) {
+ error_setg(errp, "Memory hotplug not supported for this machine");
+ return;
+ }
+ node = object_property_get_int(OBJECT(dev), PC_DIMM_NODE_PROP, errp);
+ if (*errp) {
+ return;
+ }
+
+ /*
+ * Currently PowerPC kernel doesn't allow hot-adding memory to
+ * memory-less node, but instead will silently add the memory
+ * to the first node that has some memory. This causes two
+ * unexpected behaviours for the user.
+ *
+ * - Memory gets hotplugged to a different node than what the user
+ * specified.
+ * - Since pc-dimm subsystem in QEMU still thinks that memory belongs
+ * to memory-less node, a reboot will set things accordingly
+ * and the previously hotplugged memory now ends in the right node.
+ * This appears as if some memory moved from one node to another.
+ *
+ * So until kernel starts supporting memory hotplug to memory-less
+ * nodes, just prevent such attempts upfront in QEMU.
+ */
+ if (nb_numa_nodes && !numa_info[node].node_mem) {
+ error_setg(errp, "Can't hotplug memory to memory-less node %d",
+ node);
+ return;
+ }
+
+ spapr_memory_plug(hotplug_dev, dev, node, errp);
+ }
+}
+
+static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+ error_setg(errp, "Memory hot unplug not supported by sPAPR");
+ }
+}
+
+static HotplugHandler *spapr_get_hotplug_handler(MachineState *machine,
+ DeviceState *dev)
+{
+ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+ return HOTPLUG_HANDLER(machine);
+ }
+ return NULL;
+}
+
+static unsigned spapr_cpu_index_to_socket_id(unsigned cpu_index)
+{
+    /* Allocate to NUMA nodes on a "socket" basis (not that the concept of
+ * socket means much for the paravirtualized PAPR platform) */
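+    /* e.g. with smp_threads = 8 and smp_cores = 2, cpu_index 24 lands in
+     * socket 24 / 8 / 2 = 1 */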
+ return cpu_index / smp_threads / smp_cores;
+}
+
+static void spapr_machine_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+ sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
+ FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(oc);
+ NMIClass *nc = NMI_CLASS(oc);
+ HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
+
+ mc->init = ppc_spapr_init;
+ mc->reset = ppc_spapr_reset;
+ mc->block_default_type = IF_SCSI;
+ mc->max_cpus = MAX_CPUMASK_BITS;
+ mc->no_parallel = 1;
+ mc->default_boot_order = "";
+ mc->default_ram_size = 512 * M_BYTE;
+ mc->kvm_type = spapr_kvm_type;
+ mc->has_dynamic_sysbus = true;
+ mc->pci_allow_0_address = true;
+    mc->get_hotplug_handler = spapr_get_hotplug_handler;
+ hc->plug = spapr_machine_device_plug;
+ hc->unplug = spapr_machine_device_unplug;
+ mc->cpu_index_to_socket_id = spapr_cpu_index_to_socket_id;
+
+ smc->dr_lmb_enabled = false;
+ fwc->get_dev_path = spapr_get_fw_dev_path;
+ nc->nmi_monitor_handler = spapr_nmi;
+}
+
+static const TypeInfo spapr_machine_info = {
+ .name = TYPE_SPAPR_MACHINE,
+ .parent = TYPE_MACHINE,
+ .abstract = true,
+ .instance_size = sizeof(sPAPRMachineState),
+ .instance_init = spapr_machine_initfn,
+ .class_size = sizeof(sPAPRMachineClass),
+ .class_init = spapr_machine_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_FW_PATH_PROVIDER },
+ { TYPE_NMI },
+ { TYPE_HOTPLUG_HANDLER },
+ { }
+ },
+};
+
+#define SPAPR_COMPAT_2_4 \
+ HW_COMPAT_2_4
+
+#define SPAPR_COMPAT_2_3 \
+ SPAPR_COMPAT_2_4 \
+ HW_COMPAT_2_3 \
+ {\
+ .driver = "spapr-pci-host-bridge",\
+ .property = "dynamic-reconfiguration",\
+ .value = "off",\
+ },
+
+#define SPAPR_COMPAT_2_2 \
+ SPAPR_COMPAT_2_3 \
+ HW_COMPAT_2_2 \
+ {\
+ .driver = TYPE_SPAPR_PCI_HOST_BRIDGE,\
+ .property = "mem_win_size",\
+ .value = "0x20000000",\
+ },
+
+#define SPAPR_COMPAT_2_1 \
+ SPAPR_COMPAT_2_2 \
+ HW_COMPAT_2_1
+
+static void spapr_compat_2_3(Object *obj)
+{
+ savevm_skip_section_footers();
+ global_state_set_optional();
+ savevm_skip_configuration();
+}
+
+static void spapr_compat_2_2(Object *obj)
+{
+ spapr_compat_2_3(obj);
+}
+
+static void spapr_compat_2_1(Object *obj)
+{
+ spapr_compat_2_2(obj);
+}
+
+static void spapr_machine_2_3_instance_init(Object *obj)
+{
+ spapr_compat_2_3(obj);
+ spapr_machine_initfn(obj);
+}
+
+static void spapr_machine_2_2_instance_init(Object *obj)
+{
+ spapr_compat_2_2(obj);
+ spapr_machine_initfn(obj);
+}
+
+static void spapr_machine_2_1_instance_init(Object *obj)
+{
+ spapr_compat_2_1(obj);
+ spapr_machine_initfn(obj);
+}
+
+static void spapr_machine_2_1_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+ static GlobalProperty compat_props[] = {
+ SPAPR_COMPAT_2_1
+ { /* end of list */ }
+ };
+
+ mc->desc = "pSeries Logical Partition (PAPR compliant) v2.1";
+ mc->compat_props = compat_props;
+}
+
+static const TypeInfo spapr_machine_2_1_info = {
+ .name = MACHINE_TYPE_NAME("pseries-2.1"),
+ .parent = TYPE_SPAPR_MACHINE,
+ .class_init = spapr_machine_2_1_class_init,
+ .instance_init = spapr_machine_2_1_instance_init,
+};
+
+static void spapr_machine_2_2_class_init(ObjectClass *oc, void *data)
+{
+ static GlobalProperty compat_props[] = {
+ SPAPR_COMPAT_2_2
+ { /* end of list */ }
+ };
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->desc = "pSeries Logical Partition (PAPR compliant) v2.2";
+ mc->compat_props = compat_props;
+}
+
+static const TypeInfo spapr_machine_2_2_info = {
+ .name = MACHINE_TYPE_NAME("pseries-2.2"),
+ .parent = TYPE_SPAPR_MACHINE,
+ .class_init = spapr_machine_2_2_class_init,
+ .instance_init = spapr_machine_2_2_instance_init,
+};
+
+static void spapr_machine_2_3_class_init(ObjectClass *oc, void *data)
+{
+ static GlobalProperty compat_props[] = {
+ SPAPR_COMPAT_2_3
+ { /* end of list */ }
+ };
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->desc = "pSeries Logical Partition (PAPR compliant) v2.3";
+ mc->compat_props = compat_props;
+}
+
+static const TypeInfo spapr_machine_2_3_info = {
+ .name = MACHINE_TYPE_NAME("pseries-2.3"),
+ .parent = TYPE_SPAPR_MACHINE,
+ .class_init = spapr_machine_2_3_class_init,
+ .instance_init = spapr_machine_2_3_instance_init,
+};
+
+static void spapr_machine_2_4_class_init(ObjectClass *oc, void *data)
+{
+ static GlobalProperty compat_props[] = {
+ SPAPR_COMPAT_2_4
+ { /* end of list */ }
+ };
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->desc = "pSeries Logical Partition (PAPR compliant) v2.4";
+ mc->compat_props = compat_props;
+}
+
+static const TypeInfo spapr_machine_2_4_info = {
+ .name = MACHINE_TYPE_NAME("pseries-2.4"),
+ .parent = TYPE_SPAPR_MACHINE,
+ .class_init = spapr_machine_2_4_class_init,
+};
+
+static void spapr_machine_2_5_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+ sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(oc);
+
+ mc->name = "pseries-2.5";
+ mc->desc = "pSeries Logical Partition (PAPR compliant) v2.5";
+ mc->alias = "pseries";
+ mc->is_default = 1;
+ smc->dr_lmb_enabled = true;
+}
+
+static const TypeInfo spapr_machine_2_5_info = {
+ .name = MACHINE_TYPE_NAME("pseries-2.5"),
+ .parent = TYPE_SPAPR_MACHINE,
+ .class_init = spapr_machine_2_5_class_init,
+};
+
+static void spapr_machine_register_types(void)
+{
+ type_register_static(&spapr_machine_info);
+ type_register_static(&spapr_machine_2_1_info);
+ type_register_static(&spapr_machine_2_2_info);
+ type_register_static(&spapr_machine_2_3_info);
+ type_register_static(&spapr_machine_2_4_info);
+ type_register_static(&spapr_machine_2_5_info);
+}
+
+type_init(spapr_machine_register_types)
diff --git a/src/hw/ppc/spapr_drc.c b/src/hw/ppc/spapr_drc.c
new file mode 100644
index 0000000..8be62c3
--- /dev/null
+++ b/src/hw/ppc/spapr_drc.c
@@ -0,0 +1,802 @@
+/*
+ * QEMU SPAPR Dynamic Reconfiguration Connector Implementation
+ *
+ * Copyright IBM Corp. 2014
+ *
+ * Authors:
+ * Michael Roth <mdroth@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "hw/ppc/spapr_drc.h"
+#include "qom/object.h"
+#include "hw/qdev.h"
+#include "qapi/visitor.h"
+#include "qemu/error-report.h"
+#include "hw/ppc/spapr.h" /* for RTAS return codes */
+
+/* #define DEBUG_SPAPR_DRC */
+
+#ifdef DEBUG_SPAPR_DRC
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#define DPRINTFN(fmt, ...) \
+ do { DPRINTF(fmt, ## __VA_ARGS__); fprintf(stderr, "\n"); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#define DPRINTFN(fmt, ...) \
+ do { } while (0)
+#endif
+
+#define DRC_CONTAINER_PATH "/dr-connector"
+#define DRC_INDEX_TYPE_SHIFT 28
+#define DRC_INDEX_ID_MASK ((1ULL << DRC_INDEX_TYPE_SHIFT) - 1)
+
+static sPAPRDRConnectorTypeShift get_type_shift(sPAPRDRConnectorType type)
+{
+ uint32_t shift = 0;
+
+ /* make sure this isn't SPAPR_DR_CONNECTOR_TYPE_ANY, or some
+ * other wonky value.
+ */
+ g_assert(is_power_of_2(type));
+
+ while (type != (1 << shift)) {
+ shift++;
+ }
+ return shift;
+}
+
+static uint32_t get_index(sPAPRDRConnector *drc)
+{
+    /* There is no fixed format for a DRC index: it only needs to be
+     * globally unique. This is how the DRC type is encoded on bare metal,
+     * however, so we might as well do the same here.
+ */
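+    /*
+     * For example, assuming SPAPR_DR_CONNECTOR_TYPE_LMB is the single-bit
+     * value 0x8 (type shift 3), an LMB connector with id 5 gets index
+     * (3 << 28) | 5 = 0x30000005: the type shift lands in the top nibble
+     * and the id in the low 28 bits.
+     */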
+ return (get_type_shift(drc->type) << DRC_INDEX_TYPE_SHIFT) |
+ (drc->id & DRC_INDEX_ID_MASK);
+}
+
+static uint32_t set_isolation_state(sPAPRDRConnector *drc,
+ sPAPRDRIsolationState state)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ DPRINTFN("drc: %x, set_isolation_state: %x", get_index(drc), state);
+
+ if (state == SPAPR_DR_ISOLATION_STATE_UNISOLATED) {
+        /* cannot unisolate a non-existent resource, or resources
+ * which are in an 'UNUSABLE' allocation state. (PAPR 2.7, 13.5.3.5)
+ */
+ if (!drc->dev ||
+ drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
+ return RTAS_OUT_NO_SUCH_INDICATOR;
+ }
+ }
+
+ drc->isolation_state = state;
+
+ if (drc->isolation_state == SPAPR_DR_ISOLATION_STATE_ISOLATED) {
+ /* if we're awaiting release, but still in an unconfigured state,
+ * it's likely the guest is still in the process of configuring
+ * the device and is transitioning the devices to an ISOLATED
+ * state as a part of that process. so we only complete the
+ * removal when this transition happens for a device in a
+ * configured state, as suggested by the state diagram from
+ * PAPR+ 2.7, 13.4
+ */
+ if (drc->awaiting_release) {
+ if (drc->configured) {
+ DPRINTFN("finalizing device removal");
+ drck->detach(drc, DEVICE(drc->dev), drc->detach_cb,
+ drc->detach_cb_opaque, NULL);
+ } else {
+                DPRINTFN("deferring device removal on unconfigured device");
+ }
+ }
+ drc->configured = false;
+ }
+
+ return RTAS_OUT_SUCCESS;
+}
+
+static uint32_t set_indicator_state(sPAPRDRConnector *drc,
+ sPAPRDRIndicatorState state)
+{
+ DPRINTFN("drc: %x, set_indicator_state: %x", get_index(drc), state);
+ drc->indicator_state = state;
+ return RTAS_OUT_SUCCESS;
+}
+
+static uint32_t set_allocation_state(sPAPRDRConnector *drc,
+ sPAPRDRAllocationState state)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ DPRINTFN("drc: %x, set_allocation_state: %x", get_index(drc), state);
+
+ if (state == SPAPR_DR_ALLOCATION_STATE_USABLE) {
+ /* if there's no resource/device associated with the DRC, there's
+ * no way for us to put it in an allocation state consistent with
+ * being 'USABLE'. PAPR 2.7, 13.5.3.4 documents that this should
+ * result in an RTAS return code of -3 / "no such indicator"
+ */
+ if (!drc->dev) {
+ return RTAS_OUT_NO_SUCH_INDICATOR;
+ }
+ }
+
+ if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ drc->allocation_state = state;
+ if (drc->awaiting_release &&
+ drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
+ DPRINTFN("finalizing device removal");
+ drck->detach(drc, DEVICE(drc->dev), drc->detach_cb,
+ drc->detach_cb_opaque, NULL);
+ }
+ }
+ return RTAS_OUT_SUCCESS;
+}
+
+static uint32_t get_type(sPAPRDRConnector *drc)
+{
+ return drc->type;
+}
+
+static const char *get_name(sPAPRDRConnector *drc)
+{
+ return drc->name;
+}
+
+static const void *get_fdt(sPAPRDRConnector *drc, int *fdt_start_offset)
+{
+ if (fdt_start_offset) {
+ *fdt_start_offset = drc->fdt_start_offset;
+ }
+ return drc->fdt;
+}
+
+static void set_configured(sPAPRDRConnector *drc)
+{
+ DPRINTFN("drc: %x, set_configured", get_index(drc));
+
+ if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_UNISOLATED) {
+        /* guest should not be configuring an isolated device */
+ DPRINTFN("drc: %x, set_configured: skipping isolated device",
+ get_index(drc));
+ return;
+ }
+ drc->configured = true;
+}
+
+/*
+ * dr-entity-sense sensor value
+ * returned via get-sensor-state RTAS calls
+ * as expected by state diagram in PAPR+ 2.7, 13.4
+ * based on the current allocation/indicator/power states
+ * for the DR connector.
+ */
+static uint32_t entity_sense(sPAPRDRConnector *drc, sPAPRDREntitySense *state)
+{
+ if (drc->dev) {
+ if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI &&
+ drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
+ /* for logical DR, we return a state of UNUSABLE
+ * iff the allocation state UNUSABLE.
+ * Otherwise, report the state as USABLE/PRESENT,
+ * as we would for PCI.
+ */
+ *state = SPAPR_DR_ENTITY_SENSE_UNUSABLE;
+ } else {
+ /* this assumes all PCI devices are assigned to
+ * a 'live insertion' power domain, where QEMU
+ * manages power state automatically as opposed
+ * to the guest. present, non-PCI resources are
+ * unaffected by power state.
+ */
+ *state = SPAPR_DR_ENTITY_SENSE_PRESENT;
+ }
+ } else {
+ if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ /* PCI devices, and only PCI devices, use EMPTY
+ * in cases where we'd otherwise use UNUSABLE
+ */
+ *state = SPAPR_DR_ENTITY_SENSE_EMPTY;
+ } else {
+ *state = SPAPR_DR_ENTITY_SENSE_UNUSABLE;
+ }
+ }
+
+    DPRINTFN("drc: %x, entity_sense: %x", get_index(drc), *state);
+ return RTAS_OUT_SUCCESS;
+}
+
+static void prop_get_index(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ uint32_t value = (uint32_t)drck->get_index(drc);
+ visit_type_uint32(v, &value, name, errp);
+}
+
+static void prop_get_type(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ uint32_t value = (uint32_t)drck->get_type(drc);
+ visit_type_uint32(v, &value, name, errp);
+}
+
+static char *prop_get_name(Object *obj, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ return g_strdup(drck->get_name(drc));
+}
+
+static void prop_get_entity_sense(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ uint32_t value;
+
+ drck->entity_sense(drc, &value);
+ visit_type_uint32(v, &value, name, errp);
+}
+
+static void prop_get_fdt(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+ Error *err = NULL;
+ int fdt_offset_next, fdt_offset, fdt_depth;
+ void *fdt;
+
+ if (!drc->fdt) {
+ visit_start_struct(v, NULL, NULL, name, 0, &err);
+ if (!err) {
+ visit_end_struct(v, &err);
+ }
+ error_propagate(errp, err);
+ return;
+ }
+
+ fdt = drc->fdt;
+ fdt_offset = drc->fdt_start_offset;
+ fdt_depth = 0;
+
+ do {
+ const char *name = NULL;
+ const struct fdt_property *prop = NULL;
+ int prop_len = 0, name_len = 0;
+ uint32_t tag;
+
+ tag = fdt_next_tag(fdt, fdt_offset, &fdt_offset_next);
+ switch (tag) {
+ case FDT_BEGIN_NODE:
+ fdt_depth++;
+ name = fdt_get_name(fdt, fdt_offset, &name_len);
+ visit_start_struct(v, NULL, NULL, name, 0, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ break;
+ case FDT_END_NODE:
+ /* shouldn't ever see an FDT_END_NODE before FDT_BEGIN_NODE */
+ g_assert(fdt_depth > 0);
+ visit_end_struct(v, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ fdt_depth--;
+ break;
+ case FDT_PROP: {
+ int i;
+ prop = fdt_get_property_by_offset(fdt, fdt_offset, &prop_len);
+ name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
+ visit_start_list(v, name, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ for (i = 0; i < prop_len; i++) {
+ visit_type_uint8(v, (uint8_t *)&prop->data[i], NULL, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ }
+ visit_end_list(v, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ break;
+ }
+ default:
+ error_setg(&error_abort, "device FDT in unexpected state: %d", tag);
+ }
+ fdt_offset = fdt_offset_next;
+ } while (fdt_depth != 0);
+}
+
+static void attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
+ int fdt_start_offset, bool coldplug, Error **errp)
+{
+ DPRINTFN("drc: %x, attach", get_index(drc));
+
+ if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
+ error_setg(errp, "an attached device is still awaiting release");
+ return;
+ }
+ if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ g_assert(drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE);
+ }
+ g_assert(fdt || coldplug);
+
+ /* NOTE: setting initial isolation state to UNISOLATED means we can't
+ * detach unless guest has a userspace/kernel that moves this state
+ * back to ISOLATED in response to an unplug event, or this is done
+     * manually by the admin beforehand. if we force things while the guest
+     * may be accessing the device, we can easily crash the guest, so we
+     * defer completion of removal in such cases to the reset() hook.
+ */
+ if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ drc->isolation_state = SPAPR_DR_ISOLATION_STATE_UNISOLATED;
+ }
+ drc->indicator_state = SPAPR_DR_INDICATOR_STATE_ACTIVE;
+
+ drc->dev = d;
+ drc->fdt = fdt;
+ drc->fdt_start_offset = fdt_start_offset;
+ drc->configured = coldplug;
+
+ object_property_add_link(OBJECT(drc), "device",
+ object_get_typename(OBJECT(drc->dev)),
+ (Object **)(&drc->dev),
+ NULL, 0, NULL);
+}
+
+static void detach(sPAPRDRConnector *drc, DeviceState *d,
+ spapr_drc_detach_cb *detach_cb,
+ void *detach_cb_opaque, Error **errp)
+{
+ DPRINTFN("drc: %x, detach", get_index(drc));
+
+ drc->detach_cb = detach_cb;
+ drc->detach_cb_opaque = detach_cb_opaque;
+
+ if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
+ DPRINTFN("awaiting transition to isolated state before removal");
+ drc->awaiting_release = true;
+ return;
+ }
+
+ if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI &&
+ drc->allocation_state != SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
+ DPRINTFN("awaiting transition to unusable state before removal");
+ drc->awaiting_release = true;
+ return;
+ }
+
+ drc->indicator_state = SPAPR_DR_INDICATOR_STATE_INACTIVE;
+
+ if (drc->detach_cb) {
+ drc->detach_cb(drc->dev, drc->detach_cb_opaque);
+ }
+
+ drc->awaiting_release = false;
+ g_free(drc->fdt);
+ drc->fdt = NULL;
+ drc->fdt_start_offset = 0;
+ object_property_del(OBJECT(drc), "device", NULL);
+ drc->dev = NULL;
+ drc->detach_cb = NULL;
+ drc->detach_cb_opaque = NULL;
+}
+
+static bool release_pending(sPAPRDRConnector *drc)
+{
+ return drc->awaiting_release;
+}
+
+static void reset(DeviceState *d)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ DPRINTFN("drc reset: %x", drck->get_index(drc));
+    /* immediately upon reset we can assume that DRCs whose devices
+     * are pending removal can be safely removed, and that they will
+ * subsequently be left in an ISOLATED state. move the DRC to this
+ * state in these cases (which will in turn complete any pending
+ * device removals)
+ */
+ if (drc->awaiting_release) {
+ drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_ISOLATED);
+ /* generally this should also finalize the removal, but if the device
+ * hasn't yet been configured we normally defer removal under the
+ * assumption that this transition is taking place as part of device
+ * configuration. so check if we're still waiting after this, and
+ * force removal if we are
+ */
+ if (drc->awaiting_release) {
+ drck->detach(drc, DEVICE(drc->dev), drc->detach_cb,
+ drc->detach_cb_opaque, NULL);
+ }
+
+ /* non-PCI devices may be awaiting a transition to UNUSABLE */
+ if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI &&
+ drc->awaiting_release) {
+ drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_UNUSABLE);
+ }
+ }
+}
+
+static void realize(DeviceState *d, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ Object *root_container;
+ char link_name[256];
+ gchar *child_name;
+ Error *err = NULL;
+
+ DPRINTFN("drc realize: %x", drck->get_index(drc));
+    /* NOTE: we do this as part of realize/unrealize because the guest
+     * will communicate with the DRC via RTAS calls
+ * referencing the global DRC index. By unlinking the DRC
+ * from DRC_CONTAINER_PATH/<drc_index> we effectively make it
+ * inaccessible by the guest, since lookups rely on this path
+ * existing in the composition tree
+ */
+ root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
+ snprintf(link_name, sizeof(link_name), "%x", drck->get_index(drc));
+ child_name = object_get_canonical_path_component(OBJECT(drc));
+ DPRINTFN("drc child name: %s", child_name);
+ object_property_add_alias(root_container, link_name,
+ drc->owner, child_name, &err);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
+ error_free(err);
+ object_unref(OBJECT(drc));
+ }
+ g_free(child_name);
+ DPRINTFN("drc realize complete");
+}
+
+static void unrealize(DeviceState *d, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ Object *root_container;
+ char name[256];
+ Error *err = NULL;
+
+ DPRINTFN("drc unrealize: %x", drck->get_index(drc));
+ root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
+ snprintf(name, sizeof(name), "%x", drck->get_index(drc));
+ object_property_del(root_container, name, &err);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
+ error_free(err);
+ object_unref(OBJECT(drc));
+ }
+}
+
+sPAPRDRConnector *spapr_dr_connector_new(Object *owner,
+ sPAPRDRConnectorType type,
+ uint32_t id)
+{
+ sPAPRDRConnector *drc =
+ SPAPR_DR_CONNECTOR(object_new(TYPE_SPAPR_DR_CONNECTOR));
+ char *prop_name;
+
+ g_assert(type);
+
+ drc->type = type;
+ drc->id = id;
+ drc->owner = owner;
+ prop_name = g_strdup_printf("dr-connector[%"PRIu32"]", get_index(drc));
+ object_property_add_child(owner, prop_name, OBJECT(drc), NULL);
+ object_property_set_bool(OBJECT(drc), true, "realized", NULL);
+ g_free(prop_name);
+
+ /* human-readable name for a DRC to encode into the DT
+     * description. this is mainly used within a guest in place
+ * of the unique DRC index.
+ *
+ * in the case of VIO/PCI devices, it corresponds to a
+ * "location code" that maps a logical device/function (DRC index)
+ * to a physical (or virtual in the case of VIO) location in the
+ * system by chaining together the "location label" for each
+ * encapsulating component.
+ *
+ * since this is more to do with diagnosing physical hardware
+ * issues than guest compatibility, we choose location codes/DRC
+ * names that adhere to the documented format, but avoid encoding
+ * the entire topology information into the label/code, instead
+ * just using the location codes based on the labels for the
+ * endpoints (VIO/PCI adaptor connectors), which is basically
+ * just "C" followed by an integer ID.
+ *
+ * DRC names as documented by PAPR+ v2.7, 13.5.2.4
+ * location codes as documented by PAPR+ v2.7, 12.3.1.5
+ */
+ switch (drc->type) {
+ case SPAPR_DR_CONNECTOR_TYPE_CPU:
+ drc->name = g_strdup_printf("CPU %d", id);
+ break;
+ case SPAPR_DR_CONNECTOR_TYPE_PHB:
+ drc->name = g_strdup_printf("PHB %d", id);
+ break;
+ case SPAPR_DR_CONNECTOR_TYPE_VIO:
+ case SPAPR_DR_CONNECTOR_TYPE_PCI:
+ drc->name = g_strdup_printf("C%d", id);
+ break;
+ case SPAPR_DR_CONNECTOR_TYPE_LMB:
+ drc->name = g_strdup_printf("LMB %d", id);
+ break;
+ default:
+ g_assert(false);
+ }
+
+    /* PCI slots always start in a USABLE state, and stay there */
+ if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_USABLE;
+ }
+
+ return drc;
+}
+
+static void spapr_dr_connector_instance_init(Object *obj)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+
+ object_property_add_uint32_ptr(obj, "isolation-state",
+ &drc->isolation_state, NULL);
+ object_property_add_uint32_ptr(obj, "indicator-state",
+ &drc->indicator_state, NULL);
+ object_property_add_uint32_ptr(obj, "allocation-state",
+ &drc->allocation_state, NULL);
+ object_property_add_uint32_ptr(obj, "id", &drc->id, NULL);
+ object_property_add(obj, "index", "uint32", prop_get_index,
+ NULL, NULL, NULL, NULL);
+ object_property_add(obj, "connector_type", "uint32", prop_get_type,
+ NULL, NULL, NULL, NULL);
+ object_property_add_str(obj, "name", prop_get_name, NULL, NULL);
+ object_property_add(obj, "entity-sense", "uint32", prop_get_entity_sense,
+ NULL, NULL, NULL, NULL);
+ object_property_add(obj, "fdt", "struct", prop_get_fdt,
+ NULL, NULL, NULL, NULL);
+}
+
+static void spapr_dr_connector_class_init(ObjectClass *k, void *data)
+{
+ DeviceClass *dk = DEVICE_CLASS(k);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);
+
+ dk->reset = reset;
+ dk->realize = realize;
+ dk->unrealize = unrealize;
+ drck->set_isolation_state = set_isolation_state;
+ drck->set_indicator_state = set_indicator_state;
+ drck->set_allocation_state = set_allocation_state;
+ drck->get_index = get_index;
+ drck->get_type = get_type;
+ drck->get_name = get_name;
+ drck->get_fdt = get_fdt;
+ drck->set_configured = set_configured;
+ drck->entity_sense = entity_sense;
+ drck->attach = attach;
+ drck->detach = detach;
+ drck->release_pending = release_pending;
+ /*
+     * Reason: it crashes. FIXME: find and document the real reason
+ */
+ dk->cannot_instantiate_with_device_add_yet = true;
+}
+
+static const TypeInfo spapr_dr_connector_info = {
+ .name = TYPE_SPAPR_DR_CONNECTOR,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(sPAPRDRConnector),
+ .instance_init = spapr_dr_connector_instance_init,
+ .class_size = sizeof(sPAPRDRConnectorClass),
+ .class_init = spapr_dr_connector_class_init,
+};
+
+static void spapr_drc_register_types(void)
+{
+ type_register_static(&spapr_dr_connector_info);
+}
+
+type_init(spapr_drc_register_types)
+
+/* helper functions for external users */
+
+sPAPRDRConnector *spapr_dr_connector_by_index(uint32_t index)
+{
+ Object *obj;
+ char name[256];
+
+ snprintf(name, sizeof(name), "%s/%x", DRC_CONTAINER_PATH, index);
+ obj = object_resolve_path(name, NULL);
+
+ return !obj ? NULL : SPAPR_DR_CONNECTOR(obj);
+}
+
+sPAPRDRConnector *spapr_dr_connector_by_id(sPAPRDRConnectorType type,
+ uint32_t id)
+{
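+    /* rebuild the global DRC index from the connector type and unit id,
+     * mirroring the encoding used by get_index(), then look the DRC up
+     * by that index
+     */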
+ return spapr_dr_connector_by_index(
+ (get_type_shift(type) << DRC_INDEX_TYPE_SHIFT) |
+ (id & DRC_INDEX_ID_MASK));
+}
+
+/* generate a string that describes the DRC to encode into the
+ * device tree.
+ *
+ * as documented by PAPR+ v2.7, 13.5.2.6 and C.6.1
+ */
+static const char *spapr_drc_get_type_str(sPAPRDRConnectorType type)
+{
+ switch (type) {
+ case SPAPR_DR_CONNECTOR_TYPE_CPU:
+ return "CPU";
+ case SPAPR_DR_CONNECTOR_TYPE_PHB:
+ return "PHB";
+ case SPAPR_DR_CONNECTOR_TYPE_VIO:
+ return "SLOT";
+ case SPAPR_DR_CONNECTOR_TYPE_PCI:
+ return "28";
+ case SPAPR_DR_CONNECTOR_TYPE_LMB:
+ return "MEM";
+ default:
+ g_assert(false);
+ }
+
+ return NULL;
+}
+
+/**
+ * spapr_drc_populate_dt
+ *
+ * @fdt: libfdt device tree
+ * @path: path in the DT to generate properties
+ * @owner: parent Object/DeviceState for which to generate DRC
+ * descriptions for
+ * @drc_type_mask: mask of sPAPRDRConnectorType values corresponding
+ * to the types of DRCs to generate entries for
+ *
+ * generate OF properties to describe DRC topology/indices to guests
+ *
+ * as documented in PAPR+ v2.1, 13.5.2
+ */
+int spapr_drc_populate_dt(void *fdt, int fdt_offset, Object *owner,
+ uint32_t drc_type_mask)
+{
+ Object *root_container;
+ ObjectProperty *prop;
+ ObjectPropertyIterator *iter;
+ uint32_t drc_count = 0;
+ GArray *drc_indexes, *drc_power_domains;
+ GString *drc_names, *drc_types;
+ int ret;
+
+    /* the first entry of each property is a 32-bit integer encoding
+ * the number of elements in the array. we won't know this until
+ * we complete the iteration through all the matching DRCs, but
+ * reserve the space now and set the offsets accordingly so we
+ * can fill them in later.
+ */
+ drc_indexes = g_array_new(false, true, sizeof(uint32_t));
+ drc_indexes = g_array_set_size(drc_indexes, 1);
+ drc_power_domains = g_array_new(false, true, sizeof(uint32_t));
+ drc_power_domains = g_array_set_size(drc_power_domains, 1);
+ drc_names = g_string_set_size(g_string_new(NULL), sizeof(uint32_t));
+ drc_types = g_string_set_size(g_string_new(NULL), sizeof(uint32_t));
+
+    /* aliases for all DRConnector objects will be rooted in the QOM
+ * composition tree at DRC_CONTAINER_PATH
+ */
+ root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
+
+ iter = object_property_iter_init(root_container);
+ while ((prop = object_property_iter_next(iter))) {
+ Object *obj;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+ uint32_t drc_index, drc_power_domain;
+
+ if (!strstart(prop->type, "link<", NULL)) {
+ continue;
+ }
+
+ obj = object_property_get_link(root_container, prop->name, NULL);
+ drc = SPAPR_DR_CONNECTOR(obj);
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ if (owner && (drc->owner != owner)) {
+ continue;
+ }
+
+ if ((drc->type & drc_type_mask) == 0) {
+ continue;
+ }
+
+ drc_count++;
+
+ /* ibm,drc-indexes */
+ drc_index = cpu_to_be32(drck->get_index(drc));
+ g_array_append_val(drc_indexes, drc_index);
+
+ /* ibm,drc-power-domains */
+ drc_power_domain = cpu_to_be32(-1);
+ g_array_append_val(drc_power_domains, drc_power_domain);
+
+ /* ibm,drc-names */
+ drc_names = g_string_append(drc_names, drck->get_name(drc));
+ drc_names = g_string_insert_len(drc_names, -1, "\0", 1);
+
+ /* ibm,drc-types */
+ drc_types = g_string_append(drc_types,
+ spapr_drc_get_type_str(drc->type));
+ drc_types = g_string_insert_len(drc_types, -1, "\0", 1);
+ }
+ object_property_iter_free(iter);
+
+ /* now write the drc count into the space we reserved at the
+     * beginning of the arrays
+ */
+ *(uint32_t *)drc_indexes->data = cpu_to_be32(drc_count);
+ *(uint32_t *)drc_power_domains->data = cpu_to_be32(drc_count);
+ *(uint32_t *)drc_names->str = cpu_to_be32(drc_count);
+ *(uint32_t *)drc_types->str = cpu_to_be32(drc_count);
+
+ ret = fdt_setprop(fdt, fdt_offset, "ibm,drc-indexes",
+ drc_indexes->data,
+ drc_indexes->len * sizeof(uint32_t));
+ if (ret) {
+ fprintf(stderr, "Couldn't create ibm,drc-indexes property\n");
+ goto out;
+ }
+
+ ret = fdt_setprop(fdt, fdt_offset, "ibm,drc-power-domains",
+ drc_power_domains->data,
+ drc_power_domains->len * sizeof(uint32_t));
+ if (ret) {
+ fprintf(stderr, "Couldn't finalize ibm,drc-power-domains property\n");
+ goto out;
+ }
+
+ ret = fdt_setprop(fdt, fdt_offset, "ibm,drc-names",
+ drc_names->str, drc_names->len);
+ if (ret) {
+ fprintf(stderr, "Couldn't finalize ibm,drc-names property\n");
+ goto out;
+ }
+
+ ret = fdt_setprop(fdt, fdt_offset, "ibm,drc-types",
+ drc_types->str, drc_types->len);
+ if (ret) {
+ fprintf(stderr, "Couldn't finalize ibm,drc-types property\n");
+ goto out;
+ }
+
+out:
+ g_array_free(drc_indexes, true);
+ g_array_free(drc_power_domains, true);
+ g_string_free(drc_names, true);
+ g_string_free(drc_types, true);
+
+ return ret;
+}
diff --git a/src/hw/ppc/spapr_events.c b/src/hw/ppc/spapr_events.c
new file mode 100644
index 0000000..744ea62
--- /dev/null
+++ b/src/hw/ppc/spapr_events.c
@@ -0,0 +1,596 @@
+/*
+ * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
+ *
+ * RTAS events handling
+ *
+ * Copyright (c) 2012 David Gibson, IBM Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+#include "cpu.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/char.h"
+#include "hw/qdev.h"
+#include "sysemu/device_tree.h"
+
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_vio.h"
+#include "hw/pci/pci.h"
+#include "hw/pci-host/spapr.h"
+#include "hw/ppc/spapr_drc.h"
+
+#include <libfdt.h>
+
+struct rtas_error_log {
+ uint32_t summary;
+#define RTAS_LOG_VERSION_MASK 0xff000000
+#define RTAS_LOG_VERSION_6 0x06000000
+#define RTAS_LOG_SEVERITY_MASK 0x00e00000
+#define RTAS_LOG_SEVERITY_ALREADY_REPORTED 0x00c00000
+#define RTAS_LOG_SEVERITY_FATAL 0x00a00000
+#define RTAS_LOG_SEVERITY_ERROR 0x00800000
+#define RTAS_LOG_SEVERITY_ERROR_SYNC 0x00600000
+#define RTAS_LOG_SEVERITY_WARNING 0x00400000
+#define RTAS_LOG_SEVERITY_EVENT 0x00200000
+#define RTAS_LOG_SEVERITY_NO_ERROR 0x00000000
+#define RTAS_LOG_DISPOSITION_MASK 0x00180000
+#define RTAS_LOG_DISPOSITION_FULLY_RECOVERED 0x00000000
+#define RTAS_LOG_DISPOSITION_LIMITED_RECOVERY 0x00080000
+#define RTAS_LOG_DISPOSITION_NOT_RECOVERED 0x00100000
+#define RTAS_LOG_OPTIONAL_PART_PRESENT 0x00040000
+#define RTAS_LOG_INITIATOR_MASK 0x0000f000
+#define RTAS_LOG_INITIATOR_UNKNOWN 0x00000000
+#define RTAS_LOG_INITIATOR_CPU 0x00001000
+#define RTAS_LOG_INITIATOR_PCI 0x00002000
+#define RTAS_LOG_INITIATOR_MEMORY 0x00004000
+#define RTAS_LOG_INITIATOR_HOTPLUG 0x00006000
+#define RTAS_LOG_TARGET_MASK 0x00000f00
+#define RTAS_LOG_TARGET_UNKNOWN 0x00000000
+#define RTAS_LOG_TARGET_CPU 0x00000100
+#define RTAS_LOG_TARGET_PCI 0x00000200
+#define RTAS_LOG_TARGET_MEMORY 0x00000400
+#define RTAS_LOG_TARGET_HOTPLUG 0x00000600
+#define RTAS_LOG_TYPE_MASK 0x000000ff
+#define RTAS_LOG_TYPE_OTHER 0x00000000
+#define RTAS_LOG_TYPE_RETRY 0x00000001
+#define RTAS_LOG_TYPE_TCE_ERR 0x00000002
+#define RTAS_LOG_TYPE_INTERN_DEV_FAIL 0x00000003
+#define RTAS_LOG_TYPE_TIMEOUT 0x00000004
+#define RTAS_LOG_TYPE_DATA_PARITY 0x00000005
+#define RTAS_LOG_TYPE_ADDR_PARITY 0x00000006
+#define RTAS_LOG_TYPE_CACHE_PARITY 0x00000007
+#define RTAS_LOG_TYPE_ADDR_INVALID 0x00000008
+#define RTAS_LOG_TYPE_ECC_UNCORR 0x00000009
+#define RTAS_LOG_TYPE_ECC_CORR 0x0000000a
+#define RTAS_LOG_TYPE_EPOW 0x00000040
+#define RTAS_LOG_TYPE_HOTPLUG 0x000000e5
+ uint32_t extended_length;
+} QEMU_PACKED;
+
+struct rtas_event_log_v6 {
+ uint8_t b0;
+#define RTAS_LOG_V6_B0_VALID 0x80
+#define RTAS_LOG_V6_B0_UNRECOVERABLE_ERROR 0x40
+#define RTAS_LOG_V6_B0_RECOVERABLE_ERROR 0x20
+#define RTAS_LOG_V6_B0_DEGRADED_OPERATION 0x10
+#define RTAS_LOG_V6_B0_PREDICTIVE_ERROR 0x08
+#define RTAS_LOG_V6_B0_NEW_LOG 0x04
+#define RTAS_LOG_V6_B0_BIGENDIAN 0x02
+ uint8_t _resv1;
+ uint8_t b2;
+#define RTAS_LOG_V6_B2_POWERPC_FORMAT 0x80
+#define RTAS_LOG_V6_B2_LOG_FORMAT_MASK 0x0f
+#define RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT 0x0e
+ uint8_t _resv2[9];
+ uint32_t company;
+#define RTAS_LOG_V6_COMPANY_IBM 0x49424d00 /* IBM<null> */
+} QEMU_PACKED;
+
+struct rtas_event_log_v6_section_header {
+ uint16_t section_id;
+ uint16_t section_length;
+ uint8_t section_version;
+ uint8_t section_subtype;
+ uint16_t creator_component_id;
+} QEMU_PACKED;
+
+struct rtas_event_log_v6_maina {
+#define RTAS_LOG_V6_SECTION_ID_MAINA 0x5048 /* PH */
+ struct rtas_event_log_v6_section_header hdr;
+ uint32_t creation_date; /* BCD: YYYYMMDD */
+ uint32_t creation_time; /* BCD: HHMMSS00 */
+ uint8_t _platform1[8];
+ char creator_id;
+ uint8_t _resv1[2];
+ uint8_t section_count;
+ uint8_t _resv2[4];
+ uint8_t _platform2[8];
+ uint32_t plid;
+ uint8_t _platform3[4];
+} QEMU_PACKED;
+
+struct rtas_event_log_v6_mainb {
+#define RTAS_LOG_V6_SECTION_ID_MAINB 0x5548 /* UH */
+ struct rtas_event_log_v6_section_header hdr;
+ uint8_t subsystem_id;
+ uint8_t _platform1;
+ uint8_t event_severity;
+ uint8_t event_subtype;
+ uint8_t _platform2[4];
+ uint8_t _resv1[2];
+ uint16_t action_flags;
+ uint8_t _resv2[4];
+} QEMU_PACKED;
+
+struct rtas_event_log_v6_epow {
+#define RTAS_LOG_V6_SECTION_ID_EPOW 0x4550 /* EP */
+ struct rtas_event_log_v6_section_header hdr;
+ uint8_t sensor_value;
+#define RTAS_LOG_V6_EPOW_ACTION_RESET 0
+#define RTAS_LOG_V6_EPOW_ACTION_WARN_COOLING 1
+#define RTAS_LOG_V6_EPOW_ACTION_WARN_POWER 2
+#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN 3
+#define RTAS_LOG_V6_EPOW_ACTION_SYSTEM_HALT 4
+#define RTAS_LOG_V6_EPOW_ACTION_MAIN_ENCLOSURE 5
+#define RTAS_LOG_V6_EPOW_ACTION_POWER_OFF 7
+ uint8_t event_modifier;
+#define RTAS_LOG_V6_EPOW_MODIFIER_NORMAL 1
+#define RTAS_LOG_V6_EPOW_MODIFIER_ON_UPS 2
+#define RTAS_LOG_V6_EPOW_MODIFIER_CRITICAL 3
+#define RTAS_LOG_V6_EPOW_MODIFIER_TEMPERATURE 4
+ uint8_t extended_modifier;
+#define RTAS_LOG_V6_EPOW_XMODIFIER_SYSTEM_WIDE 0
+#define RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC 1
+ uint8_t _resv;
+ uint64_t reason_code;
+} QEMU_PACKED;
+
+struct epow_log_full {
+ struct rtas_error_log hdr;
+ struct rtas_event_log_v6 v6hdr;
+ struct rtas_event_log_v6_maina maina;
+ struct rtas_event_log_v6_mainb mainb;
+ struct rtas_event_log_v6_epow epow;
+} QEMU_PACKED;
+
+struct rtas_event_log_v6_hp {
+#define RTAS_LOG_V6_SECTION_ID_HOTPLUG 0x4850 /* HP */
+ struct rtas_event_log_v6_section_header hdr;
+ uint8_t hotplug_type;
+#define RTAS_LOG_V6_HP_TYPE_CPU 1
+#define RTAS_LOG_V6_HP_TYPE_MEMORY 2
+#define RTAS_LOG_V6_HP_TYPE_SLOT 3
+#define RTAS_LOG_V6_HP_TYPE_PHB 4
+#define RTAS_LOG_V6_HP_TYPE_PCI 5
+ uint8_t hotplug_action;
+#define RTAS_LOG_V6_HP_ACTION_ADD 1
+#define RTAS_LOG_V6_HP_ACTION_REMOVE 2
+ uint8_t hotplug_identifier;
+#define RTAS_LOG_V6_HP_ID_DRC_NAME 1
+#define RTAS_LOG_V6_HP_ID_DRC_INDEX 2
+#define RTAS_LOG_V6_HP_ID_DRC_COUNT 3
+ uint8_t reserved;
+ union {
+ uint32_t index;
+ uint32_t count;
+ char name[1];
+ } drc;
+} QEMU_PACKED;
+
+struct hp_log_full {
+ struct rtas_error_log hdr;
+ struct rtas_event_log_v6 v6hdr;
+ struct rtas_event_log_v6_maina maina;
+ struct rtas_event_log_v6_mainb mainb;
+ struct rtas_event_log_v6_hp hp;
+} QEMU_PACKED;
+
+#define EVENT_MASK_INTERNAL_ERRORS 0x80000000
+#define EVENT_MASK_EPOW 0x40000000
+#define EVENT_MASK_HOTPLUG 0x10000000
+#define EVENT_MASK_IO 0x08000000
+
+#define _FDT(exp) \
+ do { \
+ int ret = (exp); \
+ if (ret < 0) { \
+ fprintf(stderr, "qemu: error creating device tree: %s: %s\n", \
+ #exp, fdt_strerror(ret)); \
+ exit(1); \
+ } \
+ } while (0)
+
+void spapr_events_fdt_skel(void *fdt, uint32_t check_exception_irq)
+{
+ uint32_t irq_ranges[] = {cpu_to_be32(check_exception_irq), cpu_to_be32(1)};
+ uint32_t interrupts[] = {cpu_to_be32(check_exception_irq), 0};
+
+ _FDT((fdt_begin_node(fdt, "event-sources")));
+
+ _FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));
+ _FDT((fdt_property_cell(fdt, "#interrupt-cells", 2)));
+ _FDT((fdt_property(fdt, "interrupt-ranges",
+ irq_ranges, sizeof(irq_ranges))));
+
+ _FDT((fdt_begin_node(fdt, "epow-events")));
+ _FDT((fdt_property(fdt, "interrupts", interrupts, sizeof(interrupts))));
+ _FDT((fdt_end_node(fdt)));
+
+ _FDT((fdt_end_node(fdt)));
+}
+
+static void rtas_event_log_queue(int log_type, void *data, bool exception)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+ sPAPREventLogEntry *entry = g_new(sPAPREventLogEntry, 1);
+
+ g_assert(data);
+ entry->log_type = log_type;
+ entry->exception = exception;
+ entry->data = data;
+ QTAILQ_INSERT_TAIL(&spapr->pending_events, entry, next);
+}
+
+static sPAPREventLogEntry *rtas_event_log_dequeue(uint32_t event_mask,
+ bool exception)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+ sPAPREventLogEntry *entry = NULL;
+
+    /* only the EPOW event mask is checked for now; both EPOW and
+     * hotplug events are surfaced through it */
+ if ((event_mask & EVENT_MASK_EPOW) == 0) {
+ return NULL;
+ }
+
+ QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
+ if (entry->exception != exception) {
+ continue;
+ }
+
+ /* EPOW and hotplug events are surfaced in the same manner */
+ if (entry->log_type == RTAS_LOG_TYPE_EPOW ||
+ entry->log_type == RTAS_LOG_TYPE_HOTPLUG) {
+ break;
+ }
+ }
+
+ if (entry) {
+ QTAILQ_REMOVE(&spapr->pending_events, entry, next);
+ }
+
+ return entry;
+}
+
+static bool rtas_event_log_contains(uint32_t event_mask, bool exception)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+ sPAPREventLogEntry *entry = NULL;
+
+    /* only the EPOW event mask is checked for now; both EPOW and
+     * hotplug events are surfaced through it */
+ if ((event_mask & EVENT_MASK_EPOW) == 0) {
+ return false;
+ }
+
+ QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
+ if (entry->exception != exception) {
+ continue;
+ }
+
+ /* EPOW and hotplug events are surfaced in the same manner */
+ if (entry->log_type == RTAS_LOG_TYPE_EPOW ||
+ entry->log_type == RTAS_LOG_TYPE_HOTPLUG) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static uint32_t next_plid;
+
+static void spapr_init_v6hdr(struct rtas_event_log_v6 *v6hdr)
+{
+ v6hdr->b0 = RTAS_LOG_V6_B0_VALID | RTAS_LOG_V6_B0_NEW_LOG
+ | RTAS_LOG_V6_B0_BIGENDIAN;
+ v6hdr->b2 = RTAS_LOG_V6_B2_POWERPC_FORMAT
+ | RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT;
+ v6hdr->company = cpu_to_be32(RTAS_LOG_V6_COMPANY_IBM);
+}
+
+static void spapr_init_maina(struct rtas_event_log_v6_maina *maina,
+ int section_count)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+ struct tm tm;
+ int year;
+
+ maina->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINA);
+ maina->hdr.section_length = cpu_to_be16(sizeof(*maina));
+ /* FIXME: section version, subtype and creator id? */
+ spapr_rtc_read(spapr->rtc, &tm, NULL);
+ year = tm.tm_year + 1900;
+ maina->creation_date = cpu_to_be32((to_bcd(year / 100) << 24)
+ | (to_bcd(year % 100) << 16)
+ | (to_bcd(tm.tm_mon + 1) << 8)
+ | to_bcd(tm.tm_mday));
+ maina->creation_time = cpu_to_be32((to_bcd(tm.tm_hour) << 24)
+ | (to_bcd(tm.tm_min) << 16)
+ | (to_bcd(tm.tm_sec) << 8));
+ maina->creator_id = 'H'; /* Hypervisor */
+ maina->section_count = section_count;
+ maina->plid = next_plid++;
+}
+
+static void spapr_powerdown_req(Notifier *n, void *opaque)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+ struct rtas_error_log *hdr;
+ struct rtas_event_log_v6 *v6hdr;
+ struct rtas_event_log_v6_maina *maina;
+ struct rtas_event_log_v6_mainb *mainb;
+ struct rtas_event_log_v6_epow *epow;
+ struct epow_log_full *new_epow;
+
+ new_epow = g_malloc0(sizeof(*new_epow));
+ hdr = &new_epow->hdr;
+ v6hdr = &new_epow->v6hdr;
+ maina = &new_epow->maina;
+ mainb = &new_epow->mainb;
+ epow = &new_epow->epow;
+
+ hdr->summary = cpu_to_be32(RTAS_LOG_VERSION_6
+ | RTAS_LOG_SEVERITY_EVENT
+ | RTAS_LOG_DISPOSITION_NOT_RECOVERED
+ | RTAS_LOG_OPTIONAL_PART_PRESENT
+ | RTAS_LOG_TYPE_EPOW);
+ hdr->extended_length = cpu_to_be32(sizeof(*new_epow)
+ - sizeof(new_epow->hdr));
+
+ spapr_init_v6hdr(v6hdr);
+ spapr_init_maina(maina, 3 /* Main-A, Main-B and EPOW */);
+
+ mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
+ mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
+ /* FIXME: section version, subtype and creator id? */
+ mainb->subsystem_id = 0xa0; /* External environment */
+ mainb->event_severity = 0x00; /* Informational / non-error */
+ mainb->event_subtype = 0xd0; /* Normal shutdown */
+
+ epow->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_EPOW);
+ epow->hdr.section_length = cpu_to_be16(sizeof(*epow));
+ epow->hdr.section_version = 2; /* includes extended modifier */
+ /* FIXME: section subtype and creator id? */
+ epow->sensor_value = RTAS_LOG_V6_EPOW_ACTION_SYSTEM_SHUTDOWN;
+ epow->event_modifier = RTAS_LOG_V6_EPOW_MODIFIER_NORMAL;
+ epow->extended_modifier = RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC;
+
+ rtas_event_log_queue(RTAS_LOG_TYPE_EPOW, new_epow, true);
+
+ qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->check_exception_irq));
+}
+
+static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action,
+ sPAPRDRConnectorType drc_type,
+ uint32_t drc)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+ struct hp_log_full *new_hp;
+ struct rtas_error_log *hdr;
+ struct rtas_event_log_v6 *v6hdr;
+ struct rtas_event_log_v6_maina *maina;
+ struct rtas_event_log_v6_mainb *mainb;
+ struct rtas_event_log_v6_hp *hp;
+
+ new_hp = g_malloc0(sizeof(struct hp_log_full));
+ hdr = &new_hp->hdr;
+ v6hdr = &new_hp->v6hdr;
+ maina = &new_hp->maina;
+ mainb = &new_hp->mainb;
+ hp = &new_hp->hp;
+
+ hdr->summary = cpu_to_be32(RTAS_LOG_VERSION_6
+ | RTAS_LOG_SEVERITY_EVENT
+ | RTAS_LOG_DISPOSITION_NOT_RECOVERED
+ | RTAS_LOG_OPTIONAL_PART_PRESENT
+ | RTAS_LOG_INITIATOR_HOTPLUG
+ | RTAS_LOG_TYPE_HOTPLUG);
+ hdr->extended_length = cpu_to_be32(sizeof(*new_hp)
+ - sizeof(new_hp->hdr));
+
+ spapr_init_v6hdr(v6hdr);
+ spapr_init_maina(maina, 3 /* Main-A, Main-B, HP */);
+
+ mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
+ mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
+ mainb->subsystem_id = 0x80; /* External environment */
+ mainb->event_severity = 0x00; /* Informational / non-error */
+ mainb->event_subtype = 0x00; /* Normal shutdown */
+
+ hp->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_HOTPLUG);
+ hp->hdr.section_length = cpu_to_be16(sizeof(*hp));
+    hp->hdr.section_version = 1; /* hotplug section format version */
+ hp->hotplug_action = hp_action;
+ hp->hotplug_identifier = hp_id;
+
+ switch (drc_type) {
+ case SPAPR_DR_CONNECTOR_TYPE_PCI:
+ hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI;
+ break;
+ case SPAPR_DR_CONNECTOR_TYPE_LMB:
+ hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_MEMORY;
+ break;
+ default:
+ /* we shouldn't be signaling hotplug events for resources
+ * that don't support them
+ */
+ g_assert(false);
+ return;
+ }
+
+ if (hp_id == RTAS_LOG_V6_HP_ID_DRC_COUNT) {
+ hp->drc.count = cpu_to_be32(drc);
+ } else if (hp_id == RTAS_LOG_V6_HP_ID_DRC_INDEX) {
+ hp->drc.index = cpu_to_be32(drc);
+ }
+
+ rtas_event_log_queue(RTAS_LOG_TYPE_HOTPLUG, new_hp, true);
+
+ qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->check_exception_irq));
+}
+
+void spapr_hotplug_req_add_by_index(sPAPRDRConnector *drc)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ sPAPRDRConnectorType drc_type = drck->get_type(drc);
+    uint32_t index = drck->get_index(drc);
+
+ spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
+ RTAS_LOG_V6_HP_ACTION_ADD, drc_type, index);
+}
+
+void spapr_hotplug_req_remove_by_index(sPAPRDRConnector *drc)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ sPAPRDRConnectorType drc_type = drck->get_type(drc);
+    uint32_t index = drck->get_index(drc);
+
+ spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_INDEX,
+ RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, index);
+}
+
+void spapr_hotplug_req_add_by_count(sPAPRDRConnectorType drc_type,
+ uint32_t count)
+{
+ spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
+ RTAS_LOG_V6_HP_ACTION_ADD, drc_type, count);
+}
+
+void spapr_hotplug_req_remove_by_count(sPAPRDRConnectorType drc_type,
+ uint32_t count)
+{
+ spapr_hotplug_req_event(RTAS_LOG_V6_HP_ID_DRC_COUNT,
+ RTAS_LOG_V6_HP_ACTION_REMOVE, drc_type, count);
+}
+
+static void check_exception(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ uint32_t mask, buf, len, event_len;
+ uint64_t xinfo;
+ sPAPREventLogEntry *event;
+ struct rtas_error_log *hdr;
+
+ if ((nargs < 6) || (nargs > 7) || nret != 1) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ xinfo = rtas_ld(args, 1);
+ mask = rtas_ld(args, 2);
+ buf = rtas_ld(args, 4);
+ len = rtas_ld(args, 5);
+ if (nargs == 7) {
+ xinfo |= (uint64_t)rtas_ld(args, 6) << 32;
+ }
+
+ event = rtas_event_log_dequeue(mask, true);
+ if (!event) {
+ goto out_no_events;
+ }
+
+ hdr = event->data;
+ event_len = be32_to_cpu(hdr->extended_length) + sizeof(*hdr);
+
+ if (event_len < len) {
+ len = event_len;
+ }
+
+ cpu_physical_memory_write(buf, event->data, len);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ g_free(event->data);
+ g_free(event);
+
+ /* according to PAPR+, the IRQ must be left asserted, or re-asserted, if
+ * there are still pending events to be fetched via check-exception. We
+ * do the latter here, since our code relies on edge-triggered
+ * interrupts.
+ */
+ if (rtas_event_log_contains(mask, true)) {
+ qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->check_exception_irq));
+ }
+
+ return;
+
+out_no_events:
+ rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
+}
+
+static void event_scan(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ uint32_t mask, buf, len, event_len;
+ sPAPREventLogEntry *event;
+ struct rtas_error_log *hdr;
+
+ if (nargs != 4 || nret != 1) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ mask = rtas_ld(args, 0);
+ buf = rtas_ld(args, 2);
+ len = rtas_ld(args, 3);
+
+ event = rtas_event_log_dequeue(mask, false);
+ if (!event) {
+ goto out_no_events;
+ }
+
+ hdr = event->data;
+ event_len = be32_to_cpu(hdr->extended_length) + sizeof(*hdr);
+
+ if (event_len < len) {
+ len = event_len;
+ }
+
+ cpu_physical_memory_write(buf, event->data, len);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ g_free(event->data);
+ g_free(event);
+ return;
+
+out_no_events:
+ rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
+}
+
+void spapr_events_init(sPAPRMachineState *spapr)
+{
+ QTAILQ_INIT(&spapr->pending_events);
+ spapr->check_exception_irq = xics_alloc(spapr->icp, 0, 0, false);
+ spapr->epow_notifier.notify = spapr_powerdown_req;
+ qemu_register_powerdown_notifier(&spapr->epow_notifier);
+ spapr_rtas_register(RTAS_CHECK_EXCEPTION, "check-exception",
+ check_exception);
+ spapr_rtas_register(RTAS_EVENT_SCAN, "event-scan", event_scan);
+}
diff --git a/src/hw/ppc/spapr_hcall.c b/src/hw/ppc/spapr_hcall.c
new file mode 100644
index 0000000..cebceea
--- /dev/null
+++ b/src/hw/ppc/spapr_hcall.c
@@ -0,0 +1,1058 @@
+#include "sysemu/sysemu.h"
+#include "cpu.h"
+#include "helper_regs.h"
+#include "hw/ppc/spapr.h"
+#include "mmu-hash64.h"
+#include "cpu-models.h"
+#include "trace.h"
+#include "kvm_ppc.h"
+
+struct SPRSyncState {
+ CPUState *cs;
+ int spr;
+ target_ulong value;
+ target_ulong mask;
+};
+
+static void do_spr_sync(void *arg)
+{
+ struct SPRSyncState *s = arg;
+ PowerPCCPU *cpu = POWERPC_CPU(s->cs);
+ CPUPPCState *env = &cpu->env;
+
+ cpu_synchronize_state(s->cs);
+ env->spr[s->spr] &= ~s->mask;
+ env->spr[s->spr] |= s->value;
+}
+
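+/* Update an SPR on the given (possibly remote) vCPU: clear the masked bits
+ * and OR in the new value, running the update on that vCPU's own thread via
+ * run_on_cpu() with the CPU state synchronized first.
+ */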
+static void set_spr(CPUState *cs, int spr, target_ulong value,
+ target_ulong mask)
+{
+ struct SPRSyncState s = {
+ .cs = cs,
+ .spr = spr,
+ .value = value,
+ .mask = mask
+ };
+ run_on_cpu(cs, do_spr_sync, &s);
+}
+
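+/* Build the RB operand for a tlbie instruction from an HPTE's first
+ * doubleword (v), second doubleword (r) and its index in the hash table,
+ * so that the corresponding translation can be invalidated.
+ */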
+static target_ulong compute_tlbie_rb(target_ulong v, target_ulong r,
+ target_ulong pte_index)
+{
+ target_ulong rb, va_low;
+
+ rb = (v & ~0x7fULL) << 16; /* AVA field */
+ va_low = pte_index >> 3;
+ if (v & HPTE64_V_SECONDARY) {
+ va_low = ~va_low;
+ }
+ /* xor vsid from AVA */
+ if (!(v & HPTE64_V_1TB_SEG)) {
+ va_low ^= v >> 12;
+ } else {
+ va_low ^= v >> 24;
+ }
+ va_low &= 0x7ff;
+ if (v & HPTE64_V_LARGE) {
+ rb |= 1; /* L field */
+#if 0 /* Disable that P7 specific bit for now */
+ if (r & 0xff000) {
+ /* non-16MB large page, must be 64k */
+ /* (masks depend on page size) */
+ rb |= 0x1000; /* page encoding in LP field */
+ rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
+ rb |= (va_low & 0xfe); /* AVAL field */
+ }
+#endif
+ } else {
+ /* 4kB page */
+ rb |= (va_low & 0x7ff) << 12; /* remaining 11b of AVA */
+ }
+ rb |= (v >> 54) & 0x300; /* B field */
+ return rb;
+}
+
+static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
+{
+ /*
+ * hash value/pteg group index is normalized by htab_mask
+ */
+ if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) {
+ return false;
+ }
+ return true;
+}
+
+static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ MachineState *machine = MACHINE(spapr);
+ CPUPPCState *env = &cpu->env;
+ target_ulong flags = args[0];
+ target_ulong pte_index = args[1];
+ target_ulong pteh = args[2];
+ target_ulong ptel = args[3];
+ target_ulong page_shift = 12;
+ target_ulong raddr;
+ target_ulong index;
+ uint64_t token;
+
+ /* only handle 4k and 16M pages for now */
+ if (pteh & HPTE64_V_LARGE) {
+#if 0 /* We don't support 64k pages yet */
+ if ((ptel & 0xf000) == 0x1000) {
+ /* 64k page */
+ } else
+#endif
+ if ((ptel & 0xff000) == 0) {
+ /* 16M page */
+ page_shift = 24;
+ /* lowest AVA bit must be 0 for 16M pages */
+ if (pteh & 0x80) {
+ return H_PARAMETER;
+ }
+ } else {
+ return H_PARAMETER;
+ }
+ }
+
+ raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << page_shift) - 1);
+
+ if (raddr < machine->ram_size) {
+ /* Regular RAM - should have WIMG=0010 */
+ if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
+ return H_PARAMETER;
+ }
+ } else {
+ /* Looks like an IO address */
+ /* FIXME: What WIMG combinations could be sensible for IO?
+ * For now we allow WIMG=010x, but are there others? */
+ /* FIXME: Should we check against registered IO addresses? */
+ if ((ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)) != HPTE64_R_I) {
+ return H_PARAMETER;
+ }
+ }
+
+ pteh &= ~0x60ULL;
+
+ if (!valid_pte_index(env, pte_index)) {
+ return H_PARAMETER;
+ }
+
+ index = 0;
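+    /* without H_EXACT, scan the 8-entry PTEG for a free (invalid) slot;
+     * with H_EXACT, the exact slot requested by the guest must be free
+     */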
+ if (likely((flags & H_EXACT) == 0)) {
+ pte_index &= ~7ULL;
+ token = ppc_hash64_start_access(cpu, pte_index);
+ for (; index < 8; index++) {
+ if ((ppc_hash64_load_hpte0(env, token, index) & HPTE64_V_VALID) == 0) {
+ break;
+ }
+ }
+ ppc_hash64_stop_access(token);
+ if (index == 8) {
+ return H_PTEG_FULL;
+ }
+ } else {
+ token = ppc_hash64_start_access(cpu, pte_index);
+ if (ppc_hash64_load_hpte0(env, token, 0) & HPTE64_V_VALID) {
+ ppc_hash64_stop_access(token);
+ return H_PTEG_FULL;
+ }
+ ppc_hash64_stop_access(token);
+ }
+
+ ppc_hash64_store_hpte(env, pte_index + index,
+ pteh | HPTE64_V_HPTE_DIRTY, ptel);
+
+ args[0] = pte_index + index;
+ return H_SUCCESS;
+}
+
+typedef enum {
+ REMOVE_SUCCESS = 0,
+ REMOVE_NOT_FOUND = 1,
+ REMOVE_PARM = 2,
+ REMOVE_HW = 3,
+} RemoveResult;
+
+static RemoveResult remove_hpte(CPUPPCState *env, target_ulong ptex,
+ target_ulong avpn,
+ target_ulong flags,
+ target_ulong *vp, target_ulong *rp)
+{
+ uint64_t token;
+ target_ulong v, r, rb;
+
+ if (!valid_pte_index(env, ptex)) {
+ return REMOVE_PARM;
+ }
+
+ token = ppc_hash64_start_access(ppc_env_get_cpu(env), ptex);
+ v = ppc_hash64_load_hpte0(env, token, 0);
+ r = ppc_hash64_load_hpte1(env, token, 0);
+ ppc_hash64_stop_access(token);
+
+ if ((v & HPTE64_V_VALID) == 0 ||
+ ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
+ ((flags & H_ANDCOND) && (v & avpn) != 0)) {
+ return REMOVE_NOT_FOUND;
+ }
+ *vp = v;
+ *rp = r;
+ ppc_hash64_store_hpte(env, ptex, HPTE64_V_HPTE_DIRTY, 0);
+ rb = compute_tlbie_rb(v, r, ptex);
+ ppc_tlb_invalidate_one(env, rb);
+ return REMOVE_SUCCESS;
+}
+
+static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong flags = args[0];
+ target_ulong pte_index = args[1];
+ target_ulong avpn = args[2];
+ RemoveResult ret;
+
+ ret = remove_hpte(env, pte_index, avpn, flags,
+ &args[0], &args[1]);
+
+ switch (ret) {
+ case REMOVE_SUCCESS:
+ return H_SUCCESS;
+
+ case REMOVE_NOT_FOUND:
+ return H_NOT_FOUND;
+
+ case REMOVE_PARM:
+ return H_PARAMETER;
+
+ case REMOVE_HW:
+ return H_HARDWARE;
+ }
+
+ g_assert_not_reached();
+}
+
+#define H_BULK_REMOVE_TYPE 0xc000000000000000ULL
+#define H_BULK_REMOVE_REQUEST 0x4000000000000000ULL
+#define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL
+#define H_BULK_REMOVE_END 0xc000000000000000ULL
+#define H_BULK_REMOVE_CODE 0x3000000000000000ULL
+#define H_BULK_REMOVE_SUCCESS 0x0000000000000000ULL
+#define H_BULK_REMOVE_NOT_FOUND 0x1000000000000000ULL
+#define H_BULK_REMOVE_PARM 0x2000000000000000ULL
+#define H_BULK_REMOVE_HW 0x3000000000000000ULL
+#define H_BULK_REMOVE_RC 0x0c00000000000000ULL
+#define H_BULK_REMOVE_FLAGS 0x0300000000000000ULL
+#define H_BULK_REMOVE_ABSOLUTE 0x0000000000000000ULL
+#define H_BULK_REMOVE_ANDCOND 0x0100000000000000ULL
+#define H_BULK_REMOVE_AVPN 0x0200000000000000ULL
+#define H_BULK_REMOVE_PTEX 0x00ffffffffffffffULL
+
+#define H_BULK_REMOVE_MAX_BATCH 4
+
+static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ CPUPPCState *env = &cpu->env;
+ int i;
+
+ for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
+ target_ulong *tsh = &args[i*2];
+ target_ulong tsl = args[i*2 + 1];
+ target_ulong v, r, ret;
+
+ if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
+ break;
+ } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
+ return H_PARAMETER;
+ }
+
+ *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
+ *tsh |= H_BULK_REMOVE_RESPONSE;
+
+ if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
+ *tsh |= H_BULK_REMOVE_PARM;
+ return H_PARAMETER;
+ }
+
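+        /* reuse the H_REMOVE helper; the shift converts the bulk-remove
+         * flag field into the H_AVPN/H_ANDCOND flags remove_hpte() expects
+         */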
+ ret = remove_hpte(env, *tsh & H_BULK_REMOVE_PTEX, tsl,
+ (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
+ &v, &r);
+
+ *tsh |= ret << 60;
+
+ switch (ret) {
+ case REMOVE_SUCCESS:
+ *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
+ break;
+
+ case REMOVE_PARM:
+ return H_PARAMETER;
+
+ case REMOVE_HW:
+ return H_HARDWARE;
+ }
+ }
+
+ return H_SUCCESS;
+}
+
+static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong flags = args[0];
+ target_ulong pte_index = args[1];
+ target_ulong avpn = args[2];
+ uint64_t token;
+ target_ulong v, r, rb;
+
+ if (!valid_pte_index(env, pte_index)) {
+ return H_PARAMETER;
+ }
+
+ token = ppc_hash64_start_access(cpu, pte_index);
+ v = ppc_hash64_load_hpte0(env, token, 0);
+ r = ppc_hash64_load_hpte1(env, token, 0);
+ ppc_hash64_stop_access(token);
+
+ if ((v & HPTE64_V_VALID) == 0 ||
+ ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
+ return H_NOT_FOUND;
+ }
+
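+    /* update the protection, no-execute and key bits from the flags, then
+     * invalidate the old translation before installing the modified PTE
+     */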
+ r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
+ HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
+ r |= (flags << 55) & HPTE64_R_PP0;
+ r |= (flags << 48) & HPTE64_R_KEY_HI;
+ r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
+ rb = compute_tlbie_rb(v, r, pte_index);
+ ppc_hash64_store_hpte(env, pte_index,
+ (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
+ ppc_tlb_invalidate_one(env, rb);
+ /* Don't need a memory barrier, due to qemu's global lock */
+ ppc_hash64_store_hpte(env, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
+ return H_SUCCESS;
+}
+
+static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong flags = args[0];
+ target_ulong pte_index = args[1];
+ uint8_t *hpte;
+ int i, ridx, n_entries = 1;
+
+ if (!valid_pte_index(env, pte_index)) {
+ return H_PARAMETER;
+ }
+
+ if (flags & H_READ_4) {
+ /* Clear the two low order bits */
+ pte_index &= ~(3ULL);
+ n_entries = 4;
+ }
+
+ hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);
+
+ for (i = 0, ridx = 0; i < n_entries; i++) {
+ args[ridx++] = ldq_p(hpte);
+ args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
+ hpte += HASH_PTE_SIZE_64;
+ }
+
+ return H_SUCCESS;
+}
+
+static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ /* FIXME: actually implement this */
+ return H_HARDWARE;
+}
+
+#define FLAGS_REGISTER_VPA 0x0000200000000000ULL
+#define FLAGS_REGISTER_DTL 0x0000400000000000ULL
+#define FLAGS_REGISTER_SLBSHADOW 0x0000600000000000ULL
+#define FLAGS_DEREGISTER_VPA 0x0000a00000000000ULL
+#define FLAGS_DEREGISTER_DTL 0x0000c00000000000ULL
+#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL
+
+#define VPA_MIN_SIZE 640
+#define VPA_SIZE_OFFSET 0x4
+#define VPA_SHARED_PROC_OFFSET 0x9
+#define VPA_SHARED_PROC_VAL 0x2
+
+static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+ uint16_t size;
+ uint8_t tmp;
+
+ if (vpa == 0) {
+ hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
+ return H_HARDWARE;
+ }
+
+ if (vpa % env->dcache_line_size) {
+ return H_PARAMETER;
+ }
+ /* FIXME: bounds check the address */
+
+ size = lduw_be_phys(cs->as, vpa + 0x4);
+
+ if (size < VPA_MIN_SIZE) {
+ return H_PARAMETER;
+ }
+
+ /* VPA is not allowed to cross a page boundary */
+ if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
+ return H_PARAMETER;
+ }
+
+ env->vpa_addr = vpa;
+
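+    /* set the 'shared processor' byte in the newly registered VPA */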
+ tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
+ tmp |= VPA_SHARED_PROC_VAL;
+ stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);
+
+ return H_SUCCESS;
+}
+
+static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
+{
+ if (env->slb_shadow_addr) {
+ return H_RESOURCE;
+ }
+
+ if (env->dtl_addr) {
+ return H_RESOURCE;
+ }
+
+ env->vpa_addr = 0;
+ return H_SUCCESS;
+}
+
+static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+ uint32_t size;
+
+ if (addr == 0) {
+ hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
+ return H_HARDWARE;
+ }
+
+ size = ldl_be_phys(cs->as, addr + 0x4);
+ if (size < 0x8) {
+ return H_PARAMETER;
+ }
+
+ if ((addr / 4096) != ((addr + size - 1) / 4096)) {
+ return H_PARAMETER;
+ }
+
+ if (!env->vpa_addr) {
+ return H_RESOURCE;
+ }
+
+ env->slb_shadow_addr = addr;
+ env->slb_shadow_size = size;
+
+ return H_SUCCESS;
+}
+
+static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
+{
+ env->slb_shadow_addr = 0;
+ env->slb_shadow_size = 0;
+ return H_SUCCESS;
+}
+
+static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+ uint32_t size;
+
+ if (addr == 0) {
+ hcall_dprintf("Can't cope with DTL at logical 0\n");
+ return H_HARDWARE;
+ }
+
+ size = ldl_be_phys(cs->as, addr + 0x4);
+
+ if (size < 48) {
+ return H_PARAMETER;
+ }
+
+ if (!env->vpa_addr) {
+ return H_RESOURCE;
+ }
+
+ env->dtl_addr = addr;
+ env->dtl_size = size;
+
+ return H_SUCCESS;
+}
+
+static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
+{
+ env->dtl_addr = 0;
+ env->dtl_size = 0;
+
+ return H_SUCCESS;
+}
+
+static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong flags = args[0];
+ target_ulong procno = args[1];
+ target_ulong vpa = args[2];
+ target_ulong ret = H_PARAMETER;
+ CPUPPCState *tenv;
+ PowerPCCPU *tcpu;
+
+ tcpu = ppc_get_vcpu_by_dt_id(procno);
+ if (!tcpu) {
+ return H_PARAMETER;
+ }
+ tenv = &tcpu->env;
+
+ switch (flags) {
+ case FLAGS_REGISTER_VPA:
+ ret = register_vpa(tenv, vpa);
+ break;
+
+ case FLAGS_DEREGISTER_VPA:
+ ret = deregister_vpa(tenv, vpa);
+ break;
+
+ case FLAGS_REGISTER_SLBSHADOW:
+ ret = register_slb_shadow(tenv, vpa);
+ break;
+
+ case FLAGS_DEREGISTER_SLBSHADOW:
+ ret = deregister_slb_shadow(tenv, vpa);
+ break;
+
+ case FLAGS_REGISTER_DTL:
+ ret = register_dtl(tenv, vpa);
+ break;
+
+ case FLAGS_DEREGISTER_DTL:
+ ret = deregister_dtl(tenv, vpa);
+ break;
+ }
+
+ return ret;
+}
+
+static target_ulong h_cede(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ CPUPPCState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+
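+    /* re-enable external interrupts and, if the vCPU has no work pending,
+     * halt it until an interrupt or other work arrives
+     */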
+ env->msr |= (1ULL << MSR_EE);
+ hreg_compute_hflags(env);
+ if (!cpu_has_work(cs)) {
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ cs->exit_request = 1;
+ }
+ return H_SUCCESS;
+}
+
+static target_ulong h_rtas(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong rtas_r3 = args[0];
+ uint32_t token = rtas_ld(rtas_r3, 0);
+ uint32_t nargs = rtas_ld(rtas_r3, 1);
+ uint32_t nret = rtas_ld(rtas_r3, 2);
+
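+    /* the RTAS argument buffer at rtas_r3 holds the token, nargs and nret
+     * words, followed by nargs input words and then nret return words
+     */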
+ return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
+ nret, rtas_r3 + 12 + 4*nargs);
+}
+
+static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ CPUState *cs = CPU(cpu);
+ target_ulong size = args[0];
+ target_ulong addr = args[1];
+
+ switch (size) {
+ case 1:
+ args[0] = ldub_phys(cs->as, addr);
+ return H_SUCCESS;
+ case 2:
+ args[0] = lduw_phys(cs->as, addr);
+ return H_SUCCESS;
+ case 4:
+ args[0] = ldl_phys(cs->as, addr);
+ return H_SUCCESS;
+ case 8:
+ args[0] = ldq_phys(cs->as, addr);
+ return H_SUCCESS;
+ }
+ return H_PARAMETER;
+}
+
+static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ CPUState *cs = CPU(cpu);
+
+ target_ulong size = args[0];
+ target_ulong addr = args[1];
+ target_ulong val = args[2];
+
+ switch (size) {
+ case 1:
+ stb_phys(cs->as, addr, val);
+ return H_SUCCESS;
+ case 2:
+ stw_phys(cs->as, addr, val);
+ return H_SUCCESS;
+ case 4:
+ stl_phys(cs->as, addr, val);
+ return H_SUCCESS;
+ case 8:
+ stq_phys(cs->as, addr, val);
+ return H_SUCCESS;
+ }
+ return H_PARAMETER;
+}
+
+static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ CPUState *cs = CPU(cpu);
+
+ target_ulong dst = args[0]; /* Destination address */
+ target_ulong src = args[1]; /* Source address */
+ target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
+ target_ulong count = args[3]; /* Element count */
+ target_ulong op = args[4]; /* 0 = copy, 1 = invert */
+ uint64_t tmp;
+ unsigned int mask = (1 << esize) - 1;
+ int step = 1 << esize;
+
+ if (count > 0x80000000) {
+ return H_PARAMETER;
+ }
+
+ if ((dst & mask) || (src & mask) || (op > 1)) {
+ return H_PARAMETER;
+ }
+
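+    /* if the destination overlaps the source, copy from the last element
+     * backwards so source data isn't overwritten before it is read
+     */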
+ if (dst >= src && dst < (src + (count << esize))) {
+ dst = dst + ((count - 1) << esize);
+ src = src + ((count - 1) << esize);
+ step = -step;
+ }
+
+ while (count--) {
+ switch (esize) {
+ case 0:
+ tmp = ldub_phys(cs->as, src);
+ break;
+ case 1:
+ tmp = lduw_phys(cs->as, src);
+ break;
+ case 2:
+ tmp = ldl_phys(cs->as, src);
+ break;
+ case 3:
+ tmp = ldq_phys(cs->as, src);
+ break;
+ default:
+ return H_PARAMETER;
+ }
+ if (op == 1) {
+ tmp = ~tmp;
+ }
+ switch (esize) {
+ case 0:
+ stb_phys(cs->as, dst, tmp);
+ break;
+ case 1:
+ stw_phys(cs->as, dst, tmp);
+ break;
+ case 2:
+ stl_phys(cs->as, dst, tmp);
+ break;
+ case 3:
+ stq_phys(cs->as, dst, tmp);
+ break;
+ }
+ dst = dst + step;
+ src = src + step;
+ }
+
+ return H_SUCCESS;
+}
+
+static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ /* Nothing to do on emulation, KVM will trap this in the kernel */
+ return H_SUCCESS;
+}
+
+static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ /* Nothing to do on emulation, KVM will trap this in the kernel */
+ return H_SUCCESS;
+}
+
+static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
+ target_ulong mflags,
+ target_ulong value1,
+ target_ulong value2)
+{
+ CPUState *cs;
+
+ if (value1) {
+ return H_P3;
+ }
+ if (value2) {
+ return H_P4;
+ }
+
+ switch (mflags) {
+ case H_SET_MODE_ENDIAN_BIG:
+ CPU_FOREACH(cs) {
+ set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
+ }
+ spapr_pci_switch_vga(true);
+ return H_SUCCESS;
+
+ case H_SET_MODE_ENDIAN_LITTLE:
+ CPU_FOREACH(cs) {
+ set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
+ }
+ spapr_pci_switch_vga(false);
+ return H_SUCCESS;
+ }
+
+ return H_UNSUPPORTED_FLAG;
+}
+
+static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
+ target_ulong mflags,
+ target_ulong value1,
+ target_ulong value2)
+{
+ CPUState *cs;
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ target_ulong prefix;
+
+ if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
+ return H_P2;
+ }
+ if (value1) {
+ return H_P3;
+ }
+ if (value2) {
+ return H_P4;
+ }
+
+ switch (mflags) {
+ case H_SET_MODE_ADDR_TRANS_NONE:
+ prefix = 0;
+ break;
+ case H_SET_MODE_ADDR_TRANS_0001_8000:
+ prefix = 0x18000;
+ break;
+ case H_SET_MODE_ADDR_TRANS_C000_0000_0000_4000:
+ prefix = 0xC000000000004000ULL;
+ break;
+ default:
+ return H_UNSUPPORTED_FLAG;
+ }
+
+ CPU_FOREACH(cs) {
+ CPUPPCState *env = &POWERPC_CPU(cpu)->env;
+
+ set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
+ env->excp_prefix = prefix;
+ }
+
+ return H_SUCCESS;
+}
+
+static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong resource = args[1];
+ target_ulong ret = H_P2;
+
+ switch (resource) {
+ case H_SET_MODE_RESOURCE_LE:
+ ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
+ break;
+ case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
+ ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
+ args[2], args[3]);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Return the offset to the requested option vector @vector in the
+ * option vector table @table.
+ */
+static target_ulong cas_get_option_vector(int vector, target_ulong table)
+{
+ int i;
+ char nr_vectors, nr_entries;
+
+ if (!table) {
+ return 0;
+ }
+
+ nr_vectors = (ldl_phys(&address_space_memory, table) >> 24) + 1;
+ if (!vector || vector > nr_vectors) {
+ return 0;
+ }
+ table++; /* skip nr option vectors */
+
+ for (i = 0; i < vector - 1; i++) {
+ nr_entries = ldl_phys(&address_space_memory, table) >> 24;
+ table += nr_entries + 2;
+ }
+ return table;
+}
+
+typedef struct {
+ PowerPCCPU *cpu;
+ uint32_t cpu_version;
+ int ret;
+} SetCompatState;
+
+static void do_set_compat(void *arg)
+{
+ SetCompatState *s = arg;
+
+ cpu_synchronize_state(CPU(s->cpu));
+ s->ret = ppc_set_compat(s->cpu, s->cpu_version);
+}
+
+#define get_compat_level(cpuver) ( \
+ ((cpuver) == CPU_POWERPC_LOGICAL_2_05) ? 2050 : \
+ ((cpuver) == CPU_POWERPC_LOGICAL_2_06) ? 2060 : \
+ ((cpuver) == CPU_POWERPC_LOGICAL_2_06_PLUS) ? 2061 : \
+ ((cpuver) == CPU_POWERPC_LOGICAL_2_07) ? 2070 : 0)
+
+#define OV5_DRCONF_MEMORY 0x20
+
+static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
+ sPAPRMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ target_ulong list = args[0], ov_table;
+ PowerPCCPUClass *pcc_ = POWERPC_CPU_GET_CLASS(cpu_);
+ CPUState *cs;
+ bool cpu_match = false, cpu_update = true, memory_update = false;
+ unsigned old_cpu_version = cpu_->cpu_version;
+ unsigned compat_lvl = 0, cpu_version = 0;
+ unsigned max_lvl = get_compat_level(cpu_->max_compat);
+ int counter;
+ char ov5_byte2;
+
+ /* Parse PVR list */
+ for (counter = 0; counter < 512; ++counter) {
+ uint32_t pvr, pvr_mask;
+
+ pvr_mask = rtas_ld(list, 0);
+ list += 4;
+ pvr = rtas_ld(list, 0);
+ list += 4;
+
+ trace_spapr_cas_pvr_try(pvr);
+ if (!max_lvl &&
+ ((cpu_->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask))) {
+ cpu_match = true;
+ cpu_version = 0;
+ } else if (pvr == cpu_->cpu_version) {
+ cpu_match = true;
+ cpu_version = cpu_->cpu_version;
+ } else if (!cpu_match) {
+ /* If it is a logical PVR, try to determine the highest level */
+ unsigned lvl = get_compat_level(pvr);
+ if (lvl) {
+ bool is205 = (pcc_->pcr_mask & PCR_COMPAT_2_05) &&
+ (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05));
+ bool is206 = (pcc_->pcr_mask & PCR_COMPAT_2_06) &&
+ ((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) ||
+ (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS)));
+
+ if (is205 || is206) {
+ if (!max_lvl) {
+ /* User did not set the level, choose the highest */
+ if (compat_lvl <= lvl) {
+ compat_lvl = lvl;
+ cpu_version = pvr;
+ }
+ } else if (max_lvl >= lvl) {
+ /* User chose the level, don't set higher than this */
+ compat_lvl = lvl;
+ cpu_version = pvr;
+ }
+ }
+ }
+ }
+ /* Terminator record */
+ if (~pvr_mask & pvr) {
+ break;
+ }
+ }
+
+ /* Parsing finished */
+ trace_spapr_cas_pvr(cpu_->cpu_version, cpu_match,
+ cpu_version, pcc_->pcr_mask);
+
+ /* Update CPUs */
+ if (old_cpu_version != cpu_version) {
+ CPU_FOREACH(cs) {
+ SetCompatState s = {
+ .cpu = POWERPC_CPU(cs),
+ .cpu_version = cpu_version,
+ .ret = 0
+ };
+
+ run_on_cpu(cs, do_set_compat, &s);
+
+ if (s.ret < 0) {
+ fprintf(stderr, "Unable to set compatibility mode\n");
+ return H_HARDWARE;
+ }
+ }
+ }
+
+ if (!cpu_version) {
+ cpu_update = false;
+ }
+
+    /* For future use: here @ov_table points to the first option vector */
+ ov_table = list;
+
+ list = cas_get_option_vector(5, ov_table);
+ if (!list) {
+ return H_SUCCESS;
+ }
+
+ /* @list now points to OV 5 */
+ list += 2;
+ ov5_byte2 = rtas_ld(list, 0) >> 24;
+ if (ov5_byte2 & OV5_DRCONF_MEMORY) {
+ memory_update = true;
+ }
+
+ if (spapr_h_cas_compose_response(spapr, args[1], args[2],
+ cpu_update, memory_update)) {
+ qemu_system_reset_request();
+ }
+
+ return H_SUCCESS;
+}
+
+static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
+static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];
+
+void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
+{
+ spapr_hcall_fn *slot;
+
+ if (opcode <= MAX_HCALL_OPCODE) {
+ assert((opcode & 0x3) == 0);
+
+ slot = &papr_hypercall_table[opcode / 4];
+ } else {
+ assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));
+
+ slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
+ }
+
+ assert(!(*slot));
+ *slot = fn;
+}
+
+target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
+ target_ulong *args)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+
+ if ((opcode <= MAX_HCALL_OPCODE)
+ && ((opcode & 0x3) == 0)) {
+ spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];
+
+ if (fn) {
+ return fn(cpu, spapr, opcode, args);
+ }
+ } else if ((opcode >= KVMPPC_HCALL_BASE) &&
+ (opcode <= KVMPPC_HCALL_MAX)) {
+ spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
+
+ if (fn) {
+ return fn(cpu, spapr, opcode, args);
+ }
+ }
+
+ qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
+ opcode);
+ return H_FUNCTION;
+}
+
+static void hypercall_register_types(void)
+{
+ /* hcall-pft */
+ spapr_register_hypercall(H_ENTER, h_enter);
+ spapr_register_hypercall(H_REMOVE, h_remove);
+ spapr_register_hypercall(H_PROTECT, h_protect);
+ spapr_register_hypercall(H_READ, h_read);
+
+ /* hcall-bulk */
+ spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);
+
+ /* hcall-dabr */
+ spapr_register_hypercall(H_SET_DABR, h_set_dabr);
+
+ /* hcall-splpar */
+ spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
+ spapr_register_hypercall(H_CEDE, h_cede);
+
+    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
+ * here between the "CI" and the "CACHE" variants, they will use whatever
+ * mapping attributes qemu is using. When using KVM, the kernel will
+ * enforce the attributes more strongly
+ */
+ spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
+ spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
+ spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
+ spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
+ spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
+ spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
+ spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);
+
+ /* qemu/KVM-PPC specific hcalls */
+ spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);
+
+ spapr_register_hypercall(H_SET_MODE, h_set_mode);
+
+ /* ibm,client-architecture-support support */
+ spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
+}
+
+type_init(hypercall_register_types)
diff --git a/src/hw/ppc/spapr_iommu.c b/src/hw/ppc/spapr_iommu.c
new file mode 100644
index 0000000..ed28565
--- /dev/null
+++ b/src/hw/ppc/spapr_iommu.c
@@ -0,0 +1,511 @@
+/*
+ * QEMU sPAPR IOMMU (TCE) code
+ *
+ * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "hw/hw.h"
+#include "sysemu/kvm.h"
+#include "hw/qdev.h"
+#include "kvm_ppc.h"
+#include "sysemu/dma.h"
+#include "exec/address-spaces.h"
+#include "trace.h"
+
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_vio.h"
+
+#include <libfdt.h>
+
+enum sPAPRTCEAccess {
+ SPAPR_TCE_FAULT = 0,
+ SPAPR_TCE_RO = 1,
+ SPAPR_TCE_WO = 2,
+ SPAPR_TCE_RW = 3,
+};
+
+#define IOMMU_PAGE_SIZE(shift) (1ULL << (shift))
+#define IOMMU_PAGE_MASK(shift) (~(IOMMU_PAGE_SIZE(shift) - 1))
+
+static QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;
+
+sPAPRTCETable *spapr_tce_find_by_liobn(target_ulong liobn)
+{
+ sPAPRTCETable *tcet;
+
+ if (liobn & 0xFFFFFFFF00000000ULL) {
+ hcall_dprintf("Request for out-of-bounds LIOBN 0x" TARGET_FMT_lx "\n",
+ liobn);
+ return NULL;
+ }
+
+ QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
+ if (tcet->liobn == (uint32_t)liobn) {
+ return tcet;
+ }
+ }
+
+ return NULL;
+}
+
+static IOMMUAccessFlags spapr_tce_iommu_access_flags(uint64_t tce)
+{
+ switch (tce & SPAPR_TCE_RW) {
+ case SPAPR_TCE_FAULT:
+ return IOMMU_NONE;
+ case SPAPR_TCE_RO:
+ return IOMMU_RO;
+ case SPAPR_TCE_WO:
+ return IOMMU_WO;
+ default: /* SPAPR_TCE_RW */
+ return IOMMU_RW;
+ }
+}
+
+/* Called from RCU critical section */
+static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr,
+ bool is_write)
+{
+ sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);
+ uint64_t tce;
+ IOMMUTLBEntry ret = {
+ .target_as = &address_space_memory,
+ .iova = 0,
+ .translated_addr = 0,
+ .addr_mask = ~(hwaddr)0,
+ .perm = IOMMU_NONE,
+ };
+
+ if ((addr >> tcet->page_shift) < tcet->nb_table) {
+        /* Check if we are in bounds */
+ hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
+
+ tce = tcet->table[addr >> tcet->page_shift];
+ ret.iova = addr & page_mask;
+ ret.translated_addr = tce & page_mask;
+ ret.addr_mask = ~page_mask;
+ ret.perm = spapr_tce_iommu_access_flags(tce);
+ }
+ trace_spapr_iommu_xlate(tcet->liobn, addr, ret.iova, ret.perm,
+ ret.addr_mask);
+
+ return ret;
+}
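+
+/*
+ * Illustrative example (hypothetical values, not part of the original code):
+ * with page_shift = 12 and tcet->table[3] = 0x7f001003 (i.e. RPN 0x7f001000
+ * with SPAPR_TCE_RW permissions), a DMA access to iova 0x3045 selects
+ * index 3 and returns an IOMMUTLBEntry of
+ *   .iova = 0x3000, .translated_addr = 0x7f001000,
+ *   .addr_mask = 0xfff, .perm = IOMMU_RW.
+ */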
+
+static int spapr_tce_table_post_load(void *opaque, int version_id)
+{
+ sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);
+
+ if (tcet->vdev) {
+ spapr_vio_set_bypass(tcet->vdev, tcet->bypass);
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_spapr_tce_table = {
+ .name = "spapr_iommu",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .post_load = spapr_tce_table_post_load,
+ .fields = (VMStateField []) {
+ /* Sanity check */
+ VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable),
+ VMSTATE_UINT32_EQUAL(nb_table, sPAPRTCETable),
+
+ /* IOMMU state */
+ VMSTATE_BOOL(bypass, sPAPRTCETable),
+ VMSTATE_VARRAY_UINT32(table, sPAPRTCETable, nb_table, 0, vmstate_info_uint64, uint64_t),
+
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static MemoryRegionIOMMUOps spapr_iommu_ops = {
+ .translate = spapr_tce_translate_iommu,
+};
+
+static int spapr_tce_table_realize(DeviceState *dev)
+{
+ sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
+ uint64_t window_size = (uint64_t)tcet->nb_table << tcet->page_shift;
+
+ if (kvm_enabled() && !(window_size >> 32)) {
+ tcet->table = kvmppc_create_spapr_tce(tcet->liobn,
+ window_size,
+ &tcet->fd,
+ tcet->need_vfio);
+ }
+
+ if (!tcet->table) {
+ size_t table_size = tcet->nb_table * sizeof(uint64_t);
+ tcet->table = g_malloc0(table_size);
+ }
+
+ trace_spapr_iommu_new_table(tcet->liobn, tcet, tcet->table, tcet->fd);
+
+ memory_region_init_iommu(&tcet->iommu, OBJECT(dev), &spapr_iommu_ops,
+ "iommu-spapr",
+ (uint64_t)tcet->nb_table << tcet->page_shift);
+
+ QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);
+
+ vmstate_register(DEVICE(tcet), tcet->liobn, &vmstate_spapr_tce_table,
+ tcet);
+
+ return 0;
+}
+
+void spapr_tce_set_need_vfio(sPAPRTCETable *tcet, bool need_vfio)
+{
+ size_t table_size = tcet->nb_table * sizeof(uint64_t);
+ void *newtable;
+
+ if (need_vfio == tcet->need_vfio) {
+ /* Nothing to do */
+ return;
+ }
+
+ if (!need_vfio) {
+ /* FIXME: We don't support transition back to KVM accelerated
+ * TCEs yet */
+ return;
+ }
+
+ tcet->need_vfio = true;
+
+ if (tcet->fd < 0) {
+        /* Table is already in userspace, nothing to be done */
+ return;
+ }
+
+ newtable = g_malloc(table_size);
+ memcpy(newtable, tcet->table, table_size);
+
+ kvmppc_remove_spapr_tce(tcet->table, tcet->fd, tcet->nb_table);
+
+ tcet->fd = -1;
+ tcet->table = newtable;
+}
+
+sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn,
+ uint64_t bus_offset,
+ uint32_t page_shift,
+ uint32_t nb_table,
+ bool need_vfio)
+{
+ sPAPRTCETable *tcet;
+ char tmp[64];
+
+ if (spapr_tce_find_by_liobn(liobn)) {
+ fprintf(stderr, "Attempted to create TCE table with duplicate"
+ " LIOBN 0x%x\n", liobn);
+ return NULL;
+ }
+
+ if (!nb_table) {
+ return NULL;
+ }
+
+ tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE));
+ tcet->liobn = liobn;
+ tcet->bus_offset = bus_offset;
+ tcet->page_shift = page_shift;
+ tcet->nb_table = nb_table;
+ tcet->need_vfio = need_vfio;
+
+ snprintf(tmp, sizeof(tmp), "tce-table-%x", liobn);
+ object_property_add_child(OBJECT(owner), tmp, OBJECT(tcet), NULL);
+
+ object_property_set_bool(OBJECT(tcet), true, "realized", NULL);
+
+ return tcet;
+}
+
+static void spapr_tce_table_unrealize(DeviceState *dev, Error **errp)
+{
+ sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
+
+ QLIST_REMOVE(tcet, list);
+
+ if (!kvm_enabled() ||
+ (kvmppc_remove_spapr_tce(tcet->table, tcet->fd,
+ tcet->nb_table) != 0)) {
+ g_free(tcet->table);
+ }
+}
+
+MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet)
+{
+ return &tcet->iommu;
+}
+
+static void spapr_tce_reset(DeviceState *dev)
+{
+ sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
+ size_t table_size = tcet->nb_table * sizeof(uint64_t);
+
+ memset(tcet->table, 0, table_size);
+}
+
+static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
+ target_ulong tce)
+{
+ IOMMUTLBEntry entry;
+ hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
+ unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;
+
+ if (index >= tcet->nb_table) {
+ hcall_dprintf("spapr_vio_put_tce on out-of-bounds IOBA 0x"
+ TARGET_FMT_lx "\n", ioba);
+ return H_PARAMETER;
+ }
+
+ tcet->table[index] = tce;
+
+ entry.target_as = &address_space_memory,
+ entry.iova = ioba & page_mask;
+ entry.translated_addr = tce & page_mask;
+ entry.addr_mask = ~page_mask;
+ entry.perm = spapr_tce_iommu_access_flags(tce);
+ memory_region_notify_iommu(&tcet->iommu, entry);
+
+ return H_SUCCESS;
+}
+
+static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ int i;
+ target_ulong liobn = args[0];
+ target_ulong ioba = args[1];
+ target_ulong ioba1 = ioba;
+ target_ulong tce_list = args[2];
+ target_ulong npages = args[3];
+ target_ulong ret = H_PARAMETER, tce = 0;
+ sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
+ CPUState *cs = CPU(cpu);
+ hwaddr page_mask, page_size;
+
+ if (!tcet) {
+ return H_PARAMETER;
+ }
+
+ if ((npages > 512) || (tce_list & SPAPR_TCE_PAGE_MASK)) {
+ return H_PARAMETER;
+ }
+
+ page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
+ page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
+ ioba &= page_mask;
+
+ for (i = 0; i < npages; ++i, ioba += page_size) {
+ tce = ldq_be_phys(cs->as, tce_list + i * sizeof(target_ulong));
+
+ ret = put_tce_emu(tcet, ioba, tce);
+ if (ret) {
+ break;
+ }
+ }
+
+ /* Trace last successful or the first problematic entry */
+ i = i ? (i - 1) : 0;
+ if (SPAPR_IS_PCI_LIOBN(liobn)) {
+ trace_spapr_iommu_pci_indirect(liobn, ioba1, tce_list, i, tce, ret);
+ } else {
+ trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i, tce, ret);
+ }
+ return ret;
+}
+
+static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ int i;
+ target_ulong liobn = args[0];
+ target_ulong ioba = args[1];
+ target_ulong tce_value = args[2];
+ target_ulong npages = args[3];
+ target_ulong ret = H_PARAMETER;
+ sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
+ hwaddr page_mask, page_size;
+
+ if (!tcet) {
+ return H_PARAMETER;
+ }
+
+ if (npages > tcet->nb_table) {
+ return H_PARAMETER;
+ }
+
+ page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
+ page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
+ ioba &= page_mask;
+
+ for (i = 0; i < npages; ++i, ioba += page_size) {
+ ret = put_tce_emu(tcet, ioba, tce_value);
+ if (ret) {
+ break;
+ }
+ }
+ if (SPAPR_IS_PCI_LIOBN(liobn)) {
+ trace_spapr_iommu_pci_stuff(liobn, ioba, tce_value, npages, ret);
+ } else {
+ trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);
+ }
+
+ return ret;
+}
+
+static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong liobn = args[0];
+ target_ulong ioba = args[1];
+ target_ulong tce = args[2];
+ target_ulong ret = H_PARAMETER;
+ sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
+
+ if (tcet) {
+ hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
+
+ ioba &= page_mask;
+
+ ret = put_tce_emu(tcet, ioba, tce);
+ }
+ if (SPAPR_IS_PCI_LIOBN(liobn)) {
+ trace_spapr_iommu_pci_put(liobn, ioba, tce, ret);
+ } else {
+ trace_spapr_iommu_put(liobn, ioba, tce, ret);
+ }
+
+ return ret;
+}
+
+static target_ulong get_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
+ target_ulong *tce)
+{
+ unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;
+
+ if (index >= tcet->nb_table) {
+ hcall_dprintf("spapr_iommu_get_tce on out-of-bounds IOBA 0x"
+ TARGET_FMT_lx "\n", ioba);
+ return H_PARAMETER;
+ }
+
+ *tce = tcet->table[index];
+
+ return H_SUCCESS;
+}
+
+static target_ulong h_get_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong liobn = args[0];
+ target_ulong ioba = args[1];
+ target_ulong tce = 0;
+ target_ulong ret = H_PARAMETER;
+ sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
+
+ if (tcet) {
+ hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
+
+ ioba &= page_mask;
+
+ ret = get_tce_emu(tcet, ioba, &tce);
+ if (!ret) {
+ args[0] = tce;
+ }
+ }
+ if (SPAPR_IS_PCI_LIOBN(liobn)) {
+ trace_spapr_iommu_pci_get(liobn, ioba, ret, tce);
+ } else {
+ trace_spapr_iommu_get(liobn, ioba, ret, tce);
+ }
+
+ return ret;
+}
+
+int spapr_dma_dt(void *fdt, int node_off, const char *propname,
+ uint32_t liobn, uint64_t window, uint32_t size)
+{
+ uint32_t dma_prop[5];
+ int ret;
+
+ dma_prop[0] = cpu_to_be32(liobn);
+ dma_prop[1] = cpu_to_be32(window >> 32);
+ dma_prop[2] = cpu_to_be32(window & 0xFFFFFFFF);
+ dma_prop[3] = 0; /* window size is 32 bits */
+ dma_prop[4] = cpu_to_be32(size);
+
+ ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = fdt_setprop(fdt, node_off, propname, dma_prop, sizeof(dma_prop));
+ if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
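+
+/*
+ * For illustration (hypothetical values): a call such as
+ *   spapr_dma_dt(fdt, off, "ibm,my-dma-window", 0x80000000, 0, 0x40000000)
+ * would encode the property as the five cells
+ *   <0x80000000 0x0 0x0 0x0 0x40000000>
+ * i.e. the LIOBN, the 64-bit window start split across two cells, and the
+ * 64-bit window size split across two cells (only the low 32 bits of the
+ * size are taken from the @size argument).
+ */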
+
+int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
+ sPAPRTCETable *tcet)
+{
+ if (!tcet) {
+ return 0;
+ }
+
+ return spapr_dma_dt(fdt, node_off, propname,
+ tcet->liobn, 0, tcet->nb_table << tcet->page_shift);
+}
+
+static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ dc->init = spapr_tce_table_realize;
+ dc->reset = spapr_tce_reset;
+ dc->unrealize = spapr_tce_table_unrealize;
+
+ QLIST_INIT(&spapr_tce_tables);
+
+ /* hcall-tce */
+ spapr_register_hypercall(H_PUT_TCE, h_put_tce);
+ spapr_register_hypercall(H_GET_TCE, h_get_tce);
+ spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
+ spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
+}
+
+static TypeInfo spapr_tce_table_info = {
+ .name = TYPE_SPAPR_TCE_TABLE,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(sPAPRTCETable),
+ .class_init = spapr_tce_table_class_init,
+};
+
+static void register_types(void)
+{
+ type_register_static(&spapr_tce_table_info);
+}
+
+type_init(register_types);
diff --git a/src/hw/ppc/spapr_pci.c b/src/hw/ppc/spapr_pci.c
new file mode 100644
index 0000000..55fa8db
--- /dev/null
+++ b/src/hw/ppc/spapr_pci.c
@@ -0,0 +1,1843 @@
+/*
+ * QEMU sPAPR PCI host originated from Uninorth PCI host
+ *
+ * Copyright (c) 2011 Alexey Kardashevskiy, IBM Corporation.
+ * Copyright (C) 2011 David Gibson, IBM Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "hw/hw.h"
+#include "hw/sysbus.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
+#include "hw/pci/pci_host.h"
+#include "hw/ppc/spapr.h"
+#include "hw/pci-host/spapr.h"
+#include "exec/address-spaces.h"
+#include <libfdt.h>
+#include "trace.h"
+#include "qemu/error-report.h"
+#include "qapi/qmp/qerror.h"
+
+#include "hw/pci/pci_bridge.h"
+#include "hw/pci/pci_bus.h"
+#include "hw/ppc/spapr_drc.h"
+#include "sysemu/device_tree.h"
+
+/* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */
+#define RTAS_QUERY_FN 0
+#define RTAS_CHANGE_FN 1
+#define RTAS_RESET_FN 2
+#define RTAS_CHANGE_MSI_FN 3
+#define RTAS_CHANGE_MSIX_FN 4
+
+/* Interrupt types to return on RTAS_CHANGE_* */
+#define RTAS_TYPE_MSI 1
+#define RTAS_TYPE_MSIX 2
+
+#define FDT_NAME_MAX 128
+
+#define _FDT(exp) \
+ do { \
+ int ret = (exp); \
+ if (ret < 0) { \
+ return ret; \
+ } \
+ } while (0)
+
+sPAPRPHBState *spapr_pci_find_phb(sPAPRMachineState *spapr, uint64_t buid)
+{
+ sPAPRPHBState *sphb;
+
+ QLIST_FOREACH(sphb, &spapr->phbs, list) {
+ if (sphb->buid != buid) {
+ continue;
+ }
+ return sphb;
+ }
+
+ return NULL;
+}
+
+PCIDevice *spapr_pci_find_dev(sPAPRMachineState *spapr, uint64_t buid,
+ uint32_t config_addr)
+{
+ sPAPRPHBState *sphb = spapr_pci_find_phb(spapr, buid);
+ PCIHostState *phb = PCI_HOST_BRIDGE(sphb);
+ int bus_num = (config_addr >> 16) & 0xFF;
+ int devfn = (config_addr >> 8) & 0xFF;
+
+ if (!phb) {
+ return NULL;
+ }
+
+ return pci_find_device(phb->bus, bus_num, devfn);
+}
+
+static uint32_t rtas_pci_cfgaddr(uint32_t arg)
+{
+ /* This handles the encoding of extended config space addresses */
+ return ((arg >> 20) & 0xf00) | (arg & 0xff);
+}
+
+static void finish_read_pci_config(sPAPRMachineState *spapr, uint64_t buid,
+ uint32_t addr, uint32_t size,
+ target_ulong rets)
+{
+ PCIDevice *pci_dev;
+ uint32_t val;
+
+ if ((size != 1) && (size != 2) && (size != 4)) {
+ /* access must be 1, 2 or 4 bytes */
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ pci_dev = spapr_pci_find_dev(spapr, buid, addr);
+ addr = rtas_pci_cfgaddr(addr);
+
+ if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
+ /* Access must be to a valid device, within bounds and
+ * naturally aligned */
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ val = pci_host_config_read_common(pci_dev, addr,
+ pci_config_size(pci_dev), size);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, val);
+}
+
+static void rtas_ibm_read_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ uint64_t buid;
+ uint32_t size, addr;
+
+ if ((nargs != 4) || (nret != 2)) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ buid = rtas_ldq(args, 1);
+ size = rtas_ld(args, 3);
+ addr = rtas_ld(args, 0);
+
+ finish_read_pci_config(spapr, buid, addr, size, rets);
+}
+
+static void rtas_read_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ uint32_t size, addr;
+
+ if ((nargs != 2) || (nret != 2)) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ size = rtas_ld(args, 1);
+ addr = rtas_ld(args, 0);
+
+ finish_read_pci_config(spapr, 0, addr, size, rets);
+}
+
+static void finish_write_pci_config(sPAPRMachineState *spapr, uint64_t buid,
+ uint32_t addr, uint32_t size,
+ uint32_t val, target_ulong rets)
+{
+ PCIDevice *pci_dev;
+
+ if ((size != 1) && (size != 2) && (size != 4)) {
+ /* access must be 1, 2 or 4 bytes */
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ pci_dev = spapr_pci_find_dev(spapr, buid, addr);
+ addr = rtas_pci_cfgaddr(addr);
+
+ if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
+ /* Access must be to a valid device, within bounds and
+ * naturally aligned */
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ pci_host_config_write_common(pci_dev, addr, pci_config_size(pci_dev),
+ val, size);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+}
+
+static void rtas_ibm_write_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ uint64_t buid;
+ uint32_t val, size, addr;
+
+ if ((nargs != 5) || (nret != 1)) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ buid = rtas_ldq(args, 1);
+ val = rtas_ld(args, 4);
+ size = rtas_ld(args, 3);
+ addr = rtas_ld(args, 0);
+
+ finish_write_pci_config(spapr, buid, addr, size, val, rets);
+}
+
+static void rtas_write_pci_config(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ uint32_t val, size, addr;
+
+ if ((nargs != 3) || (nret != 1)) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ val = rtas_ld(args, 2);
+ size = rtas_ld(args, 1);
+ addr = rtas_ld(args, 0);
+
+ finish_write_pci_config(spapr, 0, addr, size, val, rets);
+}
+
+/*
+ * Set MSI/MSIX message data.
+ * This is required for msi_notify()/msix_notify() which
+ * will write at the addresses via spapr_msi_write().
+ *
+ * If @addr is 0, all entries will have .data == first_irq, i.e. the
+ * table will be reset.
+ */
+static void spapr_msi_setmsg(PCIDevice *pdev, hwaddr addr, bool msix,
+ unsigned first_irq, unsigned req_num)
+{
+ unsigned i;
+ MSIMessage msg = { .address = addr, .data = first_irq };
+
+ if (!msix) {
+ msi_set_message(pdev, msg);
+ trace_spapr_pci_msi_setup(pdev->name, 0, msg.address);
+ return;
+ }
+
+ for (i = 0; i < req_num; ++i) {
+ msix_set_message(pdev, i, msg);
+ trace_spapr_pci_msi_setup(pdev->name, i, msg.address);
+ if (addr) {
+ ++msg.data;
+ }
+ }
+}
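+
+/*
+ * Illustrative example (hypothetical values): for an MSI-X capable device
+ * with req_num = 3 and first_irq = 0x1200,
+ *   spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, true, 0x1200, 3);
+ * programs vectors 0..2 with .address = SPAPR_PCI_MSI_WINDOW and
+ * .data = 0x1200, 0x1201 and 0x1202 respectively, while passing addr = 0
+ * leaves every vector with the same .data (== first_irq).
+ */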
+
+static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ uint32_t config_addr = rtas_ld(args, 0);
+ uint64_t buid = rtas_ldq(args, 1);
+ unsigned int func = rtas_ld(args, 3);
+ unsigned int req_num = rtas_ld(args, 4); /* 0 == remove all */
+ unsigned int seq_num = rtas_ld(args, 5);
+ unsigned int ret_intr_type;
+ unsigned int irq, max_irqs = 0, num = 0;
+ sPAPRPHBState *phb = NULL;
+ PCIDevice *pdev = NULL;
+ spapr_pci_msi *msi;
+ int *config_addr_key;
+
+ switch (func) {
+ case RTAS_CHANGE_MSI_FN:
+ case RTAS_CHANGE_FN:
+ ret_intr_type = RTAS_TYPE_MSI;
+ break;
+ case RTAS_CHANGE_MSIX_FN:
+ ret_intr_type = RTAS_TYPE_MSIX;
+ break;
+ default:
+ error_report("rtas_ibm_change_msi(%u) is not implemented", func);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+    /* Find sPAPRPHBState */
+ phb = spapr_pci_find_phb(spapr, buid);
+ if (phb) {
+ pdev = spapr_pci_find_dev(spapr, buid, config_addr);
+ }
+ if (!phb || !pdev) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ /* Releasing MSIs */
+ if (!req_num) {
+ msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
+ if (!msi) {
+ trace_spapr_pci_msi("Releasing wrong config", config_addr);
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ xics_free(spapr->icp, msi->first_irq, msi->num);
+ if (msi_present(pdev)) {
+ spapr_msi_setmsg(pdev, 0, false, 0, num);
+ }
+ if (msix_present(pdev)) {
+ spapr_msi_setmsg(pdev, 0, true, 0, num);
+ }
+ g_hash_table_remove(phb->msi, &config_addr);
+
+ trace_spapr_pci_msi("Released MSIs", config_addr);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, 0);
+ return;
+ }
+
+ /* Enabling MSI */
+
+ /* Check if the device supports as many IRQs as requested */
+ if (ret_intr_type == RTAS_TYPE_MSI) {
+ max_irqs = msi_nr_vectors_allocated(pdev);
+ } else if (ret_intr_type == RTAS_TYPE_MSIX) {
+ max_irqs = pdev->msix_entries_nr;
+ }
+ if (!max_irqs) {
+ error_report("Requested interrupt type %d is not enabled for device %x",
+ ret_intr_type, config_addr);
+ rtas_st(rets, 0, -1); /* Hardware error */
+ return;
+ }
+ /* Correct the number if the guest asked for too many */
+ if (req_num > max_irqs) {
+ trace_spapr_pci_msi_retry(config_addr, req_num, max_irqs);
+ req_num = max_irqs;
+ irq = 0; /* to avoid misleading trace */
+ goto out;
+ }
+
+ /* Allocate MSIs */
+ irq = xics_alloc_block(spapr->icp, 0, req_num, false,
+ ret_intr_type == RTAS_TYPE_MSI);
+ if (!irq) {
+ error_report("Cannot allocate MSIs for device %x", config_addr);
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ /* Setup MSI/MSIX vectors in the device (via cfgspace or MSIX BAR) */
+ spapr_msi_setmsg(pdev, SPAPR_PCI_MSI_WINDOW, ret_intr_type == RTAS_TYPE_MSIX,
+ irq, req_num);
+
+ /* Add MSI device to cache */
+ msi = g_new(spapr_pci_msi, 1);
+ msi->first_irq = irq;
+ msi->num = req_num;
+ config_addr_key = g_new(int, 1);
+ *config_addr_key = config_addr;
+ g_hash_table_insert(phb->msi, config_addr_key, msi);
+
+out:
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, req_num);
+ rtas_st(rets, 2, ++seq_num);
+ if (nret > 3) {
+ rtas_st(rets, 3, ret_intr_type);
+ }
+
+ trace_spapr_pci_rtas_ibm_change_msi(config_addr, func, req_num, irq);
+}
+
+static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token,
+ uint32_t nargs,
+ target_ulong args,
+ uint32_t nret,
+ target_ulong rets)
+{
+ uint32_t config_addr = rtas_ld(args, 0);
+ uint64_t buid = rtas_ldq(args, 1);
+ unsigned int intr_src_num = -1, ioa_intr_num = rtas_ld(args, 3);
+ sPAPRPHBState *phb = NULL;
+ PCIDevice *pdev = NULL;
+ spapr_pci_msi *msi;
+
+ /* Find sPAPRPHBState */
+ phb = spapr_pci_find_phb(spapr, buid);
+ if (phb) {
+ pdev = spapr_pci_find_dev(spapr, buid, config_addr);
+ }
+ if (!phb || !pdev) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ /* Find device descriptor and start IRQ */
+ msi = (spapr_pci_msi *) g_hash_table_lookup(phb->msi, &config_addr);
+ if (!msi || !msi->first_irq || !msi->num || (ioa_intr_num >= msi->num)) {
+ trace_spapr_pci_msi("Failed to return vector", config_addr);
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+ intr_src_num = msi->first_irq + ioa_intr_num;
+ trace_spapr_pci_rtas_ibm_query_interrupt_source_number(ioa_intr_num,
+ intr_src_num);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, intr_src_num);
+    rtas_st(rets, 2, 1); /* 0 == level; 1 == edge */
+}
+
+static void rtas_ibm_set_eeh_option(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ sPAPRPHBState *sphb;
+ sPAPRPHBClass *spc;
+ uint32_t addr, option;
+ uint64_t buid;
+ int ret;
+
+ if ((nargs != 4) || (nret != 1)) {
+ goto param_error_exit;
+ }
+
+ buid = rtas_ldq(args, 1);
+ addr = rtas_ld(args, 0);
+ option = rtas_ld(args, 3);
+
+ sphb = spapr_pci_find_phb(spapr, buid);
+ if (!sphb) {
+ goto param_error_exit;
+ }
+
+ spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
+ if (!spc->eeh_set_option) {
+ goto param_error_exit;
+ }
+
+ ret = spc->eeh_set_option(sphb, addr, option);
+ rtas_st(rets, 0, ret);
+ return;
+
+param_error_exit:
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+}
+
+static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ sPAPRPHBState *sphb;
+ sPAPRPHBClass *spc;
+ PCIDevice *pdev;
+ uint32_t addr, option;
+ uint64_t buid;
+
+ if ((nargs != 4) || (nret != 2)) {
+ goto param_error_exit;
+ }
+
+ buid = rtas_ldq(args, 1);
+ sphb = spapr_pci_find_phb(spapr, buid);
+ if (!sphb) {
+ goto param_error_exit;
+ }
+
+ spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
+ if (!spc->eeh_set_option) {
+ goto param_error_exit;
+ }
+
+ /*
+ * We always have PE address of form "00BB0001". "BB"
+ * represents the bus number of PE's primary bus.
+ */
+ option = rtas_ld(args, 3);
+ switch (option) {
+ case RTAS_GET_PE_ADDR:
+ addr = rtas_ld(args, 0);
+ pdev = spapr_pci_find_dev(spapr, buid, addr);
+ if (!pdev) {
+ goto param_error_exit;
+ }
+
+ rtas_st(rets, 1, (pci_bus_num(pdev->bus) << 16) + 1);
+ break;
+ case RTAS_GET_PE_MODE:
+ rtas_st(rets, 1, RTAS_PE_MODE_SHARED);
+ break;
+ default:
+ goto param_error_exit;
+ }
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ return;
+
+param_error_exit:
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+}
+
+static void rtas_ibm_read_slot_reset_state2(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ sPAPRPHBState *sphb;
+ sPAPRPHBClass *spc;
+ uint64_t buid;
+ int state, ret;
+
+ if ((nargs != 3) || (nret != 4 && nret != 5)) {
+ goto param_error_exit;
+ }
+
+ buid = rtas_ldq(args, 1);
+ sphb = spapr_pci_find_phb(spapr, buid);
+ if (!sphb) {
+ goto param_error_exit;
+ }
+
+ spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
+ if (!spc->eeh_get_state) {
+ goto param_error_exit;
+ }
+
+ ret = spc->eeh_get_state(sphb, &state);
+ rtas_st(rets, 0, ret);
+ if (ret != RTAS_OUT_SUCCESS) {
+ return;
+ }
+
+ rtas_st(rets, 1, state);
+ rtas_st(rets, 2, RTAS_EEH_SUPPORT);
+ rtas_st(rets, 3, RTAS_EEH_PE_UNAVAIL_INFO);
+ if (nret >= 5) {
+ rtas_st(rets, 4, RTAS_EEH_PE_RECOVER_INFO);
+ }
+ return;
+
+param_error_exit:
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+}
+
+static void rtas_ibm_set_slot_reset(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ sPAPRPHBState *sphb;
+ sPAPRPHBClass *spc;
+ uint32_t option;
+ uint64_t buid;
+ int ret;
+
+ if ((nargs != 4) || (nret != 1)) {
+ goto param_error_exit;
+ }
+
+ buid = rtas_ldq(args, 1);
+ option = rtas_ld(args, 3);
+ sphb = spapr_pci_find_phb(spapr, buid);
+ if (!sphb) {
+ goto param_error_exit;
+ }
+
+ spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
+ if (!spc->eeh_reset) {
+ goto param_error_exit;
+ }
+
+ ret = spc->eeh_reset(sphb, option);
+ rtas_st(rets, 0, ret);
+ return;
+
+param_error_exit:
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+}
+
+static void rtas_ibm_configure_pe(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ sPAPRPHBState *sphb;
+ sPAPRPHBClass *spc;
+ uint64_t buid;
+ int ret;
+
+ if ((nargs != 3) || (nret != 1)) {
+ goto param_error_exit;
+ }
+
+ buid = rtas_ldq(args, 1);
+ sphb = spapr_pci_find_phb(spapr, buid);
+ if (!sphb) {
+ goto param_error_exit;
+ }
+
+ spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
+ if (!spc->eeh_configure) {
+ goto param_error_exit;
+ }
+
+ ret = spc->eeh_configure(sphb);
+ rtas_st(rets, 0, ret);
+ return;
+
+param_error_exit:
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+}
+
+/* Stub; to be fully supported later */
+static void rtas_ibm_slot_error_detail(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ sPAPRPHBState *sphb;
+ sPAPRPHBClass *spc;
+ int option;
+ uint64_t buid;
+
+ if ((nargs != 8) || (nret != 1)) {
+ goto param_error_exit;
+ }
+
+ buid = rtas_ldq(args, 1);
+ sphb = spapr_pci_find_phb(spapr, buid);
+ if (!sphb) {
+ goto param_error_exit;
+ }
+
+ spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
+ if (!spc->eeh_set_option) {
+ goto param_error_exit;
+ }
+
+ option = rtas_ld(args, 7);
+ switch (option) {
+ case RTAS_SLOT_TEMP_ERR_LOG:
+ case RTAS_SLOT_PERM_ERR_LOG:
+ break;
+ default:
+ goto param_error_exit;
+ }
+
+    /* We don't have an error log yet */
+ rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
+ return;
+
+param_error_exit:
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+}
+
+static int pci_spapr_swizzle(int slot, int pin)
+{
+ return (slot + pin) % PCI_NUM_PINS;
+}
+
+static int pci_spapr_map_irq(PCIDevice *pci_dev, int irq_num)
+{
+ /*
+ * Here we need to convert pci_dev + irq_num to some unique value
+     * which is less than the number of IRQs on the specific bus (4). We
+ * use standard PCI swizzling, that is (slot number + pin number)
+ * % 4.
+ */
+ return pci_spapr_swizzle(PCI_SLOT(pci_dev->devfn), irq_num);
+}
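+
+/*
+ * For example (hypothetical device): a device in slot 3 raising INTB
+ * (irq_num 1) is routed to LSI (3 + 1) % 4 = 0, while the same pin on a
+ * device in slot 4 maps to LSI (4 + 1) % 4 = 1.
+ */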
+
+static void pci_spapr_set_irq(void *opaque, int irq_num, int level)
+{
+ /*
+ * Here we use the number returned by pci_spapr_map_irq to find a
+ * corresponding qemu_irq.
+ */
+ sPAPRPHBState *phb = opaque;
+
+ trace_spapr_pci_lsi_set(phb->dtbusname, irq_num, phb->lsi_table[irq_num].irq);
+ qemu_set_irq(spapr_phb_lsi_qirq(phb, irq_num), level);
+}
+
+static PCIINTxRoute spapr_route_intx_pin_to_irq(void *opaque, int pin)
+{
+ sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(opaque);
+ PCIINTxRoute route;
+
+ route.mode = PCI_INTX_ENABLED;
+ route.irq = sphb->lsi_table[pin].irq;
+
+ return route;
+}
+
+/*
+ * MSI/MSIX memory region implementation.
+ * The handler handles both MSI and MSIX.
+ * For MSI-X, the vector number is encoded as part of the address and
+ * data is set to 0.
+ * For MSI, the vector number is encoded in the least significant bits
+ * of data.
+ */
+static void spapr_msi_write(void *opaque, hwaddr addr,
+ uint64_t data, unsigned size)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+ uint32_t irq = data;
+
+ trace_spapr_pci_msi_write(addr, data, irq);
+
+ qemu_irq_pulse(xics_get_qirq(spapr->icp, irq));
+}
+
+static const MemoryRegionOps spapr_msi_ops = {
+    /* There is no .read as the read result is undefined by the PCI spec */
+ .read = NULL,
+ .write = spapr_msi_write,
+ .endianness = DEVICE_LITTLE_ENDIAN
+};
+
+/*
+ * PHB PCI device
+ */
+static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
+{
+ sPAPRPHBState *phb = opaque;
+
+ return &phb->iommu_as;
+}
+
+static char *spapr_phb_vfio_get_loc_code(sPAPRPHBState *sphb, PCIDevice *pdev)
+{
+ char *path = NULL, *buf = NULL, *host = NULL;
+
+ /* Get the PCI VFIO host id */
+ host = object_property_get_str(OBJECT(pdev), "host", NULL);
+ if (!host) {
+ goto err_out;
+ }
+
+ /* Construct the path of the file that will give us the DT location */
+ path = g_strdup_printf("/sys/bus/pci/devices/%s/devspec", host);
+ g_free(host);
+ if (!path || !g_file_get_contents(path, &buf, NULL, NULL)) {
+ goto err_out;
+ }
+ g_free(path);
+
+ /* Construct and read from host device tree the loc-code */
+ path = g_strdup_printf("/proc/device-tree%s/ibm,loc-code", buf);
+ g_free(buf);
+ if (!path || !g_file_get_contents(path, &buf, NULL, NULL)) {
+ goto err_out;
+ }
+ return buf;
+
+err_out:
+ g_free(path);
+ return NULL;
+}
+
+static char *spapr_phb_get_loc_code(sPAPRPHBState *sphb, PCIDevice *pdev)
+{
+ char *buf;
+ const char *devtype = "qemu";
+ uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))));
+
+ if (object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
+ buf = spapr_phb_vfio_get_loc_code(sphb, pdev);
+ if (buf) {
+ return buf;
+ }
+ devtype = "vfio";
+ }
+ /*
+     * For emulated devices and the VFIO failure case, make up
+ * the loc-code.
+ */
+ buf = g_strdup_printf("%s_%s:%04x:%02x:%02x.%x",
+ devtype, pdev->name, sphb->index, busnr,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ return buf;
+}
+
+/* Macros to operate with address in OF binding to PCI */
+#define b_x(x, p, l) (((x) & ((1<<(l))-1)) << (p))
+#define b_n(x) b_x((x), 31, 1) /* 0 if relocatable */
+#define b_p(x) b_x((x), 30, 1) /* 1 if prefetchable */
+#define b_t(x) b_x((x), 29, 1) /* 1 if the address is aliased */
+#define b_ss(x) b_x((x), 24, 2) /* the space code */
+#define b_bbbbbbbb(x) b_x((x), 16, 8) /* bus number */
+#define b_ddddd(x) b_x((x), 11, 5) /* device number */
+#define b_fff(x) b_x((x), 8, 3) /* function number */
+#define b_rrrrrrrr(x) b_x((x), 0, 8) /* register number */
+
+/* for 'reg'/'assigned-addresses' OF properties */
+#define RESOURCE_CELLS_SIZE 2
+#define RESOURCE_CELLS_ADDRESS 3
+
+typedef struct ResourceFields {
+ uint32_t phys_hi;
+ uint32_t phys_mid;
+ uint32_t phys_lo;
+ uint32_t size_hi;
+ uint32_t size_lo;
+} QEMU_PACKED ResourceFields;
+
+typedef struct ResourceProps {
+ ResourceFields reg[8];
+ ResourceFields assigned[7];
+ uint32_t reg_len;
+ uint32_t assigned_len;
+} ResourceProps;
+
+/* fill in the 'reg'/'assigned-addresses' OF properties for
+ * a PCI device. 'reg' describes resource requirements for a
+ * device's IO/MEM regions, 'assigned-addresses' describes the
+ * actual resource assignments.
+ *
+ * the properties are arrays of ('phys-addr', 'size') pairs describing
+ * the addressable regions of the PCI device, where 'phys-addr' is a
+ * RESOURCE_CELLS_ADDRESS-tuple of 32-bit integers corresponding to
+ * (phys.hi, phys.mid, phys.lo), and 'size' is a
+ * RESOURCE_CELLS_SIZE-tuple corresponding to (size.hi, size.lo).
+ *
+ * phys.hi = 0xYYXXXXZZ, where:
+ * 0xYY = npt000ss
+ * ||| |
+ * ||| +-- space code
+ * ||| |
+ * ||| + 00 if configuration space
+ * ||| + 01 if IO region,
+ * ||| + 10 if 32-bit MEM region
+ * ||| + 11 if 64-bit MEM region
+ * |||
+ * ||+------ for non-relocatable IO: 1 if aliased
+ * || for relocatable IO: 1 if below 64KB
+ * || for MEM: 1 if below 1MB
+ * |+------- 1 if region is prefetchable
+ * +-------- 1 if region is non-relocatable
+ * 0xXXXX = bbbbbbbb dddddfff, encoding bus, slot, and function
+ * bits respectively
+ * 0xZZ = rrrrrrrr, the register number of the BAR corresponding
+ * to the region
+ *
+ * phys.mid and phys.lo correspond respectively to the hi/lo portions
+ * of the actual address of the region.
+ *
+ * how the phys-addr/size values are used differs slightly between
+ * 'reg' and 'assigned-addresses' properties. namely, 'reg' has
+ * an additional description for the config space region of the
+ * device, and in the case of QEMU has n=0 and phys.mid=phys.lo=0
+ * to describe the region as relocatable, with an address-mapping
+ * that corresponds directly to the PHB's address space for the
+ * resource. 'assigned-addresses' always has n=1 set with an absolute
+ * address assigned for the resource. in general, 'assigned-addresses'
+ * won't be populated, since addresses for PCI devices are generally
+ * unmapped initially and left to the guest to assign.
+ *
+ * note also that addresses defined in these properties are, at least
+ * for PAPR guests, relative to the PHB's IO/MEM windows, and
+ * correspond directly to the addresses in the BARs.
+ *
+ * in accordance with PCI Bus Binding to Open Firmware,
+ * IEEE Std 1275-1994, section 4.1.1, as implemented by PAPR+ v2.7,
+ * Appendix C.
+ */
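+/*
+ * Worked example (hypothetical device, added for illustration): a 32-bit,
+ * non-prefetchable, relocatable MEM BAR at config offset 0x10 of the
+ * device at bus 0, slot 5, function 0 gets a 'reg' entry with
+ *   phys.hi = b_ss(2) | b_ddddd(5) | b_rrrrrrrr(0x10) = 0x02002810
+ * and phys.mid = phys.lo = 0; the matching 'assigned-addresses' entry
+ * additionally sets the n bit (b_n(1)) and carries the assigned address
+ * in phys.mid/phys.lo.
+ */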
+static void populate_resource_props(PCIDevice *d, ResourceProps *rp)
+{
+ int bus_num = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(d))));
+ uint32_t dev_id = (b_bbbbbbbb(bus_num) |
+ b_ddddd(PCI_SLOT(d->devfn)) |
+ b_fff(PCI_FUNC(d->devfn)));
+ ResourceFields *reg, *assigned;
+ int i, reg_idx = 0, assigned_idx = 0;
+
+ /* config space region */
+ reg = &rp->reg[reg_idx++];
+ reg->phys_hi = cpu_to_be32(dev_id);
+ reg->phys_mid = 0;
+ reg->phys_lo = 0;
+ reg->size_hi = 0;
+ reg->size_lo = 0;
+
+ for (i = 0; i < PCI_NUM_REGIONS; i++) {
+ if (!d->io_regions[i].size) {
+ continue;
+ }
+
+ reg = &rp->reg[reg_idx++];
+
+ reg->phys_hi = cpu_to_be32(dev_id | b_rrrrrrrr(pci_bar(d, i)));
+ if (d->io_regions[i].type & PCI_BASE_ADDRESS_SPACE_IO) {
+ reg->phys_hi |= cpu_to_be32(b_ss(1));
+ } else if (d->io_regions[i].type & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ reg->phys_hi |= cpu_to_be32(b_ss(3));
+ } else {
+ reg->phys_hi |= cpu_to_be32(b_ss(2));
+ }
+ reg->phys_mid = 0;
+ reg->phys_lo = 0;
+ reg->size_hi = cpu_to_be32(d->io_regions[i].size >> 32);
+ reg->size_lo = cpu_to_be32(d->io_regions[i].size);
+
+ if (d->io_regions[i].addr == PCI_BAR_UNMAPPED) {
+ continue;
+ }
+
+ assigned = &rp->assigned[assigned_idx++];
+ assigned->phys_hi = cpu_to_be32(reg->phys_hi | b_n(1));
+ assigned->phys_mid = cpu_to_be32(d->io_regions[i].addr >> 32);
+ assigned->phys_lo = cpu_to_be32(d->io_regions[i].addr);
+ assigned->size_hi = reg->size_hi;
+ assigned->size_lo = reg->size_lo;
+ }
+
+ rp->reg_len = reg_idx * sizeof(ResourceFields);
+ rp->assigned_len = assigned_idx * sizeof(ResourceFields);
+}
+
+static uint32_t spapr_phb_get_pci_drc_index(sPAPRPHBState *phb,
+ PCIDevice *pdev);
+
+static int spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
+ sPAPRPHBState *sphb)
+{
+ ResourceProps rp;
+ bool is_bridge = false;
+ int pci_status, err;
+ char *buf = NULL;
+ uint32_t drc_index = spapr_phb_get_pci_drc_index(sphb, dev);
+ uint32_t max_msi, max_msix;
+
+ if (pci_default_read_config(dev, PCI_HEADER_TYPE, 1) ==
+ PCI_HEADER_TYPE_BRIDGE) {
+ is_bridge = true;
+ }
+
+ /* in accordance with PAPR+ v2.7 13.6.3, Table 181 */
+ _FDT(fdt_setprop_cell(fdt, offset, "vendor-id",
+ pci_default_read_config(dev, PCI_VENDOR_ID, 2)));
+ _FDT(fdt_setprop_cell(fdt, offset, "device-id",
+ pci_default_read_config(dev, PCI_DEVICE_ID, 2)));
+ _FDT(fdt_setprop_cell(fdt, offset, "revision-id",
+ pci_default_read_config(dev, PCI_REVISION_ID, 1)));
+ _FDT(fdt_setprop_cell(fdt, offset, "class-code",
+ pci_default_read_config(dev, PCI_CLASS_PROG, 3)));
+ if (pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)) {
+ _FDT(fdt_setprop_cell(fdt, offset, "interrupts",
+ pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)));
+ }
+
+ if (!is_bridge) {
+ _FDT(fdt_setprop_cell(fdt, offset, "min-grant",
+ pci_default_read_config(dev, PCI_MIN_GNT, 1)));
+ _FDT(fdt_setprop_cell(fdt, offset, "max-latency",
+ pci_default_read_config(dev, PCI_MAX_LAT, 1)));
+ }
+
+ if (pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)) {
+ _FDT(fdt_setprop_cell(fdt, offset, "subsystem-id",
+ pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)));
+ }
+
+ if (pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)) {
+ _FDT(fdt_setprop_cell(fdt, offset, "subsystem-vendor-id",
+ pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)));
+ }
+
+ _FDT(fdt_setprop_cell(fdt, offset, "cache-line-size",
+ pci_default_read_config(dev, PCI_CACHE_LINE_SIZE, 1)));
+
+ /* the following fdt cells are masked off the pci status register */
+ pci_status = pci_default_read_config(dev, PCI_STATUS, 2);
+ _FDT(fdt_setprop_cell(fdt, offset, "devsel-speed",
+ PCI_STATUS_DEVSEL_MASK & pci_status));
+
+ if (pci_status & PCI_STATUS_FAST_BACK) {
+ _FDT(fdt_setprop(fdt, offset, "fast-back-to-back", NULL, 0));
+ }
+ if (pci_status & PCI_STATUS_66MHZ) {
+ _FDT(fdt_setprop(fdt, offset, "66mhz-capable", NULL, 0));
+ }
+ if (pci_status & PCI_STATUS_UDF) {
+ _FDT(fdt_setprop(fdt, offset, "udf-supported", NULL, 0));
+ }
+
+ /* NOTE: this is normally generated by firmware via path/unit name,
+ * but in our case we must set it manually since it does not get
+ * processed by OF beforehand
+ */
+ _FDT(fdt_setprop_string(fdt, offset, "name", "pci"));
+ buf = spapr_phb_get_loc_code(sphb, dev);
+ if (!buf) {
+ error_report("Failed setting the ibm,loc-code");
+ return -1;
+ }
+
+ err = fdt_setprop_string(fdt, offset, "ibm,loc-code", buf);
+ g_free(buf);
+ if (err < 0) {
+ return err;
+ }
+
+ if (drc_index) {
+ _FDT(fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index));
+ }
+
+ _FDT(fdt_setprop_cell(fdt, offset, "#address-cells",
+ RESOURCE_CELLS_ADDRESS));
+ _FDT(fdt_setprop_cell(fdt, offset, "#size-cells",
+ RESOURCE_CELLS_SIZE));
+
+ max_msi = msi_nr_vectors_allocated(dev);
+ if (max_msi) {
+ _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi", max_msi));
+ }
+ max_msix = dev->msix_entries_nr;
+ if (max_msix) {
+ _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi-x", max_msix));
+ }
+
+ populate_resource_props(dev, &rp);
+ _FDT(fdt_setprop(fdt, offset, "reg", (uint8_t *)rp.reg, rp.reg_len));
+ _FDT(fdt_setprop(fdt, offset, "assigned-addresses",
+ (uint8_t *)rp.assigned, rp.assigned_len));
+
+ return 0;
+}
+
+/* create OF node for pci device and required OF DT properties */
+static int spapr_create_pci_child_dt(sPAPRPHBState *phb, PCIDevice *dev,
+ void *fdt, int node_offset)
+{
+ int offset, ret;
+ int slot = PCI_SLOT(dev->devfn);
+ int func = PCI_FUNC(dev->devfn);
+ char nodename[FDT_NAME_MAX];
+
+ if (func != 0) {
+ snprintf(nodename, FDT_NAME_MAX, "pci@%x,%x", slot, func);
+ } else {
+ snprintf(nodename, FDT_NAME_MAX, "pci@%x", slot);
+ }
+ offset = fdt_add_subnode(fdt, node_offset, nodename);
+ ret = spapr_populate_pci_child_dt(dev, fdt, offset, phb);
+
+ g_assert(!ret);
+ if (ret) {
+ return 0;
+ }
+ return offset;
+}
+
+static void spapr_phb_add_pci_device(sPAPRDRConnector *drc,
+ sPAPRPHBState *phb,
+ PCIDevice *pdev,
+ Error **errp)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ DeviceState *dev = DEVICE(pdev);
+ void *fdt = NULL;
+ int fdt_start_offset = 0, fdt_size;
+
+ if (object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
+ sPAPRTCETable *tcet = spapr_tce_find_by_liobn(phb->dma_liobn);
+
+ spapr_tce_set_need_vfio(tcet, true);
+ }
+
+ if (dev->hotplugged) {
+ fdt = create_device_tree(&fdt_size);
+ fdt_start_offset = spapr_create_pci_child_dt(phb, pdev, fdt, 0);
+ if (!fdt_start_offset) {
+ error_setg(errp, "Failed to create pci child device tree node");
+ goto out;
+ }
+ }
+
+ drck->attach(drc, DEVICE(pdev),
+ fdt, fdt_start_offset, !dev->hotplugged, errp);
+out:
+ if (*errp) {
+ g_free(fdt);
+ }
+}
+
+static void spapr_phb_remove_pci_device_cb(DeviceState *dev, void *opaque)
+{
+    /* some guest versions do not wait for completion of a device
+ * cleanup (generally done asynchronously by the kernel) before
+ * signaling to QEMU that the device is safe, but instead sleep
+ * for some 'safe' period of time. unfortunately on a busy host
+ * this sleep isn't guaranteed to be long enough, resulting in
+ * bad things like IRQ lines being left asserted during final
+ * device removal. to deal with this we call reset just prior
+ * to finalizing the device, which will put the device back into
+ * an 'idle' state, as the device cleanup code expects.
+ */
+ pci_device_reset(PCI_DEVICE(dev));
+ object_unparent(OBJECT(dev));
+}
+
+static void spapr_phb_remove_pci_device(sPAPRDRConnector *drc,
+ sPAPRPHBState *phb,
+ PCIDevice *pdev,
+ Error **errp)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ drck->detach(drc, DEVICE(pdev), spapr_phb_remove_pci_device_cb, phb, errp);
+}
+
+static sPAPRDRConnector *spapr_phb_get_pci_drc(sPAPRPHBState *phb,
+ PCIDevice *pdev)
+{
+ uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))));
+ return spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_PCI,
+ (phb->index << 16) |
+ (busnr << 8) |
+ pdev->devfn);
+}
+
+static uint32_t spapr_phb_get_pci_drc_index(sPAPRPHBState *phb,
+ PCIDevice *pdev)
+{
+ sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
+ sPAPRDRConnectorClass *drck;
+
+ if (!drc) {
+ return 0;
+ }
+
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ return drck->get_index(drc);
+}
+
+static void spapr_phb_hot_plug_child(HotplugHandler *plug_handler,
+ DeviceState *plugged_dev, Error **errp)
+{
+ sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
+ PCIDevice *pdev = PCI_DEVICE(plugged_dev);
+ sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
+ Error *local_err = NULL;
+
+ /* if DR is disabled we don't need to do anything in the case of
+ * hotplug or coldplug callbacks
+ */
+ if (!phb->dr_enabled) {
+ /* if this is a hotplug operation initiated by the user
+ * we need to let them know it's not enabled
+ */
+ if (plugged_dev->hotplugged) {
+ error_setg(errp, QERR_BUS_NO_HOTPLUG,
+ object_get_typename(OBJECT(phb)));
+ }
+ return;
+ }
+
+ g_assert(drc);
+
+ spapr_phb_add_pci_device(drc, phb, pdev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ if (plugged_dev->hotplugged) {
+ spapr_hotplug_req_add_by_index(drc);
+ }
+}
+
+static void spapr_phb_hot_unplug_child(HotplugHandler *plug_handler,
+ DeviceState *plugged_dev, Error **errp)
+{
+ sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
+ PCIDevice *pdev = PCI_DEVICE(plugged_dev);
+ sPAPRDRConnectorClass *drck;
+ sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
+ Error *local_err = NULL;
+
+ if (!phb->dr_enabled) {
+ error_setg(errp, QERR_BUS_NO_HOTPLUG,
+ object_get_typename(OBJECT(phb)));
+ return;
+ }
+
+ g_assert(drc);
+
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ if (!drck->release_pending(drc)) {
+ spapr_phb_remove_pci_device(drc, phb, pdev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ spapr_hotplug_req_remove_by_index(drc);
+ }
+}
+
+static void spapr_phb_realize(DeviceState *dev, Error **errp)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+ SysBusDevice *s = SYS_BUS_DEVICE(dev);
+ sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(s);
+ PCIHostState *phb = PCI_HOST_BRIDGE(s);
+ sPAPRPHBClass *info = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(s);
+ char *namebuf;
+ int i;
+ PCIBus *bus;
+ uint64_t msi_window_size = 4096;
+
+ if (sphb->index != (uint32_t)-1) {
+ hwaddr windows_base;
+
+ if ((sphb->buid != (uint64_t)-1) || (sphb->dma_liobn != (uint32_t)-1)
+ || (sphb->mem_win_addr != (hwaddr)-1)
+ || (sphb->io_win_addr != (hwaddr)-1)) {
+ error_setg(errp, "Either \"index\" or other parameters must"
+ " be specified for PAPR PHB, not both");
+ return;
+ }
+
+ if (sphb->index > SPAPR_PCI_MAX_INDEX) {
+ error_setg(errp, "\"index\" for PAPR PHB is too large (max %u)",
+ SPAPR_PCI_MAX_INDEX);
+ return;
+ }
+
+ sphb->buid = SPAPR_PCI_BASE_BUID + sphb->index;
+ sphb->dma_liobn = SPAPR_PCI_LIOBN(sphb->index, 0);
+
+ windows_base = SPAPR_PCI_WINDOW_BASE
+ + sphb->index * SPAPR_PCI_WINDOW_SPACING;
+ sphb->mem_win_addr = windows_base + SPAPR_PCI_MMIO_WIN_OFF;
+ sphb->io_win_addr = windows_base + SPAPR_PCI_IO_WIN_OFF;
+ }
+
+ if (sphb->buid == (uint64_t)-1) {
+ error_setg(errp, "BUID not specified for PHB");
+ return;
+ }
+
+ if (sphb->dma_liobn == (uint32_t)-1) {
+ error_setg(errp, "LIOBN not specified for PHB");
+ return;
+ }
+
+ if (sphb->mem_win_addr == (hwaddr)-1) {
+ error_setg(errp, "Memory window address not specified for PHB");
+ return;
+ }
+
+ if (sphb->io_win_addr == (hwaddr)-1) {
+ error_setg(errp, "IO window address not specified for PHB");
+ return;
+ }
+
+ if (spapr_pci_find_phb(spapr, sphb->buid)) {
+ error_setg(errp, "PCI host bridges must have unique BUIDs");
+ return;
+ }
+
+ sphb->dtbusname = g_strdup_printf("pci@%" PRIx64, sphb->buid);
+
+ namebuf = alloca(strlen(sphb->dtbusname) + 32);
+
+ /* Initialize memory regions */
+ sprintf(namebuf, "%s.mmio", sphb->dtbusname);
+ memory_region_init(&sphb->memspace, OBJECT(sphb), namebuf, UINT64_MAX);
+
+ sprintf(namebuf, "%s.mmio-alias", sphb->dtbusname);
+ memory_region_init_alias(&sphb->memwindow, OBJECT(sphb),
+ namebuf, &sphb->memspace,
+ SPAPR_PCI_MEM_WIN_BUS_OFFSET, sphb->mem_win_size);
+ memory_region_add_subregion(get_system_memory(), sphb->mem_win_addr,
+ &sphb->memwindow);
+
+ /* Initialize IO regions */
+ sprintf(namebuf, "%s.io", sphb->dtbusname);
+ memory_region_init(&sphb->iospace, OBJECT(sphb),
+ namebuf, SPAPR_PCI_IO_WIN_SIZE);
+
+ sprintf(namebuf, "%s.io-alias", sphb->dtbusname);
+ memory_region_init_alias(&sphb->iowindow, OBJECT(sphb), namebuf,
+ &sphb->iospace, 0, SPAPR_PCI_IO_WIN_SIZE);
+ memory_region_add_subregion(get_system_memory(), sphb->io_win_addr,
+ &sphb->iowindow);
+
+ bus = pci_register_bus(dev, NULL,
+ pci_spapr_set_irq, pci_spapr_map_irq, sphb,
+ &sphb->memspace, &sphb->iospace,
+ PCI_DEVFN(0, 0), PCI_NUM_PINS, TYPE_PCI_BUS);
+ phb->bus = bus;
+ qbus_set_hotplug_handler(BUS(phb->bus), DEVICE(sphb), NULL);
+
+ /*
+ * Initialize PHB address space.
+ * By default there will be at least one subregion for default
+ * 32bit DMA window.
+ * Later the guest might want to create another DMA window
+ * which will become another memory subregion.
+ */
+ sprintf(namebuf, "%s.iommu-root", sphb->dtbusname);
+
+ memory_region_init(&sphb->iommu_root, OBJECT(sphb),
+ namebuf, UINT64_MAX);
+ address_space_init(&sphb->iommu_as, &sphb->iommu_root,
+ sphb->dtbusname);
+
+ /*
+     * As MSI/MSIX interrupts are triggered by writes to the MSI/MSIX vectors,
+ * we need to allocate some memory to catch those writes coming
+ * from msi_notify()/msix_notify().
+ * As MSIMessage:addr is going to be the same and MSIMessage:data
+     * is going to be a VIRQ number, only 4 bytes of the MSI MR will
+     * be used.
+ *
+ * For KVM we want to ensure that this memory is a full page so that
+ * our memory slot is of page size granularity.
+ */
+#ifdef CONFIG_KVM
+ if (kvm_enabled()) {
+ msi_window_size = getpagesize();
+ }
+#endif
+
+ memory_region_init_io(&sphb->msiwindow, NULL, &spapr_msi_ops, spapr,
+ "msi", msi_window_size);
+ memory_region_add_subregion(&sphb->iommu_root, SPAPR_PCI_MSI_WINDOW,
+ &sphb->msiwindow);
+
+ pci_setup_iommu(bus, spapr_pci_dma_iommu, sphb);
+
+ pci_bus_set_route_irq_fn(bus, spapr_route_intx_pin_to_irq);
+
+ QLIST_INSERT_HEAD(&spapr->phbs, sphb, list);
+
+ /* Initialize the LSI table */
+ for (i = 0; i < PCI_NUM_PINS; i++) {
+ uint32_t irq;
+
+ irq = xics_alloc_block(spapr->icp, 0, 1, true, false);
+ if (!irq) {
+ error_setg(errp, "spapr_allocate_lsi failed");
+ return;
+ }
+
+ sphb->lsi_table[i].irq = irq;
+ }
+
+ /* allocate connectors for child PCI devices */
+ if (sphb->dr_enabled) {
+ for (i = 0; i < PCI_SLOT_MAX * 8; i++) {
+ spapr_dr_connector_new(OBJECT(phb),
+ SPAPR_DR_CONNECTOR_TYPE_PCI,
+ (sphb->index << 16) | i);
+ }
+ }
+
+ if (!info->finish_realize) {
+ error_setg(errp, "finish_realize not defined");
+ return;
+ }
+
+ info->finish_realize(sphb, errp);
+
+ sphb->msi = g_hash_table_new_full(g_int_hash, g_int_equal, g_free, g_free);
+}
+
+static void spapr_phb_finish_realize(sPAPRPHBState *sphb, Error **errp)
+{
+ sPAPRTCETable *tcet;
+ uint32_t nb_table;
+
+ nb_table = sphb->dma_win_size >> SPAPR_TCE_PAGE_SHIFT;
+ tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn,
+ 0, SPAPR_TCE_PAGE_SHIFT, nb_table, false);
+ if (!tcet) {
+ error_setg(errp, "Unable to create TCE table for %s",
+ sphb->dtbusname);
+        return;
+ }
+
+ /* Register default 32bit DMA window */
+ memory_region_add_subregion(&sphb->iommu_root, sphb->dma_win_addr,
+ spapr_tce_get_iommu(tcet));
+}
+
+static int spapr_phb_children_reset(Object *child, void *opaque)
+{
+ DeviceState *dev = (DeviceState *) object_dynamic_cast(child, TYPE_DEVICE);
+
+ if (dev) {
+ device_reset(dev);
+ }
+
+ return 0;
+}
+
+static void spapr_phb_reset(DeviceState *qdev)
+{
+ /* Reset the IOMMU state */
+ object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL);
+}
+
+static Property spapr_phb_properties[] = {
+ DEFINE_PROP_UINT32("index", sPAPRPHBState, index, -1),
+ DEFINE_PROP_UINT64("buid", sPAPRPHBState, buid, -1),
+ DEFINE_PROP_UINT32("liobn", sPAPRPHBState, dma_liobn, -1),
+ DEFINE_PROP_UINT64("mem_win_addr", sPAPRPHBState, mem_win_addr, -1),
+ DEFINE_PROP_UINT64("mem_win_size", sPAPRPHBState, mem_win_size,
+ SPAPR_PCI_MMIO_WIN_SIZE),
+ DEFINE_PROP_UINT64("io_win_addr", sPAPRPHBState, io_win_addr, -1),
+ DEFINE_PROP_UINT64("io_win_size", sPAPRPHBState, io_win_size,
+ SPAPR_PCI_IO_WIN_SIZE),
+ DEFINE_PROP_BOOL("dynamic-reconfiguration", sPAPRPHBState, dr_enabled,
+ true),
+ /* Default DMA window is 0..1GB */
+ DEFINE_PROP_UINT64("dma_win_addr", sPAPRPHBState, dma_win_addr, 0),
+ DEFINE_PROP_UINT64("dma_win_size", sPAPRPHBState, dma_win_size, 0x40000000),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static const VMStateDescription vmstate_spapr_pci_lsi = {
+ .name = "spapr_pci/lsi",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_EQUAL(irq, struct spapr_pci_lsi),
+
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_spapr_pci_msi = {
+ .name = "spapr_pci/msi",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT32(key, spapr_pci_msi_mig),
+ VMSTATE_UINT32(value.first_irq, spapr_pci_msi_mig),
+ VMSTATE_UINT32(value.num, spapr_pci_msi_mig),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void spapr_pci_pre_save(void *opaque)
+{
+ sPAPRPHBState *sphb = opaque;
+ GHashTableIter iter;
+ gpointer key, value;
+ int i;
+
+ g_free(sphb->msi_devs);
+ sphb->msi_devs = NULL;
+ sphb->msi_devs_num = g_hash_table_size(sphb->msi);
+ if (!sphb->msi_devs_num) {
+ return;
+ }
+ sphb->msi_devs = g_malloc(sphb->msi_devs_num * sizeof(spapr_pci_msi_mig));
+
+ g_hash_table_iter_init(&iter, sphb->msi);
+ for (i = 0; g_hash_table_iter_next(&iter, &key, &value); ++i) {
+ sphb->msi_devs[i].key = *(uint32_t *) key;
+ sphb->msi_devs[i].value = *(spapr_pci_msi *) value;
+ }
+}
+
+static int spapr_pci_post_load(void *opaque, int version_id)
+{
+ sPAPRPHBState *sphb = opaque;
+ gpointer key, value;
+ int i;
+
+ for (i = 0; i < sphb->msi_devs_num; ++i) {
+ key = g_memdup(&sphb->msi_devs[i].key,
+ sizeof(sphb->msi_devs[i].key));
+ value = g_memdup(&sphb->msi_devs[i].value,
+ sizeof(sphb->msi_devs[i].value));
+ g_hash_table_insert(sphb->msi, key, value);
+ }
+ g_free(sphb->msi_devs);
+ sphb->msi_devs = NULL;
+ sphb->msi_devs_num = 0;
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_spapr_pci = {
+ .name = "spapr_pci",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .pre_save = spapr_pci_pre_save,
+ .post_load = spapr_pci_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_EQUAL(buid, sPAPRPHBState),
+ VMSTATE_UINT32_EQUAL(dma_liobn, sPAPRPHBState),
+ VMSTATE_UINT64_EQUAL(mem_win_addr, sPAPRPHBState),
+ VMSTATE_UINT64_EQUAL(mem_win_size, sPAPRPHBState),
+ VMSTATE_UINT64_EQUAL(io_win_addr, sPAPRPHBState),
+ VMSTATE_UINT64_EQUAL(io_win_size, sPAPRPHBState),
+ VMSTATE_STRUCT_ARRAY(lsi_table, sPAPRPHBState, PCI_NUM_PINS, 0,
+ vmstate_spapr_pci_lsi, struct spapr_pci_lsi),
+ VMSTATE_INT32(msi_devs_num, sPAPRPHBState),
+ VMSTATE_STRUCT_VARRAY_ALLOC(msi_devs, sPAPRPHBState, msi_devs_num, 0,
+ vmstate_spapr_pci_msi, spapr_pci_msi_mig),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const char *spapr_phb_root_bus_path(PCIHostState *host_bridge,
+ PCIBus *rootbus)
+{
+ sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(host_bridge);
+
+ return sphb->dtbusname;
+}
+
+static void spapr_phb_class_init(ObjectClass *klass, void *data)
+{
+ PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ sPAPRPHBClass *spc = SPAPR_PCI_HOST_BRIDGE_CLASS(klass);
+ HotplugHandlerClass *hp = HOTPLUG_HANDLER_CLASS(klass);
+
+ hc->root_bus_path = spapr_phb_root_bus_path;
+ dc->realize = spapr_phb_realize;
+ dc->props = spapr_phb_properties;
+ dc->reset = spapr_phb_reset;
+ dc->vmsd = &vmstate_spapr_pci;
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
+ dc->cannot_instantiate_with_device_add_yet = false;
+ spc->finish_realize = spapr_phb_finish_realize;
+ hp->plug = spapr_phb_hot_plug_child;
+ hp->unplug = spapr_phb_hot_unplug_child;
+}
+
+static const TypeInfo spapr_phb_info = {
+ .name = TYPE_SPAPR_PCI_HOST_BRIDGE,
+ .parent = TYPE_PCI_HOST_BRIDGE,
+ .instance_size = sizeof(sPAPRPHBState),
+ .class_init = spapr_phb_class_init,
+ .class_size = sizeof(sPAPRPHBClass),
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_HOTPLUG_HANDLER },
+ { }
+ }
+};
+
+PCIHostState *spapr_create_phb(sPAPRMachineState *spapr, int index)
+{
+ DeviceState *dev;
+
+ dev = qdev_create(NULL, TYPE_SPAPR_PCI_HOST_BRIDGE);
+ qdev_prop_set_uint32(dev, "index", index);
+ qdev_init_nofail(dev);
+
+ return PCI_HOST_BRIDGE(dev);
+}
+
+typedef struct sPAPRFDT {
+ void *fdt;
+ int node_off;
+ sPAPRPHBState *sphb;
+} sPAPRFDT;
+
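+/*
+ * Add a device tree node for each device on the given bus; when a
+ * PCI-PCI bridge is found, recurse into its secondary bus with a new
+ * sPAPRFDT context pointing at the bridge's node.
+ */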
+static void spapr_populate_pci_devices_dt(PCIBus *bus, PCIDevice *pdev,
+ void *opaque)
+{
+ PCIBus *sec_bus;
+ sPAPRFDT *p = opaque;
+ int offset;
+ sPAPRFDT s_fdt;
+
+ offset = spapr_create_pci_child_dt(p->sphb, pdev, p->fdt, p->node_off);
+ if (!offset) {
+ error_report("Failed to create pci child device tree node");
+ return;
+ }
+
+ if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
+ PCI_HEADER_TYPE_BRIDGE)) {
+ return;
+ }
+
+ sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
+ if (!sec_bus) {
+ return;
+ }
+
+ s_fdt.fdt = p->fdt;
+ s_fdt.node_off = offset;
+ s_fdt.sphb = p->sphb;
+ pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
+ spapr_populate_pci_devices_dt,
+ &s_fdt);
+}
+
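+/*
+ * Assign bus numbers behind a bridge: the subordinate bus register is
+ * temporarily set to 0xff so that config cycles reach devices on the
+ * secondary bus while recursing, then fixed up to the highest bus
+ * number actually found.
+ */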
+static void spapr_phb_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
+ void *opaque)
+{
+ unsigned int *bus_no = opaque;
+ unsigned int primary = *bus_no;
+ unsigned int subordinate = 0xff;
+ PCIBus *sec_bus = NULL;
+
+ if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
+ PCI_HEADER_TYPE_BRIDGE)) {
+ return;
+ }
+
+ (*bus_no)++;
+ pci_default_write_config(pdev, PCI_PRIMARY_BUS, primary, 1);
+ pci_default_write_config(pdev, PCI_SECONDARY_BUS, *bus_no, 1);
+ pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1);
+
+ sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
+ if (!sec_bus) {
+ return;
+ }
+
+ pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, subordinate, 1);
+ pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
+ spapr_phb_pci_enumerate_bridge, bus_no);
+ pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1);
+}
+
+static void spapr_phb_pci_enumerate(sPAPRPHBState *phb)
+{
+ PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
+ unsigned int bus_no = 0;
+
+ pci_for_each_device(bus, pci_bus_num(bus),
+ spapr_phb_pci_enumerate_bridge,
+ &bus_no);
+}
+
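+/*
+ * Build the "pci@<buid>" device tree node for this PHB: bus range,
+ * IO/MMIO "ranges", the LSI interrupt-map, the default DMA window and
+ * child nodes for all PCI devices currently on the bus.
+ */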
+int spapr_populate_pci_dt(sPAPRPHBState *phb,
+ uint32_t xics_phandle,
+ void *fdt)
+{
+ int bus_off, i, j, ret;
+ char nodename[FDT_NAME_MAX];
+ uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
+ const uint64_t mmiosize = memory_region_size(&phb->memwindow);
+ const uint64_t w32max = (1ULL << 32) - SPAPR_PCI_MEM_WIN_BUS_OFFSET;
+ const uint64_t w32size = MIN(w32max, mmiosize);
+ const uint64_t w64size = (mmiosize > w32size) ? (mmiosize - w32size) : 0;
+ struct {
+ uint32_t hi;
+ uint64_t child;
+ uint64_t parent;
+ uint64_t size;
+ } QEMU_PACKED ranges[] = {
+ {
+ cpu_to_be32(b_ss(1)), cpu_to_be64(0),
+ cpu_to_be64(phb->io_win_addr),
+ cpu_to_be64(memory_region_size(&phb->iospace)),
+ },
+ {
+ cpu_to_be32(b_ss(2)), cpu_to_be64(SPAPR_PCI_MEM_WIN_BUS_OFFSET),
+ cpu_to_be64(phb->mem_win_addr),
+ cpu_to_be64(w32size),
+ },
+ {
+ cpu_to_be32(b_ss(3)), cpu_to_be64(1ULL << 32),
+ cpu_to_be64(phb->mem_win_addr + w32size),
+ cpu_to_be64(w64size)
+ },
+ };
+ const unsigned sizeof_ranges = (w64size ? 3 : 2) * sizeof(ranges[0]);
+ uint64_t bus_reg[] = { cpu_to_be64(phb->buid), 0 };
+ uint32_t interrupt_map_mask[] = {
+ cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)};
+ uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7];
+ sPAPRTCETable *tcet;
+ PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
+ sPAPRFDT s_fdt;
+
+ /* Start populating the FDT */
+ snprintf(nodename, FDT_NAME_MAX, "pci@%" PRIx64, phb->buid);
+ bus_off = fdt_add_subnode(fdt, 0, nodename);
+ if (bus_off < 0) {
+ return bus_off;
+ }
+
+ /* Write PHB properties */
+ _FDT(fdt_setprop_string(fdt, bus_off, "device_type", "pci"));
+ _FDT(fdt_setprop_string(fdt, bus_off, "compatible", "IBM,Logical_PHB"));
+ _FDT(fdt_setprop_cell(fdt, bus_off, "#address-cells", 0x3));
+ _FDT(fdt_setprop_cell(fdt, bus_off, "#size-cells", 0x2));
+ _FDT(fdt_setprop_cell(fdt, bus_off, "#interrupt-cells", 0x1));
+ _FDT(fdt_setprop(fdt, bus_off, "used-by-rtas", NULL, 0));
+ _FDT(fdt_setprop(fdt, bus_off, "bus-range", &bus_range, sizeof(bus_range)));
+ _FDT(fdt_setprop(fdt, bus_off, "ranges", &ranges, sizeof_ranges));
+ _FDT(fdt_setprop(fdt, bus_off, "reg", &bus_reg, sizeof(bus_reg)));
+ _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pci-config-space-type", 0x1));
+ _FDT(fdt_setprop_cell(fdt, bus_off, "ibm,pe-total-#msi", XICS_IRQS));
+
+    /* Build the interrupt-map; this must match what is done
+     * in pci_spapr_map_irq
+     */
+ _FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask",
+ &interrupt_map_mask, sizeof(interrupt_map_mask)));
+ for (i = 0; i < PCI_SLOT_MAX; i++) {
+ for (j = 0; j < PCI_NUM_PINS; j++) {
+ uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j];
+ int lsi_num = pci_spapr_swizzle(i, j);
+
+ irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0));
+ irqmap[1] = 0;
+ irqmap[2] = 0;
+ irqmap[3] = cpu_to_be32(j+1);
+ irqmap[4] = cpu_to_be32(xics_phandle);
+ irqmap[5] = cpu_to_be32(phb->lsi_table[lsi_num].irq);
+ irqmap[6] = cpu_to_be32(0x8);
+ }
+ }
+ /* Write interrupt map */
+ _FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map,
+ sizeof(interrupt_map)));
+
+ tcet = spapr_tce_find_by_liobn(SPAPR_PCI_LIOBN(phb->index, 0));
+ spapr_dma_dt(fdt, bus_off, "ibm,dma-window",
+ tcet->liobn, tcet->bus_offset,
+ tcet->nb_table << tcet->page_shift);
+
+    /* Walk the bridges and program the bus numbers */
+ spapr_phb_pci_enumerate(phb);
+ _FDT(fdt_setprop_cell(fdt, bus_off, "qemu,phb-enumerated", 0x1));
+
+ /* Populate tree nodes with PCI devices attached */
+ s_fdt.fdt = fdt;
+ s_fdt.node_off = bus_off;
+ s_fdt.sphb = phb;
+ pci_for_each_device(bus, pci_bus_num(bus),
+ spapr_populate_pci_devices_dt,
+ &s_fdt);
+
+ ret = spapr_drc_populate_dt(fdt, bus_off, OBJECT(phb),
+ SPAPR_DR_CONNECTOR_TYPE_PCI);
+ if (ret) {
+ return ret;
+ }
+
+ return 0;
+}
+
+void spapr_pci_rtas_init(void)
+{
+ spapr_rtas_register(RTAS_READ_PCI_CONFIG, "read-pci-config",
+ rtas_read_pci_config);
+ spapr_rtas_register(RTAS_WRITE_PCI_CONFIG, "write-pci-config",
+ rtas_write_pci_config);
+ spapr_rtas_register(RTAS_IBM_READ_PCI_CONFIG, "ibm,read-pci-config",
+ rtas_ibm_read_pci_config);
+ spapr_rtas_register(RTAS_IBM_WRITE_PCI_CONFIG, "ibm,write-pci-config",
+ rtas_ibm_write_pci_config);
+ if (msi_supported) {
+ spapr_rtas_register(RTAS_IBM_QUERY_INTERRUPT_SOURCE_NUMBER,
+ "ibm,query-interrupt-source-number",
+ rtas_ibm_query_interrupt_source_number);
+ spapr_rtas_register(RTAS_IBM_CHANGE_MSI, "ibm,change-msi",
+ rtas_ibm_change_msi);
+ }
+
+ spapr_rtas_register(RTAS_IBM_SET_EEH_OPTION,
+ "ibm,set-eeh-option",
+ rtas_ibm_set_eeh_option);
+ spapr_rtas_register(RTAS_IBM_GET_CONFIG_ADDR_INFO2,
+ "ibm,get-config-addr-info2",
+ rtas_ibm_get_config_addr_info2);
+ spapr_rtas_register(RTAS_IBM_READ_SLOT_RESET_STATE2,
+ "ibm,read-slot-reset-state2",
+ rtas_ibm_read_slot_reset_state2);
+ spapr_rtas_register(RTAS_IBM_SET_SLOT_RESET,
+ "ibm,set-slot-reset",
+ rtas_ibm_set_slot_reset);
+ spapr_rtas_register(RTAS_IBM_CONFIGURE_PE,
+ "ibm,configure-pe",
+ rtas_ibm_configure_pe);
+ spapr_rtas_register(RTAS_IBM_SLOT_ERROR_DETAIL,
+ "ibm,slot-error-detail",
+ rtas_ibm_slot_error_detail);
+}
+
+static void spapr_pci_register_types(void)
+{
+ type_register_static(&spapr_phb_info);
+}
+
+type_init(spapr_pci_register_types)
+
+static int spapr_switch_one_vga(DeviceState *dev, void *opaque)
+{
+ bool be = *(bool *)opaque;
+
+ if (object_dynamic_cast(OBJECT(dev), "VGA")
+ || object_dynamic_cast(OBJECT(dev), "secondary-vga")) {
+ object_property_set_bool(OBJECT(dev), be, "big-endian-framebuffer",
+ &error_abort);
+ }
+ return 0;
+}
+
+void spapr_pci_switch_vga(bool big_endian)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+ sPAPRPHBState *sphb;
+
+ /*
+ * For backward compatibility with existing guests, we switch
+ * the endianness of the VGA controller when changing the guest
+ * interrupt mode
+ */
+ QLIST_FOREACH(sphb, &spapr->phbs, list) {
+ BusState *bus = &PCI_HOST_BRIDGE(sphb)->bus->qbus;
+ qbus_walk_children(bus, spapr_switch_one_vga, NULL, NULL, NULL,
+ &big_endian);
+ }
+}
diff --git a/src/hw/ppc/spapr_pci_vfio.c b/src/hw/ppc/spapr_pci_vfio.c
new file mode 100644
index 0000000..a61b418
--- /dev/null
+++ b/src/hw/ppc/spapr_pci_vfio.c
@@ -0,0 +1,280 @@
+/*
+ * QEMU sPAPR PCI host for VFIO
+ *
+ * Copyright (c) 2011-2014 Alexey Kardashevskiy, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw/ppc/spapr.h"
+#include "hw/pci-host/spapr.h"
+#include "hw/pci/msix.h"
+#include "linux/vfio.h"
+#include "hw/vfio/vfio.h"
+
+static Property spapr_phb_vfio_properties[] = {
+ DEFINE_PROP_INT32("iommu", sPAPRPHBVFIOState, iommugroupid, -1),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
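+/*
+ * VFIO variant of finish_realize: instead of using the fixed emulated
+ * window, query the host IOMMU via VFIO_IOMMU_SPAPR_TCE_GET_INFO for the
+ * real 32-bit DMA window parameters and create a matching TCE table.
+ */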
+static void spapr_phb_vfio_finish_realize(sPAPRPHBState *sphb, Error **errp)
+{
+ sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
+ struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
+ int ret;
+ sPAPRTCETable *tcet;
+ uint32_t liobn = svphb->phb.dma_liobn;
+
+ if (svphb->iommugroupid == -1) {
+ error_setg(errp, "Wrong IOMMU group ID %d", svphb->iommugroupid);
+ return;
+ }
+
+ ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
+ VFIO_CHECK_EXTENSION,
+ (void *) VFIO_SPAPR_TCE_IOMMU);
+ if (ret != 1) {
+ error_setg_errno(errp, -ret,
+ "spapr-vfio: SPAPR extension is not supported");
+ return;
+ }
+
+ ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
+ VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
+ if (ret) {
+ error_setg_errno(errp, -ret,
+ "spapr-vfio: get info from container failed");
+ return;
+ }
+
+ tcet = spapr_tce_new_table(DEVICE(sphb), liobn, info.dma32_window_start,
+ SPAPR_TCE_PAGE_SHIFT,
+ info.dma32_window_size >> SPAPR_TCE_PAGE_SHIFT,
+ true);
+ if (!tcet) {
+ error_setg(errp, "spapr-vfio: failed to create VFIO TCE table");
+ return;
+ }
+
+ /* Register default 32bit DMA window */
+ memory_region_add_subregion(&sphb->iommu_root, tcet->bus_offset,
+ spapr_tce_get_iommu(tcet));
+}
+
+static void spapr_phb_vfio_eeh_reenable(sPAPRPHBVFIOState *svphb)
+{
+ struct vfio_eeh_pe_op op = {
+ .argsz = sizeof(op),
+ .op = VFIO_EEH_PE_ENABLE
+ };
+
+ vfio_container_ioctl(&svphb->phb.iommu_as,
+ svphb->iommugroupid, VFIO_EEH_PE_OP, &op);
+}
+
+static void spapr_phb_vfio_reset(DeviceState *qdev)
+{
+    /*
+     * The PE might be in a frozen state. Re-enabling the EEH
+     * functionality on it clears the frozen state, which ensures
+     * that the contained PCI devices will work properly after
+     * reboot.
+     */
+ spapr_phb_vfio_eeh_reenable(SPAPR_PCI_VFIO_HOST_BRIDGE(qdev));
+}
+
+static int spapr_phb_vfio_eeh_set_option(sPAPRPHBState *sphb,
+ unsigned int addr, int option)
+{
+ sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
+ struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
+ int ret;
+
+ switch (option) {
+ case RTAS_EEH_DISABLE:
+ op.op = VFIO_EEH_PE_DISABLE;
+ break;
+ case RTAS_EEH_ENABLE: {
+ PCIHostState *phb;
+ PCIDevice *pdev;
+
+        /*
+         * The EEH functionality is enabled per PCI device rather
+         * than per PE. We need to check the validity of the PCI
+         * device address.
+         */
+ phb = PCI_HOST_BRIDGE(sphb);
+ pdev = pci_find_device(phb->bus,
+ (addr >> 16) & 0xFF, (addr >> 8) & 0xFF);
+ if (!pdev || !object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
+ return RTAS_OUT_PARAM_ERROR;
+ }
+
+ op.op = VFIO_EEH_PE_ENABLE;
+ break;
+ }
+ case RTAS_EEH_THAW_IO:
+ op.op = VFIO_EEH_PE_UNFREEZE_IO;
+ break;
+ case RTAS_EEH_THAW_DMA:
+ op.op = VFIO_EEH_PE_UNFREEZE_DMA;
+ break;
+ default:
+ return RTAS_OUT_PARAM_ERROR;
+ }
+
+ ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
+ VFIO_EEH_PE_OP, &op);
+ if (ret < 0) {
+ return RTAS_OUT_HW_ERROR;
+ }
+
+ return RTAS_OUT_SUCCESS;
+}
+
+static int spapr_phb_vfio_eeh_get_state(sPAPRPHBState *sphb, int *state)
+{
+ sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
+ struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
+ int ret;
+
+ op.op = VFIO_EEH_PE_GET_STATE;
+ ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
+ VFIO_EEH_PE_OP, &op);
+ if (ret < 0) {
+ return RTAS_OUT_PARAM_ERROR;
+ }
+
+ *state = ret;
+ return RTAS_OUT_SUCCESS;
+}
+
+static void spapr_phb_vfio_eeh_clear_dev_msix(PCIBus *bus,
+ PCIDevice *pdev,
+ void *opaque)
+{
+ /* Check if the device is VFIO PCI device */
+ if (!object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
+ return;
+ }
+
+    /*
+     * The MSIx table will be cleaned out by the reset. We need to
+     * disable it so that it can be reenabled properly. Also, the
+     * cached MSIx table should be cleared as it no longer reflects
+     * the contents in hardware.
+     */
+ if (msix_enabled(pdev)) {
+ uint16_t flags;
+
+ flags = pci_host_config_read_common(pdev,
+ pdev->msix_cap + PCI_MSIX_FLAGS,
+ pci_config_size(pdev), 2);
+ flags &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_host_config_write_common(pdev,
+ pdev->msix_cap + PCI_MSIX_FLAGS,
+ pci_config_size(pdev), flags, 2);
+ }
+
+ msix_reset(pdev);
+}
+
+static void spapr_phb_vfio_eeh_clear_bus_msix(PCIBus *bus, void *opaque)
+{
+ pci_for_each_device(bus, pci_bus_num(bus),
+ spapr_phb_vfio_eeh_clear_dev_msix, NULL);
+}
+
+static void spapr_phb_vfio_eeh_pre_reset(sPAPRPHBState *sphb)
+{
+ PCIHostState *phb = PCI_HOST_BRIDGE(sphb);
+
+ pci_for_each_bus(phb->bus, spapr_phb_vfio_eeh_clear_bus_msix, NULL);
+}
+
+static int spapr_phb_vfio_eeh_reset(sPAPRPHBState *sphb, int option)
+{
+ sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
+ struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
+ int ret;
+
+ switch (option) {
+ case RTAS_SLOT_RESET_DEACTIVATE:
+ op.op = VFIO_EEH_PE_RESET_DEACTIVATE;
+ break;
+ case RTAS_SLOT_RESET_HOT:
+ spapr_phb_vfio_eeh_pre_reset(sphb);
+ op.op = VFIO_EEH_PE_RESET_HOT;
+ break;
+ case RTAS_SLOT_RESET_FUNDAMENTAL:
+ spapr_phb_vfio_eeh_pre_reset(sphb);
+ op.op = VFIO_EEH_PE_RESET_FUNDAMENTAL;
+ break;
+ default:
+ return RTAS_OUT_PARAM_ERROR;
+ }
+
+ ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
+ VFIO_EEH_PE_OP, &op);
+ if (ret < 0) {
+ return RTAS_OUT_HW_ERROR;
+ }
+
+ return RTAS_OUT_SUCCESS;
+}
+
+static int spapr_phb_vfio_eeh_configure(sPAPRPHBState *sphb)
+{
+ sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
+ struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
+ int ret;
+
+ op.op = VFIO_EEH_PE_CONFIGURE;
+ ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
+ VFIO_EEH_PE_OP, &op);
+ if (ret < 0) {
+ return RTAS_OUT_PARAM_ERROR;
+ }
+
+ return RTAS_OUT_SUCCESS;
+}
+
+static void spapr_phb_vfio_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ sPAPRPHBClass *spc = SPAPR_PCI_HOST_BRIDGE_CLASS(klass);
+
+ dc->props = spapr_phb_vfio_properties;
+ dc->reset = spapr_phb_vfio_reset;
+ spc->finish_realize = spapr_phb_vfio_finish_realize;
+ spc->eeh_set_option = spapr_phb_vfio_eeh_set_option;
+ spc->eeh_get_state = spapr_phb_vfio_eeh_get_state;
+ spc->eeh_reset = spapr_phb_vfio_eeh_reset;
+ spc->eeh_configure = spapr_phb_vfio_eeh_configure;
+}
+
+static const TypeInfo spapr_phb_vfio_info = {
+ .name = TYPE_SPAPR_PCI_VFIO_HOST_BRIDGE,
+ .parent = TYPE_SPAPR_PCI_HOST_BRIDGE,
+ .instance_size = sizeof(sPAPRPHBVFIOState),
+ .class_init = spapr_phb_vfio_class_init,
+ .class_size = sizeof(sPAPRPHBClass),
+};
+
+static void spapr_pci_vfio_register_types(void)
+{
+ type_register_static(&spapr_phb_vfio_info);
+}
+
+type_init(spapr_pci_vfio_register_types)
diff --git a/src/hw/ppc/spapr_rng.c b/src/hw/ppc/spapr_rng.c
new file mode 100644
index 0000000..ed43d5e
--- /dev/null
+++ b/src/hw/ppc/spapr_rng.c
@@ -0,0 +1,186 @@
+/*
+ * QEMU sPAPR random number generator "device" for H_RANDOM hypercall
+ *
+ * Copyright 2015 Thomas Huth, Red Hat Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/error-report.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/device_tree.h"
+#include "sysemu/rng.h"
+#include "hw/ppc/spapr.h"
+#include "kvm_ppc.h"
+
+#define SPAPR_RNG(obj) \
+ OBJECT_CHECK(sPAPRRngState, (obj), TYPE_SPAPR_RNG)
+
+struct sPAPRRngState {
+ /*< private >*/
+ DeviceState ds;
+ RngBackend *backend;
+ bool use_kvm;
+};
+typedef struct sPAPRRngState sPAPRRngState;
+
+struct HRandomData {
+ QemuSemaphore sem;
+ union {
+ uint64_t v64;
+ uint8_t v8[8];
+ } val;
+ int received;
+};
+typedef struct HRandomData HRandomData;
+
+/* Callback function for the RngBackend */
+static void random_recv(void *dest, const void *src, size_t size)
+{
+ HRandomData *hrdp = dest;
+
+ if (src && size > 0) {
+ assert(size + hrdp->received <= sizeof(hrdp->val.v8));
+ memcpy(&hrdp->val.v8[hrdp->received], src, size);
+ hrdp->received += size;
+ }
+
+ qemu_sem_post(&hrdp->sem);
+}
+
+/* Handler for the H_RANDOM hypercall */
+static target_ulong h_random(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ sPAPRRngState *rngstate;
+ HRandomData hrdata;
+
+ rngstate = SPAPR_RNG(object_resolve_path_type("", TYPE_SPAPR_RNG, NULL));
+
+ if (!rngstate || !rngstate->backend) {
+ return H_HARDWARE;
+ }
+
+ qemu_sem_init(&hrdata.sem, 0);
+ hrdata.val.v64 = 0;
+ hrdata.received = 0;
+
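+    /*
+     * The backend may deliver entropy asynchronously, so drop the global
+     * lock while waiting on the semaphore and keep requesting until all
+     * 8 bytes have been received.
+     */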
+ qemu_mutex_unlock_iothread();
+ while (hrdata.received < 8) {
+ rng_backend_request_entropy(rngstate->backend, 8 - hrdata.received,
+ random_recv, &hrdata);
+ qemu_sem_wait(&hrdata.sem);
+ }
+ qemu_mutex_lock_iothread();
+
+ qemu_sem_destroy(&hrdata.sem);
+ args[0] = hrdata.val.v64;
+
+ return H_SUCCESS;
+}
+
+static void spapr_rng_instance_init(Object *obj)
+{
+ sPAPRRngState *rngstate = SPAPR_RNG(obj);
+
+ if (object_resolve_path_type("", TYPE_SPAPR_RNG, NULL) != NULL) {
+ error_report("spapr-rng can not be instantiated twice!");
+ return;
+ }
+
+ object_property_add_link(obj, "rng", TYPE_RNG_BACKEND,
+ (Object **)&rngstate->backend,
+ object_property_allow_set_link,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE, NULL);
+ object_property_set_description(obj, "rng",
+ "ID of the random number generator backend",
+ NULL);
+}
+
+static void spapr_rng_realize(DeviceState *dev, Error **errp)
+{
+ sPAPRRngState *rngstate = SPAPR_RNG(dev);
+
+ if (rngstate->use_kvm) {
+ if (kvmppc_enable_hwrng() == 0) {
+ return;
+ }
+        /*
+         * If the user specified both use-kvm and a backend, we fall back
+         * to the backend now. If not, provide an appropriate error message.
+         */
+ if (!rngstate->backend) {
+ error_setg(errp, "Could not initialize in-kernel H_RANDOM call!");
+ return;
+ }
+ }
+
+ if (rngstate->backend) {
+ spapr_register_hypercall(H_RANDOM, h_random);
+ } else {
+ error_setg(errp, "spapr-rng needs an RNG backend!");
+ }
+}
+
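+/*
+ * Advertise the random number facility to the guest by adding the
+ * /ibm,platform-facilities/ibm,random-v1 nodes to the device tree.
+ */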
+int spapr_rng_populate_dt(void *fdt)
+{
+ int node;
+ int ret;
+
+ node = qemu_fdt_add_subnode(fdt, "/ibm,platform-facilities");
+ if (node <= 0) {
+ return -1;
+ }
+ ret = fdt_setprop_string(fdt, node, "device_type",
+ "ibm,platform-facilities");
+ ret |= fdt_setprop_cell(fdt, node, "#address-cells", 0x1);
+ ret |= fdt_setprop_cell(fdt, node, "#size-cells", 0x0);
+
+ node = fdt_add_subnode(fdt, node, "ibm,random-v1");
+ if (node <= 0) {
+ return -1;
+ }
+ ret |= fdt_setprop_string(fdt, node, "compatible", "ibm,random");
+
+ return ret ? -1 : 0;
+}
+
+static Property spapr_rng_properties[] = {
+ DEFINE_PROP_BOOL("use-kvm", sPAPRRngState, use_kvm, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void spapr_rng_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = spapr_rng_realize;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->props = spapr_rng_properties;
+}
+
+static const TypeInfo spapr_rng_info = {
+ .name = TYPE_SPAPR_RNG,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(sPAPRRngState),
+ .instance_init = spapr_rng_instance_init,
+ .class_init = spapr_rng_class_init,
+};
+
+static void spapr_rng_register_type(void)
+{
+ type_register_static(&spapr_rng_info);
+}
+type_init(spapr_rng_register_type)
diff --git a/src/hw/ppc/spapr_rtas.c b/src/hw/ppc/spapr_rtas.c
new file mode 100644
index 0000000..34b12a3
--- /dev/null
+++ b/src/hw/ppc/spapr_rtas.c
@@ -0,0 +1,769 @@
+/*
+ * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
+ *
+ * Hypercall based emulated RTAS
+ *
+ * Copyright (c) 2010-2011 David Gibson, IBM Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+#include "cpu.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/char.h"
+#include "hw/qdev.h"
+#include "sysemu/device_tree.h"
+#include "sysemu/cpus.h"
+
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_vio.h"
+#include "qapi-event.h"
+#include "hw/boards.h"
+
+#include <libfdt.h>
+#include "hw/ppc/spapr_drc.h"
+
+/* #define DEBUG_SPAPR */
+
+#ifdef DEBUG_SPAPR
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+static sPAPRConfigureConnectorState *spapr_ccs_find(sPAPRMachineState *spapr,
+ uint32_t drc_index)
+{
+ sPAPRConfigureConnectorState *ccs = NULL;
+
+ QTAILQ_FOREACH(ccs, &spapr->ccs_list, next) {
+ if (ccs->drc_index == drc_index) {
+ break;
+ }
+ }
+
+ return ccs;
+}
+
+static void spapr_ccs_add(sPAPRMachineState *spapr,
+ sPAPRConfigureConnectorState *ccs)
+{
+ g_assert(!spapr_ccs_find(spapr, ccs->drc_index));
+ QTAILQ_INSERT_HEAD(&spapr->ccs_list, ccs, next);
+}
+
+static void spapr_ccs_remove(sPAPRMachineState *spapr,
+ sPAPRConfigureConnectorState *ccs)
+{
+ QTAILQ_REMOVE(&spapr->ccs_list, ccs, next);
+ g_free(ccs);
+}
+
+void spapr_ccs_reset_hook(void *opaque)
+{
+ sPAPRMachineState *spapr = opaque;
+ sPAPRConfigureConnectorState *ccs, *ccs_tmp;
+
+ QTAILQ_FOREACH_SAFE(ccs, &spapr->ccs_list, next, ccs_tmp) {
+ spapr_ccs_remove(spapr, ccs);
+ }
+}
+
+static void rtas_display_character(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ uint8_t c = rtas_ld(args, 0);
+ VIOsPAPRDevice *sdev = vty_lookup(spapr, 0);
+
+ if (!sdev) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ } else {
+ vty_putchars(sdev, &c, sizeof(c));
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ }
+}
+
+static void rtas_power_off(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs, target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ if (nargs != 2 || nret != 1) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+ qemu_system_shutdown_request();
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+}
+
+static void rtas_system_reboot(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ if (nargs != 0 || nret != 1) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+ qemu_system_reset_request();
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+}
+
+static void rtas_query_cpu_stopped_state(PowerPCCPU *cpu_,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ target_ulong id;
+ PowerPCCPU *cpu;
+
+ if (nargs != 1 || nret != 2) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ id = rtas_ld(args, 0);
+ cpu = ppc_get_vcpu_by_dt_id(id);
+ if (cpu != NULL) {
+ if (CPU(cpu)->halted) {
+ rtas_st(rets, 1, 0);
+ } else {
+ rtas_st(rets, 1, 2);
+ }
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ return;
+ }
+
+ /* Didn't find a matching cpu */
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+}
+
+static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ target_ulong id, start, r3;
+ PowerPCCPU *cpu;
+
+ if (nargs != 3 || nret != 1) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ id = rtas_ld(args, 0);
+ start = rtas_ld(args, 1);
+ r3 = rtas_ld(args, 2);
+
+ cpu = ppc_get_vcpu_by_dt_id(id);
+ if (cpu != NULL) {
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ if (!cs->halted) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+        /* This will make sure QEMU state is up to date with KVM, and
+         * mark it dirty so our changes get flushed back before the
+         * new CPU enters */
+ kvm_cpu_synchronize_state(cs);
+
+ env->msr = (1ULL << MSR_SF) | (1ULL << MSR_ME);
+ env->nip = start;
+ env->gpr[3] = r3;
+ cs->halted = 0;
+
+ qemu_cpu_kick(cs);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ return;
+ }
+
+ /* Didn't find a matching cpu */
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+}
+
+static void rtas_stop_self(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ cs->halted = 1;
+ qemu_cpu_kick(cs);
+    /*
+     * While stopping a CPU, the guest calls H_CPPR, which
+     * effectively disables interrupts at the XICS level.
+     * However, decrementer interrupts in TCG can still wake the
+     * CPU up, so here we disable interrupts in the MSR as well.
+     * As rtas_start_cpu() resets the whole MSR anyway, there is
+     * no need to bother with specific bits; we just clear it.
+     */
+ env->msr = 0;
+}
+
+static void rtas_ibm_get_system_parameter(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ target_ulong parameter = rtas_ld(args, 0);
+ target_ulong buffer = rtas_ld(args, 1);
+ target_ulong length = rtas_ld(args, 2);
+ target_ulong ret = RTAS_OUT_SUCCESS;
+
+ switch (parameter) {
+ case RTAS_SYSPARM_SPLPAR_CHARACTERISTICS: {
+ char *param_val = g_strdup_printf("MaxEntCap=%d,"
+ "DesMem=%llu,"
+ "DesProcs=%d,"
+ "MaxPlatProcs=%d",
+ max_cpus,
+ current_machine->ram_size / M_BYTE,
+ smp_cpus,
+ max_cpus);
+ rtas_st_buffer(buffer, length, (uint8_t *)param_val, strlen(param_val));
+ g_free(param_val);
+ break;
+ }
+ case RTAS_SYSPARM_DIAGNOSTICS_RUN_MODE: {
+ uint8_t param_val = DIAGNOSTICS_RUN_MODE_DISABLED;
+
+ rtas_st_buffer(buffer, length, &param_val, sizeof(param_val));
+ break;
+ }
+ case RTAS_SYSPARM_UUID:
+ rtas_st_buffer(buffer, length, qemu_uuid, (qemu_uuid_set ? 16 : 0));
+ break;
+ default:
+ ret = RTAS_OUT_NOT_SUPPORTED;
+ }
+
+ rtas_st(rets, 0, ret);
+}
+
+static void rtas_ibm_set_system_parameter(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ target_ulong parameter = rtas_ld(args, 0);
+ target_ulong ret = RTAS_OUT_NOT_SUPPORTED;
+
+ switch (parameter) {
+ case RTAS_SYSPARM_SPLPAR_CHARACTERISTICS:
+ case RTAS_SYSPARM_DIAGNOSTICS_RUN_MODE:
+ case RTAS_SYSPARM_UUID:
+ ret = RTAS_OUT_NOT_AUTHORIZED;
+ break;
+ }
+
+ rtas_st(rets, 0, ret);
+}
+
+static void rtas_ibm_os_term(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ target_ulong ret = 0;
+
+ qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE, &error_abort);
+
+ rtas_st(rets, 0, ret);
+}
+
+static void rtas_set_power_level(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ int32_t power_domain;
+
+ if (nargs != 2 || nret != 2) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+    /* we currently only use a single "live insert" power domain for
+     * hotplugged/dlpar'd resources, so the power is always live/full (100)
+     */
+ power_domain = rtas_ld(args, 0);
+ if (power_domain != -1) {
+ rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+ return;
+ }
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, 100);
+}
+
+static void rtas_get_power_level(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ int32_t power_domain;
+
+ if (nargs != 1 || nret != 2) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+    /* we currently only use a single "live insert" power domain for
+     * hotplugged/dlpar'd resources, so the power is always live/full (100)
+     */
+ power_domain = rtas_ld(args, 0);
+ if (power_domain != -1) {
+ rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+ return;
+ }
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, 100);
+}
+
+static bool sensor_type_is_dr(uint32_t sensor_type)
+{
+ switch (sensor_type) {
+ case RTAS_SENSOR_TYPE_ISOLATION_STATE:
+ case RTAS_SENSOR_TYPE_DR:
+ case RTAS_SENSOR_TYPE_ALLOCATION_STATE:
+ return true;
+ }
+
+ return false;
+}
+
+static void rtas_set_indicator(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ uint32_t sensor_type;
+ uint32_t sensor_index;
+ uint32_t sensor_state;
+ uint32_t ret = RTAS_OUT_SUCCESS;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+
+ if (nargs != 3 || nret != 1) {
+ ret = RTAS_OUT_PARAM_ERROR;
+ goto out;
+ }
+
+ sensor_type = rtas_ld(args, 0);
+ sensor_index = rtas_ld(args, 1);
+ sensor_state = rtas_ld(args, 2);
+
+ if (!sensor_type_is_dr(sensor_type)) {
+ goto out_unimplemented;
+ }
+
+ /* if this is a DR sensor we can assume sensor_index == drc_index */
+ drc = spapr_dr_connector_by_index(sensor_index);
+ if (!drc) {
+ DPRINTF("rtas_set_indicator: invalid sensor/DRC index: %xh\n",
+ sensor_index);
+ ret = RTAS_OUT_PARAM_ERROR;
+ goto out;
+ }
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ switch (sensor_type) {
+ case RTAS_SENSOR_TYPE_ISOLATION_STATE:
+ /* if the guest is configuring a device attached to this
+ * DRC, we should reset the configuration state at this
+ * point since it may no longer be reliable (guest released
+ * device and needs to start over, or unplug occurred so
+ * the FDT is no longer valid)
+ */
+ if (sensor_state == SPAPR_DR_ISOLATION_STATE_ISOLATED) {
+ sPAPRConfigureConnectorState *ccs = spapr_ccs_find(spapr,
+ sensor_index);
+ if (ccs) {
+ spapr_ccs_remove(spapr, ccs);
+ }
+ }
+ ret = drck->set_isolation_state(drc, sensor_state);
+ break;
+ case RTAS_SENSOR_TYPE_DR:
+ ret = drck->set_indicator_state(drc, sensor_state);
+ break;
+ case RTAS_SENSOR_TYPE_ALLOCATION_STATE:
+ ret = drck->set_allocation_state(drc, sensor_state);
+ break;
+ default:
+ goto out_unimplemented;
+ }
+
+out:
+ rtas_st(rets, 0, ret);
+ return;
+
+out_unimplemented:
+ /* currently only DR-related sensors are implemented */
+ DPRINTF("rtas_set_indicator: sensor/indicator not implemented: %d\n",
+ sensor_type);
+ rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+}
+
+static void rtas_get_sensor_state(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ uint32_t sensor_type;
+ uint32_t sensor_index;
+ uint32_t sensor_state = 0;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+ uint32_t ret = RTAS_OUT_SUCCESS;
+
+ if (nargs != 2 || nret != 2) {
+ ret = RTAS_OUT_PARAM_ERROR;
+ goto out;
+ }
+
+ sensor_type = rtas_ld(args, 0);
+ sensor_index = rtas_ld(args, 1);
+
+ if (sensor_type != RTAS_SENSOR_TYPE_ENTITY_SENSE) {
+ /* currently only DR-related sensors are implemented */
+ DPRINTF("rtas_get_sensor_state: sensor/indicator not implemented: %d\n",
+ sensor_type);
+ ret = RTAS_OUT_NOT_SUPPORTED;
+ goto out;
+ }
+
+ drc = spapr_dr_connector_by_index(sensor_index);
+ if (!drc) {
+ DPRINTF("rtas_get_sensor_state: invalid sensor/DRC index: %xh\n",
+ sensor_index);
+ ret = RTAS_OUT_PARAM_ERROR;
+ goto out;
+ }
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ ret = drck->entity_sense(drc, &sensor_state);
+
+out:
+ rtas_st(rets, 0, ret);
+ rtas_st(rets, 1, sensor_state);
+}
+
+/* configure-connector work area offsets, int32_t units for field
+ * indexes, bytes for field offset/len values.
+ *
+ * as documented by PAPR+ v2.7, 13.5.3.5
+ */
+#define CC_IDX_NODE_NAME_OFFSET 2
+#define CC_IDX_PROP_NAME_OFFSET 2
+#define CC_IDX_PROP_LEN 3
+#define CC_IDX_PROP_DATA_OFFSET 4
+#define CC_VAL_DATA_OFFSET ((CC_IDX_PROP_DATA_OFFSET + 1) * 4)
+#define CC_WA_LEN 4096
+
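+/*
+ * ibm,configure-connector: walk the FDT fragment attached to a DRC one
+ * tag at a time, handing node names, property names and property values
+ * back to the guest through the work area until the subtree is complete.
+ */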
+static void rtas_ibm_configure_connector(PowerPCCPU *cpu,
+ sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ uint64_t wa_addr;
+ uint64_t wa_offset;
+ uint32_t drc_index;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+ sPAPRConfigureConnectorState *ccs;
+ sPAPRDRCCResponse resp = SPAPR_DR_CC_RESPONSE_CONTINUE;
+ int rc;
+ const void *fdt;
+
+ if (nargs != 2 || nret != 1) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ wa_addr = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 0);
+
+ drc_index = rtas_ld(wa_addr, 0);
+ drc = spapr_dr_connector_by_index(drc_index);
+ if (!drc) {
+ DPRINTF("rtas_ibm_configure_connector: invalid DRC index: %xh\n",
+ drc_index);
+ rc = RTAS_OUT_PARAM_ERROR;
+ goto out;
+ }
+
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ fdt = drck->get_fdt(drc, NULL);
+ if (!fdt) {
+ DPRINTF("rtas_ibm_configure_connector: Missing FDT for DRC index: %xh\n",
+ drc_index);
+ rc = SPAPR_DR_CC_RESPONSE_NOT_CONFIGURABLE;
+ goto out;
+ }
+
+ ccs = spapr_ccs_find(spapr, drc_index);
+ if (!ccs) {
+ ccs = g_new0(sPAPRConfigureConnectorState, 1);
+ (void)drck->get_fdt(drc, &ccs->fdt_offset);
+ ccs->drc_index = drc_index;
+ spapr_ccs_add(spapr, ccs);
+ }
+
+ do {
+ uint32_t tag;
+ const char *name;
+ const struct fdt_property *prop;
+ int fdt_offset_next, prop_len;
+
+ tag = fdt_next_tag(fdt, ccs->fdt_offset, &fdt_offset_next);
+
+ switch (tag) {
+ case FDT_BEGIN_NODE:
+ ccs->fdt_depth++;
+ name = fdt_get_name(fdt, ccs->fdt_offset, NULL);
+
+ /* provide the name of the next OF node */
+ wa_offset = CC_VAL_DATA_OFFSET;
+ rtas_st(wa_addr, CC_IDX_NODE_NAME_OFFSET, wa_offset);
+ rtas_st_buffer_direct(wa_addr + wa_offset, CC_WA_LEN - wa_offset,
+ (uint8_t *)name, strlen(name) + 1);
+ resp = SPAPR_DR_CC_RESPONSE_NEXT_CHILD;
+ break;
+ case FDT_END_NODE:
+ ccs->fdt_depth--;
+ if (ccs->fdt_depth == 0) {
+ /* done sending the device tree, don't need to track
+ * the state anymore
+ */
+ drck->set_configured(drc);
+ spapr_ccs_remove(spapr, ccs);
+ ccs = NULL;
+ resp = SPAPR_DR_CC_RESPONSE_SUCCESS;
+ } else {
+ resp = SPAPR_DR_CC_RESPONSE_PREV_PARENT;
+ }
+ break;
+ case FDT_PROP:
+ prop = fdt_get_property_by_offset(fdt, ccs->fdt_offset,
+ &prop_len);
+ name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
+
+ /* provide the name of the next OF property */
+ wa_offset = CC_VAL_DATA_OFFSET;
+ rtas_st(wa_addr, CC_IDX_PROP_NAME_OFFSET, wa_offset);
+ rtas_st_buffer_direct(wa_addr + wa_offset, CC_WA_LEN - wa_offset,
+ (uint8_t *)name, strlen(name) + 1);
+
+            /* provide the length and value of the OF property. Data gets
+             * placed immediately after the NULL terminator of the OF
+             * property's name string.
+             */
+            wa_offset += strlen(name) + 1;
+ rtas_st(wa_addr, CC_IDX_PROP_LEN, prop_len);
+ rtas_st(wa_addr, CC_IDX_PROP_DATA_OFFSET, wa_offset);
+ rtas_st_buffer_direct(wa_addr + wa_offset, CC_WA_LEN - wa_offset,
+ (uint8_t *)((struct fdt_property *)prop)->data,
+ prop_len);
+ resp = SPAPR_DR_CC_RESPONSE_NEXT_PROPERTY;
+ break;
+ case FDT_END:
+ resp = SPAPR_DR_CC_RESPONSE_ERROR;
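+            /* fall through */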
+ default:
+ /* keep seeking for an actionable tag */
+ break;
+ }
+ if (ccs) {
+ ccs->fdt_offset = fdt_offset_next;
+ }
+ } while (resp == SPAPR_DR_CC_RESPONSE_CONTINUE);
+
+ rc = resp;
+out:
+ rtas_st(rets, 0, rc);
+}
+
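+/*
+ * RTAS calls are dispatched through a flat table indexed by
+ * (token - RTAS_TOKEN_BASE): spapr_rtas_register() fills in an entry and
+ * spapr_rtas_call() looks the handler up at run time.
+ */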
+static struct rtas_call {
+ const char *name;
+ spapr_rtas_fn fn;
+} rtas_table[RTAS_TOKEN_MAX - RTAS_TOKEN_BASE];
+
+target_ulong spapr_rtas_call(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs, target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ if ((token >= RTAS_TOKEN_BASE) && (token < RTAS_TOKEN_MAX)) {
+ struct rtas_call *call = rtas_table + (token - RTAS_TOKEN_BASE);
+
+ if (call->fn) {
+ call->fn(cpu, spapr, token, nargs, args, nret, rets);
+ return H_SUCCESS;
+ }
+ }
+
+ /* HACK: Some Linux early debug code uses RTAS display-character,
+ * but assumes the token value is 0xa (which it is on some real
+ * machines) without looking it up in the device tree. This
+ * special case makes this work */
+ if (token == 0xa) {
+ rtas_display_character(cpu, spapr, 0xa, nargs, args, nret, rets);
+ return H_SUCCESS;
+ }
+
+ hcall_dprintf("Unknown RTAS token 0x%x\n", token);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return H_PARAMETER;
+}
+
+void spapr_rtas_register(int token, const char *name, spapr_rtas_fn fn)
+{
+ if (!((token >= RTAS_TOKEN_BASE) && (token < RTAS_TOKEN_MAX))) {
+ fprintf(stderr, "RTAS invalid token 0x%x\n", token);
+ exit(1);
+ }
+
+ token -= RTAS_TOKEN_BASE;
+ if (rtas_table[token].name) {
+ fprintf(stderr, "RTAS call \"%s\" is registered already as 0x%x\n",
+ rtas_table[token].name, token);
+ exit(1);
+ }
+
+ rtas_table[token].name = name;
+ rtas_table[token].fn = fn;
+}
+
+int spapr_rtas_device_tree_setup(void *fdt, hwaddr rtas_addr,
+ hwaddr rtas_size)
+{
+ int ret;
+ int i;
+ uint32_t lrdr_capacity[5];
+ MachineState *machine = MACHINE(qdev_get_machine());
+
+ ret = fdt_add_mem_rsv(fdt, rtas_addr, rtas_size);
+ if (ret < 0) {
+ fprintf(stderr, "Couldn't add RTAS reserve entry: %s\n",
+ fdt_strerror(ret));
+ return ret;
+ }
+
+ ret = qemu_fdt_setprop_cell(fdt, "/rtas", "linux,rtas-base",
+ rtas_addr);
+ if (ret < 0) {
+ fprintf(stderr, "Couldn't add linux,rtas-base property: %s\n",
+ fdt_strerror(ret));
+ return ret;
+ }
+
+ ret = qemu_fdt_setprop_cell(fdt, "/rtas", "linux,rtas-entry",
+ rtas_addr);
+ if (ret < 0) {
+ fprintf(stderr, "Couldn't add linux,rtas-entry property: %s\n",
+ fdt_strerror(ret));
+ return ret;
+ }
+
+ ret = qemu_fdt_setprop_cell(fdt, "/rtas", "rtas-size",
+ rtas_size);
+ if (ret < 0) {
+ fprintf(stderr, "Couldn't add rtas-size property: %s\n",
+ fdt_strerror(ret));
+ return ret;
+ }
+
+ for (i = 0; i < RTAS_TOKEN_MAX - RTAS_TOKEN_BASE; i++) {
+ struct rtas_call *call = &rtas_table[i];
+
+ if (!call->name) {
+ continue;
+ }
+
+ ret = qemu_fdt_setprop_cell(fdt, "/rtas", call->name,
+ i + RTAS_TOKEN_BASE);
+ if (ret < 0) {
+ fprintf(stderr, "Couldn't add rtas token for %s: %s\n",
+ call->name, fdt_strerror(ret));
+ return ret;
+ }
+
+ }
+
+ lrdr_capacity[0] = cpu_to_be32(((uint64_t)machine->maxram_size) >> 32);
+ lrdr_capacity[1] = cpu_to_be32(machine->maxram_size & 0xffffffff);
+ lrdr_capacity[2] = 0;
+ lrdr_capacity[3] = cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE);
+ lrdr_capacity[4] = cpu_to_be32(max_cpus/smp_threads);
+ ret = qemu_fdt_setprop(fdt, "/rtas", "ibm,lrdr-capacity", lrdr_capacity,
+ sizeof(lrdr_capacity));
+ if (ret < 0) {
+ fprintf(stderr, "Couldn't add ibm,lrdr-capacity rtas property\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void core_rtas_register_types(void)
+{
+ spapr_rtas_register(RTAS_DISPLAY_CHARACTER, "display-character",
+ rtas_display_character);
+ spapr_rtas_register(RTAS_POWER_OFF, "power-off", rtas_power_off);
+ spapr_rtas_register(RTAS_SYSTEM_REBOOT, "system-reboot",
+ rtas_system_reboot);
+ spapr_rtas_register(RTAS_QUERY_CPU_STOPPED_STATE, "query-cpu-stopped-state",
+ rtas_query_cpu_stopped_state);
+ spapr_rtas_register(RTAS_START_CPU, "start-cpu", rtas_start_cpu);
+ spapr_rtas_register(RTAS_STOP_SELF, "stop-self", rtas_stop_self);
+ spapr_rtas_register(RTAS_IBM_GET_SYSTEM_PARAMETER,
+ "ibm,get-system-parameter",
+ rtas_ibm_get_system_parameter);
+ spapr_rtas_register(RTAS_IBM_SET_SYSTEM_PARAMETER,
+ "ibm,set-system-parameter",
+ rtas_ibm_set_system_parameter);
+ spapr_rtas_register(RTAS_IBM_OS_TERM, "ibm,os-term",
+ rtas_ibm_os_term);
+ spapr_rtas_register(RTAS_SET_POWER_LEVEL, "set-power-level",
+ rtas_set_power_level);
+ spapr_rtas_register(RTAS_GET_POWER_LEVEL, "get-power-level",
+ rtas_get_power_level);
+ spapr_rtas_register(RTAS_SET_INDICATOR, "set-indicator",
+ rtas_set_indicator);
+ spapr_rtas_register(RTAS_GET_SENSOR_STATE, "get-sensor-state",
+ rtas_get_sensor_state);
+ spapr_rtas_register(RTAS_IBM_CONFIGURE_CONNECTOR, "ibm,configure-connector",
+ rtas_ibm_configure_connector);
+}
+
+type_init(core_rtas_register_types)
diff --git a/src/hw/ppc/spapr_rtc.c b/src/hw/ppc/spapr_rtc.c
new file mode 100644
index 0000000..34b27db
--- /dev/null
+++ b/src/hw/ppc/spapr_rtc.c
@@ -0,0 +1,211 @@
+/*
+ * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
+ *
+ * RTAS Real Time Clock
+ *
+ * Copyright (c) 2010-2011 David Gibson, IBM Corporation.
+ * Copyright 2014 David Gibson, Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+#include "cpu.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+#include "hw/ppc/spapr.h"
+#include "qapi-event.h"
+
+#define SPAPR_RTC(obj) \
+ OBJECT_CHECK(sPAPRRTCState, (obj), TYPE_SPAPR_RTC)
+
+typedef struct sPAPRRTCState sPAPRRTCState;
+struct sPAPRRTCState {
+ /*< private >*/
+ SysBusDevice parent_obj;
+ int64_t ns_offset;
+};
+
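+/*
+ * The guest RTC is kept as a nanosecond offset from the host's rtc_clock,
+ * so reading it is simply "host clock + ns_offset" converted to struct tm.
+ */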
+void spapr_rtc_read(DeviceState *dev, struct tm *tm, uint32_t *ns)
+{
+ sPAPRRTCState *rtc = SPAPR_RTC(dev);
+ int64_t host_ns = qemu_clock_get_ns(rtc_clock);
+ int64_t guest_ns;
+ time_t guest_s;
+
+ assert(rtc);
+
+ guest_ns = host_ns + rtc->ns_offset;
+ guest_s = guest_ns / NANOSECONDS_PER_SECOND;
+
+ if (tm) {
+ gmtime_r(&guest_s, tm);
+ }
+ if (ns) {
+ *ns = guest_ns;
+ }
+}
+
+int spapr_rtc_import_offset(DeviceState *dev, int64_t legacy_offset)
+{
+ sPAPRRTCState *rtc;
+
+ if (!dev) {
+ return -ENODEV;
+ }
+
+ rtc = SPAPR_RTC(dev);
+
+ rtc->ns_offset = legacy_offset * NANOSECONDS_PER_SECOND;
+
+ return 0;
+}
+
+static void rtas_get_time_of_day(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ struct tm tm;
+ uint32_t ns;
+
+ if ((nargs != 0) || (nret != 8)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ if (!spapr->rtc) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ spapr_rtc_read(spapr->rtc, &tm, &ns);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, tm.tm_year + 1900);
+ rtas_st(rets, 2, tm.tm_mon + 1);
+ rtas_st(rets, 3, tm.tm_mday);
+ rtas_st(rets, 4, tm.tm_hour);
+ rtas_st(rets, 5, tm.tm_min);
+ rtas_st(rets, 6, tm.tm_sec);
+ rtas_st(rets, 7, ns);
+}
+
+static void rtas_set_time_of_day(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ sPAPRRTCState *rtc;
+ struct tm tm;
+ time_t new_s;
+ int64_t host_ns;
+
+ if ((nargs != 7) || (nret != 1)) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ if (!spapr->rtc) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
+
+ tm.tm_year = rtas_ld(args, 0) - 1900;
+ tm.tm_mon = rtas_ld(args, 1) - 1;
+ tm.tm_mday = rtas_ld(args, 2);
+ tm.tm_hour = rtas_ld(args, 3);
+ tm.tm_min = rtas_ld(args, 4);
+ tm.tm_sec = rtas_ld(args, 5);
+
+ new_s = mktimegm(&tm);
+ if (new_s == -1) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ /* Generate a monitor event for the change */
+ qapi_event_send_rtc_change(qemu_timedate_diff(&tm), &error_abort);
+
+ rtc = SPAPR_RTC(spapr->rtc);
+
+ host_ns = qemu_clock_get_ns(rtc_clock);
+
+ rtc->ns_offset = (new_s * NANOSECONDS_PER_SECOND) - host_ns;
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+}
+
+static void spapr_rtc_qom_date(Object *obj, struct tm *current_tm, Error **errp)
+{
+ spapr_rtc_read(DEVICE(obj), current_tm, NULL);
+}
+
+static void spapr_rtc_realize(DeviceState *dev, Error **errp)
+{
+ sPAPRRTCState *rtc = SPAPR_RTC(dev);
+ struct tm tm;
+ time_t host_s;
+ int64_t rtc_ns;
+
+ /* Initialize the RTAS RTC from host time */
+
+ qemu_get_timedate(&tm, 0);
+ host_s = mktimegm(&tm);
+ rtc_ns = qemu_clock_get_ns(rtc_clock);
+ rtc->ns_offset = host_s * NANOSECONDS_PER_SECOND - rtc_ns;
+
+ object_property_add_tm(OBJECT(rtc), "date", spapr_rtc_qom_date, NULL);
+}
+
+static const VMStateDescription vmstate_spapr_rtc = {
+ .name = "spapr/rtc",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT64(ns_offset, sPAPRRTCState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void spapr_rtc_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->realize = spapr_rtc_realize;
+ dc->vmsd = &vmstate_spapr_rtc;
+
+ spapr_rtas_register(RTAS_GET_TIME_OF_DAY, "get-time-of-day",
+ rtas_get_time_of_day);
+ spapr_rtas_register(RTAS_SET_TIME_OF_DAY, "set-time-of-day",
+ rtas_set_time_of_day);
+}
+
+static const TypeInfo spapr_rtc_info = {
+ .name = TYPE_SPAPR_RTC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(sPAPRRTCState),
+ .class_size = sizeof(XICSStateClass),
+ .class_init = spapr_rtc_class_init,
+};
+
+static void spapr_rtc_register_types(void)
+{
+ type_register_static(&spapr_rtc_info);
+}
+type_init(spapr_rtc_register_types)
diff --git a/src/hw/ppc/spapr_vio.c b/src/hw/ppc/spapr_vio.c
new file mode 100644
index 0000000..c51eb8e
--- /dev/null
+++ b/src/hw/ppc/spapr_vio.c
@@ -0,0 +1,705 @@
+/*
+ * QEMU sPAPR VIO code
+ *
+ * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
+ * Based on the s390 virtio bus code:
+ * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw/hw.h"
+#include "sysemu/sysemu.h"
+#include "hw/boards.h"
+#include "hw/loader.h"
+#include "elf.h"
+#include "hw/sysbus.h"
+#include "sysemu/kvm.h"
+#include "sysemu/device_tree.h"
+#include "kvm_ppc.h"
+
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_vio.h"
+#include "hw/ppc/xics.h"
+
+#include <libfdt.h>
+
+/* #define DEBUG_SPAPR */
+
+#ifdef DEBUG_SPAPR
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+static Property spapr_vio_props[] = {
+ DEFINE_PROP_UINT32("irq", VIOsPAPRDevice, irq, 0), \
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static char *spapr_vio_get_dev_name(DeviceState *qdev)
+{
+ VIOsPAPRDevice *dev = VIO_SPAPR_DEVICE(qdev);
+ VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);
+ char *name;
+
+ /* Device tree style name device@reg */
+ name = g_strdup_printf("%s@%x", pc->dt_name, dev->reg);
+
+ return name;
+}
+
+static void spapr_vio_bus_class_init(ObjectClass *klass, void *data)
+{
+ BusClass *k = BUS_CLASS(klass);
+
+ k->get_dev_path = spapr_vio_get_dev_name;
+ k->get_fw_dev_path = spapr_vio_get_dev_name;
+}
+
+static const TypeInfo spapr_vio_bus_info = {
+ .name = TYPE_SPAPR_VIO_BUS,
+ .parent = TYPE_BUS,
+ .class_init = spapr_vio_bus_class_init,
+ .instance_size = sizeof(VIOsPAPRBus),
+};
+
+VIOsPAPRDevice *spapr_vio_find_by_reg(VIOsPAPRBus *bus, uint32_t reg)
+{
+ BusChild *kid;
+ VIOsPAPRDevice *dev = NULL;
+
+ QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
+ dev = (VIOsPAPRDevice *)kid->child;
+ if (dev->reg == reg) {
+ return dev;
+ }
+ }
+
+ return NULL;
+}
+
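+/*
+ * Create the child node for a VIO device under /vdevice: "reg",
+ * device_type/compatible from the device class, "interrupts" and the
+ * "ibm,my-dma-window" property.
+ */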
+static int vio_make_devnode(VIOsPAPRDevice *dev,
+ void *fdt)
+{
+ VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);
+ int vdevice_off, node_off, ret;
+ char *dt_name;
+
+ vdevice_off = fdt_path_offset(fdt, "/vdevice");
+ if (vdevice_off < 0) {
+ return vdevice_off;
+ }
+
+ dt_name = spapr_vio_get_dev_name(DEVICE(dev));
+ node_off = fdt_add_subnode(fdt, vdevice_off, dt_name);
+ g_free(dt_name);
+ if (node_off < 0) {
+ return node_off;
+ }
+
+ ret = fdt_setprop_cell(fdt, node_off, "reg", dev->reg);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (pc->dt_type) {
+ ret = fdt_setprop_string(fdt, node_off, "device_type",
+ pc->dt_type);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (pc->dt_compatible) {
+ ret = fdt_setprop_string(fdt, node_off, "compatible",
+ pc->dt_compatible);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (dev->irq) {
+ uint32_t ints_prop[] = {cpu_to_be32(dev->irq), 0};
+
+ ret = fdt_setprop(fdt, node_off, "interrupts", ints_prop,
+ sizeof(ints_prop));
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ ret = spapr_tcet_dma_dt(fdt, node_off, "ibm,my-dma-window", dev->tcet);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (pc->devnode) {
+ ret = (pc->devnode)(dev, fdt, node_off);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return node_off;
+}
+
+/*
+ * CRQ handling
+ */
+static target_ulong h_reg_crq(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong reg = args[0];
+ target_ulong queue_addr = args[1];
+ target_ulong queue_len = args[2];
+ VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
+
+ if (!dev) {
+ hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg);
+ return H_PARAMETER;
+ }
+
+ /* We can't grok a queue size bigger than 256M for now */
+ if (queue_len < 0x1000 || queue_len > 0x10000000) {
+ hcall_dprintf("Queue size too small or too big (0x" TARGET_FMT_lx
+ ")\n", queue_len);
+ return H_PARAMETER;
+ }
+
+ /* Check queue alignment */
+ if (queue_addr & 0xfff) {
+ hcall_dprintf("Queue not aligned (0x" TARGET_FMT_lx ")\n", queue_addr);
+ return H_PARAMETER;
+ }
+
+ /* Check if device supports CRQs */
+ if (!dev->crq.SendFunc) {
+ hcall_dprintf("Device does not support CRQ\n");
+ return H_NOT_FOUND;
+ }
+
+ /* Already a queue ? */
+ if (dev->crq.qsize) {
+ hcall_dprintf("CRQ already registered\n");
+ return H_RESOURCE;
+ }
+ dev->crq.qladdr = queue_addr;
+ dev->crq.qsize = queue_len;
+ dev->crq.qnext = 0;
+
+ DPRINTF("CRQ for dev 0x" TARGET_FMT_lx " registered at 0x"
+ TARGET_FMT_lx "/0x" TARGET_FMT_lx "\n",
+ reg, queue_addr, queue_len);
+ return H_SUCCESS;
+}
+
+static target_ulong free_crq(VIOsPAPRDevice *dev)
+{
+ dev->crq.qladdr = 0;
+ dev->crq.qsize = 0;
+ dev->crq.qnext = 0;
+
+ DPRINTF("CRQ for dev 0x%" PRIx32 " freed\n", dev->reg);
+
+ return H_SUCCESS;
+}
+
+static target_ulong h_free_crq(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong reg = args[0];
+ VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
+
+ if (!dev) {
+ hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg);
+ return H_PARAMETER;
+ }
+
+ return free_crq(dev);
+}
+
+static target_ulong h_send_crq(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong reg = args[0];
+ target_ulong msg_hi = args[1];
+ target_ulong msg_lo = args[2];
+ VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
+ uint64_t crq_mangle[2];
+
+ if (!dev) {
+ hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg);
+ return H_PARAMETER;
+ }
+ crq_mangle[0] = cpu_to_be64(msg_hi);
+ crq_mangle[1] = cpu_to_be64(msg_lo);
+
+ if (dev->crq.SendFunc) {
+ return dev->crq.SendFunc(dev, (uint8_t *)crq_mangle);
+ }
+
+ return H_HARDWARE;
+}
+
+static target_ulong h_enable_crq(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong reg = args[0];
+ VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
+
+ if (!dev) {
+ hcall_dprintf("Unit 0x" TARGET_FMT_lx " does not exist\n", reg);
+ return H_PARAMETER;
+ }
+
+    return H_SUCCESS;
+}
+
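+/*
+ * Deliver one 16-byte entry into the guest's registered queue: the
+ * second half of the entry is DMAed first, a barrier is issued, and
+ * only then the first half (whose leading byte marks the entry valid),
+ * so the guest never sees a half-written entry.  If the guest enabled
+ * signalling via H_VIO_SIGNAL the device interrupt is pulsed as well.
+ */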
+/* Returns a negative value on error, 0 on success, or a positive value
+ * if the queue is full */
+int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq)
+{
+ int rc;
+ uint8_t byte;
+
+ if (!dev->crq.qsize) {
+ fprintf(stderr, "spapr_vio_send_creq on uninitialized queue\n");
+ return -1;
+ }
+
+    /* TODO: a fast path for KVM could write directly to the queue pages */
+ rc = spapr_vio_dma_read(dev, dev->crq.qladdr + dev->crq.qnext, &byte, 1);
+ if (rc) {
+ return rc;
+ }
+ if (byte != 0) {
+ return 1;
+ }
+
+ rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext + 8,
+ &crq[8], 8);
+ if (rc) {
+ return rc;
+ }
+
+ kvmppc_eieio();
+
+ rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext, crq, 8);
+ if (rc) {
+ return rc;
+ }
+
+ dev->crq.qnext = (dev->crq.qnext + 16) % dev->crq.qsize;
+
+ if (dev->signal_state & 1) {
+ qemu_irq_pulse(spapr_vio_qirq(dev));
+ }
+
+ return 0;
+}
+
+/* "quiesce" handling */
+
+static void spapr_vio_quiesce_one(VIOsPAPRDevice *dev)
+{
+ if (dev->tcet) {
+ device_reset(DEVICE(dev->tcet));
+ }
+ free_crq(dev);
+}
+
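+/*
+ * TCE bypass: with bypass enabled the alias of system memory
+ * (mrbypass) is switched on and the IOMMU region off, so the device's
+ * DMA address space maps guest RAM directly instead of translating
+ * through the TCE table.  Guests toggle this per device with the
+ * ibm,set-tce-bypass RTAS call below.
+ */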
+void spapr_vio_set_bypass(VIOsPAPRDevice *dev, bool bypass)
+{
+ if (!dev->tcet) {
+ return;
+ }
+
+ memory_region_set_enabled(&dev->mrbypass, bypass);
+ memory_region_set_enabled(spapr_tce_get_iommu(dev->tcet), !bypass);
+
+ dev->tcet->bypass = bypass;
+}
+
+static void rtas_set_tce_bypass(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token,
+ uint32_t nargs, target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ VIOsPAPRBus *bus = spapr->vio_bus;
+ VIOsPAPRDevice *dev;
+ uint32_t unit, enable;
+
+ if (nargs != 2) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+ unit = rtas_ld(args, 0);
+ enable = rtas_ld(args, 1);
+ dev = spapr_vio_find_by_reg(bus, unit);
+ if (!dev) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ if (!dev->tcet) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ spapr_vio_set_bypass(dev, !!enable);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+}
+
+static void rtas_quiesce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ uint32_t token,
+ uint32_t nargs, target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ VIOsPAPRBus *bus = spapr->vio_bus;
+ BusChild *kid;
+ VIOsPAPRDevice *dev = NULL;
+
+ if (nargs != 0) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
+ dev = (VIOsPAPRDevice *)kid->child;
+ spapr_vio_quiesce_one(dev);
+ }
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+}
+
+static VIOsPAPRDevice *reg_conflict(VIOsPAPRDevice *dev)
+{
+ VIOsPAPRBus *bus = DO_UPCAST(VIOsPAPRBus, bus, dev->qdev.parent_bus);
+ BusChild *kid;
+ VIOsPAPRDevice *other;
+
+ /*
+ * Check for a device other than the given one which is already
+ * using the requested address. We have to open code this because
+ * the given dev might already be in the list.
+ */
+ QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
+ other = VIO_SPAPR_DEVICE(kid->child);
+
+ if (other != dev && other->reg == dev->reg) {
+ return other;
+ }
+ }
+
+    return NULL;
+}
+
+static void spapr_vio_busdev_reset(DeviceState *qdev)
+{
+ VIOsPAPRDevice *dev = VIO_SPAPR_DEVICE(qdev);
+ VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);
+
+ /* Shut down the request queue and TCEs if necessary */
+ spapr_vio_quiesce_one(dev);
+
+ dev->signal_state = 0;
+
+ spapr_vio_set_bypass(dev, false);
+ if (pc->reset) {
+ pc->reset(dev);
+ }
+}
+
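+/*
+ * Realize a VIO device: with reg left at -1 the bus assigns the next
+ * free unit address (starting at 0x71000000), an interrupt is taken
+ * from XICS, and classes declaring an RTCE window get a private DMA
+ * address space backed by a TCE table plus a bypass alias of system
+ * memory.  A hypothetical command line such as
+ * "-device spapr-vty,chardev=con0,reg=0x71000010" takes the
+ * explicit-address path and fails if that reg is already claimed.
+ */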
+static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
+ VIOsPAPRDevice *dev = (VIOsPAPRDevice *)qdev;
+ VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);
+ char *id;
+
+ if (dev->reg != -1) {
+ /*
+ * Explicitly assigned address, just verify that no-one else
+         * is using it.  We have to open code this rather than using
+         * spapr_vio_find_by_reg() because dev itself is already in
+         * the list.
+ */
+ VIOsPAPRDevice *other = reg_conflict(dev);
+
+ if (other) {
+ error_setg(errp, "%s and %s devices conflict at address %#x",
+ object_get_typename(OBJECT(qdev)),
+ object_get_typename(OBJECT(&other->qdev)),
+ dev->reg);
+ return;
+ }
+ } else {
+ /* Need to assign an address */
+ VIOsPAPRBus *bus = DO_UPCAST(VIOsPAPRBus, bus, dev->qdev.parent_bus);
+
+ do {
+ dev->reg = bus->next_reg++;
+ } while (reg_conflict(dev));
+ }
+
+ /* Don't overwrite ids assigned on the command line */
+ if (!dev->qdev.id) {
+ id = spapr_vio_get_dev_name(DEVICE(dev));
+ dev->qdev.id = id;
+ }
+
+ dev->irq = xics_alloc(spapr->icp, 0, dev->irq, false);
+ if (!dev->irq) {
+ error_setg(errp, "can't allocate IRQ");
+ return;
+ }
+
+ if (pc->rtce_window_size) {
+ uint32_t liobn = SPAPR_VIO_LIOBN(dev->reg);
+
+ memory_region_init(&dev->mrroot, OBJECT(dev), "iommu-spapr-root",
+ ram_size);
+ memory_region_init_alias(&dev->mrbypass, OBJECT(dev),
+ "iommu-spapr-bypass", get_system_memory(),
+ 0, ram_size);
+ memory_region_add_subregion_overlap(&dev->mrroot, 0, &dev->mrbypass, 1);
+ address_space_init(&dev->as, &dev->mrroot, qdev->id);
+
+ dev->tcet = spapr_tce_new_table(qdev, liobn,
+ 0,
+ SPAPR_TCE_PAGE_SHIFT,
+ pc->rtce_window_size >>
+ SPAPR_TCE_PAGE_SHIFT, false);
+ dev->tcet->vdev = dev;
+ memory_region_add_subregion_overlap(&dev->mrroot, 0,
+ spapr_tce_get_iommu(dev->tcet), 2);
+ }
+
+ pc->realize(dev, errp);
+}
+
+static target_ulong h_vio_signal(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+ target_ulong opcode,
+ target_ulong *args)
+{
+ target_ulong reg = args[0];
+ target_ulong mode = args[1];
+ VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
+ VIOsPAPRDeviceClass *pc;
+
+ if (!dev) {
+ return H_PARAMETER;
+ }
+
+ pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);
+
+ if (mode & ~pc->signal_mask) {
+ return H_PARAMETER;
+ }
+
+ dev->signal_state = mode;
+
+ return H_SUCCESS;
+}
+
+VIOsPAPRBus *spapr_vio_bus_init(void)
+{
+ VIOsPAPRBus *bus;
+ BusState *qbus;
+ DeviceState *dev;
+
+ /* Create bridge device */
+ dev = qdev_create(NULL, "spapr-vio-bridge");
+ qdev_init_nofail(dev);
+
+ /* Create bus on bridge device */
+
+ qbus = qbus_create(TYPE_SPAPR_VIO_BUS, dev, "spapr-vio");
+ bus = DO_UPCAST(VIOsPAPRBus, bus, qbus);
+ bus->next_reg = 0x71000000;
+
+ /* hcall-vio */
+ spapr_register_hypercall(H_VIO_SIGNAL, h_vio_signal);
+
+ /* hcall-crq */
+ spapr_register_hypercall(H_REG_CRQ, h_reg_crq);
+ spapr_register_hypercall(H_FREE_CRQ, h_free_crq);
+ spapr_register_hypercall(H_SEND_CRQ, h_send_crq);
+ spapr_register_hypercall(H_ENABLE_CRQ, h_enable_crq);
+
+ /* RTAS calls */
+ spapr_rtas_register(RTAS_IBM_SET_TCE_BYPASS, "ibm,set-tce-bypass",
+ rtas_set_tce_bypass);
+ spapr_rtas_register(RTAS_QUIESCE, "quiesce", rtas_quiesce);
+
+ return bus;
+}
+
+/* The "vdevice" bridge that parents all sPAPR hcall-based VIO devices */
+
+static int spapr_vio_bridge_init(SysBusDevice *dev)
+{
+ /* nothing */
+ return 0;
+}
+
+static void spapr_vio_bridge_class_init(ObjectClass *klass, void *data)
+{
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->fw_name = "vdevice";
+ k->init = spapr_vio_bridge_init;
+}
+
+static const TypeInfo spapr_vio_bridge_info = {
+ .name = "spapr-vio-bridge",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SysBusDevice),
+ .class_init = spapr_vio_bridge_class_init,
+};
+
+const VMStateDescription vmstate_spapr_vio = {
+ .name = "spapr_vio",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ /* Sanity check */
+ VMSTATE_UINT32_EQUAL(reg, VIOsPAPRDevice),
+ VMSTATE_UINT32_EQUAL(irq, VIOsPAPRDevice),
+
+ /* General VIO device state */
+ VMSTATE_UINTTL(signal_state, VIOsPAPRDevice),
+ VMSTATE_UINT64(crq.qladdr, VIOsPAPRDevice),
+ VMSTATE_UINT32(crq.qsize, VIOsPAPRDevice),
+ VMSTATE_UINT32(crq.qnext, VIOsPAPRDevice),
+
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void vio_spapr_device_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *k = DEVICE_CLASS(klass);
+ k->realize = spapr_vio_busdev_realize;
+ k->reset = spapr_vio_busdev_reset;
+ k->bus_type = TYPE_SPAPR_VIO_BUS;
+ k->props = spapr_vio_props;
+}
+
+static const TypeInfo spapr_vio_type_info = {
+ .name = TYPE_VIO_SPAPR_DEVICE,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(VIOsPAPRDevice),
+ .abstract = true,
+ .class_size = sizeof(VIOsPAPRDeviceClass),
+ .class_init = vio_spapr_device_class_init,
+};
+
+static void spapr_vio_register_types(void)
+{
+ type_register_static(&spapr_vio_bus_info);
+ type_register_static(&spapr_vio_bridge_info);
+ type_register_static(&spapr_vio_type_info);
+}
+
+type_init(spapr_vio_register_types)
+
+static int compare_reg(const void *p1, const void *p2)
+{
+ VIOsPAPRDevice const *dev1, *dev2;
+
+ dev1 = (VIOsPAPRDevice *)*(DeviceState **)p1;
+ dev2 = (VIOsPAPRDevice *)*(DeviceState **)p2;
+
+ if (dev1->reg < dev2->reg) {
+ return -1;
+ }
+ if (dev1->reg == dev2->reg) {
+ return 0;
+ }
+
+ /* dev1->reg > dev2->reg */
+ return 1;
+}
+
+int spapr_populate_vdevice(VIOsPAPRBus *bus, void *fdt)
+{
+ DeviceState *qdev, **qdevs;
+ BusChild *kid;
+ int i, num, ret = 0;
+
+ /* Count qdevs on the bus list */
+ num = 0;
+ QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
+ num++;
+ }
+
+ /* Copy out into an array of pointers */
+ qdevs = g_malloc(sizeof(qdev) * num);
+ num = 0;
+ QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
+ qdevs[num++] = kid->child;
+ }
+
+ /* Sort the array */
+ qsort(qdevs, num, sizeof(qdev), compare_reg);
+
+    /* Hack alert: give the devices to libfdt in reverse order; we happen
+     * to know that this makes them appear in forward order in the tree. */
+ for (i = num - 1; i >= 0; i--) {
+ VIOsPAPRDevice *dev = (VIOsPAPRDevice *)(qdevs[i]);
+
+ ret = vio_make_devnode(dev, fdt);
+
+ if (ret < 0) {
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ g_free(qdevs);
+
+ return ret;
+}
+
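+/*
+ * Point /chosen/linux,stdout-path at the default vty's /vdevice node
+ * so the guest uses it as its console; the value typically looks like
+ * "/vdevice/vty@71000000" (the exact name comes from
+ * spapr_vio_get_dev_name()).
+ */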
+int spapr_populate_chosen_stdout(void *fdt, VIOsPAPRBus *bus)
+{
+ VIOsPAPRDevice *dev;
+ char *name, *path;
+ int ret, offset;
+
+ dev = spapr_vty_get_default(bus);
+    if (!dev) {
+        return 0;
+    }
+
+ offset = fdt_path_offset(fdt, "/chosen");
+ if (offset < 0) {
+ return offset;
+ }
+
+ name = spapr_vio_get_dev_name(DEVICE(dev));
+ path = g_strdup_printf("/vdevice/%s", name);
+
+ ret = fdt_setprop_string(fdt, offset, "linux,stdout-path", path);
+
+ g_free(name);
+ g_free(path);
+
+ return ret;
+}
diff --git a/src/hw/ppc/virtex_ml507.c b/src/hw/ppc/virtex_ml507.c
new file mode 100644
index 0000000..c2b5e44
--- /dev/null
+++ b/src/hw/ppc/virtex_ml507.c
@@ -0,0 +1,306 @@
+/*
+ * Model of Xilinx Virtex5 ML507 PPC-440 refdesign.
+ *
+ * Copyright (c) 2010 Edgar E. Iglesias.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "hw/sysbus.h"
+#include "hw/hw.h"
+#include "hw/char/serial.h"
+#include "hw/block/flash.h"
+#include "sysemu/sysemu.h"
+#include "hw/devices.h"
+#include "hw/boards.h"
+#include "sysemu/device_tree.h"
+#include "hw/loader.h"
+#include "elf.h"
+#include "qemu/error-report.h"
+#include "qemu/log.h"
+#include "exec/address-spaces.h"
+
+#include "hw/ppc/ppc.h"
+#include "hw/ppc/ppc4xx.h"
+#include "ppc405.h"
+
+#include "sysemu/block-backend.h"
+
+#define EPAPR_MAGIC (0x45504150)
+#define FLASH_SIZE (16 * 1024 * 1024)
+
+#define INTC_BASEADDR 0x81800000
+#define UART16550_BASEADDR 0x83e01003
+#define TIMER_BASEADDR 0x83c00000
+#define PFLASH_BASEADDR 0xfc000000
+
+#define TIMER_IRQ 3
+#define UART16550_IRQ 9
+
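+/*
+ * Address map used by this model: RAM at 0, the Xilinx interrupt
+ * controller at 0x81800000, the timer block at 0x83c00000, a 16550
+ * UART at 0x83e01003 and 16 MiB of CFI flash at 0xfc000000.  The UART
+ * and timer are wired to intc inputs 9 and 3 respectively.
+ */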
+static struct boot_info
+{
+ uint32_t bootstrap_pc;
+ uint32_t cmdline;
+ uint32_t fdt;
+ uint32_t ima_size;
+ void *vfdt;
+} boot_info;
+
+/* Create reset TLB entries for BookE, spanning the 32bit addr space. */
+static void mmubooke_create_initial_mapping(CPUPPCState *env,
+ target_ulong va,
+ hwaddr pa)
+{
+ ppcemb_tlb_t *tlb = &env->tlb.tlbe[0];
+
+ tlb->attr = 0;
+ tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
+ tlb->size = 1U << 31; /* up to 0x80000000 */
+ tlb->EPN = va & TARGET_PAGE_MASK;
+ tlb->RPN = pa & TARGET_PAGE_MASK;
+ tlb->PID = 0;
+
+ tlb = &env->tlb.tlbe[1];
+ tlb->attr = 0;
+ tlb->prot = PAGE_VALID | ((PAGE_READ | PAGE_WRITE | PAGE_EXEC) << 4);
+ tlb->size = 1U << 31; /* up to 0xffffffff */
+ tlb->EPN = 0x80000000 & TARGET_PAGE_MASK;
+ tlb->RPN = 0x80000000 & TARGET_PAGE_MASK;
+ tlb->PID = 0;
+}
+
+static PowerPCCPU *ppc440_init_xilinx(ram_addr_t *ram_size,
+ int do_init,
+ const char *cpu_model,
+ uint32_t sysclk)
+{
+ PowerPCCPU *cpu;
+ CPUPPCState *env;
+ qemu_irq *irqs;
+
+ cpu = cpu_ppc_init(cpu_model);
+ if (cpu == NULL) {
+ fprintf(stderr, "Unable to initialize CPU!\n");
+ exit(1);
+ }
+ env = &cpu->env;
+
+ ppc_booke_timers_init(cpu, sysclk, 0/* no flags */);
+
+ ppc_dcr_init(env, NULL, NULL);
+
+ /* interrupt controller */
+ irqs = g_malloc0(sizeof(qemu_irq) * PPCUIC_OUTPUT_NB);
+ irqs[PPCUIC_OUTPUT_INT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_INT];
+ irqs[PPCUIC_OUTPUT_CINT] = ((qemu_irq *)env->irq_inputs)[PPC40x_INPUT_CINT];
+ ppcuic_init(env, irqs, 0x0C0, 0, 1);
+ return cpu;
+}
+
+static void main_cpu_reset(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ struct boot_info *bi = env->load_info;
+
+ cpu_reset(CPU(cpu));
+ /* Linux Kernel Parameters (passing device tree):
+ * r3: pointer to the fdt
+ * r4: 0
+ * r5: 0
+ * r6: epapr magic
+ * r7: size of IMA in bytes
+ * r8: 0
+ * r9: 0
+ */
+ env->gpr[1] = (16<<20) - 8;
+ /* Provide a device-tree. */
+ env->gpr[3] = bi->fdt;
+ env->nip = bi->bootstrap_pc;
+
+ /* Create a mapping for the kernel. */
+ mmubooke_create_initial_mapping(env, 0, 0);
+ env->gpr[6] = tswap32(EPAPR_MAGIC);
+ env->gpr[7] = bi->ima_size;
+}
+
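+/*
+ * Load a device tree blob for the guest: the -dtb option wins, then a
+ * local "ppc.dtb", then the virtex-ml507.dtb found on the firmware
+ * search path.  /chosen is patched with the initrd range and the
+ * kernel command line before the blob is copied to guest memory at
+ * 'addr'; the blob size is returned, or 0 if no tree was found.
+ */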
+#define BINARY_DEVICE_TREE_FILE "virtex-ml507.dtb"
+static int xilinx_load_device_tree(hwaddr addr,
+ uint32_t ramsize,
+ hwaddr initrd_base,
+ hwaddr initrd_size,
+ const char *kernel_cmdline)
+{
+ char *path;
+ int fdt_size;
+ void *fdt = NULL;
+ int r;
+ const char *dtb_filename;
+
+ dtb_filename = qemu_opt_get(qemu_get_machine_opts(), "dtb");
+ if (dtb_filename) {
+ fdt = load_device_tree(dtb_filename, &fdt_size);
+ if (!fdt) {
+ error_report("Error while loading device tree file '%s'",
+ dtb_filename);
+ }
+ } else {
+ /* Try the local "ppc.dtb" override. */
+ fdt = load_device_tree("ppc.dtb", &fdt_size);
+ if (!fdt) {
+ path = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE);
+ if (path) {
+ fdt = load_device_tree(path, &fdt_size);
+ g_free(path);
+ }
+ }
+ }
+ if (!fdt) {
+ return 0;
+ }
+
+ r = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-start",
+ initrd_base);
+ if (r < 0) {
+ error_report("couldn't set /chosen/linux,initrd-start");
+ }
+
+ r = qemu_fdt_setprop_cell(fdt, "/chosen", "linux,initrd-end",
+ (initrd_base + initrd_size));
+ if (r < 0) {
+ error_report("couldn't set /chosen/linux,initrd-end");
+ }
+
+ r = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", kernel_cmdline);
+ if (r < 0)
+ fprintf(stderr, "couldn't set /chosen/bootargs\n");
+ cpu_physical_memory_write(addr, fdt, fdt_size);
+ return fdt_size;
+}
+
+static void virtex_init(MachineState *machine)
+{
+ ram_addr_t ram_size = machine->ram_size;
+ const char *kernel_filename = machine->kernel_filename;
+ const char *kernel_cmdline = machine->kernel_cmdline;
+ hwaddr initrd_base = 0;
+ int initrd_size = 0;
+ MemoryRegion *address_space_mem = get_system_memory();
+ DeviceState *dev;
+ PowerPCCPU *cpu;
+ CPUPPCState *env;
+ hwaddr ram_base = 0;
+ DriveInfo *dinfo;
+ MemoryRegion *phys_ram = g_new(MemoryRegion, 1);
+ qemu_irq irq[32], *cpu_irq;
+ int kernel_size;
+ int i;
+
+ /* init CPUs */
+ if (machine->cpu_model == NULL) {
+ machine->cpu_model = "440-Xilinx";
+ }
+
+ cpu = ppc440_init_xilinx(&ram_size, 1, machine->cpu_model, 400000000);
+ env = &cpu->env;
+ qemu_register_reset(main_cpu_reset, cpu);
+
+ memory_region_allocate_system_memory(phys_ram, NULL, "ram", ram_size);
+ memory_region_add_subregion(address_space_mem, ram_base, phys_ram);
+
+ dinfo = drive_get(IF_PFLASH, 0, 0);
+ pflash_cfi01_register(PFLASH_BASEADDR, NULL, "virtex.flash", FLASH_SIZE,
+ dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
+ (64 * 1024), FLASH_SIZE >> 16,
+ 1, 0x89, 0x18, 0x0000, 0x0, 1);
+
+ cpu_irq = (qemu_irq *) &env->irq_inputs[PPC40x_INPUT_INT];
+ dev = qdev_create(NULL, "xlnx.xps-intc");
+ qdev_prop_set_uint32(dev, "kind-of-intr", 0);
+ qdev_init_nofail(dev);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, cpu_irq[0]);
+ for (i = 0; i < 32; i++) {
+ irq[i] = qdev_get_gpio_in(dev, i);
+ }
+
+ serial_mm_init(address_space_mem, UART16550_BASEADDR, 2, irq[UART16550_IRQ],
+ 115200, serial_hds[0], DEVICE_LITTLE_ENDIAN);
+
+    /* 2 timers at TIMER_IRQ (3) @ 62 MHz. */
+ dev = qdev_create(NULL, "xlnx.xps-timer");
+ qdev_prop_set_uint32(dev, "one-timer-only", 0);
+ qdev_prop_set_uint32(dev, "clock-frequency", 62 * 1000000);
+ qdev_init_nofail(dev);
+ sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, TIMER_BASEADDR);
+ sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ]);
+
+ if (kernel_filename) {
+ uint64_t entry, low, high;
+ hwaddr boot_offset;
+
+ /* Boots a kernel elf binary. */
+ kernel_size = load_elf(kernel_filename, NULL, NULL,
+ &entry, &low, &high, 1, PPC_ELF_MACHINE, 0);
+ boot_info.bootstrap_pc = entry & 0x00ffffff;
+
+ if (kernel_size < 0) {
+ boot_offset = 0x1200000;
+ /* If we failed loading ELF's try a raw image. */
+ kernel_size = load_image_targphys(kernel_filename,
+ boot_offset,
+ ram_size);
+ boot_info.bootstrap_pc = boot_offset;
+ high = boot_info.bootstrap_pc + kernel_size + 8192;
+ }
+
+ boot_info.ima_size = kernel_size;
+
+ /* Load initrd. */
+ if (machine->initrd_filename) {
+ initrd_base = high = ROUND_UP(high, 4);
+ initrd_size = load_image_targphys(machine->initrd_filename,
+ high, ram_size - high);
+
+ if (initrd_size < 0) {
+ error_report("couldn't load ram disk '%s'",
+ machine->initrd_filename);
+ exit(1);
+ }
+ high = ROUND_UP(high + initrd_size, 4);
+ }
+
+ /* Provide a device-tree. */
+ boot_info.fdt = high + (8192 * 2);
+ boot_info.fdt &= ~8191;
+
+ xilinx_load_device_tree(boot_info.fdt, ram_size,
+ initrd_base, initrd_size,
+ kernel_cmdline);
+ }
+ env->load_info = &boot_info;
+}
+
+static void virtex_machine_init(MachineClass *mc)
+{
+ mc->desc = "Xilinx Virtex ML507 reference design";
+ mc->init = virtex_init;
+}
+
+DEFINE_MACHINE("virtex-ml507", virtex_machine_init)