Diffstat (limited to 'src/hw/s390x')
-rw-r--r--  src/hw/s390x/Makefile.objs          13
-rw-r--r--  src/hw/s390x/css.c                1571
-rw-r--r--  src/hw/s390x/css.h                 111
-rw-r--r--  src/hw/s390x/event-facility.c      447
-rw-r--r--  src/hw/s390x/ipl.c                 296
-rw-r--r--  src/hw/s390x/ipl.h                  53
-rw-r--r--  src/hw/s390x/s390-pci-bus.c        618
-rw-r--r--  src/hw/s390x/s390-pci-bus.h        253
-rw-r--r--  src/hw/s390x/s390-pci-inst.c       844
-rw-r--r--  src/hw/s390x/s390-pci-inst.h       289
-rw-r--r--  src/hw/s390x/s390-skeys-kvm.c       75
-rw-r--r--  src/hw/s390x/s390-skeys.c          415
-rw-r--r--  src/hw/s390x/s390-virtio-bus.c     758
-rw-r--r--  src/hw/s390x/s390-virtio-bus.h     186
-rw-r--r--  src/hw/s390x/s390-virtio-ccw.c     318
-rw-r--r--  src/hw/s390x/s390-virtio-hcall.c    40
-rw-r--r--  src/hw/s390x/s390-virtio.c         373
-rw-r--r--  src/hw/s390x/s390-virtio.h          32
-rw-r--r--  src/hw/s390x/sclp.c                615
-rw-r--r--  src/hw/s390x/sclpcpu.c             100
-rw-r--r--  src/hw/s390x/sclpquiesce.c         142
-rw-r--r--  src/hw/s390x/virtio-ccw.c         1988
-rw-r--r--  src/hw/s390x/virtio-ccw.h          218
23 files changed, 9755 insertions, 0 deletions
diff --git a/src/hw/s390x/Makefile.objs b/src/hw/s390x/Makefile.objs
new file mode 100644
index 0000000..527d754
--- /dev/null
+++ b/src/hw/s390x/Makefile.objs
@@ -0,0 +1,13 @@
+obj-y = s390-virtio-bus.o s390-virtio.o
+obj-y += s390-virtio-hcall.o
+obj-y += sclp.o
+obj-y += event-facility.o
+obj-y += sclpquiesce.o
+obj-y += sclpcpu.o
+obj-y += ipl.o
+obj-y += css.o
+obj-y += s390-virtio-ccw.o
+obj-y += virtio-ccw.o
+obj-y += s390-pci-bus.o s390-pci-inst.o
+obj-y += s390-skeys.o
+obj-$(CONFIG_KVM) += s390-skeys-kvm.o
diff --git a/src/hw/s390x/css.c b/src/hw/s390x/css.c
new file mode 100644
index 0000000..c6ca8be
--- /dev/null
+++ b/src/hw/s390x/css.c
@@ -0,0 +1,1571 @@
+/*
+ * Channel subsystem base support.
+ *
+ * Copyright 2012 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include <hw/qdev.h>
+#include "qemu/bitops.h"
+#include "exec/address-spaces.h"
+#include "cpu.h"
+#include "ioinst.h"
+#include "css.h"
+#include "trace.h"
+#include "hw/s390x/s390_flic.h"
+
+typedef struct CrwContainer {
+ CRW crw;
+ QTAILQ_ENTRY(CrwContainer) sibling;
+} CrwContainer;
+
+typedef struct ChpInfo {
+ uint8_t in_use;
+ uint8_t type;
+ uint8_t is_virtual;
+} ChpInfo;
+
+typedef struct SubchSet {
+ SubchDev *sch[MAX_SCHID + 1];
+ unsigned long schids_used[BITS_TO_LONGS(MAX_SCHID + 1)];
+ unsigned long devnos_used[BITS_TO_LONGS(MAX_SCHID + 1)];
+} SubchSet;
+
+typedef struct CssImage {
+ SubchSet *sch_set[MAX_SSID + 1];
+ ChpInfo chpids[MAX_CHPID + 1];
+} CssImage;
+
+typedef struct IoAdapter {
+ uint32_t id;
+ uint8_t type;
+ uint8_t isc;
+ QTAILQ_ENTRY(IoAdapter) sibling;
+} IoAdapter;
+
+typedef struct ChannelSubSys {
+ QTAILQ_HEAD(, CrwContainer) pending_crws;
+ bool do_crw_mchk;
+ bool crws_lost;
+ uint8_t max_cssid;
+ uint8_t max_ssid;
+ bool chnmon_active;
+ uint64_t chnmon_area;
+ CssImage *css[MAX_CSSID + 1];
+ uint8_t default_cssid;
+ QTAILQ_HEAD(, IoAdapter) io_adapters;
+} ChannelSubSys;
+
+static ChannelSubSys *channel_subsys;
+
+int css_create_css_image(uint8_t cssid, bool default_image)
+{
+ trace_css_new_image(cssid, default_image ? "(default)" : "");
+ if (cssid > MAX_CSSID) {
+ return -EINVAL;
+ }
+ if (channel_subsys->css[cssid]) {
+ return -EBUSY;
+ }
+ channel_subsys->css[cssid] = g_malloc0(sizeof(CssImage));
+ if (default_image) {
+ channel_subsys->default_cssid = cssid;
+ }
+ return 0;
+}
+
+int css_register_io_adapter(uint8_t type, uint8_t isc, bool swap,
+ bool maskable, uint32_t *id)
+{
+ IoAdapter *adapter;
+ bool found = false;
+ int ret;
+ S390FLICState *fs = s390_get_flic();
+ S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
+
+ *id = 0;
+ QTAILQ_FOREACH(adapter, &channel_subsys->io_adapters, sibling) {
+ if ((adapter->type == type) && (adapter->isc == isc)) {
+ *id = adapter->id;
+ found = true;
+ ret = 0;
+ break;
+ }
+ if (adapter->id >= *id) {
+ *id = adapter->id + 1;
+ }
+ }
+ if (found) {
+ goto out;
+ }
+ adapter = g_new0(IoAdapter, 1);
+ ret = fsc->register_io_adapter(fs, *id, isc, swap, maskable);
+ if (ret == 0) {
+ adapter->id = *id;
+ adapter->isc = isc;
+ adapter->type = type;
+ QTAILQ_INSERT_TAIL(&channel_subsys->io_adapters, adapter, sibling);
+ } else {
+ g_free(adapter);
+ fprintf(stderr, "Unexpected error %d when registering adapter %d\n",
+ ret, *id);
+ }
+out:
+ return ret;
+}
+
+uint16_t css_build_subchannel_id(SubchDev *sch)
+{
+ if (channel_subsys->max_cssid > 0) {
+ return (sch->cssid << 8) | (1 << 3) | (sch->ssid << 1) | 1;
+ }
+ return (sch->ssid << 1) | 1;
+}
+
+static void css_inject_io_interrupt(SubchDev *sch)
+{
+ uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;
+
+ trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
+ sch->curr_status.pmcw.intparm, isc, "");
+ s390_io_interrupt(css_build_subchannel_id(sch),
+ sch->schid,
+ sch->curr_status.pmcw.intparm,
+ isc << 27);
+}
+
+void css_conditional_io_interrupt(SubchDev *sch)
+{
+ /*
+ * If the subchannel is not currently status pending, make it pending
+ * with alert status.
+ */
+ if (!(sch->curr_status.scsw.ctrl & SCSW_STCTL_STATUS_PEND)) {
+ uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;
+
+ trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
+ sch->curr_status.pmcw.intparm, isc,
+ "(unsolicited)");
+ sch->curr_status.scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ sch->curr_status.scsw.ctrl |=
+ SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
+ /* Inject an I/O interrupt. */
+ s390_io_interrupt(css_build_subchannel_id(sch),
+ sch->schid,
+ sch->curr_status.pmcw.intparm,
+ isc << 27);
+ }
+}
+
+void css_adapter_interrupt(uint8_t isc)
+{
+ uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI;
+
+ trace_css_adapter_interrupt(isc);
+ s390_io_interrupt(0, 0, 0, io_int_word);
+}
+
+static void sch_handle_clear_func(SubchDev *sch)
+{
+ PMCW *p = &sch->curr_status.pmcw;
+ SCSW *s = &sch->curr_status.scsw;
+ int path;
+
+ /* Path management: In our simple css, we always choose the only path. */
+ path = 0x80;
+
+ /* Reset values prior to 'issuing the clear signal'. */
+ p->lpum = 0;
+ p->pom = 0xff;
+ s->flags &= ~SCSW_FLAGS_MASK_PNO;
+
+ /* We always 'attempt to issue the clear signal', and we always succeed. */
+ sch->channel_prog = 0x0;
+ sch->last_cmd_valid = false;
+ s->ctrl &= ~SCSW_ACTL_CLEAR_PEND;
+ s->ctrl |= SCSW_STCTL_STATUS_PEND;
+
+ s->dstat = 0;
+ s->cstat = 0;
+ p->lpum = path;
+
+}
+
+static void sch_handle_halt_func(SubchDev *sch)
+{
+
+ PMCW *p = &sch->curr_status.pmcw;
+ SCSW *s = &sch->curr_status.scsw;
+ hwaddr curr_ccw = sch->channel_prog;
+ int path;
+
+ /* Path management: In our simple css, we always choose the only path. */
+ path = 0x80;
+
+ /* We always 'attempt to issue the halt signal', and we always succeed. */
+ sch->channel_prog = 0x0;
+ sch->last_cmd_valid = false;
+ s->ctrl &= ~SCSW_ACTL_HALT_PEND;
+ s->ctrl |= SCSW_STCTL_STATUS_PEND;
+
+ if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
+ !((s->ctrl & SCSW_ACTL_START_PEND) ||
+ (s->ctrl & SCSW_ACTL_SUSP))) {
+ s->dstat = SCSW_DSTAT_DEVICE_END;
+ }
+ if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
+ (s->ctrl & SCSW_ACTL_SUSP)) {
+ s->cpa = curr_ccw + 8;
+ }
+ s->cstat = 0;
+ p->lpum = path;
+
+}
+
+static void copy_sense_id_to_guest(SenseId *dest, SenseId *src)
+{
+ int i;
+
+ dest->reserved = src->reserved;
+ dest->cu_type = cpu_to_be16(src->cu_type);
+ dest->cu_model = src->cu_model;
+ dest->dev_type = cpu_to_be16(src->dev_type);
+ dest->dev_model = src->dev_model;
+ dest->unused = src->unused;
+ for (i = 0; i < ARRAY_SIZE(dest->ciw); i++) {
+ dest->ciw[i].type = src->ciw[i].type;
+ dest->ciw[i].command = src->ciw[i].command;
+ dest->ciw[i].count = cpu_to_be16(src->ciw[i].count);
+ }
+}
+
+static CCW1 copy_ccw_from_guest(hwaddr addr, bool fmt1)
+{
+ CCW0 tmp0;
+ CCW1 tmp1;
+ CCW1 ret;
+
+ if (fmt1) {
+ cpu_physical_memory_read(addr, &tmp1, sizeof(tmp1));
+ ret.cmd_code = tmp1.cmd_code;
+ ret.flags = tmp1.flags;
+ ret.count = be16_to_cpu(tmp1.count);
+ ret.cda = be32_to_cpu(tmp1.cda);
+ } else {
+ cpu_physical_memory_read(addr, &tmp0, sizeof(tmp0));
+ ret.cmd_code = tmp0.cmd_code;
+ ret.flags = tmp0.flags;
+ ret.count = be16_to_cpu(tmp0.count);
+ ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16);
+ if ((ret.cmd_code & 0x0f) == CCW_CMD_TIC) {
+ ret.cmd_code &= 0x0f;
+ }
+ }
+ return ret;
+}
+
+static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr)
+{
+ int ret;
+ bool check_len;
+ int len;
+ CCW1 ccw;
+
+ if (!ccw_addr) {
+ return -EIO;
+ }
+
+ /* Translate everything to format-1 ccws - the information is the same. */
+ ccw = copy_ccw_from_guest(ccw_addr, sch->ccw_fmt_1);
+
+ /* Check for invalid command codes. */
+ if ((ccw.cmd_code & 0x0f) == 0) {
+ return -EINVAL;
+ }
+ if (((ccw.cmd_code & 0x0f) == CCW_CMD_TIC) &&
+ ((ccw.cmd_code & 0xf0) != 0)) {
+ return -EINVAL;
+ }
+ if (!sch->ccw_fmt_1 && (ccw.count == 0) &&
+ (ccw.cmd_code != CCW_CMD_TIC)) {
+ return -EINVAL;
+ }
+
+ if (ccw.flags & CCW_FLAG_SUSPEND) {
+ return -EINPROGRESS;
+ }
+
+ check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));
+
+ if (!ccw.cda) {
+ if (sch->ccw_no_data_cnt == 255) {
+ return -EINVAL;
+ }
+ sch->ccw_no_data_cnt++;
+ }
+
+ /* Look at the command. */
+ switch (ccw.cmd_code) {
+ case CCW_CMD_NOOP:
+ /* Nothing to do. */
+ ret = 0;
+ break;
+ case CCW_CMD_BASIC_SENSE:
+ if (check_len) {
+ if (ccw.count != sizeof(sch->sense_data)) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ len = MIN(ccw.count, sizeof(sch->sense_data));
+ cpu_physical_memory_write(ccw.cda, sch->sense_data, len);
+ sch->curr_status.scsw.count = ccw.count - len;
+ memset(sch->sense_data, 0, sizeof(sch->sense_data));
+ ret = 0;
+ break;
+ case CCW_CMD_SENSE_ID:
+ {
+ SenseId sense_id;
+
+ copy_sense_id_to_guest(&sense_id, &sch->id);
+ /* Sense ID information is device specific. */
+ if (check_len) {
+ if (ccw.count != sizeof(sense_id)) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ len = MIN(ccw.count, sizeof(sense_id));
+ /*
+ * Only indicate 0xff in the first sense byte if we actually
+ * have enough room to store at least bytes 0-3.
+ */
+ if (len >= 4) {
+ sense_id.reserved = 0xff;
+ } else {
+ sense_id.reserved = 0;
+ }
+ cpu_physical_memory_write(ccw.cda, &sense_id, len);
+ sch->curr_status.scsw.count = ccw.count - len;
+ ret = 0;
+ break;
+ }
+ case CCW_CMD_TIC:
+ if (sch->last_cmd_valid && (sch->last_cmd.cmd_code == CCW_CMD_TIC)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (ccw.flags & (CCW_FLAG_CC | CCW_FLAG_DC)) {
+ ret = -EINVAL;
+ break;
+ }
+ sch->channel_prog = ccw.cda;
+ ret = -EAGAIN;
+ break;
+ default:
+ if (sch->ccw_cb) {
+ /* Handle device specific commands. */
+ ret = sch->ccw_cb(sch, ccw);
+ } else {
+ ret = -ENOSYS;
+ }
+ break;
+ }
+ sch->last_cmd = ccw;
+ sch->last_cmd_valid = true;
+ if (ret == 0) {
+ if (ccw.flags & CCW_FLAG_CC) {
+ sch->channel_prog += 8;
+ ret = -EAGAIN;
+ }
+ }
+
+ return ret;
+}
+
+static void sch_handle_start_func(SubchDev *sch, ORB *orb)
+{
+
+ PMCW *p = &sch->curr_status.pmcw;
+ SCSW *s = &sch->curr_status.scsw;
+ int path;
+ int ret;
+
+ /* Path management: In our simple css, we always choose the only path. */
+ path = 0x80;
+
+ if (!(s->ctrl & SCSW_ACTL_SUSP)) {
+ s->cstat = 0;
+ s->dstat = 0;
+ /* Look at the orb and try to execute the channel program. */
+ assert(orb != NULL); /* resume does not pass an orb */
+ p->intparm = orb->intparm;
+ if (!(orb->lpm & path)) {
+ /* Generate a deferred cc 3 condition. */
+ s->flags |= SCSW_FLAGS_MASK_CC;
+ s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ s->ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
+ return;
+ }
+ sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT);
+ sch->ccw_no_data_cnt = 0;
+ } else {
+ s->ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND);
+ }
+ sch->last_cmd_valid = false;
+ do {
+ ret = css_interpret_ccw(sch, sch->channel_prog);
+ switch (ret) {
+ case -EAGAIN:
+ /* ccw chain, continue processing */
+ break;
+ case 0:
+ /* success */
+ s->ctrl &= ~SCSW_ACTL_START_PEND;
+ s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
+ SCSW_STCTL_STATUS_PEND;
+ s->dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END;
+ s->cpa = sch->channel_prog + 8;
+ break;
+ case -ENOSYS:
+ /* unsupported command, generate unit check (command reject) */
+ s->ctrl &= ~SCSW_ACTL_START_PEND;
+ s->dstat = SCSW_DSTAT_UNIT_CHECK;
+ /* Set sense bit 0 in ecw0. */
+ sch->sense_data[0] = 0x80;
+ s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
+ SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
+ s->cpa = sch->channel_prog + 8;
+ break;
+ case -EFAULT:
+ /* memory problem, generate channel data check */
+ s->ctrl &= ~SCSW_ACTL_START_PEND;
+ s->cstat = SCSW_CSTAT_DATA_CHECK;
+ s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
+ SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
+ s->cpa = sch->channel_prog + 8;
+ break;
+ case -EBUSY:
+ /* subchannel busy, generate deferred cc 1 */
+ s->flags &= ~SCSW_FLAGS_MASK_CC;
+ s->flags |= (1 << 8);
+ s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ s->ctrl |= SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
+ break;
+ case -EINPROGRESS:
+ /* channel program has been suspended */
+ s->ctrl &= ~SCSW_ACTL_START_PEND;
+ s->ctrl |= SCSW_ACTL_SUSP;
+ break;
+ default:
+ /* error, generate channel program check */
+ s->ctrl &= ~SCSW_ACTL_START_PEND;
+ s->cstat = SCSW_CSTAT_PROG_CHECK;
+ s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
+ SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
+ s->cpa = sch->channel_prog + 8;
+ break;
+ }
+ } while (ret == -EAGAIN);
+
+}
+
+/*
+ * On real machines, this would run asynchronously to the main vcpus.
+ * We might want to make some parts of the ssch handling (interpreting
+ * read/writes) asynchronous later on if we start supporting more than
+ * our current very simple devices.
+ */
+static void do_subchannel_work(SubchDev *sch, ORB *orb)
+{
+
+ SCSW *s = &sch->curr_status.scsw;
+
+ if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
+ sch_handle_clear_func(sch);
+ } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
+ sch_handle_halt_func(sch);
+ } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
+ sch_handle_start_func(sch, orb);
+ } else {
+ /* Cannot happen. */
+ return;
+ }
+ css_inject_io_interrupt(sch);
+}
+
+static void copy_pmcw_to_guest(PMCW *dest, const PMCW *src)
+{
+ int i;
+
+ dest->intparm = cpu_to_be32(src->intparm);
+ dest->flags = cpu_to_be16(src->flags);
+ dest->devno = cpu_to_be16(src->devno);
+ dest->lpm = src->lpm;
+ dest->pnom = src->pnom;
+ dest->lpum = src->lpum;
+ dest->pim = src->pim;
+ dest->mbi = cpu_to_be16(src->mbi);
+ dest->pom = src->pom;
+ dest->pam = src->pam;
+ for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
+ dest->chpid[i] = src->chpid[i];
+ }
+ dest->chars = cpu_to_be32(src->chars);
+}
+
+static void copy_scsw_to_guest(SCSW *dest, const SCSW *src)
+{
+ dest->flags = cpu_to_be16(src->flags);
+ dest->ctrl = cpu_to_be16(src->ctrl);
+ dest->cpa = cpu_to_be32(src->cpa);
+ dest->dstat = src->dstat;
+ dest->cstat = src->cstat;
+ dest->count = cpu_to_be16(src->count);
+}
+
+static void copy_schib_to_guest(SCHIB *dest, const SCHIB *src)
+{
+ int i;
+
+ copy_pmcw_to_guest(&dest->pmcw, &src->pmcw);
+ copy_scsw_to_guest(&dest->scsw, &src->scsw);
+ dest->mba = cpu_to_be64(src->mba);
+ for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
+ dest->mda[i] = src->mda[i];
+ }
+}
+
+int css_do_stsch(SubchDev *sch, SCHIB *schib)
+{
+ /* Use current status. */
+ copy_schib_to_guest(schib, &sch->curr_status);
+ return 0;
+}
+
+static void copy_pmcw_from_guest(PMCW *dest, const PMCW *src)
+{
+ int i;
+
+ dest->intparm = be32_to_cpu(src->intparm);
+ dest->flags = be16_to_cpu(src->flags);
+ dest->devno = be16_to_cpu(src->devno);
+ dest->lpm = src->lpm;
+ dest->pnom = src->pnom;
+ dest->lpum = src->lpum;
+ dest->pim = src->pim;
+ dest->mbi = be16_to_cpu(src->mbi);
+ dest->pom = src->pom;
+ dest->pam = src->pam;
+ for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
+ dest->chpid[i] = src->chpid[i];
+ }
+ dest->chars = be32_to_cpu(src->chars);
+}
+
+static void copy_scsw_from_guest(SCSW *dest, const SCSW *src)
+{
+ dest->flags = be16_to_cpu(src->flags);
+ dest->ctrl = be16_to_cpu(src->ctrl);
+ dest->cpa = be32_to_cpu(src->cpa);
+ dest->dstat = src->dstat;
+ dest->cstat = src->cstat;
+ dest->count = be16_to_cpu(src->count);
+}
+
+static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src)
+{
+ int i;
+
+ copy_pmcw_from_guest(&dest->pmcw, &src->pmcw);
+ copy_scsw_from_guest(&dest->scsw, &src->scsw);
+ dest->mba = be64_to_cpu(src->mba);
+ for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
+ dest->mda[i] = src->mda[i];
+ }
+}
+
+int css_do_msch(SubchDev *sch, const SCHIB *orig_schib)
+{
+ SCSW *s = &sch->curr_status.scsw;
+ PMCW *p = &sch->curr_status.pmcw;
+ uint16_t oldflags;
+ int ret;
+ SCHIB schib;
+
+ if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_DNV)) {
+ ret = 0;
+ goto out;
+ }
+
+ if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
+ ret = -EINPROGRESS;
+ goto out;
+ }
+
+ if (s->ctrl &
+ (SCSW_FCTL_START_FUNC|SCSW_FCTL_HALT_FUNC|SCSW_FCTL_CLEAR_FUNC)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ copy_schib_from_guest(&schib, orig_schib);
+ /* Only update the program-modifiable fields. */
+ p->intparm = schib.pmcw.intparm;
+ oldflags = p->flags;
+ p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
+ PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
+ PMCW_FLAGS_MASK_MP);
+ p->flags |= schib.pmcw.flags &
+ (PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
+ PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
+ PMCW_FLAGS_MASK_MP);
+ p->lpm = schib.pmcw.lpm;
+ p->mbi = schib.pmcw.mbi;
+ p->pom = schib.pmcw.pom;
+ p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
+ p->chars |= schib.pmcw.chars &
+ (PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
+ sch->curr_status.mba = schib.mba;
+
+ /* Has the channel been disabled? */
+ if (sch->disable_cb && (oldflags & PMCW_FLAGS_MASK_ENA) != 0
+ && (p->flags & PMCW_FLAGS_MASK_ENA) == 0) {
+ sch->disable_cb(sch);
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int css_do_xsch(SubchDev *sch)
+{
+ SCSW *s = &sch->curr_status.scsw;
+ PMCW *p = &sch->curr_status.pmcw;
+ int ret;
+
+ if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (!(s->ctrl & SCSW_CTRL_MASK_FCTL) ||
+ ((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
+ (!(s->ctrl &
+ (SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP))) ||
+ (s->ctrl & SCSW_ACTL_SUBCH_ACTIVE)) {
+ ret = -EINPROGRESS;
+ goto out;
+ }
+
+ if (s->ctrl & SCSW_CTRL_MASK_STCTL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Cancel the current operation. */
+ s->ctrl &= ~(SCSW_FCTL_START_FUNC |
+ SCSW_ACTL_RESUME_PEND |
+ SCSW_ACTL_START_PEND |
+ SCSW_ACTL_SUSP);
+ sch->channel_prog = 0x0;
+ sch->last_cmd_valid = false;
+ s->dstat = 0;
+ s->cstat = 0;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int css_do_csch(SubchDev *sch)
+{
+ SCSW *s = &sch->curr_status.scsw;
+ PMCW *p = &sch->curr_status.pmcw;
+ int ret;
+
+ if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Trigger the clear function. */
+ s->ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL);
+ s->ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND;
+
+ do_subchannel_work(sch, NULL);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int css_do_hsch(SubchDev *sch)
+{
+ SCSW *s = &sch->curr_status.scsw;
+ PMCW *p = &sch->curr_status.pmcw;
+ int ret;
+
+ if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) ||
+ (s->ctrl & (SCSW_STCTL_PRIMARY |
+ SCSW_STCTL_SECONDARY |
+ SCSW_STCTL_ALERT))) {
+ ret = -EINPROGRESS;
+ goto out;
+ }
+
+ if (s->ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Trigger the halt function. */
+ s->ctrl |= SCSW_FCTL_HALT_FUNC;
+ s->ctrl &= ~SCSW_FCTL_START_FUNC;
+ if (((s->ctrl & SCSW_CTRL_MASK_ACTL) ==
+ (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) &&
+ ((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_INTERMEDIATE)) {
+ s->ctrl &= ~SCSW_STCTL_STATUS_PEND;
+ }
+ s->ctrl |= SCSW_ACTL_HALT_PEND;
+
+ do_subchannel_work(sch, NULL);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static void css_update_chnmon(SubchDev *sch)
+{
+ if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_MME)) {
+ /* Not active. */
+ return;
+ }
+ /* The counter is conveniently located at the beginning of the struct. */
+ if (sch->curr_status.pmcw.chars & PMCW_CHARS_MASK_MBFC) {
+ /* Format 1, per-subchannel area. */
+ uint32_t count;
+
+ count = address_space_ldl(&address_space_memory,
+ sch->curr_status.mba,
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ count++;
+ address_space_stl(&address_space_memory, sch->curr_status.mba, count,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ } else {
+ /* Format 0, global area. */
+ uint32_t offset;
+ uint16_t count;
+
+ offset = sch->curr_status.pmcw.mbi << 5;
+ count = address_space_lduw(&address_space_memory,
+ channel_subsys->chnmon_area + offset,
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ count++;
+ address_space_stw(&address_space_memory,
+ channel_subsys->chnmon_area + offset, count,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ }
+}
+
+int css_do_ssch(SubchDev *sch, ORB *orb)
+{
+ SCSW *s = &sch->curr_status.scsw;
+ PMCW *p = &sch->curr_status.pmcw;
+ int ret;
+
+ if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
+ ret = -EINPROGRESS;
+ goto out;
+ }
+
+ if (s->ctrl & (SCSW_FCTL_START_FUNC |
+ SCSW_FCTL_HALT_FUNC |
+ SCSW_FCTL_CLEAR_FUNC)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* If monitoring is active, update counter. */
+ if (channel_subsys->chnmon_active) {
+ css_update_chnmon(sch);
+ }
+ sch->channel_prog = orb->cpa;
+ /* Trigger the start function. */
+ s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND);
+ s->flags &= ~SCSW_FLAGS_MASK_PNO;
+
+ do_subchannel_work(sch, orb);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw,
+ int *irb_len)
+{
+ int i;
+ uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL;
+ uint16_t actl = src->scsw.ctrl & SCSW_CTRL_MASK_ACTL;
+
+ copy_scsw_to_guest(&dest->scsw, &src->scsw);
+
+ for (i = 0; i < ARRAY_SIZE(dest->esw); i++) {
+ dest->esw[i] = cpu_to_be32(src->esw[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) {
+ dest->ecw[i] = cpu_to_be32(src->ecw[i]);
+ }
+ *irb_len = sizeof(*dest) - sizeof(dest->emw);
+
+ /* extended measurements enabled? */
+ if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) ||
+ !(pmcw->flags & PMCW_FLAGS_MASK_TF) ||
+ !(pmcw->chars & PMCW_CHARS_MASK_XMWME)) {
+ return;
+ }
+ /* extended measurements pending? */
+ if (!(stctl & SCSW_STCTL_STATUS_PEND)) {
+ return;
+ }
+ if ((stctl & SCSW_STCTL_PRIMARY) ||
+ (stctl == SCSW_STCTL_SECONDARY) ||
+ ((stctl & SCSW_STCTL_INTERMEDIATE) && (actl & SCSW_ACTL_SUSP))) {
+ for (i = 0; i < ARRAY_SIZE(dest->emw); i++) {
+ dest->emw[i] = cpu_to_be32(src->emw[i]);
+ }
+ }
+ *irb_len = sizeof(*dest);
+}
+
+int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len)
+{
+ SCSW *s = &sch->curr_status.scsw;
+ PMCW *p = &sch->curr_status.pmcw;
+ uint16_t stctl;
+ IRB irb;
+
+ if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ return 3;
+ }
+
+ stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
+
+ /* Prepare the irb for the guest. */
+ memset(&irb, 0, sizeof(IRB));
+
+ /* Copy scsw from current status. */
+ memcpy(&irb.scsw, s, sizeof(SCSW));
+ if (stctl & SCSW_STCTL_STATUS_PEND) {
+ if (s->cstat & (SCSW_CSTAT_DATA_CHECK |
+ SCSW_CSTAT_CHN_CTRL_CHK |
+ SCSW_CSTAT_INTF_CTRL_CHK)) {
+ irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF;
+ irb.esw[0] = 0x04804000;
+ } else {
+ irb.esw[0] = 0x00800000;
+ }
+ /* If a unit check is pending, copy sense data. */
+ if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) &&
+ (p->chars & PMCW_CHARS_MASK_CSENSE)) {
+ int i;
+
+ irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL;
+ /* Attention: sense_data is already BE! */
+ memcpy(irb.ecw, sch->sense_data, sizeof(sch->sense_data));
+ for (i = 0; i < ARRAY_SIZE(irb.ecw); i++) {
+ irb.ecw[i] = be32_to_cpu(irb.ecw[i]);
+ }
+ irb.esw[1] = 0x01000000 | (sizeof(sch->sense_data) << 8);
+ }
+ }
+ /* Store the irb to the guest. */
+ copy_irb_to_guest(target_irb, &irb, p, irb_len);
+
+ return ((stctl & SCSW_STCTL_STATUS_PEND) == 0);
+}
+
+void css_do_tsch_update_subch(SubchDev *sch)
+{
+ SCSW *s = &sch->curr_status.scsw;
+ PMCW *p = &sch->curr_status.pmcw;
+ uint16_t stctl;
+ uint16_t fctl;
+ uint16_t actl;
+
+ stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
+ fctl = s->ctrl & SCSW_CTRL_MASK_FCTL;
+ actl = s->ctrl & SCSW_CTRL_MASK_ACTL;
+
+ /* Clear conditions on subchannel, if applicable. */
+ if (stctl & SCSW_STCTL_STATUS_PEND) {
+ s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
+ if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) ||
+ ((fctl & SCSW_FCTL_HALT_FUNC) &&
+ (actl & SCSW_ACTL_SUSP))) {
+ s->ctrl &= ~SCSW_CTRL_MASK_FCTL;
+ }
+ if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) {
+ s->flags &= ~SCSW_FLAGS_MASK_PNO;
+ s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
+ SCSW_ACTL_START_PEND |
+ SCSW_ACTL_HALT_PEND |
+ SCSW_ACTL_CLEAR_PEND |
+ SCSW_ACTL_SUSP);
+ } else {
+ if ((actl & SCSW_ACTL_SUSP) &&
+ (fctl & SCSW_FCTL_START_FUNC)) {
+ s->flags &= ~SCSW_FLAGS_MASK_PNO;
+ if (fctl & SCSW_FCTL_HALT_FUNC) {
+ s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
+ SCSW_ACTL_START_PEND |
+ SCSW_ACTL_HALT_PEND |
+ SCSW_ACTL_CLEAR_PEND |
+ SCSW_ACTL_SUSP);
+ } else {
+ s->ctrl &= ~SCSW_ACTL_RESUME_PEND;
+ }
+ }
+ }
+ /* Clear pending sense data. */
+ if (p->chars & PMCW_CHARS_MASK_CSENSE) {
+ memset(sch->sense_data, 0, sizeof(sch->sense_data));
+ }
+ }
+}
+
+static void copy_crw_to_guest(CRW *dest, const CRW *src)
+{
+ dest->flags = cpu_to_be16(src->flags);
+ dest->rsid = cpu_to_be16(src->rsid);
+}
+
+int css_do_stcrw(CRW *crw)
+{
+ CrwContainer *crw_cont;
+ int ret;
+
+ crw_cont = QTAILQ_FIRST(&channel_subsys->pending_crws);
+ if (crw_cont) {
+ QTAILQ_REMOVE(&channel_subsys->pending_crws, crw_cont, sibling);
+ copy_crw_to_guest(crw, &crw_cont->crw);
+ g_free(crw_cont);
+ ret = 0;
+ } else {
+ /* List was empty, turn crw machine checks on again. */
+ memset(crw, 0, sizeof(*crw));
+ channel_subsys->do_crw_mchk = true;
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static void copy_crw_from_guest(CRW *dest, const CRW *src)
+{
+ dest->flags = be16_to_cpu(src->flags);
+ dest->rsid = be16_to_cpu(src->rsid);
+}
+
+void css_undo_stcrw(CRW *crw)
+{
+ CrwContainer *crw_cont;
+
+ crw_cont = g_try_malloc0(sizeof(CrwContainer));
+ if (!crw_cont) {
+ channel_subsys->crws_lost = true;
+ return;
+ }
+ copy_crw_from_guest(&crw_cont->crw, crw);
+
+ QTAILQ_INSERT_HEAD(&channel_subsys->pending_crws, crw_cont, sibling);
+}
+
+int css_do_tpi(IOIntCode *int_code, int lowcore)
+{
+ /* No pending interrupts for !KVM. */
+ return 0;
+}
+
+int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
+ int rfmt, void *buf)
+{
+ int i, desc_size;
+ uint32_t words[8];
+ uint32_t chpid_type_word;
+ CssImage *css;
+
+ if (!m && !cssid) {
+ css = channel_subsys->css[channel_subsys->default_cssid];
+ } else {
+ css = channel_subsys->css[cssid];
+ }
+ if (!css) {
+ return 0;
+ }
+ desc_size = 0;
+ for (i = f_chpid; i <= l_chpid; i++) {
+ if (css->chpids[i].in_use) {
+ chpid_type_word = 0x80000000 | (css->chpids[i].type << 8) | i;
+ if (rfmt == 0) {
+ words[0] = cpu_to_be32(chpid_type_word);
+ words[1] = 0;
+ memcpy(buf + desc_size, words, 8);
+ desc_size += 8;
+ } else if (rfmt == 1) {
+ words[0] = cpu_to_be32(chpid_type_word);
+ words[1] = 0;
+ words[2] = 0;
+ words[3] = 0;
+ words[4] = 0;
+ words[5] = 0;
+ words[6] = 0;
+ words[7] = 0;
+ memcpy(buf + desc_size, words, 32);
+ desc_size += 32;
+ }
+ }
+ }
+ return desc_size;
+}
+
+void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo)
+{
+ /* dct is currently ignored (not really meaningful for our devices) */
+ /* TODO: Don't ignore mbk. */
+ if (update && !channel_subsys->chnmon_active) {
+ /* Enable measuring. */
+ channel_subsys->chnmon_area = mbo;
+ channel_subsys->chnmon_active = true;
+ }
+ if (!update && channel_subsys->chnmon_active) {
+ /* Disable measuring. */
+ channel_subsys->chnmon_area = 0;
+ channel_subsys->chnmon_active = false;
+ }
+}
+
+int css_do_rsch(SubchDev *sch)
+{
+ SCSW *s = &sch->curr_status.scsw;
+ PMCW *p = &sch->curr_status.pmcw;
+ int ret;
+
+ if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
+ ret = -EINPROGRESS;
+ goto out;
+ }
+
+ if (((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
+ (s->ctrl & SCSW_ACTL_RESUME_PEND) ||
+ (!(s->ctrl & SCSW_ACTL_SUSP))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* If monitoring is active, update counter. */
+ if (channel_subsys->chnmon_active) {
+ css_update_chnmon(sch);
+ }
+
+ s->ctrl |= SCSW_ACTL_RESUME_PEND;
+ do_subchannel_work(sch, NULL);
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int css_do_rchp(uint8_t cssid, uint8_t chpid)
+{
+ uint8_t real_cssid;
+
+ if (cssid > channel_subsys->max_cssid) {
+ return -EINVAL;
+ }
+ if (channel_subsys->max_cssid == 0) {
+ real_cssid = channel_subsys->default_cssid;
+ } else {
+ real_cssid = cssid;
+ }
+ if (!channel_subsys->css[real_cssid]) {
+ return -EINVAL;
+ }
+
+ if (!channel_subsys->css[real_cssid]->chpids[chpid].in_use) {
+ return -ENODEV;
+ }
+
+ if (!channel_subsys->css[real_cssid]->chpids[chpid].is_virtual) {
+ fprintf(stderr,
+ "rchp unsupported for non-virtual chpid %x.%02x!\n",
+ real_cssid, chpid);
+ return -ENODEV;
+ }
+
+ /* We don't really use a channel path, so we're done here. */
+ css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT,
+ channel_subsys->max_cssid > 0 ? 1 : 0, chpid);
+ if (channel_subsys->max_cssid > 0) {
+ css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 0, real_cssid << 8);
+ }
+ return 0;
+}
+
+bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid)
+{
+ SubchSet *set;
+ uint8_t real_cssid;
+
+ real_cssid = (!m && (cssid == 0)) ? channel_subsys->default_cssid : cssid;
+ if (real_cssid > MAX_CSSID || ssid > MAX_SSID ||
+ !channel_subsys->css[real_cssid] ||
+ !channel_subsys->css[real_cssid]->sch_set[ssid]) {
+ return true;
+ }
+ set = channel_subsys->css[real_cssid]->sch_set[ssid];
+ return schid > find_last_bit(set->schids_used,
+ (MAX_SCHID + 1) / sizeof(unsigned long));
+}
+
+static int css_add_virtual_chpid(uint8_t cssid, uint8_t chpid, uint8_t type)
+{
+ CssImage *css;
+
+ trace_css_chpid_add(cssid, chpid, type);
+ if (cssid > MAX_CSSID) {
+ return -EINVAL;
+ }
+ css = channel_subsys->css[cssid];
+ if (!css) {
+ return -EINVAL;
+ }
+ if (css->chpids[chpid].in_use) {
+ return -EEXIST;
+ }
+ css->chpids[chpid].in_use = 1;
+ css->chpids[chpid].type = type;
+ css->chpids[chpid].is_virtual = 1;
+
+ css_generate_chp_crws(cssid, chpid);
+
+ return 0;
+}
+
+void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type)
+{
+ PMCW *p = &sch->curr_status.pmcw;
+ SCSW *s = &sch->curr_status.scsw;
+ int i;
+ CssImage *css = channel_subsys->css[sch->cssid];
+
+ assert(css != NULL);
+ memset(p, 0, sizeof(PMCW));
+ p->flags |= PMCW_FLAGS_MASK_DNV;
+ p->devno = sch->devno;
+ /* single path */
+ p->pim = 0x80;
+ p->pom = 0xff;
+ p->pam = 0x80;
+ p->chpid[0] = chpid;
+ if (!css->chpids[chpid].in_use) {
+ css_add_virtual_chpid(sch->cssid, chpid, type);
+ }
+
+ memset(s, 0, sizeof(SCSW));
+ sch->curr_status.mba = 0;
+ for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
+ sch->curr_status.mda[i] = 0;
+ }
+}
+
+SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid)
+{
+ uint8_t real_cssid;
+
+ real_cssid = (!m && (cssid == 0)) ? channel_subsys->default_cssid : cssid;
+
+ if (!channel_subsys->css[real_cssid]) {
+ return NULL;
+ }
+
+ if (!channel_subsys->css[real_cssid]->sch_set[ssid]) {
+ return NULL;
+ }
+
+ return channel_subsys->css[real_cssid]->sch_set[ssid]->sch[schid];
+}
+
+bool css_subch_visible(SubchDev *sch)
+{
+ if (sch->ssid > channel_subsys->max_ssid) {
+ return false;
+ }
+
+ if (sch->cssid != channel_subsys->default_cssid) {
+ return (channel_subsys->max_cssid > 0);
+ }
+
+ return true;
+}
+
+bool css_present(uint8_t cssid)
+{
+ return (channel_subsys->css[cssid] != NULL);
+}
+
+bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno)
+{
+ if (!channel_subsys->css[cssid]) {
+ return false;
+ }
+ if (!channel_subsys->css[cssid]->sch_set[ssid]) {
+ return false;
+ }
+
+ return !!test_bit(devno,
+ channel_subsys->css[cssid]->sch_set[ssid]->devnos_used);
+}
+
+void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid,
+ uint16_t devno, SubchDev *sch)
+{
+ CssImage *css;
+ SubchSet *s_set;
+
+ trace_css_assign_subch(sch ? "assign" : "deassign", cssid, ssid, schid,
+ devno);
+ if (!channel_subsys->css[cssid]) {
+ fprintf(stderr,
+ "Suspicious call to %s (%x.%x.%04x) for non-existing css!\n",
+ __func__, cssid, ssid, schid);
+ return;
+ }
+ css = channel_subsys->css[cssid];
+
+ if (!css->sch_set[ssid]) {
+ css->sch_set[ssid] = g_malloc0(sizeof(SubchSet));
+ }
+ s_set = css->sch_set[ssid];
+
+ s_set->sch[schid] = sch;
+ if (sch) {
+ set_bit(schid, s_set->schids_used);
+ set_bit(devno, s_set->devnos_used);
+ } else {
+ clear_bit(schid, s_set->schids_used);
+ clear_bit(devno, s_set->devnos_used);
+ }
+}
+
+void css_queue_crw(uint8_t rsc, uint8_t erc, int chain, uint16_t rsid)
+{
+ CrwContainer *crw_cont;
+
+ trace_css_crw(rsc, erc, rsid, chain ? "(chained)" : "");
+ /* TODO: Maybe use a static crw pool? */
+ crw_cont = g_try_malloc0(sizeof(CrwContainer));
+ if (!crw_cont) {
+ channel_subsys->crws_lost = true;
+ return;
+ }
+ crw_cont->crw.flags = (rsc << 8) | erc;
+ if (chain) {
+ crw_cont->crw.flags |= CRW_FLAGS_MASK_C;
+ }
+ crw_cont->crw.rsid = rsid;
+ if (channel_subsys->crws_lost) {
+ crw_cont->crw.flags |= CRW_FLAGS_MASK_R;
+ channel_subsys->crws_lost = false;
+ }
+
+ QTAILQ_INSERT_TAIL(&channel_subsys->pending_crws, crw_cont, sibling);
+
+ if (channel_subsys->do_crw_mchk) {
+ channel_subsys->do_crw_mchk = false;
+ /* Inject crw pending machine check. */
+ s390_crw_mchk();
+ }
+}
+
+void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
+ int hotplugged, int add)
+{
+ uint8_t guest_cssid;
+ bool chain_crw;
+
+ if (add && !hotplugged) {
+ return;
+ }
+ if (channel_subsys->max_cssid == 0) {
+ /* Default cssid shows up as 0. */
+ guest_cssid = (cssid == channel_subsys->default_cssid) ? 0 : cssid;
+ } else {
+ /* Show real cssid to the guest. */
+ guest_cssid = cssid;
+ }
+ /*
+ * Only notify for higher subchannel sets/channel subsystems if the
+ * guest has enabled it.
+ */
+ if ((ssid > channel_subsys->max_ssid) ||
+ (guest_cssid > channel_subsys->max_cssid) ||
+ ((channel_subsys->max_cssid == 0) &&
+ (cssid != channel_subsys->default_cssid))) {
+ return;
+ }
+ chain_crw = (channel_subsys->max_ssid > 0) ||
+ (channel_subsys->max_cssid > 0);
+ css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, chain_crw ? 1 : 0, schid);
+ if (chain_crw) {
+ css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0,
+ (guest_cssid << 8) | (ssid << 4));
+ }
+}
+
+void css_generate_chp_crws(uint8_t cssid, uint8_t chpid)
+{
+ /* TODO */
+}
+
+void css_generate_css_crws(uint8_t cssid)
+{
+ css_queue_crw(CRW_RSC_CSS, 0, 0, cssid);
+}
+
+int css_enable_mcsse(void)
+{
+ trace_css_enable_facility("mcsse");
+ channel_subsys->max_cssid = MAX_CSSID;
+ return 0;
+}
+
+int css_enable_mss(void)
+{
+ trace_css_enable_facility("mss");
+ channel_subsys->max_ssid = MAX_SSID;
+ return 0;
+}
+
+void subch_device_save(SubchDev *s, QEMUFile *f)
+{
+ int i;
+
+ qemu_put_byte(f, s->cssid);
+ qemu_put_byte(f, s->ssid);
+ qemu_put_be16(f, s->schid);
+ qemu_put_be16(f, s->devno);
+ qemu_put_byte(f, s->thinint_active);
+ /* SCHIB */
+ /* PMCW */
+ qemu_put_be32(f, s->curr_status.pmcw.intparm);
+ qemu_put_be16(f, s->curr_status.pmcw.flags);
+ qemu_put_be16(f, s->curr_status.pmcw.devno);
+ qemu_put_byte(f, s->curr_status.pmcw.lpm);
+ qemu_put_byte(f, s->curr_status.pmcw.pnom);
+ qemu_put_byte(f, s->curr_status.pmcw.lpum);
+ qemu_put_byte(f, s->curr_status.pmcw.pim);
+ qemu_put_be16(f, s->curr_status.pmcw.mbi);
+ qemu_put_byte(f, s->curr_status.pmcw.pom);
+ qemu_put_byte(f, s->curr_status.pmcw.pam);
+ qemu_put_buffer(f, s->curr_status.pmcw.chpid, 8);
+ qemu_put_be32(f, s->curr_status.pmcw.chars);
+ /* SCSW */
+ qemu_put_be16(f, s->curr_status.scsw.flags);
+ qemu_put_be16(f, s->curr_status.scsw.ctrl);
+ qemu_put_be32(f, s->curr_status.scsw.cpa);
+ qemu_put_byte(f, s->curr_status.scsw.dstat);
+ qemu_put_byte(f, s->curr_status.scsw.cstat);
+ qemu_put_be16(f, s->curr_status.scsw.count);
+ qemu_put_be64(f, s->curr_status.mba);
+ qemu_put_buffer(f, s->curr_status.mda, 4);
+ /* end SCHIB */
+ qemu_put_buffer(f, s->sense_data, 32);
+ qemu_put_be64(f, s->channel_prog);
+ /* last cmd */
+ qemu_put_byte(f, s->last_cmd.cmd_code);
+ qemu_put_byte(f, s->last_cmd.flags);
+ qemu_put_be16(f, s->last_cmd.count);
+ qemu_put_be32(f, s->last_cmd.cda);
+ qemu_put_byte(f, s->last_cmd_valid);
+ qemu_put_byte(f, s->id.reserved);
+ qemu_put_be16(f, s->id.cu_type);
+ qemu_put_byte(f, s->id.cu_model);
+ qemu_put_be16(f, s->id.dev_type);
+ qemu_put_byte(f, s->id.dev_model);
+ qemu_put_byte(f, s->id.unused);
+ for (i = 0; i < ARRAY_SIZE(s->id.ciw); i++) {
+ qemu_put_byte(f, s->id.ciw[i].type);
+ qemu_put_byte(f, s->id.ciw[i].command);
+ qemu_put_be16(f, s->id.ciw[i].count);
+ }
+ qemu_put_byte(f, s->ccw_fmt_1);
+ qemu_put_byte(f, s->ccw_no_data_cnt);
+ return;
+}
+
+int subch_device_load(SubchDev *s, QEMUFile *f)
+{
+ int i;
+
+ s->cssid = qemu_get_byte(f);
+ s->ssid = qemu_get_byte(f);
+ s->schid = qemu_get_be16(f);
+ s->devno = qemu_get_be16(f);
+ s->thinint_active = qemu_get_byte(f);
+ /* SCHIB */
+ /* PMCW */
+ s->curr_status.pmcw.intparm = qemu_get_be32(f);
+ s->curr_status.pmcw.flags = qemu_get_be16(f);
+ s->curr_status.pmcw.devno = qemu_get_be16(f);
+ s->curr_status.pmcw.lpm = qemu_get_byte(f);
+ s->curr_status.pmcw.pnom = qemu_get_byte(f);
+ s->curr_status.pmcw.lpum = qemu_get_byte(f);
+ s->curr_status.pmcw.pim = qemu_get_byte(f);
+ s->curr_status.pmcw.mbi = qemu_get_be16(f);
+ s->curr_status.pmcw.pom = qemu_get_byte(f);
+ s->curr_status.pmcw.pam = qemu_get_byte(f);
+ qemu_get_buffer(f, s->curr_status.pmcw.chpid, 8);
+ s->curr_status.pmcw.chars = qemu_get_be32(f);
+ /* SCSW */
+ s->curr_status.scsw.flags = qemu_get_be16(f);
+ s->curr_status.scsw.ctrl = qemu_get_be16(f);
+ s->curr_status.scsw.cpa = qemu_get_be32(f);
+ s->curr_status.scsw.dstat = qemu_get_byte(f);
+ s->curr_status.scsw.cstat = qemu_get_byte(f);
+ s->curr_status.scsw.count = qemu_get_be16(f);
+ s->curr_status.mba = qemu_get_be64(f);
+ qemu_get_buffer(f, s->curr_status.mda, 4);
+ /* end SCHIB */
+ qemu_get_buffer(f, s->sense_data, 32);
+ s->channel_prog = qemu_get_be64(f);
+ /* last cmd */
+ s->last_cmd.cmd_code = qemu_get_byte(f);
+ s->last_cmd.flags = qemu_get_byte(f);
+ s->last_cmd.count = qemu_get_be16(f);
+ s->last_cmd.cda = qemu_get_be32(f);
+ s->last_cmd_valid = qemu_get_byte(f);
+ s->id.reserved = qemu_get_byte(f);
+ s->id.cu_type = qemu_get_be16(f);
+ s->id.cu_model = qemu_get_byte(f);
+ s->id.dev_type = qemu_get_be16(f);
+ s->id.dev_model = qemu_get_byte(f);
+ s->id.unused = qemu_get_byte(f);
+ for (i = 0; i < ARRAY_SIZE(s->id.ciw); i++) {
+ s->id.ciw[i].type = qemu_get_byte(f);
+ s->id.ciw[i].command = qemu_get_byte(f);
+ s->id.ciw[i].count = qemu_get_be16(f);
+ }
+ s->ccw_fmt_1 = qemu_get_byte(f);
+ s->ccw_no_data_cnt = qemu_get_byte(f);
+ /*
+ * Hack alert. We don't migrate the channel subsystem status (no
+ * device!), but we need to find out if the guest enabled mss/mcss-e.
+ * If the subchannel is enabled, it certainly was able to access it,
+ * so adjust the max_ssid/max_cssid values for relevant ssid/cssid
+ * values. This is not watertight, but better than nothing.
+ */
+ if (s->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA) {
+ if (s->ssid) {
+ channel_subsys->max_ssid = MAX_SSID;
+ }
+ if (s->cssid != channel_subsys->default_cssid) {
+ channel_subsys->max_cssid = MAX_CSSID;
+ }
+ }
+ return 0;
+}
+
+
+static void css_init(void)
+{
+ channel_subsys = g_malloc0(sizeof(*channel_subsys));
+ QTAILQ_INIT(&channel_subsys->pending_crws);
+ channel_subsys->do_crw_mchk = true;
+ channel_subsys->crws_lost = false;
+ channel_subsys->chnmon_active = false;
+ QTAILQ_INIT(&channel_subsys->io_adapters);
+}
+machine_init(css_init);
+
+void css_reset_sch(SubchDev *sch)
+{
+ PMCW *p = &sch->curr_status.pmcw;
+
+ if ((p->flags & PMCW_FLAGS_MASK_ENA) != 0 && sch->disable_cb) {
+ sch->disable_cb(sch);
+ }
+
+ p->intparm = 0;
+ p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
+ PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
+ PMCW_FLAGS_MASK_MP | PMCW_FLAGS_MASK_TF);
+ p->flags |= PMCW_FLAGS_MASK_DNV;
+ p->devno = sch->devno;
+ p->pim = 0x80;
+ p->lpm = p->pim;
+ p->pnom = 0;
+ p->lpum = 0;
+ p->mbi = 0;
+ p->pom = 0xff;
+ p->pam = 0x80;
+ p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME |
+ PMCW_CHARS_MASK_CSENSE);
+
+ memset(&sch->curr_status.scsw, 0, sizeof(sch->curr_status.scsw));
+ sch->curr_status.mba = 0;
+
+ sch->channel_prog = 0x0;
+ sch->last_cmd_valid = false;
+ sch->thinint_active = false;
+}
+
+void css_reset(void)
+{
+ CrwContainer *crw_cont;
+
+ /* Clean up monitoring. */
+ channel_subsys->chnmon_active = false;
+ channel_subsys->chnmon_area = 0;
+
+ /* Clear pending CRWs. */
+ while ((crw_cont = QTAILQ_FIRST(&channel_subsys->pending_crws))) {
+ QTAILQ_REMOVE(&channel_subsys->pending_crws, crw_cont, sibling);
+ g_free(crw_cont);
+ }
+ channel_subsys->do_crw_mchk = true;
+ channel_subsys->crws_lost = false;
+
+ /* Reset maximum ids. */
+ channel_subsys->max_cssid = 0;
+ channel_subsys->max_ssid = 0;
+}
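A note on the format-0 CCW handling above: copy_ccw_from_guest() reassembles the 24-bit data address of a format-0 channel command word from the split cda0/cda1 fields, so the rest of the code can operate purely on format-1 CCWs. The following standalone sketch (not part of the patch; simplified stand-in types, made-up test values, and a manual byte swap that assumes a little-endian host) shows that reassembly in isolation:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the format-0 CCW layout used above. */
struct ccw0 {
    uint8_t  cmd_code;
    uint8_t  cda0;      /* bits 8..15 of the 24-bit data address */
    uint16_t cda1;      /* bits 16..31, stored big-endian */
    uint8_t  flags;
    uint8_t  reserved;
    uint16_t count;     /* big-endian byte count */
};

static uint32_t ccw0_data_address(const struct ccw0 *c)
{
    /* be16_to_cpu() equivalent, assuming a little-endian host */
    uint16_t low = (uint16_t)((c->cda1 >> 8) | (c->cda1 << 8));
    /* cda0 supplies the high byte, as in copy_ccw_from_guest() */
    return (uint32_t)low | ((uint32_t)c->cda0 << 16);
}

int main(void)
{
    /* 0x3456 stored big-endian reads back as 0x5634 on x86 */
    struct ccw0 c = { .cmd_code = 0x02, .cda0 = 0x12, .cda1 = 0x5634 };
    assert(ccw0_data_address(&c) == 0x123456);
    printf("cda = 0x%06x\n", ccw0_data_address(&c));
    return 0;
}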
diff --git a/src/hw/s390x/css.h b/src/hw/s390x/css.h
new file mode 100644
index 0000000..a09bb1f
--- /dev/null
+++ b/src/hw/s390x/css.h
@@ -0,0 +1,111 @@
+/*
+ * Channel subsystem structures and definitions.
+ *
+ * Copyright 2012 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef CSS_H
+#define CSS_H
+
+#include "ioinst.h"
+
+/* Channel subsystem constants. */
+#define MAX_SCHID 65535
+#define MAX_SSID 3
+#define MAX_CSSID 254 /* 255 is reserved */
+#define MAX_CHPID 255
+
+#define MAX_CIWS 62
+
+typedef struct CIW {
+ uint8_t type;
+ uint8_t command;
+ uint16_t count;
+} QEMU_PACKED CIW;
+
+typedef struct SenseId {
+ /* common part */
+ uint8_t reserved; /* always 0x'FF' */
+ uint16_t cu_type; /* control unit type */
+ uint8_t cu_model; /* control unit model */
+ uint16_t dev_type; /* device type */
+ uint8_t dev_model; /* device model */
+ uint8_t unused; /* padding byte */
+ /* extended part */
+ CIW ciw[MAX_CIWS]; /* variable # of CIWs */
+} QEMU_PACKED SenseId;
+
+/* Channel measurements, from linux/drivers/s390/cio/cmf.c. */
+typedef struct CMB {
+ uint16_t ssch_rsch_count;
+ uint16_t sample_count;
+ uint32_t device_connect_time;
+ uint32_t function_pending_time;
+ uint32_t device_disconnect_time;
+ uint32_t control_unit_queuing_time;
+ uint32_t device_active_only_time;
+ uint32_t reserved[2];
+} QEMU_PACKED CMB;
+
+typedef struct CMBE {
+ uint32_t ssch_rsch_count;
+ uint32_t sample_count;
+ uint32_t device_connect_time;
+ uint32_t function_pending_time;
+ uint32_t device_disconnect_time;
+ uint32_t control_unit_queuing_time;
+ uint32_t device_active_only_time;
+ uint32_t device_busy_time;
+ uint32_t initial_command_response_time;
+ uint32_t reserved[7];
+} QEMU_PACKED CMBE;
+
+struct SubchDev {
+ /* channel-subsystem related things: */
+ uint8_t cssid;
+ uint8_t ssid;
+ uint16_t schid;
+ uint16_t devno;
+ SCHIB curr_status;
+ uint8_t sense_data[32];
+ hwaddr channel_prog;
+ CCW1 last_cmd;
+ bool last_cmd_valid;
+ bool ccw_fmt_1;
+ bool thinint_active;
+ uint8_t ccw_no_data_cnt;
+ /* transport-provided data: */
+ int (*ccw_cb) (SubchDev *, CCW1);
+ void (*disable_cb)(SubchDev *);
+ SenseId id;
+ void *driver_data;
+};
+
+typedef SubchDev *(*css_subch_cb_func)(uint8_t m, uint8_t cssid, uint8_t ssid,
+ uint16_t schid);
+void subch_device_save(SubchDev *s, QEMUFile *f);
+int subch_device_load(SubchDev *s, QEMUFile *f);
+int css_create_css_image(uint8_t cssid, bool default_image);
+bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno);
+void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid,
+ uint16_t devno, SubchDev *sch);
+void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type);
+uint16_t css_build_subchannel_id(SubchDev *sch);
+void css_reset(void);
+void css_reset_sch(SubchDev *sch);
+void css_queue_crw(uint8_t rsc, uint8_t erc, int chain, uint16_t rsid);
+void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
+ int hotplugged, int add);
+void css_generate_chp_crws(uint8_t cssid, uint8_t chpid);
+void css_generate_css_crws(uint8_t cssid);
+void css_adapter_interrupt(uint8_t isc);
+
+#define CSS_IO_ADAPTER_VIRTIO 1
+int css_register_io_adapter(uint8_t type, uint8_t isc, bool swap,
+ bool maskable, uint32_t *id);
+#endif
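The subchannel-identification word built by css_build_subchannel_id() (declared above, defined in css.c) packs the cssid, the ssid, and two architecture-defined bits into 16 bits. A minimal sketch of that packing, with made-up sample values:

#include <stdint.h>
#include <stdio.h>

/* Mirrors css_build_subchannel_id(): with multiple channel subsystems
 * enabled (max_cssid > 0) the SID carries the cssid plus the "m" bit
 * (1 << 3); bit 0, the "one" bit, is always set. */
static uint16_t build_sid(uint8_t cssid, uint8_t ssid, int mcss_enabled)
{
    if (mcss_enabled) {
        return (uint16_t)((cssid << 8) | (1 << 3) | (ssid << 1) | 1);
    }
    return (uint16_t)((ssid << 1) | 1);
}

int main(void)
{
    printf("0x%04x\n", build_sid(0xfe, 1, 1)); /* prints 0xfe0b */
    printf("0x%04x\n", build_sid(0x00, 0, 0)); /* prints 0x0001 */
    return 0;
}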
diff --git a/src/hw/s390x/event-facility.c b/src/hw/s390x/event-facility.c
new file mode 100644
index 0000000..907b485
--- /dev/null
+++ b/src/hw/s390x/event-facility.c
@@ -0,0 +1,447 @@
+/*
+ * SCLP
+ * Event Facility
+ * handles SCLP event types
+ * - Signal Quiesce - system power down
+ * - ASCII Console Data - VT220 read and write
+ *
+ * Copyright IBM, Corp. 2012
+ *
+ * Authors:
+ * Heinz Graalfs <graalfs@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "sysemu/sysemu.h"
+
+#include "hw/s390x/sclp.h"
+#include "hw/s390x/event-facility.h"
+
+typedef struct SCLPEventsBus {
+ BusState qbus;
+} SCLPEventsBus;
+
+struct SCLPEventFacility {
+ SysBusDevice parent_obj;
+ SCLPEventsBus sbus;
+ /* guest's receive mask */
+ unsigned int receive_mask;
+};
+
+/* return true if any child has event pending set */
+static bool event_pending(SCLPEventFacility *ef)
+{
+ BusChild *kid;
+ SCLPEvent *event;
+ SCLPEventClass *event_class;
+
+ QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
+ DeviceState *qdev = kid->child;
+ event = DO_UPCAST(SCLPEvent, qdev, qdev);
+ event_class = SCLP_EVENT_GET_CLASS(event);
+ if (event->event_pending &&
+ event_class->get_send_mask() & ef->receive_mask) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static unsigned int get_host_send_mask(SCLPEventFacility *ef)
+{
+ unsigned int mask;
+ BusChild *kid;
+ SCLPEventClass *child;
+
+ mask = 0;
+
+ QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
+ DeviceState *qdev = kid->child;
+ child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
+ mask |= child->get_send_mask();
+ }
+ return mask;
+}
+
+static unsigned int get_host_receive_mask(SCLPEventFacility *ef)
+{
+ unsigned int mask;
+ BusChild *kid;
+ SCLPEventClass *child;
+
+ mask = 0;
+
+ QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
+ DeviceState *qdev = kid->child;
+ child = SCLP_EVENT_GET_CLASS((SCLPEvent *) qdev);
+ mask |= child->get_receive_mask();
+ }
+ return mask;
+}
+
+static uint16_t write_event_length_check(SCCB *sccb)
+{
+ int slen;
+ unsigned elen = 0;
+ EventBufferHeader *event;
+ WriteEventData *wed = (WriteEventData *) sccb;
+
+ event = (EventBufferHeader *) &wed->ebh;
+ for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
+ elen = be16_to_cpu(event->length);
+ if (elen < sizeof(*event) || elen > slen) {
+ return SCLP_RC_EVENT_BUFFER_SYNTAX_ERROR;
+ }
+ event = (void *) event + elen;
+ }
+ if (slen) {
+ return SCLP_RC_INCONSISTENT_LENGTHS;
+ }
+ return SCLP_RC_NORMAL_COMPLETION;
+}
+
+static uint16_t handle_write_event_buf(SCLPEventFacility *ef,
+ EventBufferHeader *event_buf, SCCB *sccb)
+{
+ uint16_t rc;
+ BusChild *kid;
+ SCLPEvent *event;
+ SCLPEventClass *ec;
+
+ rc = SCLP_RC_INVALID_FUNCTION;
+
+ QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
+ DeviceState *qdev = kid->child;
+ event = (SCLPEvent *) qdev;
+ ec = SCLP_EVENT_GET_CLASS(event);
+
+ if (ec->write_event_data &&
+ ec->can_handle_event(event_buf->type)) {
+ rc = ec->write_event_data(event, event_buf);
+ break;
+ }
+ }
+ return rc;
+}
+
+static uint16_t handle_sccb_write_events(SCLPEventFacility *ef, SCCB *sccb)
+{
+ uint16_t rc;
+ int slen;
+ unsigned elen = 0;
+ EventBufferHeader *event_buf;
+ WriteEventData *wed = (WriteEventData *) sccb;
+
+ event_buf = &wed->ebh;
+ rc = SCLP_RC_NORMAL_COMPLETION;
+
+ /* loop over all contained event buffers */
+ for (slen = sccb_data_len(sccb); slen > 0; slen -= elen) {
+ elen = be16_to_cpu(event_buf->length);
+
+ /* in case of a previous error mark all trailing buffers
+ * as not accepted */
+ if (rc != SCLP_RC_NORMAL_COMPLETION) {
+ event_buf->flags &= ~(SCLP_EVENT_BUFFER_ACCEPTED);
+ } else {
+ rc = handle_write_event_buf(ef, event_buf, sccb);
+ }
+ event_buf = (void *) event_buf + elen;
+ }
+ return rc;
+}
+
+static void write_event_data(SCLPEventFacility *ef, SCCB *sccb)
+{
+ if (sccb->h.function_code != SCLP_FC_NORMAL_WRITE) {
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
+ goto out;
+ }
+ if (be16_to_cpu(sccb->h.length) < 8) {
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
+ goto out;
+ }
+ /* first do a sanity check of the write events */
+ sccb->h.response_code = cpu_to_be16(write_event_length_check(sccb));
+
+ /* if no early error, then execute */
+ if (sccb->h.response_code == be16_to_cpu(SCLP_RC_NORMAL_COMPLETION)) {
+ sccb->h.response_code =
+ cpu_to_be16(handle_sccb_write_events(ef, sccb));
+ }
+
+out:
+ return;
+}
+
+static uint16_t handle_sccb_read_events(SCLPEventFacility *ef, SCCB *sccb,
+ unsigned int mask)
+{
+ uint16_t rc;
+ int slen;
+ unsigned elen;
+ BusChild *kid;
+ SCLPEvent *event;
+ SCLPEventClass *ec;
+ EventBufferHeader *event_buf;
+ ReadEventData *red = (ReadEventData *) sccb;
+
+ event_buf = &red->ebh;
+ event_buf->length = 0;
+ slen = sizeof(sccb->data);
+
+ rc = SCLP_RC_NO_EVENT_BUFFERS_STORED;
+
+ QTAILQ_FOREACH(kid, &ef->sbus.qbus.children, sibling) {
+ DeviceState *qdev = kid->child;
+ event = (SCLPEvent *) qdev;
+ ec = SCLP_EVENT_GET_CLASS(event);
+
+ if (mask & ec->get_send_mask()) {
+ if (ec->read_event_data(event, event_buf, &slen)) {
+ elen = be16_to_cpu(event_buf->length);
+ event_buf = (EventBufferHeader *) ((char *)event_buf + elen);
+ rc = SCLP_RC_NORMAL_COMPLETION;
+ }
+ }
+ }
+
+ if (sccb->h.control_mask[2] & SCLP_VARIABLE_LENGTH_RESPONSE) {
+ /* the architecture suggests resetting the variable-length-response bit */
+ sccb->h.control_mask[2] &= ~SCLP_VARIABLE_LENGTH_RESPONSE;
+ /* with a new length value */
+ sccb->h.length = cpu_to_be16(SCCB_SIZE - slen);
+ }
+ return rc;
+}
+
+static void read_event_data(SCLPEventFacility *ef, SCCB *sccb)
+{
+ unsigned int sclp_active_selection_mask;
+ unsigned int sclp_cp_receive_mask;
+
+ ReadEventData *red = (ReadEventData *) sccb;
+
+ if (be16_to_cpu(sccb->h.length) != SCCB_SIZE) {
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_INSUFFICIENT_SCCB_LENGTH);
+ goto out;
+ }
+
+ sclp_cp_receive_mask = ef->receive_mask;
+
+ /* get active selection mask */
+ switch (sccb->h.function_code) {
+ case SCLP_UNCONDITIONAL_READ:
+ sclp_active_selection_mask = sclp_cp_receive_mask;
+ break;
+ case SCLP_SELECTIVE_READ:
+ sclp_active_selection_mask = be32_to_cpu(red->mask);
+ if (!sclp_cp_receive_mask ||
+ (sclp_active_selection_mask & ~sclp_cp_receive_mask)) {
+ sccb->h.response_code =
+ cpu_to_be16(SCLP_RC_INVALID_SELECTION_MASK);
+ goto out;
+ }
+ break;
+ default:
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_FUNCTION);
+ goto out;
+ }
+ sccb->h.response_code = cpu_to_be16(
+ handle_sccb_read_events(ef, sccb, sclp_active_selection_mask));
+
+out:
+ return;
+}
+
+static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb)
+{
+ WriteEventMask *we_mask = (WriteEventMask *) sccb;
+
+ /* Attention: We assume that Linux uses 4-byte masks, which it actually
+ does. The architecture allows for masks of variable size, though. */
+ if (be16_to_cpu(we_mask->mask_length) != 4) {
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_MASK_LENGTH);
+ goto out;
+ }
+
+ /* keep track of the guest's capability masks */
+ ef->receive_mask = be32_to_cpu(we_mask->cp_receive_mask);
+
+ /* return the SCLP's capability masks to the guest */
+ we_mask->send_mask = cpu_to_be32(get_host_send_mask(ef));
+ we_mask->receive_mask = cpu_to_be32(get_host_receive_mask(ef));
+
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
+
+out:
+ return;
+}
+
+/* qemu object creation and initialization functions */
+
+#define TYPE_SCLP_EVENTS_BUS "s390-sclp-events-bus"
+
+static void sclp_events_bus_realize(BusState *bus, Error **errp)
+{
+ BusChild *kid;
+
+ /* TODO: recursive realization has to be done in common code */
+ QTAILQ_FOREACH(kid, &bus->children, sibling) {
+ DeviceState *dev = kid->child;
+
+ object_property_set_bool(OBJECT(dev), true, "realized", errp);
+ if (*errp) {
+ return;
+ }
+ }
+}
+
+static void sclp_events_bus_class_init(ObjectClass *klass, void *data)
+{
+ BusClass *bc = BUS_CLASS(klass);
+
+ bc->realize = sclp_events_bus_realize;
+}
+
+static const TypeInfo sclp_events_bus_info = {
+ .name = TYPE_SCLP_EVENTS_BUS,
+ .parent = TYPE_BUS,
+ .class_init = sclp_events_bus_class_init,
+};
+
+static void command_handler(SCLPEventFacility *ef, SCCB *sccb, uint64_t code)
+{
+ switch (code & SCLP_CMD_CODE_MASK) {
+ case SCLP_CMD_READ_EVENT_DATA:
+ read_event_data(ef, sccb);
+ break;
+ case SCLP_CMD_WRITE_EVENT_DATA:
+ write_event_data(ef, sccb);
+ break;
+ case SCLP_CMD_WRITE_EVENT_MASK:
+ write_event_mask(ef, sccb);
+ break;
+ default:
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
+ break;
+ }
+}
+
+static const VMStateDescription vmstate_event_facility = {
+ .name = "vmstate-event-facility",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(receive_mask, SCLPEventFacility),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void init_event_facility(Object *obj)
+{
+ SCLPEventFacility *event_facility = EVENT_FACILITY(obj);
+ DeviceState *sdev = DEVICE(obj);
+ Object *new;
+
+ /* Spawn a new bus for SCLP events */
+ qbus_create_inplace(&event_facility->sbus, sizeof(event_facility->sbus),
+ TYPE_SCLP_EVENTS_BUS, sdev, NULL);
+
+ new = object_new(TYPE_SCLP_QUIESCE);
+ object_property_add_child(obj, TYPE_SCLP_QUIESCE, new, NULL);
+ object_unref(new);
+ qdev_set_parent_bus(DEVICE(new), &event_facility->sbus.qbus);
+
+ new = object_new(TYPE_SCLP_CPU_HOTPLUG);
+ object_property_add_child(obj, TYPE_SCLP_CPU_HOTPLUG, new, NULL);
+ object_unref(new);
+ qdev_set_parent_bus(DEVICE(new), &event_facility->sbus.qbus);
+ /* the facility will automatically realize the devices via the bus */
+}
+
+static void reset_event_facility(DeviceState *dev)
+{
+ SCLPEventFacility *sdev = EVENT_FACILITY(dev);
+
+ sdev->receive_mask = 0;
+}
+
+static void init_event_facility_class(ObjectClass *klass, void *data)
+{
+ SysBusDeviceClass *sbdc = SYS_BUS_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(sbdc);
+ SCLPEventFacilityClass *k = EVENT_FACILITY_CLASS(dc);
+
+ dc->reset = reset_event_facility;
+ dc->vmsd = &vmstate_event_facility;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ k->command_handler = command_handler;
+ k->event_pending = event_pending;
+}
+
+static const TypeInfo sclp_event_facility_info = {
+ .name = TYPE_SCLP_EVENT_FACILITY,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_init = init_event_facility,
+ .instance_size = sizeof(SCLPEventFacility),
+ .class_init = init_event_facility_class,
+ .class_size = sizeof(SCLPEventFacilityClass),
+};
+
+static void event_realize(DeviceState *qdev, Error **errp)
+{
+ SCLPEvent *event = SCLP_EVENT(qdev);
+ SCLPEventClass *child = SCLP_EVENT_GET_CLASS(event);
+
+ if (child->init) {
+ int rc = child->init(event);
+ if (rc < 0) {
+ error_setg(errp, "SCLP event initialization failed.");
+ return;
+ }
+ }
+}
+
+static void event_unrealize(DeviceState *qdev, Error **errp)
+{
+ SCLPEvent *event = SCLP_EVENT(qdev);
+ SCLPEventClass *child = SCLP_EVENT_GET_CLASS(event);
+ if (child->exit) {
+ int rc = child->exit(event);
+ if (rc < 0) {
+ error_setg(errp, "SCLP event exit failed.");
+ return;
+ }
+ }
+}
+
+static void event_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->bus_type = TYPE_SCLP_EVENTS_BUS;
+ dc->realize = event_realize;
+ dc->unrealize = event_unrealize;
+}
+
+static const TypeInfo sclp_event_type_info = {
+ .name = TYPE_SCLP_EVENT,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(SCLPEvent),
+ .class_init = event_class_init,
+ .class_size = sizeof(SCLPEventClass),
+ .abstract = true,
+};
+
+static void register_types(void)
+{
+ type_register_static(&sclp_events_bus_info);
+ type_register_static(&sclp_event_facility_info);
+ type_register_static(&sclp_event_type_info);
+}
+
+type_init(register_types)
diff --git a/src/hw/s390x/ipl.c b/src/hw/s390x/ipl.c
new file mode 100644
index 0000000..b91fcc6
--- /dev/null
+++ b/src/hw/s390x/ipl.c
@@ -0,0 +1,296 @@
+/*
+ * bootloader support
+ *
+ * Copyright IBM, Corp. 2012
+ *
+ * Authors:
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "sysemu/sysemu.h"
+#include "cpu.h"
+#include "elf.h"
+#include "hw/loader.h"
+#include "hw/s390x/virtio-ccw.h"
+#include "hw/s390x/css.h"
+#include "ipl.h"
+
+#define KERN_IMAGE_START 0x010000UL
+#define KERN_PARM_AREA 0x010480UL
+#define INITRD_START 0x800000UL
+#define INITRD_PARM_START 0x010408UL
+#define INITRD_PARM_SIZE 0x010410UL
+#define PARMFILE_START 0x001000UL
+#define ZIPL_IMAGE_START 0x009000UL
+#define IPL_PSW_MASK (PSW_MASK_32 | PSW_MASK_64)
+
+static const VMStateDescription vmstate_iplb = {
+ .name = "ipl/iplb",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(reserved1, IplParameterBlock, 110),
+ VMSTATE_UINT16(devno, IplParameterBlock),
+ VMSTATE_UINT8_ARRAY(reserved2, IplParameterBlock, 88),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_ipl = {
+ .name = "ipl",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(start_addr, S390IPLState),
+ VMSTATE_UINT64(bios_start_addr, S390IPLState),
+ VMSTATE_STRUCT(iplb, S390IPLState, 0, vmstate_iplb, IplParameterBlock),
+ VMSTATE_BOOL(iplb_valid, S390IPLState),
+ VMSTATE_UINT8(cssid, S390IPLState),
+ VMSTATE_UINT8(ssid, S390IPLState),
+ VMSTATE_UINT16(devno, S390IPLState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static S390IPLState *get_ipl_device(void)
+{
+ return S390_IPL(object_resolve_path_type("", TYPE_S390_IPL, NULL));
+}
+
+static uint64_t bios_translate_addr(void *opaque, uint64_t srcaddr)
+{
+ uint64_t dstaddr = *(uint64_t *) opaque;
+ /*
+ * Assuming that our s390-ccw.img was linked to start at address 0,
+ * we can simply add the destination address to get its final location
+ */
+ return srcaddr + dstaddr;
+}
+
+static void s390_ipl_realize(DeviceState *dev, Error **errp)
+{
+ S390IPLState *ipl = S390_IPL(dev);
+ uint64_t pentry = KERN_IMAGE_START;
+ int kernel_size;
+ Error *l_err = NULL;
+
+ int bios_size;
+ char *bios_filename;
+
+ /*
+ * Always load the bios if it was enforced,
+ * even if an external kernel has been defined.
+ */
+ if (!ipl->kernel || ipl->enforce_bios) {
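+ /* place the firmware 2 MB below the end of RAM (capped at 2 GB), aligned down to 64 KB */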
+ uint64_t fwbase = (MIN(ram_size, 0x80000000U) - 0x200000) & ~0xffffUL;
+
+ if (bios_name == NULL) {
+ bios_name = ipl->firmware;
+ }
+
+ bios_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+ if (bios_filename == NULL) {
+ error_setg(&l_err, "could not find stage1 bootloader");
+ goto error;
+ }
+
+ bios_size = load_elf(bios_filename, bios_translate_addr, &fwbase,
+ &ipl->bios_start_addr, NULL, NULL, 1,
+ EM_S390, 0);
+ if (bios_size > 0) {
+ /* Adjust ELF start address to final location */
+ ipl->bios_start_addr += fwbase;
+ } else {
+ /* Try to load non-ELF file (e.g. s390-zipl.rom) */
+ bios_size = load_image_targphys(bios_filename, ZIPL_IMAGE_START,
+ 4096);
+ ipl->bios_start_addr = ZIPL_IMAGE_START;
+ }
+ g_free(bios_filename);
+
+ if (bios_size == -1) {
+ error_setg(&l_err, "could not load bootloader '%s'", bios_name);
+ goto error;
+ }
+
+ /* default boot target is the bios */
+ ipl->start_addr = ipl->bios_start_addr;
+ }
+
+ if (ipl->kernel) {
+ kernel_size = load_elf(ipl->kernel, NULL, NULL, &pentry, NULL,
+ NULL, 1, EM_S390, 0);
+ if (kernel_size < 0) {
+ kernel_size = load_image_targphys(ipl->kernel, 0, ram_size);
+ }
+ if (kernel_size < 0) {
+ error_setg(&l_err, "could not load kernel '%s'", ipl->kernel);
+ goto error;
+ }
+ /*
+ * Is it a Linux kernel (starting at 0x10000)? If yes, we fill in the
+ * kernel parameters here as well. Note: For old kernels (up to 3.2)
+ * we cannot rely on the ELF entry point - it was 0x800 (the SALIPL
+ * loader) and it won't work. For this case we force it to 0x10000, too.
+ */
+ if (pentry == KERN_IMAGE_START || pentry == 0x800) {
+ ipl->start_addr = KERN_IMAGE_START;
+ /* Overwrite parameters in the kernel image, which are "rom" */
+ strcpy(rom_ptr(KERN_PARM_AREA), ipl->cmdline);
+ } else {
+ ipl->start_addr = pentry;
+ }
+
+ if (ipl->initrd) {
+ ram_addr_t initrd_offset;
+ int initrd_size;
+
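+ /*
+ * Move the initrd up in 1 MB steps until it starts at least
+ * 1 MB above the end of the kernel image.
+ */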
+ initrd_offset = INITRD_START;
+ while (kernel_size + 0x100000 > initrd_offset) {
+ initrd_offset += 0x100000;
+ }
+ initrd_size = load_image_targphys(ipl->initrd, initrd_offset,
+ ram_size - initrd_offset);
+ if (initrd_size == -1) {
+ error_setg(&l_err, "could not load initrd '%s'", ipl->initrd);
+ goto error;
+ }
+
+ /*
+ * we have to overwrite values in the kernel image,
+ * which are "rom"
+ */
+ stq_p(rom_ptr(INITRD_PARM_START), initrd_offset);
+ stq_p(rom_ptr(INITRD_PARM_SIZE), initrd_size);
+ }
+ }
+ qemu_register_reset(qdev_reset_all_fn, dev);
+error:
+ error_propagate(errp, l_err);
+}
+
+static Property s390_ipl_properties[] = {
+ DEFINE_PROP_STRING("kernel", S390IPLState, kernel),
+ DEFINE_PROP_STRING("initrd", S390IPLState, initrd),
+ DEFINE_PROP_STRING("cmdline", S390IPLState, cmdline),
+ DEFINE_PROP_STRING("firmware", S390IPLState, firmware),
+ DEFINE_PROP_BOOL("enforce_bios", S390IPLState, enforce_bios, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+/*
+ * In addition to updating the iplstate, this function returns:
+ * - 0 if the system was IPLed with an external kernel
+ * - -1 if no valid boot device was found
+ * - ccw id of the boot device otherwise
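+ * (e.g. cssid 0, ssid 0, devno 0x1234 encode as 0x00001234)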
+ */
+static uint64_t s390_update_iplstate(S390IPLState *ipl)
+{
+ DeviceState *dev_st;
+
+ if (ipl->iplb_valid) {
+ ipl->cssid = 0;
+ ipl->ssid = 0;
+ ipl->devno = ipl->iplb.devno;
+ goto out;
+ }
+
+ if (ipl->kernel) {
+ return 0;
+ }
+
+ dev_st = get_boot_device(0);
+ if (dev_st) {
+ VirtioCcwDevice *ccw_dev = (VirtioCcwDevice *) object_dynamic_cast(
+ OBJECT(qdev_get_parent_bus(dev_st)->parent),
+ TYPE_VIRTIO_CCW_DEVICE);
+ if (ccw_dev) {
+ ipl->cssid = ccw_dev->sch->cssid;
+ ipl->ssid = ccw_dev->sch->ssid;
+ ipl->devno = ccw_dev->sch->devno;
+ goto out;
+ }
+ }
+
+ return -1;
+out:
+ return (uint32_t) (ipl->cssid << 24 | ipl->ssid << 16 | ipl->devno);
+}
+
+void s390_ipl_update_diag308(IplParameterBlock *iplb)
+{
+ S390IPLState *ipl = get_ipl_device();
+
+ ipl->iplb = *iplb;
+ ipl->iplb_valid = true;
+}
+
+IplParameterBlock *s390_ipl_get_iplb(void)
+{
+ S390IPLState *ipl = get_ipl_device();
+
+ if (!ipl->iplb_valid) {
+ return NULL;
+ }
+ return &ipl->iplb;
+}
+
+void s390_reipl_request(void)
+{
+ S390IPLState *ipl = get_ipl_device();
+
+ ipl->reipl_requested = true;
+ qemu_system_reset_request();
+}
+
+void s390_ipl_prepare_cpu(S390CPU *cpu)
+{
+ S390IPLState *ipl = get_ipl_device();
+
+ cpu->env.psw.addr = ipl->start_addr;
+ cpu->env.psw.mask = IPL_PSW_MASK;
+
+ if (!ipl->kernel || ipl->iplb_valid) {
+ cpu->env.psw.addr = ipl->bios_start_addr;
+ cpu->env.regs[7] = s390_update_iplstate(ipl);
+ }
+}
+
+static void s390_ipl_reset(DeviceState *dev)
+{
+ S390IPLState *ipl = S390_IPL(dev);
+
+ if (!ipl->reipl_requested) {
+ ipl->iplb_valid = false;
+ }
+ ipl->reipl_requested = false;
+}
+
+static void s390_ipl_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = s390_ipl_realize;
+ dc->props = s390_ipl_properties;
+ dc->reset = s390_ipl_reset;
+ dc->vmsd = &vmstate_ipl;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo s390_ipl_info = {
+ .class_init = s390_ipl_class_init,
+ .parent = TYPE_DEVICE,
+ .name = TYPE_S390_IPL,
+ .instance_size = sizeof(S390IPLState),
+};
+
+static void s390_ipl_register_types(void)
+{
+ type_register_static(&s390_ipl_info);
+}
+
+type_init(s390_ipl_register_types)
diff --git a/src/hw/s390x/ipl.h b/src/hw/s390x/ipl.h
new file mode 100644
index 0000000..6b48ed7
--- /dev/null
+++ b/src/hw/s390x/ipl.h
@@ -0,0 +1,53 @@
+/*
+ * s390 IPL device
+ *
+ * Copyright 2015 IBM Corp.
+ * Author(s): Zhang Fan <bjfanzh@cn.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_IPL_H
+#define HW_S390_IPL_H
+
+#include "hw/qdev.h"
+#include "cpu.h"
+
+typedef struct IplParameterBlock {
+ uint8_t reserved1[110];
+ uint16_t devno;
+ uint8_t reserved2[88];
+} IplParameterBlock;
+
+void s390_ipl_update_diag308(IplParameterBlock *iplb);
+void s390_ipl_prepare_cpu(S390CPU *cpu);
+IplParameterBlock *s390_ipl_get_iplb(void);
+void s390_reipl_request(void);
+
+#define TYPE_S390_IPL "s390-ipl"
+#define S390_IPL(obj) OBJECT_CHECK(S390IPLState, (obj), TYPE_S390_IPL)
+
+struct S390IPLState {
+ /*< private >*/
+ DeviceState parent_obj;
+ uint64_t start_addr;
+ uint64_t bios_start_addr;
+ bool enforce_bios;
+ IplParameterBlock iplb;
+ bool iplb_valid;
+ bool reipl_requested;
+
+ /*< public >*/
+ char *kernel;
+ char *initrd;
+ char *cmdline;
+ char *firmware;
+ uint8_t cssid;
+ uint8_t ssid;
+ uint16_t devno;
+};
+typedef struct S390IPLState S390IPLState;
+
+#endif
diff --git a/src/hw/s390x/s390-pci-bus.c b/src/hw/s390x/s390-pci-bus.c
new file mode 100644
index 0000000..98c726c
--- /dev/null
+++ b/src/hw/s390x/s390-pci-bus.c
@@ -0,0 +1,618 @@
+/*
+ * s390 PCI BUS
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
+ * Hong Bo Li <lihbbj@cn.ibm.com>
+ * Yi Min Zhao <zyimin@cn.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "s390-pci-bus.h"
+#include <hw/pci/pci_bus.h>
+#include <hw/pci/msi.h>
+#include <qemu/error-report.h>
+
+/* #define DEBUG_S390PCI_BUS */
+#ifdef DEBUG_S390PCI_BUS
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, "S390pci-bus: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+int chsc_sei_nt2_get_event(void *res)
+{
+ ChscSeiNt2Res *nt2_res = (ChscSeiNt2Res *)res;
+ PciCcdfAvail *accdf;
+ PciCcdfErr *eccdf;
+ int rc = 1;
+ SeiContainer *sei_cont;
+ S390pciState *s = S390_PCI_HOST_BRIDGE(
+ object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
+
+ if (!s) {
+ return rc;
+ }
+
+ sei_cont = QTAILQ_FIRST(&s->pending_sei);
+ if (sei_cont) {
+ QTAILQ_REMOVE(&s->pending_sei, sei_cont, link);
+ nt2_res->nt = 2;
+ nt2_res->cc = sei_cont->cc;
+ nt2_res->length = cpu_to_be16(sizeof(ChscSeiNt2Res));
+ switch (sei_cont->cc) {
+ case 1: /* error event */
+ eccdf = (PciCcdfErr *)nt2_res->ccdf;
+ eccdf->fid = cpu_to_be32(sei_cont->fid);
+ eccdf->fh = cpu_to_be32(sei_cont->fh);
+ eccdf->e = cpu_to_be32(sei_cont->e);
+ eccdf->faddr = cpu_to_be64(sei_cont->faddr);
+ eccdf->pec = cpu_to_be16(sei_cont->pec);
+ break;
+ case 2: /* availability event */
+ accdf = (PciCcdfAvail *)nt2_res->ccdf;
+ accdf->fid = cpu_to_be32(sei_cont->fid);
+ accdf->fh = cpu_to_be32(sei_cont->fh);
+ accdf->pec = cpu_to_be16(sei_cont->pec);
+ break;
+ default:
+ abort();
+ }
+ g_free(sei_cont);
+ rc = 0;
+ }
+
+ return rc;
+}
+
+int chsc_sei_nt2_have_event(void)
+{
+ S390pciState *s = S390_PCI_HOST_BRIDGE(
+ object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
+
+ if (!s) {
+ return 0;
+ }
+
+ return !QTAILQ_EMPTY(&s->pending_sei);
+}
+
+S390PCIBusDevice *s390_pci_find_dev_by_fid(uint32_t fid)
+{
+ S390PCIBusDevice *pbdev;
+ int i;
+ S390pciState *s = S390_PCI_HOST_BRIDGE(
+ object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
+
+ if (!s) {
+ return NULL;
+ }
+
+ for (i = 0; i < PCI_SLOT_MAX; i++) {
+ pbdev = &s->pbdev[i];
+ if ((pbdev->fh != 0) && (pbdev->fid == fid)) {
+ return pbdev;
+ }
+ }
+
+ return NULL;
+}
+
+void s390_pci_sclp_configure(int configure, SCCB *sccb)
+{
+ PciCfgSccb *psccb = (PciCfgSccb *)sccb;
+ S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(be32_to_cpu(psccb->aid));
+ uint16_t rc;
+
+ if (pbdev) {
+ if ((configure == 1 && pbdev->configured == true) ||
+ (configure == 0 && pbdev->configured == false)) {
+ rc = SCLP_RC_NO_ACTION_REQUIRED;
+ } else {
+ pbdev->configured = !pbdev->configured;
+ rc = SCLP_RC_NORMAL_COMPLETION;
+ }
+ } else {
+ DPRINTF("sclp config %d no dev found\n", configure);
+ rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
+ }
+
+ psccb->header.response_code = cpu_to_be16(rc);
+}
+
+static uint32_t s390_pci_get_pfid(PCIDevice *pdev)
+{
+ return PCI_SLOT(pdev->devfn);
+}
+
+static uint32_t s390_pci_get_pfh(PCIDevice *pdev)
+{
+ return PCI_SLOT(pdev->devfn) | FH_VIRT;
+}
+
+S390PCIBusDevice *s390_pci_find_dev_by_idx(uint32_t idx)
+{
+ S390PCIBusDevice *pbdev;
+ int i;
+ int j = 0;
+ S390pciState *s = S390_PCI_HOST_BRIDGE(
+ object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
+
+ if (!s) {
+ return NULL;
+ }
+
+ for (i = 0; i < PCI_SLOT_MAX; i++) {
+ pbdev = &s->pbdev[i];
+
+ if (pbdev->fh == 0) {
+ continue;
+ }
+
+ if (j == idx) {
+ return pbdev;
+ }
+ j++;
+ }
+
+ return NULL;
+}
+
+S390PCIBusDevice *s390_pci_find_dev_by_fh(uint32_t fh)
+{
+ S390PCIBusDevice *pbdev;
+ int i;
+ S390pciState *s = S390_PCI_HOST_BRIDGE(
+ object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
+
+ if (!s || !fh) {
+ return NULL;
+ }
+
+ for (i = 0; i < PCI_SLOT_MAX; i++) {
+ pbdev = &s->pbdev[i];
+ if (pbdev->fh == fh) {
+ return pbdev;
+ }
+ }
+
+ return NULL;
+}
+
+static void s390_pci_generate_event(uint8_t cc, uint16_t pec, uint32_t fh,
+ uint32_t fid, uint64_t faddr, uint32_t e)
+{
+ SeiContainer *sei_cont;
+ S390pciState *s = S390_PCI_HOST_BRIDGE(
+ object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
+
+ if (!s) {
+ return;
+ }
+
+ sei_cont = g_malloc0(sizeof(SeiContainer));
+ sei_cont->fh = fh;
+ sei_cont->fid = fid;
+ sei_cont->cc = cc;
+ sei_cont->pec = pec;
+ sei_cont->faddr = faddr;
+ sei_cont->e = e;
+
+ QTAILQ_INSERT_TAIL(&s->pending_sei, sei_cont, link);
+ css_generate_css_crws(0);
+}
+
+static void s390_pci_generate_plug_event(uint16_t pec, uint32_t fh,
+ uint32_t fid)
+{
+ s390_pci_generate_event(2, pec, fh, fid, 0, 0);
+}
+
+static void s390_pci_generate_error_event(uint16_t pec, uint32_t fh,
+ uint32_t fid, uint64_t faddr,
+ uint32_t e)
+{
+ s390_pci_generate_event(1, pec, fh, fid, faddr, e);
+}
+
+static void s390_pci_set_irq(void *opaque, int irq, int level)
+{
+ /* nothing to do */
+}
+
+static int s390_pci_map_irq(PCIDevice *pci_dev, int irq_num)
+{
+ /* nothing to do */
+ return 0;
+}
+
+static uint64_t s390_pci_get_table_origin(uint64_t iota)
+{
+ return iota & ~ZPCI_IOTA_RTTO_FLAG;
+}
+
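+/*
+ * Split a guest DMA address into region-table (rtx), segment-table (sx)
+ * and page-table (px) indices using the ZPCI_*_SHIFT layout from
+ * s390-pci-bus.h, e.g. 0x12345000 yields rtx 0, sx 0x123 and px 0x45.
+ */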
+static unsigned int calc_rtx(dma_addr_t ptr)
+{
+ return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static unsigned int calc_sx(dma_addr_t ptr)
+{
+ return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static unsigned int calc_px(dma_addr_t ptr)
+{
+ return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
+}
+
+static uint64_t get_rt_sto(uint64_t entry)
+{
+ return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
+ ? (entry & ZPCI_RTE_ADDR_MASK)
+ : 0;
+}
+
+static uint64_t get_st_pto(uint64_t entry)
+{
+ return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
+ ? (entry & ZPCI_STE_ADDR_MASK)
+ : 0;
+}
+
+static uint64_t s390_guest_io_table_walk(uint64_t guest_iota,
+ uint64_t guest_dma_address)
+{
+ uint64_t sto_a, pto_a, px_a;
+ uint64_t sto, pto, pte;
+ uint32_t rtx, sx, px;
+
+ rtx = calc_rtx(guest_dma_address);
+ sx = calc_sx(guest_dma_address);
+ px = calc_px(guest_dma_address);
+
+ sto_a = guest_iota + rtx * sizeof(uint64_t);
+ sto = address_space_ldq(&address_space_memory, sto_a,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ sto = get_rt_sto(sto);
+ if (!sto) {
+ pte = 0;
+ goto out;
+ }
+
+ pto_a = sto + sx * sizeof(uint64_t);
+ pto = address_space_ldq(&address_space_memory, pto_a,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ pto = get_st_pto(pto);
+ if (!pto) {
+ pte = 0;
+ goto out;
+ }
+
+ px_a = pto + px * sizeof(uint64_t);
+ pte = address_space_ldq(&address_space_memory, px_a,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+
+out:
+ return pte;
+}
+
+static IOMMUTLBEntry s390_translate_iommu(MemoryRegion *iommu, hwaddr addr,
+ bool is_write)
+{
+ uint64_t pte;
+ uint32_t flags;
+ S390PCIBusDevice *pbdev = container_of(iommu, S390PCIBusDevice, iommu_mr);
+ S390pciState *s;
+ IOMMUTLBEntry ret = {
+ .target_as = &address_space_memory,
+ .iova = 0,
+ .translated_addr = 0,
+ .addr_mask = ~(hwaddr)0,
+ .perm = IOMMU_NONE,
+ };
+
+ if (!pbdev->configured || !pbdev->pdev) {
+ return ret;
+ }
+
+ DPRINTF("iommu trans addr 0x%" PRIx64 "\n", addr);
+
+ s = S390_PCI_HOST_BRIDGE(pci_device_root_bus(pbdev->pdev)->qbus.parent);
+ /*
+ * s390 does not have an APIC mapped to main storage, so we use a
+ * separate AddressSpace only for MSI-X notifications.
+ */
+ if (addr == ZPCI_MSI_ADDR) {
+ ret.target_as = &s->msix_notify_as;
+ ret.iova = addr;
+ ret.translated_addr = addr;
+ ret.addr_mask = 0xfff;
+ ret.perm = IOMMU_RW;
+ return ret;
+ }
+
+ if (!pbdev->g_iota) {
+ pbdev->error_state = true;
+ pbdev->lgstg_blocked = true;
+ s390_pci_generate_error_event(ERR_EVENT_INVALAS, pbdev->fh, pbdev->fid,
+ addr, 0);
+ return ret;
+ }
+
+ if (addr < pbdev->pba || addr > pbdev->pal) {
+ pbdev->error_state = true;
+ pbdev->lgstg_blocked = true;
+ s390_pci_generate_error_event(ERR_EVENT_OORANGE, pbdev->fh, pbdev->fid,
+ addr, 0);
+ return ret;
+ }
+
+ pte = s390_guest_io_table_walk(s390_pci_get_table_origin(pbdev->g_iota),
+ addr);
+
+ if (!pte) {
+ pbdev->error_state = true;
+ pbdev->lgstg_blocked = true;
+ s390_pci_generate_error_event(ERR_EVENT_SERR, pbdev->fh, pbdev->fid,
+ addr, ERR_EVENT_Q_BIT);
+ return ret;
+ }
+
+ flags = pte & ZPCI_PTE_FLAG_MASK;
+ ret.iova = addr;
+ ret.translated_addr = pte & ZPCI_PTE_ADDR_MASK;
+ ret.addr_mask = 0xfff;
+
+ if (flags & ZPCI_PTE_INVALID) {
+ ret.perm = IOMMU_NONE;
+ } else {
+ ret.perm = IOMMU_RW;
+ }
+
+ return ret;
+}
+
+static const MemoryRegionIOMMUOps s390_iommu_ops = {
+ .translate = s390_translate_iommu,
+};
+
+static AddressSpace *s390_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
+{
+ S390pciState *s = opaque;
+
+ return &s->pbdev[PCI_SLOT(devfn)].as;
+}
+
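+/*
+ * Atomically OR @to_be_set into the guest indicator byte at @ind_loc
+ * via a compare-and-swap loop; returns the previous byte value, or
+ * -1 if the location could not be mapped.
+ */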
+static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set)
+{
+ uint8_t ind_old, ind_new;
+ hwaddr len = 1;
+ uint8_t *ind_addr;
+
+ ind_addr = cpu_physical_memory_map(ind_loc, &len, 1);
+ if (!ind_addr) {
+ s390_pci_generate_error_event(ERR_EVENT_AIRERR, 0, 0, 0, 0);
+ return -1;
+ }
+ do {
+ ind_old = *ind_addr;
+ ind_new = ind_old | to_be_set;
+ } while (atomic_cmpxchg(ind_addr, ind_old, ind_new) != ind_old);
+ cpu_physical_memory_unmap(ind_addr, len, 1, len);
+
+ return ind_old;
+}
+
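+/*
+ * A write to ZPCI_MSI_ADDR carries the function id in the upper bits
+ * of the data and the vector number in the low ZPCI_MSI_VEC_BITS bits.
+ */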
+static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data,
+ unsigned int size)
+{
+ S390PCIBusDevice *pbdev;
+ uint32_t io_int_word;
+ uint32_t fid = data >> ZPCI_MSI_VEC_BITS;
+ uint32_t vec = data & ZPCI_MSI_VEC_MASK;
+ uint64_t ind_bit;
+ uint32_t sum_bit;
+ uint32_t e = 0;
+
+ DPRINTF("write_msix data 0x%" PRIx64 " fid %d vec 0x%x\n", data, fid, vec);
+
+ pbdev = s390_pci_find_dev_by_fid(fid);
+ if (!pbdev) {
+ e |= (vec << ERR_EVENT_MVN_OFFSET);
+ s390_pci_generate_error_event(ERR_EVENT_NOMSI, 0, fid, addr, e);
+ return;
+ }
+
+ ind_bit = pbdev->routes.adapter.ind_offset;
+ sum_bit = pbdev->routes.adapter.summary_offset;
+
+ set_ind_atomic(pbdev->routes.adapter.ind_addr + (ind_bit + vec) / 8,
+ 0x80 >> ((ind_bit + vec) % 8));
+ if (!set_ind_atomic(pbdev->routes.adapter.summary_addr + sum_bit / 8,
+ 0x80 >> (sum_bit % 8))) {
+ io_int_word = (pbdev->isc << 27) | IO_INT_WORD_AI;
+ s390_io_interrupt(0, 0, 0, io_int_word);
+ }
+}
+
+static uint64_t s390_msi_ctrl_read(void *opaque, hwaddr addr, unsigned size)
+{
+ return 0xffffffff;
+}
+
+static const MemoryRegionOps s390_msi_ctrl_ops = {
+ .write = s390_msi_ctrl_write,
+ .read = s390_msi_ctrl_read,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+void s390_pcihost_iommu_configure(S390PCIBusDevice *pbdev, bool enable)
+{
+ pbdev->configured = false;
+
+ if (enable) {
+ uint64_t size = pbdev->pal - pbdev->pba + 1;
+ memory_region_init_iommu(&pbdev->iommu_mr, OBJECT(&pbdev->mr),
+ &s390_iommu_ops, "iommu-s390", size);
+ memory_region_add_subregion(&pbdev->mr, pbdev->pba, &pbdev->iommu_mr);
+ } else {
+ memory_region_del_subregion(&pbdev->mr, &pbdev->iommu_mr);
+ }
+
+ pbdev->configured = true;
+}
+
+static void s390_pcihost_init_as(S390pciState *s)
+{
+ int i;
+ S390PCIBusDevice *pbdev;
+
+ for (i = 0; i < PCI_SLOT_MAX; i++) {
+ pbdev = &s->pbdev[i];
+ memory_region_init(&pbdev->mr, OBJECT(s),
+ "iommu-root-s390", UINT64_MAX);
+ address_space_init(&pbdev->as, &pbdev->mr, "iommu-pci");
+ }
+
+ memory_region_init_io(&s->msix_notify_mr, OBJECT(s),
+ &s390_msi_ctrl_ops, s, "msix-s390", UINT64_MAX);
+ address_space_init(&s->msix_notify_as, &s->msix_notify_mr, "msix-pci");
+}
+
+static int s390_pcihost_init(SysBusDevice *dev)
+{
+ PCIBus *b;
+ BusState *bus;
+ PCIHostState *phb = PCI_HOST_BRIDGE(dev);
+ S390pciState *s = S390_PCI_HOST_BRIDGE(dev);
+
+ DPRINTF("host_init\n");
+
+ b = pci_register_bus(DEVICE(dev), NULL,
+ s390_pci_set_irq, s390_pci_map_irq, NULL,
+ get_system_memory(), get_system_io(), 0, 64,
+ TYPE_PCI_BUS);
+ s390_pcihost_init_as(s);
+ pci_setup_iommu(b, s390_pci_dma_iommu, s);
+
+ bus = BUS(b);
+ qbus_set_hotplug_handler(bus, DEVICE(dev), NULL);
+ phb->bus = b;
+ QTAILQ_INIT(&s->pending_sei);
+ return 0;
+}
+
+static int s390_pcihost_setup_msix(S390PCIBusDevice *pbdev)
+{
+ uint8_t pos;
+ uint16_t ctrl;
+ uint32_t table, pba;
+
+ pos = pci_find_capability(pbdev->pdev, PCI_CAP_ID_MSIX);
+ if (!pos) {
+ pbdev->msix.available = false;
+ return 0;
+ }
+
+ ctrl = pci_host_config_read_common(pbdev->pdev, pos + PCI_CAP_FLAGS,
+ pci_config_size(pbdev->pdev), sizeof(ctrl));
+ table = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_TABLE,
+ pci_config_size(pbdev->pdev), sizeof(table));
+ pba = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_PBA,
+ pci_config_size(pbdev->pdev), sizeof(pba));
+
+ pbdev->msix.table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
+ pbdev->msix.table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
+ pbdev->msix.pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
+ pbdev->msix.pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
+ pbdev->msix.entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
+ pbdev->msix.available = true;
+ return 0;
+}
+
+static void s390_pcihost_hot_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ PCIDevice *pci_dev = PCI_DEVICE(dev);
+ S390PCIBusDevice *pbdev;
+ S390pciState *s = S390_PCI_HOST_BRIDGE(pci_device_root_bus(pci_dev)
+ ->qbus.parent);
+
+ pbdev = &s->pbdev[PCI_SLOT(pci_dev->devfn)];
+
+ pbdev->fid = s390_pci_get_pfid(pci_dev);
+ pbdev->pdev = pci_dev;
+ pbdev->configured = true;
+ pbdev->fh = s390_pci_get_pfh(pci_dev);
+
+ s390_pcihost_setup_msix(pbdev);
+
+ if (dev->hotplugged) {
+ s390_pci_generate_plug_event(HP_EVENT_RESERVED_TO_STANDBY,
+ pbdev->fh, pbdev->fid);
+ s390_pci_generate_plug_event(HP_EVENT_TO_CONFIGURED,
+ pbdev->fh, pbdev->fid);
+ }
+}
+
+static void s390_pcihost_hot_unplug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ PCIDevice *pci_dev = PCI_DEVICE(dev);
+ S390pciState *s = S390_PCI_HOST_BRIDGE(pci_device_root_bus(pci_dev)
+ ->qbus.parent);
+ S390PCIBusDevice *pbdev = &s->pbdev[PCI_SLOT(pci_dev->devfn)];
+
+ if (pbdev->configured) {
+ pbdev->configured = false;
+ s390_pci_generate_plug_event(HP_EVENT_CONFIGURED_TO_STBRES,
+ pbdev->fh, pbdev->fid);
+ }
+
+ s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED,
+ pbdev->fh, pbdev->fid);
+ pbdev->fh = 0;
+ pbdev->fid = 0;
+ pbdev->pdev = NULL;
+ object_unparent(OBJECT(pci_dev));
+}
+
+static void s390_pcihost_class_init(ObjectClass *klass, void *data)
+{
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
+
+ dc->cannot_instantiate_with_device_add_yet = true;
+ k->init = s390_pcihost_init;
+ hc->plug = s390_pcihost_hot_plug;
+ hc->unplug = s390_pcihost_hot_unplug;
+ msi_supported = true;
+}
+
+static const TypeInfo s390_pcihost_info = {
+ .name = TYPE_S390_PCI_HOST_BRIDGE,
+ .parent = TYPE_PCI_HOST_BRIDGE,
+ .instance_size = sizeof(S390pciState),
+ .class_init = s390_pcihost_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_HOTPLUG_HANDLER },
+ { }
+ }
+};
+
+static void s390_pci_register_types(void)
+{
+ type_register_static(&s390_pcihost_info);
+}
+
+type_init(s390_pci_register_types)
diff --git a/src/hw/s390x/s390-pci-bus.h b/src/hw/s390x/s390-pci-bus.h
new file mode 100644
index 0000000..80345da
--- /dev/null
+++ b/src/hw/s390x/s390-pci-bus.h
@@ -0,0 +1,253 @@
+/*
+ * s390 PCI BUS definitions
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
+ * Hong Bo Li <lihbbj@cn.ibm.com>
+ * Yi Min Zhao <zyimin@cn.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_PCI_BUS_H
+#define HW_S390_PCI_BUS_H
+
+#include <hw/pci/pci.h>
+#include <hw/pci/pci_host.h>
+#include "hw/s390x/sclp.h"
+#include "hw/s390x/s390_flic.h"
+#include "hw/s390x/css.h"
+
+#define TYPE_S390_PCI_HOST_BRIDGE "s390-pcihost"
+#define FH_VIRT 0x00ff0000
+#define ENABLE_BIT_OFFSET 31
+#define S390_PCIPT_ADAPTER 2
+
+#define S390_PCI_HOST_BRIDGE(obj) \
+ OBJECT_CHECK(S390pciState, (obj), TYPE_S390_PCI_HOST_BRIDGE)
+
+#define HP_EVENT_TO_CONFIGURED 0x0301
+#define HP_EVENT_RESERVED_TO_STANDBY 0x0302
+#define HP_EVENT_CONFIGURED_TO_STBRES 0x0304
+#define HP_EVENT_STANDBY_TO_RESERVED 0x0308
+
+#define ERR_EVENT_INVALAS 0x1
+#define ERR_EVENT_OORANGE 0x2
+#define ERR_EVENT_INVALTF 0x3
+#define ERR_EVENT_TPROTE 0x4
+#define ERR_EVENT_APROTE 0x5
+#define ERR_EVENT_KEYE 0x6
+#define ERR_EVENT_INVALTE 0x7
+#define ERR_EVENT_INVALTL 0x8
+#define ERR_EVENT_TT 0x9
+#define ERR_EVENT_INVALMS 0xa
+#define ERR_EVENT_SERR 0xb
+#define ERR_EVENT_NOMSI 0x10
+#define ERR_EVENT_INVALBV 0x11
+#define ERR_EVENT_AIBV 0x12
+#define ERR_EVENT_AIRERR 0x13
+#define ERR_EVENT_FMBA 0x2a
+#define ERR_EVENT_FMBUP 0x2b
+#define ERR_EVENT_FMBPRO 0x2c
+#define ERR_EVENT_CCONF 0x30
+#define ERR_EVENT_SERVAC 0x3a
+#define ERR_EVENT_PERMERR 0x3b
+
+#define ERR_EVENT_Q_BIT 0x2
+#define ERR_EVENT_MVN_OFFSET 16
+
+#define ZPCI_MSI_VEC_BITS 11
+#define ZPCI_MSI_VEC_MASK 0x7ff
+
+#define ZPCI_MSI_ADDR 0xfe00000000000000ULL
+#define ZPCI_SDMA_ADDR 0x100000000ULL
+#define ZPCI_EDMA_ADDR 0x1ffffffffffffffULL
+
+#define PAGE_SHIFT 12
+#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_DEFAULT_ACC 0
+#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
+
+/* I/O Translation Anchor (IOTA) */
+enum ZpciIoatDtype {
+ ZPCI_IOTA_STO = 0,
+ ZPCI_IOTA_RTTO = 1,
+ ZPCI_IOTA_RSTO = 2,
+ ZPCI_IOTA_RFTO = 3,
+ ZPCI_IOTA_PFAA = 4,
+ ZPCI_IOTA_IOPFAA = 5,
+ ZPCI_IOTA_IOPTO = 7
+};
+
+#define ZPCI_IOTA_IOT_ENABLED 0x800ULL
+#define ZPCI_IOTA_DT_ST (ZPCI_IOTA_STO << 2)
+#define ZPCI_IOTA_DT_RT (ZPCI_IOTA_RTTO << 2)
+#define ZPCI_IOTA_DT_RS (ZPCI_IOTA_RSTO << 2)
+#define ZPCI_IOTA_DT_RF (ZPCI_IOTA_RFTO << 2)
+#define ZPCI_IOTA_DT_PF (ZPCI_IOTA_PFAA << 2)
+#define ZPCI_IOTA_FS_4K 0
+#define ZPCI_IOTA_FS_1M 1
+#define ZPCI_IOTA_FS_2G 2
+#define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)
+
+#define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
+#define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
+#define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)
+#define ZPCI_IOTA_RFTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RF)
+#define ZPCI_IOTA_RFAA_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY |\
+ ZPCI_IOTA_DT_PF | ZPCI_IOTA_FS_2G)
+
+/* I/O Region and segment tables */
+#define ZPCI_INDEX_MASK 0x7ffULL
+
+#define ZPCI_TABLE_TYPE_MASK 0xc
+#define ZPCI_TABLE_TYPE_RFX 0xc
+#define ZPCI_TABLE_TYPE_RSX 0x8
+#define ZPCI_TABLE_TYPE_RTX 0x4
+#define ZPCI_TABLE_TYPE_SX 0x0
+
+#define ZPCI_TABLE_LEN_RFX 0x3
+#define ZPCI_TABLE_LEN_RSX 0x3
+#define ZPCI_TABLE_LEN_RTX 0x3
+
+#define ZPCI_TABLE_OFFSET_MASK 0xc0
+#define ZPCI_TABLE_SIZE 0x4000
+#define ZPCI_TABLE_ALIGN ZPCI_TABLE_SIZE
+#define ZPCI_TABLE_ENTRY_SIZE (sizeof(unsigned long))
+#define ZPCI_TABLE_ENTRIES (ZPCI_TABLE_SIZE / ZPCI_TABLE_ENTRY_SIZE)
+
+#define ZPCI_TABLE_BITS 11
+#define ZPCI_PT_BITS 8
+#define ZPCI_ST_SHIFT (ZPCI_PT_BITS + PAGE_SHIFT)
+#define ZPCI_RT_SHIFT (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS)
+
+#define ZPCI_RTE_FLAG_MASK 0x3fffULL
+#define ZPCI_RTE_ADDR_MASK (~ZPCI_RTE_FLAG_MASK)
+#define ZPCI_STE_FLAG_MASK 0x7ffULL
+#define ZPCI_STE_ADDR_MASK (~ZPCI_STE_FLAG_MASK)
+
+/* I/O Page tables */
+#define ZPCI_PTE_VALID_MASK 0x400
+#define ZPCI_PTE_INVALID 0x400
+#define ZPCI_PTE_VALID 0x000
+#define ZPCI_PT_SIZE 0x800
+#define ZPCI_PT_ALIGN ZPCI_PT_SIZE
+#define ZPCI_PT_ENTRIES (ZPCI_PT_SIZE / ZPCI_TABLE_ENTRY_SIZE)
+#define ZPCI_PT_MASK (ZPCI_PT_ENTRIES - 1)
+
+#define ZPCI_PTE_FLAG_MASK 0xfffULL
+#define ZPCI_PTE_ADDR_MASK (~ZPCI_PTE_FLAG_MASK)
+
+/* Shared bits */
+#define ZPCI_TABLE_VALID 0x00
+#define ZPCI_TABLE_INVALID 0x20
+#define ZPCI_TABLE_PROTECTED 0x200
+#define ZPCI_TABLE_UNPROTECTED 0x000
+
+#define ZPCI_TABLE_VALID_MASK 0x20
+#define ZPCI_TABLE_PROT_MASK 0x200
+
+typedef struct SeiContainer {
+ QTAILQ_ENTRY(SeiContainer) link;
+ uint32_t fid;
+ uint32_t fh;
+ uint8_t cc;
+ uint16_t pec;
+ uint64_t faddr;
+ uint32_t e;
+} SeiContainer;
+
+typedef struct PciCcdfErr {
+ uint32_t reserved1;
+ uint32_t fh;
+ uint32_t fid;
+ uint32_t e;
+ uint64_t faddr;
+ uint32_t reserved3;
+ uint16_t reserved4;
+ uint16_t pec;
+} QEMU_PACKED PciCcdfErr;
+
+typedef struct PciCcdfAvail {
+ uint32_t reserved1;
+ uint32_t fh;
+ uint32_t fid;
+ uint32_t reserved2;
+ uint32_t reserved3;
+ uint32_t reserved4;
+ uint32_t reserved5;
+ uint16_t reserved6;
+ uint16_t pec;
+} QEMU_PACKED PciCcdfAvail;
+
+typedef struct ChscSeiNt2Res {
+ uint16_t length;
+ uint16_t code;
+ uint16_t reserved1;
+ uint8_t reserved2;
+ uint8_t nt;
+ uint8_t flags;
+ uint8_t reserved3;
+ uint8_t reserved4;
+ uint8_t cc;
+ uint32_t reserved5[13];
+ uint8_t ccdf[4016];
+} QEMU_PACKED ChscSeiNt2Res;
+
+typedef struct PciCfgSccb {
+ SCCBHeader header;
+ uint8_t atype;
+ uint8_t reserved1;
+ uint16_t reserved2;
+ uint32_t aid;
+} QEMU_PACKED PciCfgSccb;
+
+typedef struct S390MsixInfo {
+ bool available;
+ uint8_t table_bar;
+ uint8_t pba_bar;
+ uint16_t entries;
+ uint32_t table_offset;
+ uint32_t pba_offset;
+} S390MsixInfo;
+
+typedef struct S390PCIBusDevice {
+ PCIDevice *pdev;
+ bool configured;
+ bool error_state;
+ bool lgstg_blocked;
+ uint32_t fh;
+ uint32_t fid;
+ uint64_t g_iota;
+ uint64_t pba;
+ uint64_t pal;
+ uint64_t fmb_addr;
+ uint8_t isc;
+ uint16_t noi;
+ uint8_t sum;
+ S390MsixInfo msix;
+ AdapterRoutes routes;
+ AddressSpace as;
+ MemoryRegion mr;
+ MemoryRegion iommu_mr;
+} S390PCIBusDevice;
+
+typedef struct S390pciState {
+ PCIHostState parent_obj;
+ S390PCIBusDevice pbdev[PCI_SLOT_MAX];
+ AddressSpace msix_notify_as;
+ MemoryRegion msix_notify_mr;
+ QTAILQ_HEAD(, SeiContainer) pending_sei;
+} S390pciState;
+
+int chsc_sei_nt2_get_event(void *res);
+int chsc_sei_nt2_have_event(void);
+void s390_pci_sclp_configure(int configure, SCCB *sccb);
+void s390_pcihost_iommu_configure(S390PCIBusDevice *pbdev, bool enable);
+S390PCIBusDevice *s390_pci_find_dev_by_idx(uint32_t idx);
+S390PCIBusDevice *s390_pci_find_dev_by_fh(uint32_t fh);
+S390PCIBusDevice *s390_pci_find_dev_by_fid(uint32_t fid);
+
+#endif
diff --git a/src/hw/s390x/s390-pci-inst.c b/src/hw/s390x/s390-pci-inst.c
new file mode 100644
index 0000000..8c1dc82
--- /dev/null
+++ b/src/hw/s390x/s390-pci-inst.c
@@ -0,0 +1,844 @@
+/*
+ * s390 PCI instructions
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
+ * Hong Bo Li <lihbbj@cn.ibm.com>
+ * Yi Min Zhao <zyimin@cn.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "s390-pci-inst.h"
+#include "s390-pci-bus.h"
+#include <exec/memory-internal.h>
+#include <qemu/error-report.h>
+
+/* #define DEBUG_S390PCI_INST */
+#ifdef DEBUG_S390PCI_INST
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
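+/* store the 8-bit status code in bits 24-31 of general register r */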
+static void s390_set_status_code(CPUS390XState *env,
+ uint8_t r, uint64_t status_code)
+{
+ env->regs[r] &= ~0xff000000ULL;
+ env->regs[r] |= (status_code & 0xff) << 24;
+}
+
+static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
+{
+ S390PCIBusDevice *pbdev;
+ uint32_t res_code, initial_l2, g_l2, finish;
+ int rc, idx;
+ uint64_t resume_token;
+
+ rc = 0;
+ if (lduw_p(&rrb->request.hdr.len) != 32) {
+ res_code = CLP_RC_LEN;
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
+ res_code = CLP_RC_FMT;
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
+ ldq_p(&rrb->request.reserved1) != 0 ||
+ ldq_p(&rrb->request.reserved2) != 0) {
+ res_code = CLP_RC_RESNOT0;
+ rc = -EINVAL;
+ goto out;
+ }
+
+ resume_token = ldq_p(&rrb->request.resume_token);
+
+ if (resume_token) {
+ pbdev = s390_pci_find_dev_by_idx(resume_token);
+ if (!pbdev) {
+ res_code = CLP_RC_LISTPCI_BADRT;
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (lduw_p(&rrb->response.hdr.len) < 48) {
+ res_code = CLP_RC_8K;
+ rc = -EINVAL;
+ goto out;
+ }
+
+ initial_l2 = lduw_p(&rrb->response.hdr.len);
+ if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
+ != 0) {
+ res_code = CLP_RC_LEN;
+ rc = -EINVAL;
+ *cc = 3;
+ goto out;
+ }
+
+ stl_p(&rrb->response.fmt, 0);
+ stq_p(&rrb->response.reserved1, 0);
+ stq_p(&rrb->response.reserved2, 0);
+ stl_p(&rrb->response.mdd, FH_VIRT);
+ stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
+ rrb->response.entry_size = sizeof(ClpFhListEntry);
+ finish = 0;
+ idx = resume_token;
+ g_l2 = LIST_PCI_HDR_LEN;
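+ /*
+ * Fill in one list entry per enabled function, resuming at the index
+ * from the resume token, until either no more functions exist or the
+ * response length (initial_l2) is exhausted.
+ */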
+ do {
+ pbdev = s390_pci_find_dev_by_idx(idx);
+ if (!pbdev) {
+ finish = 1;
+ break;
+ }
+ stw_p(&rrb->response.fh_list[idx - resume_token].device_id,
+ pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
+ stw_p(&rrb->response.fh_list[idx - resume_token].vendor_id,
+ pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
+ stl_p(&rrb->response.fh_list[idx - resume_token].config, 0x80000000);
+ stl_p(&rrb->response.fh_list[idx - resume_token].fid, pbdev->fid);
+ stl_p(&rrb->response.fh_list[idx - resume_token].fh, pbdev->fh);
+
+ g_l2 += sizeof(ClpFhListEntry);
+ /* Add endian check for DPRINTF? */
+ DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
+ g_l2,
+ lduw_p(&rrb->response.fh_list[idx - resume_token].vendor_id),
+ lduw_p(&rrb->response.fh_list[idx - resume_token].device_id),
+ ldl_p(&rrb->response.fh_list[idx - resume_token].fid),
+ ldl_p(&rrb->response.fh_list[idx - resume_token].fh));
+ idx++;
+ } while (g_l2 < initial_l2);
+
+ if (finish == 1) {
+ resume_token = 0;
+ } else {
+ resume_token = idx;
+ }
+ stq_p(&rrb->response.resume_token, resume_token);
+ stw_p(&rrb->response.hdr.len, g_l2);
+ stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
+out:
+ if (rc) {
+ DPRINTF("list pci failed rc 0x%x\n", rc);
+ stw_p(&rrb->response.hdr.rsp, res_code);
+ }
+ return rc;
+}
+
+int clp_service_call(S390CPU *cpu, uint8_t r2)
+{
+ ClpReqHdr *reqh;
+ ClpRspHdr *resh;
+ S390PCIBusDevice *pbdev;
+ uint32_t req_len;
+ uint32_t res_len;
+ uint8_t buffer[4096 * 2];
+ uint8_t cc = 0;
+ CPUS390XState *env = &cpu->env;
+ int i;
+
+ cpu_synchronize_state(CPU(cpu));
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ program_interrupt(env, PGM_PRIVILEGED, 4);
+ return 0;
+ }
+
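+ /*
+ * Read just the request header first to learn the full length, then
+ * re-read the complete request plus response area in one go.
+ */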
+ if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
+ return 0;
+ }
+ reqh = (ClpReqHdr *)buffer;
+ req_len = lduw_p(&reqh->len);
+ if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+
+ if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
+ req_len + sizeof(*resh))) {
+ return 0;
+ }
+ resh = (ClpRspHdr *)(buffer + req_len);
+ res_len = lduw_p(&resh->len);
+ if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+ if ((req_len + res_len) > 8192) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+
+ if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
+ req_len + res_len)) {
+ return 0;
+ }
+
+ if (req_len != 32) {
+ stw_p(&resh->rsp, CLP_RC_LEN);
+ goto out;
+ }
+
+ switch (lduw_p(&reqh->cmd)) {
+ case CLP_LIST_PCI: {
+ ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
+ list_pci(rrb, &cc);
+ break;
+ }
+ case CLP_SET_PCI_FN: {
+ ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
+ ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;
+
+ pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqsetpci->fh));
+ if (!pbdev) {
+ stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
+ goto out;
+ }
+
+ switch (reqsetpci->oc) {
+ case CLP_SET_ENABLE_PCI_FN:
+ pbdev->fh = pbdev->fh | 1 << ENABLE_BIT_OFFSET;
+ stl_p(&ressetpci->fh, pbdev->fh);
+ stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
+ break;
+ case CLP_SET_DISABLE_PCI_FN:
+ pbdev->fh = pbdev->fh & ~(1 << ENABLE_BIT_OFFSET);
+ pbdev->error_state = false;
+ pbdev->lgstg_blocked = false;
+ stl_p(&ressetpci->fh, pbdev->fh);
+ stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
+ break;
+ default:
+ DPRINTF("unknown set pci command\n");
+ stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
+ break;
+ }
+ break;
+ }
+ case CLP_QUERY_PCI_FN: {
+ ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
+ ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;
+
+ pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqquery->fh));
+ if (!pbdev) {
+ DPRINTF("query pci no pci dev\n");
+ stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
+ goto out;
+ }
+
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ uint32_t data = pci_get_long(pbdev->pdev->config +
+ PCI_BASE_ADDRESS_0 + (i * 4));
+
+ stl_p(&resquery->bar[i], data);
+ resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
+ ctz64(pbdev->pdev->io_regions[i].size) : 0;
+ DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 "barsize 0x%x\n", i,
+ ldl_p(&resquery->bar[i]),
+ pbdev->pdev->io_regions[i].size,
+ resquery->bar_size[i]);
+ }
+
+ stq_p(&resquery->sdma, ZPCI_SDMA_ADDR);
+ stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
+ stw_p(&resquery->pchid, 0);
+ stw_p(&resquery->ug, 1);
+ stl_p(&resquery->uid, pbdev->fid);
+ stw_p(&resquery->hdr.rsp, CLP_RC_OK);
+ break;
+ }
+ case CLP_QUERY_PCI_FNGRP: {
+ ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
+ resgrp->fr = 1;
+ stq_p(&resgrp->dasm, 0);
+ stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
+ stw_p(&resgrp->mui, 0);
+ stw_p(&resgrp->i, 128);
+ resgrp->version = 0;
+
+ stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
+ break;
+ }
+ default:
+ DPRINTF("unknown clp command\n");
+ stw_p(&resh->rsp, CLP_RC_CMD);
+ break;
+ }
+
+out:
+ if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
+ req_len + res_len)) {
+ return 0;
+ }
+ setcc(cpu, cc);
+ return 0;
+}
+
+int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
+{
+ CPUS390XState *env = &cpu->env;
+ S390PCIBusDevice *pbdev;
+ uint64_t offset;
+ uint64_t data;
+ uint8_t len;
+ uint32_t fh;
+ uint8_t pcias;
+
+ cpu_synchronize_state(CPU(cpu));
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ program_interrupt(env, PGM_PRIVILEGED, 4);
+ return 0;
+ }
+
+ if (r2 & 0x1) {
+ program_interrupt(env, PGM_SPECIFICATION, 4);
+ return 0;
+ }
+
+ fh = env->regs[r2] >> 32;
+ pcias = (env->regs[r2] >> 16) & 0xf;
+ len = env->regs[r2] & 0xf;
+ offset = env->regs[r2 + 1];
+
+ pbdev = s390_pci_find_dev_by_fh(fh);
+ if (!pbdev) {
+ DPRINTF("pcilg no pci dev\n");
+ setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
+ return 0;
+ }
+
+ if (pbdev->lgstg_blocked) {
+ setcc(cpu, ZPCI_PCI_LS_ERR);
+ s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
+ return 0;
+ }
+
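+ /* address spaces 0-5 are the function's BARs, 15 is the config space */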
+ if (pcias < 6) {
+ if ((8 - (offset & 0x7)) < len) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+ MemoryRegion *mr = pbdev->pdev->io_regions[pcias].memory;
+ memory_region_dispatch_read(mr, offset, &data, len,
+ MEMTXATTRS_UNSPECIFIED);
+ } else if (pcias == 15) {
+ if ((4 - (offset & 0x3)) < len) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+ data = pci_host_config_read_common(
+ pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);
+
+ switch (len) {
+ case 1:
+ break;
+ case 2:
+ data = bswap16(data);
+ break;
+ case 4:
+ data = bswap32(data);
+ break;
+ case 8:
+ data = bswap64(data);
+ break;
+ default:
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+ } else {
+ DPRINTF("invalid space\n");
+ setcc(cpu, ZPCI_PCI_LS_ERR);
+ s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
+ return 0;
+ }
+
+ env->regs[r1] = data;
+ setcc(cpu, ZPCI_PCI_LS_OK);
+ return 0;
+}
+
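+/*
+ * Fold the function id into the MSI-X message data so that the
+ * eventual write to ZPCI_MSI_ADDR identifies the originating function
+ * (see s390_msi_ctrl_write).
+ */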
+static void update_msix_table_msg_data(S390PCIBusDevice *pbdev, uint64_t offset,
+ uint64_t *data, uint8_t len)
+{
+ uint32_t val;
+ uint8_t *msg_data;
+
+ if (offset % PCI_MSIX_ENTRY_SIZE != 8) {
+ return;
+ }
+
+ if (len != 4) {
+ DPRINTF("access msix table msg data but len is %d\n", len);
+ return;
+ }
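+/*
+ * Walk the guest's two-level I/O translation table (region table ->
+ * segment table -> page table) for @guest_dma_address; returns the
+ * page-table entry, or 0 if a table entry is missing or invalid.
+ */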
+
+ msg_data = (uint8_t *)data - offset % PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+ val = pci_get_long(msg_data) | (pbdev->fid << ZPCI_MSI_VEC_BITS);
+ pci_set_long(msg_data, val);
+ DPRINTF("update msix msg_data to 0x%" PRIx64 "\n", *data);
+}
+
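+/* returns 1 if the store hits the function's MSI-X table, 0 otherwise */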
+static int trap_msix(S390PCIBusDevice *pbdev, uint64_t offset, uint8_t pcias)
+{
+ if (pbdev->msix.available && pbdev->msix.table_bar == pcias &&
+ offset >= pbdev->msix.table_offset &&
+ offset <= pbdev->msix.table_offset +
+ (pbdev->msix.entries - 1) * PCI_MSIX_ENTRY_SIZE) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
+{
+ CPUS390XState *env = &cpu->env;
+ uint64_t offset, data;
+ S390PCIBusDevice *pbdev;
+ uint8_t len;
+ uint32_t fh;
+ uint8_t pcias;
+
+ cpu_synchronize_state(CPU(cpu));
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ program_interrupt(env, PGM_PRIVILEGED, 4);
+ return 0;
+ }
+
+ if (r2 & 0x1) {
+ program_interrupt(env, PGM_SPECIFICATION, 4);
+ return 0;
+ }
+
+ fh = env->regs[r2] >> 32;
+ pcias = (env->regs[r2] >> 16) & 0xf;
+ len = env->regs[r2] & 0xf;
+ offset = env->regs[r2 + 1];
+
+ pbdev = s390_pci_find_dev_by_fh(fh);
+ if (!pbdev) {
+ DPRINTF("pcistg no pci dev\n");
+ setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
+ return 0;
+ }
+
+ if (pbdev->lgstg_blocked) {
+ setcc(cpu, ZPCI_PCI_LS_ERR);
+ s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
+ return 0;
+ }
+
+ data = env->regs[r1];
+ if (pcias < 6) {
+ if ((8 - (offset & 0x7)) < len) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+ MemoryRegion *mr;
+ if (trap_msix(pbdev, offset, pcias)) {
+ offset = offset - pbdev->msix.table_offset;
+ mr = &pbdev->pdev->msix_table_mmio;
+ update_msix_table_msg_data(pbdev, offset, &data, len);
+ } else {
+ mr = pbdev->pdev->io_regions[pcias].memory;
+ }
+
+ memory_region_dispatch_write(mr, offset, data, len,
+ MEMTXATTRS_UNSPECIFIED);
+ } else if (pcias == 15) {
+ if ((4 - (offset & 0x3)) < len) {
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+ switch (len) {
+ case 1:
+ break;
+ case 2:
+ data = bswap16(data);
+ break;
+ case 4:
+ data = bswap32(data);
+ break;
+ case 8:
+ data = bswap64(data);
+ break;
+ default:
+ program_interrupt(env, PGM_OPERAND, 4);
+ return 0;
+ }
+
+ pci_host_config_write_common(pbdev->pdev, offset,
+ pci_config_size(pbdev->pdev),
+ data, len);
+ } else {
+ DPRINTF("pcistg invalid space\n");
+ setcc(cpu, ZPCI_PCI_LS_ERR);
+ s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
+ return 0;
+ }
+
+ setcc(cpu, ZPCI_PCI_LS_OK);
+ return 0;
+}
+
+int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
+{
+ CPUS390XState *env = &cpu->env;
+ uint32_t fh;
+ S390PCIBusDevice *pbdev;
+ hwaddr start, end;
+ IOMMUTLBEntry entry;
+ MemoryRegion *mr;
+
+ cpu_synchronize_state(CPU(cpu));
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ program_interrupt(env, PGM_PRIVILEGED, 4);
+ goto out;
+ }
+
+ if (r2 & 0x1) {
+ program_interrupt(env, PGM_SPECIFICATION, 4);
+ goto out;
+ }
+
+ fh = env->regs[r1] >> 32;
+ start = env->regs[r2];
+ end = start + env->regs[r2 + 1];
+
+ pbdev = s390_pci_find_dev_by_fh(fh);
+
+ if (!pbdev) {
+ DPRINTF("rpcit no pci dev\n");
+ setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
+ goto out;
+ }
+
+ mr = &pbdev->iommu_mr;
+ while (start < end) {
+ entry = mr->iommu_ops->translate(mr, start, 0);
+
+ if (!entry.translated_addr) {
+ setcc(cpu, ZPCI_PCI_LS_ERR);
+ goto out;
+ }
+
+ memory_region_notify_iommu(mr, entry);
+ start += entry.addr_mask + 1;
+ }
+
+ setcc(cpu, ZPCI_PCI_LS_OK);
+out:
+ return 0;
+}
+
+int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
+ uint8_t ar)
+{
+ CPUS390XState *env = &cpu->env;
+ S390PCIBusDevice *pbdev;
+ MemoryRegion *mr;
+ int i;
+ uint32_t fh;
+ uint8_t pcias;
+ uint8_t len;
+ uint8_t buffer[128];
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ program_interrupt(env, PGM_PRIVILEGED, 6);
+ return 0;
+ }
+
+ fh = env->regs[r1] >> 32;
+ pcias = (env->regs[r1] >> 16) & 0xf;
+ len = env->regs[r1] & 0xff;
+
+ if (pcias > 5) {
+ DPRINTF("pcistb invalid space\n");
+ setcc(cpu, ZPCI_PCI_LS_ERR);
+ s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
+ return 0;
+ }
+
+ switch (len) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ break;
+ default:
+ program_interrupt(env, PGM_SPECIFICATION, 6);
+ return 0;
+ }
+
+ pbdev = s390_pci_find_dev_by_fh(fh);
+ if (!pbdev) {
+ DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
+ setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
+ return 0;
+ }
+
+ if (pbdev->lgstg_blocked) {
+ setcc(cpu, ZPCI_PCI_LS_ERR);
+ s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
+ return 0;
+ }
+
+ mr = pbdev->pdev->io_regions[pcias].memory;
+ if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
+ program_interrupt(env, PGM_ADDRESSING, 6);
+ return 0;
+ }
+
+ if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
+ return 0;
+ }
+
+ for (i = 0; i < len / 8; i++) {
+ memory_region_dispatch_write(mr, env->regs[r3] + i * 8,
+ ldq_p(buffer + i * 8), 8,
+ MEMTXATTRS_UNSPECIFIED);
+ }
+
+ setcc(cpu, ZPCI_PCI_LS_OK);
+ return 0;
+}
+
+static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
+{
+ int ret;
+ S390FLICState *fs = s390_get_flic();
+ S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
+
+ ret = css_register_io_adapter(S390_PCIPT_ADAPTER,
+ FIB_DATA_ISC(ldl_p(&fib.data)), true, false,
+ &pbdev->routes.adapter.adapter_id);
+ assert(ret == 0);
+
+ fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
+ ldq_p(&fib.aisb), true);
+ fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
+ ldq_p(&fib.aibv), true);
+
+ pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
+ pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
+ pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
+ pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
+ pbdev->isc = FIB_DATA_ISC(ldl_p(&fib.data));
+ pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
+ pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));
+
+ DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
+ return 0;
+}
+
+static int dereg_irqs(S390PCIBusDevice *pbdev)
+{
+ S390FLICState *fs = s390_get_flic();
+ S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
+
+ fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
+ pbdev->routes.adapter.ind_addr, false);
+
+ pbdev->routes.adapter.summary_addr = 0;
+ pbdev->routes.adapter.summary_offset = 0;
+ pbdev->routes.adapter.ind_addr = 0;
+ pbdev->routes.adapter.ind_offset = 0;
+ pbdev->isc = 0;
+ pbdev->noi = 0;
+ pbdev->sum = 0;
+
+ DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
+ return 0;
+}
+
+static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
+{
+ uint64_t pba = ldq_p(&fib.pba);
+ uint64_t pal = ldq_p(&fib.pal);
+ uint64_t g_iota = ldq_p(&fib.iota);
+ uint8_t dt = (g_iota >> 2) & 0x7;
+ uint8_t t = (g_iota >> 11) & 0x1;
+
+ if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
+ program_interrupt(env, PGM_OPERAND, 6);
+ return -EINVAL;
+ }
+
+ /* currently we only support designation type 1 with translation */
+ if (!(dt == ZPCI_IOTA_RTTO && t)) {
+ error_report("unsupported ioat dt %d t %d", dt, t);
+ program_interrupt(env, PGM_OPERAND, 6);
+ return -EINVAL;
+ }
+
+ pbdev->pba = pba;
+ pbdev->pal = pal;
+ pbdev->g_iota = g_iota;
+
+ s390_pcihost_iommu_configure(pbdev, true);
+
+ return 0;
+}
+
+static void dereg_ioat(S390PCIBusDevice *pbdev)
+{
+ pbdev->pba = 0;
+ pbdev->pal = 0;
+ pbdev->g_iota = 0;
+
+ s390_pcihost_iommu_configure(pbdev, false);
+}
+
+int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
+{
+ CPUS390XState *env = &cpu->env;
+ uint8_t oc;
+ uint32_t fh;
+ ZpciFib fib;
+ S390PCIBusDevice *pbdev;
+ uint64_t cc = ZPCI_PCI_LS_OK;
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ program_interrupt(env, PGM_PRIVILEGED, 6);
+ return 0;
+ }
+
+ oc = env->regs[r1] & 0xff;
+ fh = env->regs[r1] >> 32;
+
+ if (fiba & 0x7) {
+ program_interrupt(env, PGM_SPECIFICATION, 6);
+ return 0;
+ }
+
+ pbdev = s390_pci_find_dev_by_fh(fh);
+ if (!pbdev) {
+ DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
+ setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
+ return 0;
+ }
+
+ if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
+ return 0;
+ }
+
+ switch (oc) {
+ case ZPCI_MOD_FC_REG_INT:
+ if (reg_irqs(env, pbdev, fib)) {
+ cc = ZPCI_PCI_LS_ERR;
+ }
+ break;
+ case ZPCI_MOD_FC_DEREG_INT:
+ dereg_irqs(pbdev);
+ break;
+ case ZPCI_MOD_FC_REG_IOAT:
+ if (reg_ioat(env, pbdev, fib)) {
+ cc = ZPCI_PCI_LS_ERR;
+ }
+ break;
+ case ZPCI_MOD_FC_DEREG_IOAT:
+ dereg_ioat(pbdev);
+ break;
+ case ZPCI_MOD_FC_REREG_IOAT:
+ dereg_ioat(pbdev);
+ if (reg_ioat(env, pbdev, fib)) {
+ cc = ZPCI_PCI_LS_ERR;
+ }
+ break;
+ case ZPCI_MOD_FC_RESET_ERROR:
+ pbdev->error_state = false;
+ pbdev->lgstg_blocked = false;
+ break;
+ case ZPCI_MOD_FC_RESET_BLOCK:
+ pbdev->lgstg_blocked = false;
+ break;
+ case ZPCI_MOD_FC_SET_MEASURE:
+ pbdev->fmb_addr = ldq_p(&fib.fmb_addr);
+ break;
+ default:
+ program_interrupt(&cpu->env, PGM_OPERAND, 6);
+ cc = ZPCI_PCI_LS_ERR;
+ }
+
+ setcc(cpu, cc);
+ return 0;
+}
+
+int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
+{
+ CPUS390XState *env = &cpu->env;
+ uint32_t fh;
+ ZpciFib fib;
+ S390PCIBusDevice *pbdev;
+ uint32_t data;
+ uint64_t cc = ZPCI_PCI_LS_OK;
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ program_interrupt(env, PGM_PRIVILEGED, 6);
+ return 0;
+ }
+
+ fh = env->regs[r1] >> 32;
+
+ if (fiba & 0x7) {
+ program_interrupt(env, PGM_SPECIFICATION, 6);
+ return 0;
+ }
+
+ pbdev = s390_pci_find_dev_by_fh(fh);
+ if (!pbdev) {
+ setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
+ return 0;
+ }
+
+ memset(&fib, 0, sizeof(fib));
+ stq_p(&fib.pba, pbdev->pba);
+ stq_p(&fib.pal, pbdev->pal);
+ stq_p(&fib.iota, pbdev->g_iota);
+ stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
+ stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
+ stq_p(&fib.fmb_addr, pbdev->fmb_addr);
+
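+ /*
+ * Pack the FIB data word: ISC in bits 28-30, NOI in bits 16-27,
+ * AIBVO in bits 8-13, the summary bit in bit 7 and AISBO in bits
+ * 0-5, mirroring the FIB_DATA_* accessors.
+ */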
+ data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
+ ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
+ ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
+ stl_p(&fib.data, data);
+
+ if (pbdev->fh >> ENABLE_BIT_OFFSET) {
+ fib.fc |= 0x80;
+ }
+
+ if (pbdev->error_state) {
+ fib.fc |= 0x40;
+ }
+
+ if (pbdev->lgstg_blocked) {
+ fib.fc |= 0x20;
+ }
+
+ if (pbdev->g_iota) {
+ fib.fc |= 0x10;
+ }
+
+ if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
+ return 0;
+ }
+
+ setcc(cpu, cc);
+ return 0;
+}
diff --git a/src/hw/s390x/s390-pci-inst.h b/src/hw/s390x/s390-pci-inst.h
new file mode 100644
index 0000000..70fa713
--- /dev/null
+++ b/src/hw/s390x/s390-pci-inst.h
@@ -0,0 +1,289 @@
+/*
+ * s390 PCI instruction definitions
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
+ * Hong Bo Li <lihbbj@cn.ibm.com>
+ * Yi Min Zhao <zyimin@cn.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_PCI_INST_H
+#define HW_S390_PCI_INST_H
+
+#include <sysemu/dma.h>
+
+/* CLP common request & response block size */
+#define CLP_BLK_SIZE 4096
+#define PCI_BAR_COUNT 6
+#define PCI_MAX_FUNCTIONS 4096
+
+typedef struct ClpReqHdr {
+ uint16_t len;
+ uint16_t cmd;
+} QEMU_PACKED ClpReqHdr;
+
+typedef struct ClpRspHdr {
+ uint16_t len;
+ uint16_t rsp;
+} QEMU_PACKED ClpRspHdr;
+
+/* CLP Response Codes */
+#define CLP_RC_OK 0x0010 /* Command request successful */
+#define CLP_RC_CMD 0x0020 /* Command code not recognized */
+#define CLP_RC_PERM 0x0030 /* Command not authorized */
+#define CLP_RC_FMT 0x0040 /* Invalid command request format */
+#define CLP_RC_LEN 0x0050 /* Invalid command request length */
+#define CLP_RC_8K 0x0060 /* Command requires 8K LPCB */
+#define CLP_RC_RESNOT0 0x0070 /* Reserved field not zero */
+#define CLP_RC_NODATA 0x0080 /* No data available */
+#define CLP_RC_FC_UNKNOWN 0x0100 /* Function code not recognized */
+
+/*
+ * Call Logical Processor - Command Codes
+ */
+#define CLP_LIST_PCI 0x0002
+#define CLP_QUERY_PCI_FN 0x0003
+#define CLP_QUERY_PCI_FNGRP 0x0004
+#define CLP_SET_PCI_FN 0x0005
+
+/* PCI function handle list entry */
+typedef struct ClpFhListEntry {
+ uint16_t device_id;
+ uint16_t vendor_id;
+#define CLP_FHLIST_MASK_CONFIG 0x80000000
+ uint32_t config;
+ uint32_t fid;
+ uint32_t fh;
+} QEMU_PACKED ClpFhListEntry;
+
+#define CLP_RC_SETPCIFN_FH 0x0101 /* Invalid PCI fn handle */
+#define CLP_RC_SETPCIFN_FHOP 0x0102 /* Fn handle not valid for op */
+#define CLP_RC_SETPCIFN_DMAAS 0x0103 /* Invalid DMA addr space */
+#define CLP_RC_SETPCIFN_RES 0x0104 /* Insufficient resources */
+#define CLP_RC_SETPCIFN_ALRDY 0x0105 /* Fn already in requested state */
+#define CLP_RC_SETPCIFN_ERR 0x0106 /* Fn in permanent error state */
+#define CLP_RC_SETPCIFN_RECPND 0x0107 /* Error recovery pending */
+#define CLP_RC_SETPCIFN_BUSY 0x0108 /* Fn busy */
+#define CLP_RC_LISTPCI_BADRT 0x010a /* Resume token not recognized */
+#define CLP_RC_QUERYPCIFG_PFGID 0x010b /* Unrecognized PFGID */
+
+/* request or response block header length */
+#define LIST_PCI_HDR_LEN 32
+
+/* Number of function handles fitting in response block */
+#define CLP_FH_LIST_NR_ENTRIES \
+ ((CLP_BLK_SIZE - 2 * LIST_PCI_HDR_LEN) \
+ / sizeof(ClpFhListEntry))
+
+#define CLP_SET_ENABLE_PCI_FN 0 /* Yes, 0 enables it */
+#define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */
+
+#define CLP_UTIL_STR_LEN 64
+
+#define CLP_MASK_FMT 0xf0000000
+
+/* List PCI functions request */
+typedef struct ClpReqListPci {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint64_t resume_token;
+ uint64_t reserved2;
+} QEMU_PACKED ClpReqListPci;
+
+/* List PCI functions response */
+typedef struct ClpRspListPci {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint64_t resume_token;
+ uint32_t mdd;
+ uint16_t max_fn;
+ uint8_t reserved2;
+ uint8_t entry_size;
+ ClpFhListEntry fh_list[CLP_FH_LIST_NR_ENTRIES];
+} QEMU_PACKED ClpRspListPci;
+
+/* Query PCI function request */
+typedef struct ClpReqQueryPci {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint32_t fh; /* function handle */
+ uint32_t reserved2;
+ uint64_t reserved3;
+} QEMU_PACKED ClpReqQueryPci;
+
+/* Query PCI function response */
+typedef struct ClpRspQueryPci {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint16_t vfn; /* virtual fn number */
+#define CLP_RSP_QPCI_MASK_UTIL 0x100
+#define CLP_RSP_QPCI_MASK_PFGID 0xff
+ uint16_t ug;
+ uint32_t fid; /* pci function id */
+ uint8_t bar_size[PCI_BAR_COUNT];
+ uint16_t pchid;
+ uint32_t bar[PCI_BAR_COUNT];
+ uint64_t reserved2;
+ uint64_t sdma; /* start dma as */
+ uint64_t edma; /* end dma as */
+ uint32_t reserved3[11];
+ uint32_t uid;
+ uint8_t util_str[CLP_UTIL_STR_LEN]; /* utility string */
+} QEMU_PACKED ClpRspQueryPci;
+
+/* Query PCI function group request */
+typedef struct ClpReqQueryPciGrp {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+#define CLP_REQ_QPCIG_MASK_PFGID 0xff
+ uint32_t g;
+ uint32_t reserved2;
+ uint64_t reserved3;
+} QEMU_PACKED ClpReqQueryPciGrp;
+
+/* Query PCI function group response */
+typedef struct ClpRspQueryPciGrp {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+#define CLP_RSP_QPCIG_MASK_NOI 0xfff
+ uint16_t i;
+ uint8_t version;
+#define CLP_RSP_QPCIG_MASK_FRAME 0x2
+#define CLP_RSP_QPCIG_MASK_REFRESH 0x1
+ uint8_t fr;
+ uint16_t reserved2;
+ uint16_t mui;
+ uint64_t reserved3;
+ uint64_t dasm; /* dma address space mask */
+ uint64_t msia; /* MSI address */
+ uint64_t reserved4;
+ uint64_t reserved5;
+} QEMU_PACKED ClpRspQueryPciGrp;
+
+/* Set PCI function request */
+typedef struct ClpReqSetPci {
+ ClpReqHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint32_t fh; /* function handle */
+ uint16_t reserved2;
+ uint8_t oc; /* operation controls */
+ uint8_t ndas; /* number of dma spaces */
+ uint64_t reserved3;
+} QEMU_PACKED ClpReqSetPci;
+
+/* Set PCI function response */
+typedef struct ClpRspSetPci {
+ ClpRspHdr hdr;
+ uint32_t fmt;
+ uint64_t reserved1;
+ uint32_t fh; /* function handle */
+ uint32_t reserved3;
+ uint64_t reserved4;
+} QEMU_PACKED ClpRspSetPci;
+
+typedef struct ClpReqRspListPci {
+ ClpReqListPci request;
+ ClpRspListPci response;
+} QEMU_PACKED ClpReqRspListPci;
+
+typedef struct ClpReqRspSetPci {
+ ClpReqSetPci request;
+ ClpRspSetPci response;
+} QEMU_PACKED ClpReqRspSetPci;
+
+typedef struct ClpReqRspQueryPci {
+ ClpReqQueryPci request;
+ ClpRspQueryPci response;
+} QEMU_PACKED ClpReqRspQueryPci;
+
+typedef struct ClpReqRspQueryPciGrp {
+ ClpReqQueryPciGrp request;
+ ClpRspQueryPciGrp response;
+} QEMU_PACKED ClpReqRspQueryPciGrp;
+
+/* Load/Store status codes */
+#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
+#define ZPCI_PCI_ST_FUNC_IN_ERR 8
+#define ZPCI_PCI_ST_BLOCKED 12
+#define ZPCI_PCI_ST_INSUF_RES 16
+#define ZPCI_PCI_ST_INVAL_AS 20
+#define ZPCI_PCI_ST_FUNC_ALREADY_ENABLED 24
+#define ZPCI_PCI_ST_DMA_AS_NOT_ENABLED 28
+#define ZPCI_PCI_ST_2ND_OP_IN_INV_AS 36
+#define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40
+#define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44
+
+/* Load/Store return codes */
+#define ZPCI_PCI_LS_OK 0
+#define ZPCI_PCI_LS_ERR 1
+#define ZPCI_PCI_LS_BUSY 2
+#define ZPCI_PCI_LS_INVAL_HANDLE 3
+
+/* Modify PCI Function Controls */
+#define ZPCI_MOD_FC_REG_INT 2
+#define ZPCI_MOD_FC_DEREG_INT 3
+#define ZPCI_MOD_FC_REG_IOAT 4
+#define ZPCI_MOD_FC_DEREG_IOAT 5
+#define ZPCI_MOD_FC_REREG_IOAT 6
+#define ZPCI_MOD_FC_RESET_ERROR 7
+#define ZPCI_MOD_FC_RESET_BLOCK 9
+#define ZPCI_MOD_FC_SET_MEASURE 10
+
+/* FIB function controls */
+#define ZPCI_FIB_FC_ENABLED 0x80
+#define ZPCI_FIB_FC_ERROR 0x40
+#define ZPCI_FIB_FC_LS_BLOCKED 0x20
+#define ZPCI_FIB_FC_DMAAS_REG 0x10
+
+/* Function Information Block */
+typedef struct ZpciFib {
+ uint8_t fmt; /* format */
+ uint8_t reserved1[7];
+ uint8_t fc; /* function controls */
+ uint8_t reserved2;
+ uint16_t reserved3;
+ uint32_t reserved4;
+ uint64_t pba; /* PCI base address */
+ uint64_t pal; /* PCI address limit */
+ uint64_t iota; /* I/O Translation Anchor */
+#define FIB_DATA_ISC(x) (((x) >> 28) & 0x7)
+#define FIB_DATA_NOI(x) (((x) >> 16) & 0xfff)
+#define FIB_DATA_AIBVO(x) (((x) >> 8) & 0x3f)
+#define FIB_DATA_SUM(x) (((x) >> 7) & 0x1)
+#define FIB_DATA_AISBO(x) ((x) & 0x3f)
+ uint32_t data;
+ uint32_t reserved5;
+ uint64_t aibv; /* Adapter int bit vector address */
+ uint64_t aisb; /* Adapter int summary bit address */
+ uint64_t fmb_addr; /* Function measurement address and key */
+ uint32_t reserved6;
+ uint32_t gd;
+} QEMU_PACKED ZpciFib;
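+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the zPCI code):
+ * unpacking the interrupt parameters packed into fib->data with the
+ * FIB_DATA_* accessors above.
+ */
+static inline uint8_t zpci_fib_unpack_example(const ZpciFib *fib)
+{
+ uint8_t isc = FIB_DATA_ISC(fib->data); /* interruption subclass */
+ uint16_t noi = FIB_DATA_NOI(fib->data); /* number of interrupt bits */
+ uint8_t aibvo = FIB_DATA_AIBVO(fib->data); /* bit vector offset */
+ uint8_t sum = FIB_DATA_SUM(fib->data); /* summary notification flag */
+ uint8_t aisbo = FIB_DATA_AISBO(fib->data); /* summary bit offset */
+
+ (void)noi; (void)aibvo; (void)sum; (void)aisbo;
+ return isc;
+}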
+
+int clp_service_call(S390CPU *cpu, uint8_t r2);
+int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
+int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
+int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2);
+int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
+ uint8_t ar);
+int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar);
+int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar);
+
+#endif
diff --git a/src/hw/s390x/s390-skeys-kvm.c b/src/hw/s390x/s390-skeys-kvm.c
new file mode 100644
index 0000000..682949a
--- /dev/null
+++ b/src/hw/s390x/s390-skeys-kvm.c
@@ -0,0 +1,75 @@
+/*
+ * s390 storage key device
+ *
+ * Copyright 2015 IBM Corp.
+ * Author(s): Jason J. Herne <jjherne@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "hw/s390x/storage-keys.h"
+#include "sysemu/kvm.h"
+#include "qemu/error-report.h"
+
+static int kvm_s390_skeys_enabled(S390SKeysState *ss)
+{
+ S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss);
+ uint8_t single_key;
+ int r;
+
+ r = skeyclass->get_skeys(ss, 0, 1, &single_key);
+ if (r != 0 && r != KVM_S390_GET_SKEYS_NONE) {
+ error_report("S390_GET_KEYS error %d\n", r);
+ }
+ return (r == 0);
+}
+
+static int kvm_s390_skeys_get(S390SKeysState *ss, uint64_t start_gfn,
+ uint64_t count, uint8_t *keys)
+{
+ struct kvm_s390_skeys args = {
+ .start_gfn = start_gfn,
+ .count = count,
+ .skeydata_addr = (__u64)keys
+ };
+
+ return kvm_vm_ioctl(kvm_state, KVM_S390_GET_SKEYS, &args);
+}
+
+static int kvm_s390_skeys_set(S390SKeysState *ss, uint64_t start_gfn,
+ uint64_t count, uint8_t *keys)
+{
+ struct kvm_s390_skeys args = {
+ .start_gfn = start_gfn,
+ .count = count,
+ .skeydata_addr = (__u64)keys
+ };
+
+ return kvm_vm_ioctl(kvm_state, KVM_S390_SET_SKEYS, &args);
+}
+
+static void kvm_s390_skeys_class_init(ObjectClass *oc, void *data)
+{
+ S390SKeysClass *skeyclass = S390_SKEYS_CLASS(oc);
+
+ skeyclass->skeys_enabled = kvm_s390_skeys_enabled;
+ skeyclass->get_skeys = kvm_s390_skeys_get;
+ skeyclass->set_skeys = kvm_s390_skeys_set;
+}
+
+static const TypeInfo kvm_s390_skeys_info = {
+ .name = TYPE_KVM_S390_SKEYS,
+ .parent = TYPE_S390_SKEYS,
+ .instance_size = sizeof(S390SKeysState),
+ .class_init = kvm_s390_skeys_class_init,
+ .class_size = sizeof(S390SKeysClass),
+};
+
+static void kvm_s390_skeys_register_types(void)
+{
+ type_register_static(&kvm_s390_skeys_info);
+}
+
+type_init(kvm_s390_skeys_register_types)
diff --git a/src/hw/s390x/s390-skeys.c b/src/hw/s390x/s390-skeys.c
new file mode 100644
index 0000000..539ef6d
--- /dev/null
+++ b/src/hw/s390x/s390-skeys.c
@@ -0,0 +1,415 @@
+/*
+ * s390 storage key device
+ *
+ * Copyright 2015 IBM Corp.
+ * Author(s): Jason J. Herne <jjherne@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "hw/boards.h"
+#include "qmp-commands.h"
+#include "migration/qemu-file.h"
+#include "hw/s390x/storage-keys.h"
+#include "qemu/error-report.h"
+
+#define S390_SKEYS_BUFFER_SIZE 131072 /* Room for 128k storage keys */
+#define S390_SKEYS_SAVE_FLAG_EOS 0x01
+#define S390_SKEYS_SAVE_FLAG_SKEYS 0x02
+#define S390_SKEYS_SAVE_FLAG_ERROR 0x04
+
+S390SKeysState *s390_get_skeys_device(void)
+{
+ S390SKeysState *ss;
+
+ ss = S390_SKEYS(object_resolve_path_type("", TYPE_S390_SKEYS, NULL));
+ assert(ss);
+ return ss;
+}
+
+void s390_skeys_init(void)
+{
+ Object *obj;
+
+ if (kvm_enabled()) {
+ obj = object_new(TYPE_KVM_S390_SKEYS);
+ } else {
+ obj = object_new(TYPE_QEMU_S390_SKEYS);
+ }
+ object_property_add_child(qdev_get_machine(), TYPE_S390_SKEYS,
+ obj, NULL);
+ object_unref(obj);
+
+ qdev_init_nofail(DEVICE(obj));
+}
+
+static void write_keys(QEMUFile *f, uint8_t *keys, uint64_t startgfn,
+ uint64_t count, Error **errp)
+{
+ uint64_t curpage = startgfn;
+ uint64_t maxpage = curpage + count - 1;
+ const char *fmt = "page=%03" PRIx64 ": key(%d) => ACC=%X, FP=%d, REF=%d,"
+ " ch=%d, reserved=%d\n";
+ char buf[128];
+ int len;
+
+ for (; curpage <= maxpage; curpage++) {
+ uint8_t acc = (*keys & 0xF0) >> 4;
+ int fp = (*keys & 0x08);
+ int ref = (*keys & 0x04);
+ int ch = (*keys & 0x02);
+ int res = (*keys & 0x01);
+
+ len = snprintf(buf, sizeof(buf), fmt, curpage,
+ *keys, acc, fp, ref, ch, res);
+ assert(len < sizeof(buf));
+ qemu_put_buffer(f, (uint8_t *)buf, len);
+ keys++;
+ }
+}
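+
+/*
+ * For illustration: a key byte of 0x96 (1001 0110b) dumps as
+ * "key(150) => ACC=9, FP=0, REF=4, ch=2, reserved=0" -- note that FP, REF,
+ * ch and reserved are printed as the raw masked bits, not normalized to 0/1.
+ */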
+
+void hmp_info_skeys(Monitor *mon, const QDict *qdict)
+{
+ S390SKeysState *ss = s390_get_skeys_device();
+ S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss);
+ uint64_t addr = qdict_get_int(qdict, "addr");
+ uint8_t key;
+ int r;
+
+ /* Quick check to see if guest is using storage keys */
+ if (!skeyclass->skeys_enabled(ss)) {
+ monitor_printf(mon, "Error: This guest is not using storage keys\n");
+ return;
+ }
+
+ r = skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
+ if (r < 0) {
+ monitor_printf(mon, "Error: %s\n", strerror(-r));
+ return;
+ }
+
+ monitor_printf(mon, " key: 0x%X\n", key);
+}
+
+void hmp_dump_skeys(Monitor *mon, const QDict *qdict)
+{
+ const char *filename = qdict_get_str(qdict, "filename");
+ Error *err = NULL;
+
+ qmp_dump_skeys(filename, &err);
+ if (err) {
+ monitor_printf(mon, "%s\n", error_get_pretty(err));
+ error_free(err);
+ }
+}
+
+void qmp_dump_skeys(const char *filename, Error **errp)
+{
+ S390SKeysState *ss = s390_get_skeys_device();
+ S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss);
+ const uint64_t total_count = ram_size / TARGET_PAGE_SIZE;
+ uint64_t handled_count = 0, cur_count;
+ Error *lerr = NULL;
+ vaddr cur_gfn = 0;
+ uint8_t *buf;
+ int ret;
+ QEMUFile *f;
+
+ /* Quick check to see if guest is using storage keys */
+ if (!skeyclass->skeys_enabled(ss)) {
+ error_setg(errp, "This guest is not using storage keys - "
+ "nothing to dump");
+ return;
+ }
+
+ f = qemu_fopen(filename, "wb");
+ if (!f) {
+ error_setg_file_open(errp, errno, filename);
+ return;
+ }
+
+ buf = g_try_malloc(S390_SKEYS_BUFFER_SIZE);
+ if (!buf) {
+ error_setg(errp, "Could not allocate memory");
+ goto out;
+ }
+
+ /* we'll only dump initial memory for now */
+ while (handled_count < total_count) {
+ /* Calculate how many keys to ask for & handle overflow case */
+ cur_count = MIN(total_count - handled_count, S390_SKEYS_BUFFER_SIZE);
+
+ ret = skeyclass->get_skeys(ss, cur_gfn, cur_count, buf);
+ if (ret < 0) {
+ error_setg(errp, "get_keys error %d", ret);
+ goto out_free;
+ }
+
+ /* write keys to stream */
+ write_keys(f, buf, cur_gfn, cur_count, &lerr);
+ if (lerr) {
+ goto out_free;
+ }
+
+ cur_gfn += cur_count;
+ handled_count += cur_count;
+ }
+
+out_free:
+ error_propagate(errp, lerr);
+ g_free(buf);
+out:
+ qemu_fclose(f);
+}
+
+static void qemu_s390_skeys_init(Object *obj)
+{
+ QEMUS390SKeysState *skeys = QEMU_S390_SKEYS(obj);
+ MachineState *machine = MACHINE(qdev_get_machine());
+
+ skeys->key_count = machine->maxram_size / TARGET_PAGE_SIZE;
+ skeys->keydata = g_malloc0(skeys->key_count);
+}
+
+static int qemu_s390_skeys_enabled(S390SKeysState *ss)
+{
+ return 1;
+}
+
+/*
+ * TODO: for memory hotplug support qemu_s390_skeys_set and qemu_s390_skeys_get
+ * will have to make sure that the given gfn belongs to a memory region and not
+ * a memory hole.
+ */
+static int qemu_s390_skeys_set(S390SKeysState *ss, uint64_t start_gfn,
+ uint64_t count, uint8_t *keys)
+{
+ QEMUS390SKeysState *skeydev = QEMU_S390_SKEYS(ss);
+ int i;
+
+ /* Check for uint64 overflow and access beyond end of key data */
+ if (start_gfn + count > skeydev->key_count || start_gfn + count < count) {
+ error_report("Error: Setting storage keys for page beyond the end "
+ "of memory: gfn=%" PRIx64 " count=%" PRId64 "\n", start_gfn,
+ count);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < count; i++) {
+ skeydev->keydata[start_gfn + i] = keys[i];
+ }
+ return 0;
+}
+
+static int qemu_s390_skeys_get(S390SKeysState *ss, uint64_t start_gfn,
+ uint64_t count, uint8_t *keys)
+{
+ QEMUS390SKeysState *skeydev = QEMU_S390_SKEYS(ss);
+ int i;
+
+ /* Check for uint64 overflow and access beyond end of key data */
+ if (start_gfn + count > skeydev->key_count || start_gfn + count < count) {
+ error_report("Error: Getting storage keys for page beyond the end "
+ "of memory: gfn=%" PRIx64 " count=%" PRId64 "\n", start_gfn,
+ count);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < count; i++) {
+ keys[i] = skeydev->keydata[start_gfn + i];
+ }
+ return 0;
+}
+
+static void qemu_s390_skeys_class_init(ObjectClass *oc, void *data)
+{
+ S390SKeysClass *skeyclass = S390_SKEYS_CLASS(oc);
+
+ skeyclass->skeys_enabled = qemu_s390_skeys_enabled;
+ skeyclass->get_skeys = qemu_s390_skeys_get;
+ skeyclass->set_skeys = qemu_s390_skeys_set;
+}
+
+static const TypeInfo qemu_s390_skeys_info = {
+ .name = TYPE_QEMU_S390_SKEYS,
+ .parent = TYPE_S390_SKEYS,
+ .instance_init = qemu_s390_skeys_init,
+ .instance_size = sizeof(QEMUS390SKeysState),
+ .class_init = qemu_s390_skeys_class_init,
+ .class_size = sizeof(S390SKeysClass),
+};
+
+static void s390_storage_keys_save(QEMUFile *f, void *opaque)
+{
+ S390SKeysState *ss = S390_SKEYS(opaque);
+ S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss);
+ uint64_t pages_left = ram_size / TARGET_PAGE_SIZE;
+ uint64_t read_count, eos = S390_SKEYS_SAVE_FLAG_EOS;
+ vaddr cur_gfn = 0;
+ int error = 0;
+ uint8_t *buf;
+
+ if (!skeyclass->skeys_enabled(ss)) {
+ goto end_stream;
+ }
+
+ buf = g_try_malloc(S390_SKEYS_BUFFER_SIZE);
+ if (!buf) {
+ error_report("storage key save could not allocate memory\n");
+ goto end_stream;
+ }
+
+ /* We only support initial memory. Standby memory is not handled yet. */
+ qemu_put_be64(f, (cur_gfn * TARGET_PAGE_SIZE) | S390_SKEYS_SAVE_FLAG_SKEYS);
+ qemu_put_be64(f, pages_left);
+
+ while (pages_left) {
+ read_count = MIN(pages_left, S390_SKEYS_BUFFER_SIZE);
+
+ if (!error) {
+ error = skeyclass->get_skeys(ss, cur_gfn, read_count, buf);
+ if (error) {
+ /*
+ * If error: we want to fill the stream with valid data instead
+ * of stopping early so we pad the stream with 0x00 values and
+ * use S390_SKEYS_SAVE_FLAG_ERROR to indicate failure to the
+ * reading side.
+ */
+ error_report("S390_GET_KEYS error %d\n", error);
+ memset(buf, 0, S390_SKEYS_BUFFER_SIZE);
+ eos = S390_SKEYS_SAVE_FLAG_ERROR;
+ }
+ }
+
+ qemu_put_buffer(f, buf, read_count);
+ cur_gfn += read_count;
+ pages_left -= read_count;
+ }
+
+ g_free(buf);
+end_stream:
+ qemu_put_be64(f, eos);
+}
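+
+/*
+ * For illustration, the stream produced above (and parsed by
+ * s390_storage_keys_load() below) is laid out as:
+ *
+ *   be64: start address ORed with S390_SKEYS_SAVE_FLAG_SKEYS
+ *   be64: key count (one key byte per page)
+ *   raw key bytes
+ *   be64: S390_SKEYS_SAVE_FLAG_EOS (or _ERROR if get_skeys failed)
+ */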
+
+static int s390_storage_keys_load(QEMUFile *f, void *opaque, int version_id)
+{
+ S390SKeysState *ss = S390_SKEYS(opaque);
+ S390SKeysClass *skeyclass = S390_SKEYS_GET_CLASS(ss);
+ int ret = 0;
+
+ while (!ret) {
+ ram_addr_t addr;
+ int flags;
+
+ addr = qemu_get_be64(f);
+ flags = addr & ~TARGET_PAGE_MASK;
+ addr &= TARGET_PAGE_MASK;
+
+ switch (flags) {
+ case S390_SKEYS_SAVE_FLAG_SKEYS: {
+ const uint64_t total_count = qemu_get_be64(f);
+ uint64_t handled_count = 0, cur_count;
+ uint64_t cur_gfn = addr / TARGET_PAGE_SIZE;
+ uint8_t *buf = g_try_malloc(S390_SKEYS_BUFFER_SIZE);
+
+ if (!buf) {
+ error_report("storage key load could not allocate memory\n");
+ ret = -ENOMEM;
+ break;
+ }
+
+ while (handled_count < total_count) {
+ cur_count = MIN(total_count - handled_count,
+ S390_SKEYS_BUFFER_SIZE);
+ qemu_get_buffer(f, buf, cur_count);
+
+ ret = skeyclass->set_skeys(ss, cur_gfn, cur_count, buf);
+ if (ret < 0) {
+ error_report("S390_SET_KEYS error %d\n", ret);
+ break;
+ }
+ handled_count += cur_count;
+ cur_gfn += cur_count;
+ }
+ g_free(buf);
+ break;
+ }
+ case S390_SKEYS_SAVE_FLAG_ERROR: {
+ error_report("Storage key data is incomplete");
+ ret = -EINVAL;
+ break;
+ }
+ case S390_SKEYS_SAVE_FLAG_EOS:
+ /* normal exit */
+ return 0;
+ default:
+ error_report("Unexpected storage key flag data: %#x", flags);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static inline bool s390_skeys_get_migration_enabled(Object *obj, Error **errp)
+{
+ S390SKeysState *ss = S390_SKEYS(obj);
+
+ return ss->migration_enabled;
+}
+
+static inline void s390_skeys_set_migration_enabled(Object *obj, bool value,
+ Error **errp)
+{
+ S390SKeysState *ss = S390_SKEYS(obj);
+
+ /* Prevent double registration of savevm handler */
+ if (ss->migration_enabled == value) {
+ return;
+ }
+
+ ss->migration_enabled = value;
+
+ if (ss->migration_enabled) {
+ register_savevm(NULL, TYPE_S390_SKEYS, 0, 1, s390_storage_keys_save,
+ s390_storage_keys_load, ss);
+ } else {
+ unregister_savevm(DEVICE(ss), TYPE_S390_SKEYS, ss);
+ }
+}
+
+static void s390_skeys_instance_init(Object *obj)
+{
+ object_property_add_bool(obj, "migration-enabled",
+ s390_skeys_get_migration_enabled,
+ s390_skeys_set_migration_enabled, NULL);
+ object_property_set_bool(obj, true, "migration-enabled", NULL);
+}
+
+static void s390_skeys_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->hotpluggable = false;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo s390_skeys_info = {
+ .name = TYPE_S390_SKEYS,
+ .parent = TYPE_DEVICE,
+ .instance_init = s390_skeys_instance_init,
+ .instance_size = sizeof(S390SKeysState),
+ .class_init = s390_skeys_class_init,
+ .class_size = sizeof(S390SKeysClass),
+ .abstract = true,
+};
+
+static void qemu_s390_skeys_register_types(void)
+{
+ type_register_static(&s390_skeys_info);
+ type_register_static(&qemu_s390_skeys_info);
+}
+
+type_init(qemu_s390_skeys_register_types)
diff --git a/src/hw/s390x/s390-virtio-bus.c b/src/hw/s390x/s390-virtio-bus.c
new file mode 100644
index 0000000..98cb129
--- /dev/null
+++ b/src/hw/s390x/s390-virtio-bus.c
@@ -0,0 +1,758 @@
+/*
+ * QEMU S390 virtio target
+ *
+ * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw/hw.h"
+#include "sysemu/block-backend.h"
+#include "sysemu/sysemu.h"
+#include "hw/boards.h"
+#include "hw/loader.h"
+#include "elf.h"
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-rng.h"
+#include "hw/virtio/virtio-serial.h"
+#include "hw/virtio/virtio-net.h"
+#include "hw/virtio/vhost-scsi.h"
+#include "hw/sysbus.h"
+#include "sysemu/kvm.h"
+
+#include "hw/s390x/s390-virtio-bus.h"
+#include "hw/virtio/virtio-bus.h"
+
+/* #define DEBUG_S390 */
+
+#ifdef DEBUG_S390
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+#define VIRTIO_S390_QUEUE_MAX 64
+
+static void virtio_s390_bus_new(VirtioBusState *bus, size_t bus_size,
+ VirtIOS390Device *dev);
+
+static const TypeInfo s390_virtio_bus_info = {
+ .name = TYPE_S390_VIRTIO_BUS,
+ .parent = TYPE_BUS,
+ .instance_size = sizeof(VirtIOS390Bus),
+};
+
+static ram_addr_t s390_virtio_device_num_vq(VirtIOS390Device *dev);
+
+/* length of VirtIO device pages */
+const hwaddr virtio_size = S390_DEVICE_PAGES * TARGET_PAGE_SIZE;
+
+static void s390_virtio_bus_reset(void *opaque)
+{
+ VirtIOS390Bus *bus = opaque;
+ bus->next_ring = bus->dev_page + TARGET_PAGE_SIZE;
+}
+
+void s390_virtio_reset_idx(VirtIOS390Device *dev)
+{
+ int i;
+ hwaddr idx_addr;
+ uint8_t num_vq;
+
+ num_vq = s390_virtio_device_num_vq(dev);
+ for (i = 0; i < num_vq; i++) {
+ idx_addr = virtio_queue_get_avail_addr(dev->vdev, i) +
+ VIRTIO_VRING_AVAIL_IDX_OFFS;
+ address_space_stw(&address_space_memory, idx_addr, 0,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ idx_addr = virtio_queue_get_avail_addr(dev->vdev, i) +
+ virtio_queue_get_avail_size(dev->vdev, i);
+ address_space_stw(&address_space_memory, idx_addr, 0,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ idx_addr = virtio_queue_get_used_addr(dev->vdev, i) +
+ VIRTIO_VRING_USED_IDX_OFFS;
+ address_space_stw(&address_space_memory, idx_addr, 0,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ idx_addr = virtio_queue_get_used_addr(dev->vdev, i) +
+ virtio_queue_get_used_size(dev->vdev, i);
+ address_space_stw(&address_space_memory, idx_addr, 0,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ }
+}
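+
+/*
+ * For illustration: per queue this clears four 16-bit fields -- the avail
+ * and used ring indices, plus the word directly behind each ring, which
+ * holds used_event/avail_event when VIRTIO_RING_F_EVENT_IDX is in use.
+ */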
+
+VirtIOS390Bus *s390_virtio_bus_init(ram_addr_t *ram_size)
+{
+ VirtIOS390Bus *bus;
+ BusState *_bus;
+ DeviceState *dev;
+
+ /* Create bridge device */
+ dev = qdev_create(NULL, "s390-virtio-bridge");
+ qdev_init_nofail(dev);
+
+ /* Create bus on bridge device */
+
+ _bus = qbus_create(TYPE_S390_VIRTIO_BUS, dev, "s390-virtio");
+ bus = DO_UPCAST(VirtIOS390Bus, bus, _bus);
+
+ bus->dev_page = *ram_size;
+ bus->dev_offs = bus->dev_page;
+ bus->next_ring = bus->dev_page + TARGET_PAGE_SIZE;
+
+ /* Enable hotplugging */
+ qbus_set_hotplug_handler(_bus, dev, &error_abort);
+
+ /* Allocate RAM for VirtIO device pages (descriptors, queues, rings) */
+ *ram_size += S390_DEVICE_PAGES * TARGET_PAGE_SIZE;
+
+ qemu_register_reset(s390_virtio_bus_reset, bus);
+ return bus;
+}
+
+static void s390_virtio_device_init(VirtIOS390Device *dev,
+ VirtIODevice *vdev)
+{
+ VirtIOS390Bus *bus;
+ int dev_len;
+
+ bus = DO_UPCAST(VirtIOS390Bus, bus, dev->qdev.parent_bus);
+ dev->vdev = vdev;
+ dev->dev_offs = bus->dev_offs;
+ dev->feat_len = sizeof(uint32_t); /* always keep 32 bits features */
+
+ dev_len = VIRTIO_DEV_OFFS_CONFIG;
+ dev_len += s390_virtio_device_num_vq(dev) * VIRTIO_VQCONFIG_LEN;
+ dev_len += dev->feat_len * 2;
+ dev_len += virtio_bus_get_vdev_config_len(&dev->bus);
+
+ bus->dev_offs += dev_len;
+
+ s390_virtio_device_sync(dev);
+ s390_virtio_reset_idx(dev);
+ if (dev->qdev.hotplugged) {
+ s390_virtio_irq(VIRTIO_PARAM_DEV_ADD, dev->dev_offs);
+ }
+}
+
+static void s390_virtio_net_realize(VirtIOS390Device *s390_dev, Error **errp)
+{
+ DeviceState *qdev = DEVICE(s390_dev);
+ VirtIONetS390 *dev = VIRTIO_NET_S390(s390_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ Error *err = NULL;
+
+ virtio_net_set_netclient_name(&dev->vdev, qdev->id,
+ object_get_typename(OBJECT(qdev)));
+ qdev_set_parent_bus(vdev, BUS(&s390_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ s390_virtio_device_init(s390_dev, VIRTIO_DEVICE(vdev));
+}
+
+static void s390_virtio_net_instance_init(Object *obj)
+{
+ VirtIONetS390 *dev = VIRTIO_NET_S390(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_NET);
+ object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
+ "bootindex", &error_abort);
+}
+
+static void s390_virtio_blk_realize(VirtIOS390Device *s390_dev, Error **errp)
+{
+ VirtIOBlkS390 *dev = VIRTIO_BLK_S390(s390_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ Error *err = NULL;
+
+ qdev_set_parent_bus(vdev, BUS(&s390_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ s390_virtio_device_init(s390_dev, VIRTIO_DEVICE(vdev));
+}
+
+static void s390_virtio_blk_instance_init(Object *obj)
+{
+ VirtIOBlkS390 *dev = VIRTIO_BLK_S390(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_BLK);
+ object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev),"iothread",
+ &error_abort);
+ object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
+ "bootindex", &error_abort);
+}
+
+static void s390_virtio_serial_realize(VirtIOS390Device *s390_dev, Error **errp)
+{
+ VirtIOSerialS390 *dev = VIRTIO_SERIAL_S390(s390_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ DeviceState *qdev = DEVICE(s390_dev);
+ Error *err = NULL;
+ VirtIOS390Bus *bus;
+ char *bus_name;
+
+ bus = DO_UPCAST(VirtIOS390Bus, bus, qdev->parent_bus);
+
+ /*
+ * For command line compatibility, this sets the virtio-serial-device bus
+ * name as before.
+ */
+ if (qdev->id) {
+ bus_name = g_strdup_printf("%s.0", qdev->id);
+ virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
+ g_free(bus_name);
+ }
+
+ qdev_set_parent_bus(vdev, BUS(&s390_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ s390_virtio_device_init(s390_dev, VIRTIO_DEVICE(vdev));
+ bus->console = s390_dev;
+}
+
+static void s390_virtio_serial_instance_init(Object *obj)
+{
+ VirtIOSerialS390 *dev = VIRTIO_SERIAL_S390(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_SERIAL);
+}
+
+static void s390_virtio_scsi_realize(VirtIOS390Device *s390_dev, Error **errp)
+{
+ VirtIOSCSIS390 *dev = VIRTIO_SCSI_S390(s390_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ DeviceState *qdev = DEVICE(s390_dev);
+ Error *err = NULL;
+ char *bus_name;
+
+ /*
+ * For command line compatibility, this sets the virtio-scsi-device bus
+ * name as before.
+ */
+ if (qdev->id) {
+ bus_name = g_strdup_printf("%s.0", qdev->id);
+ virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
+ g_free(bus_name);
+ }
+
+ qdev_set_parent_bus(vdev, BUS(&s390_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ s390_virtio_device_init(s390_dev, VIRTIO_DEVICE(vdev));
+}
+
+static void s390_virtio_scsi_instance_init(Object *obj)
+{
+ VirtIOSCSIS390 *dev = VIRTIO_SCSI_S390(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_SCSI);
+}
+
+#ifdef CONFIG_VHOST_SCSI
+static void s390_vhost_scsi_realize(VirtIOS390Device *s390_dev, Error **errp)
+{
+ VHostSCSIS390 *dev = VHOST_SCSI_S390(s390_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ Error *err = NULL;
+
+ qdev_set_parent_bus(vdev, BUS(&s390_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ s390_virtio_device_init(s390_dev, VIRTIO_DEVICE(vdev));
+}
+
+static void s390_vhost_scsi_instance_init(Object *obj)
+{
+ VHostSCSIS390 *dev = VHOST_SCSI_S390(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VHOST_SCSI);
+}
+#endif
+
+
+static void s390_virtio_rng_realize(VirtIOS390Device *s390_dev, Error **errp)
+{
+ VirtIORNGS390 *dev = VIRTIO_RNG_S390(s390_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ Error *err = NULL;
+
+ qdev_set_parent_bus(vdev, BUS(&s390_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ object_property_set_link(OBJECT(dev),
+ OBJECT(dev->vdev.conf.rng), "rng",
+ NULL);
+
+ s390_virtio_device_init(s390_dev, VIRTIO_DEVICE(vdev));
+}
+
+static void s390_virtio_rng_instance_init(Object *obj)
+{
+ VirtIORNGS390 *dev = VIRTIO_RNG_S390(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_RNG);
+ object_property_add_alias(obj, "rng", OBJECT(&dev->vdev),
+ "rng", &error_abort);
+}
+
+static uint64_t s390_virtio_device_vq_token(VirtIOS390Device *dev, int vq)
+{
+ ram_addr_t token_off;
+
+ token_off = (dev->dev_offs + VIRTIO_DEV_OFFS_CONFIG) +
+ (vq * VIRTIO_VQCONFIG_LEN) +
+ VIRTIO_VQCONFIG_OFFS_TOKEN;
+
+ return address_space_ldq_be(&address_space_memory, token_off,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+}
+
+static ram_addr_t s390_virtio_device_num_vq(VirtIOS390Device *dev)
+{
+ VirtIODevice *vdev = dev->vdev;
+ int num_vq;
+
+ for (num_vq = 0; num_vq < VIRTIO_S390_QUEUE_MAX; num_vq++) {
+ if (!virtio_queue_get_num(vdev, num_vq)) {
+ break;
+ }
+ }
+
+ return num_vq;
+}
+
+static ram_addr_t s390_virtio_next_ring(VirtIOS390Bus *bus)
+{
+ ram_addr_t r = bus->next_ring;
+
+ bus->next_ring += VIRTIO_RING_LEN;
+ return r;
+}
+
+void s390_virtio_device_sync(VirtIOS390Device *dev)
+{
+ VirtIOS390Bus *bus = DO_UPCAST(VirtIOS390Bus, bus, dev->qdev.parent_bus);
+ ram_addr_t cur_offs;
+ uint8_t num_vq;
+ int i;
+
+ virtio_reset(dev->vdev);
+
+ /* Sync dev space */
+ address_space_stb(&address_space_memory,
+ dev->dev_offs + VIRTIO_DEV_OFFS_TYPE,
+ dev->vdev->device_id,
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+
+ address_space_stb(&address_space_memory,
+ dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ,
+ s390_virtio_device_num_vq(dev),
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ address_space_stb(&address_space_memory,
+ dev->dev_offs + VIRTIO_DEV_OFFS_FEATURE_LEN,
+ dev->feat_len,
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+
+ address_space_stb(&address_space_memory,
+ dev->dev_offs + VIRTIO_DEV_OFFS_CONFIG_LEN,
+ dev->vdev->config_len,
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+
+ num_vq = s390_virtio_device_num_vq(dev);
+ address_space_stb(&address_space_memory,
+ dev->dev_offs + VIRTIO_DEV_OFFS_NUM_VQ, num_vq,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+
+ /* Sync virtqueues */
+ for (i = 0; i < num_vq; i++) {
+ ram_addr_t vq = (dev->dev_offs + VIRTIO_DEV_OFFS_CONFIG) +
+ (i * VIRTIO_VQCONFIG_LEN);
+ ram_addr_t vring;
+
+ vring = s390_virtio_next_ring(bus);
+ virtio_queue_set_addr(dev->vdev, i, vring);
+ virtio_queue_set_vector(dev->vdev, i, i);
+ address_space_stq_be(&address_space_memory,
+ vq + VIRTIO_VQCONFIG_OFFS_ADDRESS, vring,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ address_space_stw_be(&address_space_memory,
+ vq + VIRTIO_VQCONFIG_OFFS_NUM,
+ virtio_queue_get_num(dev->vdev, i),
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ }
+
+ cur_offs = dev->dev_offs;
+ cur_offs += VIRTIO_DEV_OFFS_CONFIG;
+ cur_offs += num_vq * VIRTIO_VQCONFIG_LEN;
+
+ /* Sync feature bitmap */
+ address_space_stl_le(&address_space_memory, cur_offs,
+ dev->vdev->host_features,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+
+ dev->feat_offs = cur_offs + dev->feat_len;
+ cur_offs += dev->feat_len * 2;
+
+ /* Sync config space */
+ virtio_bus_get_vdev_config(&dev->bus, dev->vdev->config);
+
+ cpu_physical_memory_write(cur_offs,
+ dev->vdev->config, dev->vdev->config_len);
+ cur_offs += dev->vdev->config_len;
+}
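+
+/*
+ * For illustration, the per-device area written above (starting at
+ * dev->dev_offs) is laid out as:
+ *
+ *   KvmDeviceDesc header (type, num_vq, feature_len, config_len, status)
+ *   num_vq KvmVqConfig entries (token, ring address, ring size)
+ *   host feature bitmap, then guest feature bitmap (feat_len bytes each)
+ *   device config space (config_len bytes)
+ */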
+
+void s390_virtio_device_update_status(VirtIOS390Device *dev)
+{
+ VirtIODevice *vdev = dev->vdev;
+ uint32_t features;
+
+ virtio_set_status(vdev,
+ address_space_ldub(&address_space_memory,
+ dev->dev_offs + VIRTIO_DEV_OFFS_STATUS,
+ MEMTXATTRS_UNSPECIFIED, NULL));
+
+ /* Update guest supported feature bitmap */
+
+ features = bswap32(address_space_ldl_be(&address_space_memory,
+ dev->feat_offs,
+ MEMTXATTRS_UNSPECIFIED, NULL));
+ virtio_set_features(vdev, features);
+}
+
+/* Find a device by vring address */
+VirtIOS390Device *s390_virtio_bus_find_vring(VirtIOS390Bus *bus,
+ ram_addr_t mem,
+ int *vq_num)
+{
+ BusChild *kid;
+ int i;
+
+ QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
+ VirtIOS390Device *dev = (VirtIOS390Device *)kid->child;
+
+ for (i = 0; i < VIRTIO_S390_QUEUE_MAX; i++) {
+ if (!virtio_queue_get_addr(dev->vdev, i)) {
+ break;
+ }
+ if (virtio_queue_get_addr(dev->vdev, i) == mem) {
+ if (vq_num) {
+ *vq_num = i;
+ }
+ return dev;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/* Find a device by device descriptor location */
+VirtIOS390Device *s390_virtio_bus_find_mem(VirtIOS390Bus *bus, ram_addr_t mem)
+{
+ BusChild *kid;
+
+ QTAILQ_FOREACH(kid, &bus->bus.children, sibling) {
+ VirtIOS390Device *dev = (VirtIOS390Device *)kid->child;
+ if (dev->dev_offs == mem) {
+ return dev;
+ }
+ }
+
+ return NULL;
+}
+
+/* DeviceState to VirtIOS390Device. Note: used on datapath,
+ * be careful and test performance if you change this.
+ */
+static inline VirtIOS390Device *to_virtio_s390_device_fast(DeviceState *d)
+{
+ return container_of(d, VirtIOS390Device, qdev);
+}
+
+/* DeviceState to VirtIOS390Device. TODO: use QOM. */
+static inline VirtIOS390Device *to_virtio_s390_device(DeviceState *d)
+{
+ return container_of(d, VirtIOS390Device, qdev);
+}
+
+static void virtio_s390_notify(DeviceState *d, uint16_t vector)
+{
+ VirtIOS390Device *dev = to_virtio_s390_device_fast(d);
+ uint64_t token = s390_virtio_device_vq_token(dev, vector);
+
+ s390_virtio_irq(0, token);
+}
+
+static void virtio_s390_device_plugged(DeviceState *d, Error **errp)
+{
+ VirtIOS390Device *dev = to_virtio_s390_device(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
+ int n = virtio_get_num_queues(vdev);
+
+ if (n > VIRTIO_S390_QUEUE_MAX) {
+ error_setg(errp, "The nubmer of virtqueues %d "
+ "exceeds s390 limit %d", n,
+ VIRTIO_S390_QUEUE_MAX);
+ }
+}
+
+/**************** S390 Virtio Bus Device Descriptions *******************/
+
+static void s390_virtio_net_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOS390DeviceClass *k = VIRTIO_S390_DEVICE_CLASS(klass);
+
+ k->realize = s390_virtio_net_realize;
+ set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
+}
+
+static const TypeInfo s390_virtio_net = {
+ .name = TYPE_VIRTIO_NET_S390,
+ .parent = TYPE_VIRTIO_S390_DEVICE,
+ .instance_size = sizeof(VirtIONetS390),
+ .instance_init = s390_virtio_net_instance_init,
+ .class_init = s390_virtio_net_class_init,
+};
+
+static void s390_virtio_blk_class_init(ObjectClass *klass, void *data)
+{
+ VirtIOS390DeviceClass *k = VIRTIO_S390_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ k->realize = s390_virtio_blk_realize;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo s390_virtio_blk = {
+ .name = "virtio-blk-s390",
+ .parent = TYPE_VIRTIO_S390_DEVICE,
+ .instance_size = sizeof(VirtIOBlkS390),
+ .instance_init = s390_virtio_blk_instance_init,
+ .class_init = s390_virtio_blk_class_init,
+};
+
+static void s390_virtio_serial_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOS390DeviceClass *k = VIRTIO_S390_DEVICE_CLASS(klass);
+
+ k->realize = s390_virtio_serial_realize;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+}
+
+static const TypeInfo s390_virtio_serial = {
+ .name = TYPE_VIRTIO_SERIAL_S390,
+ .parent = TYPE_VIRTIO_S390_DEVICE,
+ .instance_size = sizeof(VirtIOSerialS390),
+ .instance_init = s390_virtio_serial_instance_init,
+ .class_init = s390_virtio_serial_class_init,
+};
+
+static void s390_virtio_rng_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOS390DeviceClass *k = VIRTIO_S390_DEVICE_CLASS(klass);
+
+ k->realize = s390_virtio_rng_realize;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo s390_virtio_rng = {
+ .name = TYPE_VIRTIO_RNG_S390,
+ .parent = TYPE_VIRTIO_S390_DEVICE,
+ .instance_size = sizeof(VirtIORNGS390),
+ .instance_init = s390_virtio_rng_instance_init,
+ .class_init = s390_virtio_rng_class_init,
+};
+
+static void s390_virtio_busdev_realize(DeviceState *dev, Error **errp)
+{
+ VirtIOS390Device *_dev = (VirtIOS390Device *)dev;
+ VirtIOS390DeviceClass *_info = VIRTIO_S390_DEVICE_GET_CLASS(dev);
+
+ virtio_s390_bus_new(&_dev->bus, sizeof(_dev->bus), _dev);
+
+ _info->realize(_dev, errp);
+}
+
+static void s390_virtio_busdev_reset(DeviceState *dev)
+{
+ VirtIOS390Device *_dev = (VirtIOS390Device *)dev;
+
+ virtio_reset(_dev->vdev);
+}
+
+static void virtio_s390_device_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = s390_virtio_busdev_realize;
+ dc->bus_type = TYPE_S390_VIRTIO_BUS;
+ dc->reset = s390_virtio_busdev_reset;
+}
+
+static const TypeInfo virtio_s390_device_info = {
+ .name = TYPE_VIRTIO_S390_DEVICE,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(VirtIOS390Device),
+ .class_init = virtio_s390_device_class_init,
+ .class_size = sizeof(VirtIOS390DeviceClass),
+ .abstract = true,
+};
+
+static void s390_virtio_scsi_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOS390DeviceClass *k = VIRTIO_S390_DEVICE_CLASS(klass);
+
+ k->realize = s390_virtio_scsi_realize;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo s390_virtio_scsi = {
+ .name = TYPE_VIRTIO_SCSI_S390,
+ .parent = TYPE_VIRTIO_S390_DEVICE,
+ .instance_size = sizeof(VirtIOSCSIS390),
+ .instance_init = s390_virtio_scsi_instance_init,
+ .class_init = s390_virtio_scsi_class_init,
+};
+
+#ifdef CONFIG_VHOST_SCSI
+static void s390_vhost_scsi_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOS390DeviceClass *k = VIRTIO_S390_DEVICE_CLASS(klass);
+
+ k->realize = s390_vhost_scsi_realize;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo s390_vhost_scsi = {
+ .name = TYPE_VHOST_SCSI_S390,
+ .parent = TYPE_VIRTIO_S390_DEVICE,
+ .instance_size = sizeof(VHostSCSIS390),
+ .instance_init = s390_vhost_scsi_instance_init,
+ .class_init = s390_vhost_scsi_class_init,
+};
+#endif
+
+/***************** S390 Virtio Bus Bridge Device *******************/
+/* Only required to have the virtio bus as child in the system bus */
+
+static int s390_virtio_bridge_init(SysBusDevice *dev)
+{
+ /* nothing */
+ return 0;
+}
+
+static void s390_virtio_bridge_class_init(ObjectClass *klass, void *data)
+{
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ k->init = s390_virtio_bridge_init;
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
+}
+
+static const TypeInfo s390_virtio_bridge_info = {
+ .name = "s390-virtio-bridge",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SysBusDevice),
+ .class_init = s390_virtio_bridge_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_HOTPLUG_HANDLER },
+ { }
+ }
+};
+
+/* virtio-s390-bus */
+
+static void virtio_s390_bus_new(VirtioBusState *bus, size_t bus_size,
+ VirtIOS390Device *dev)
+{
+ DeviceState *qdev = DEVICE(dev);
+ char virtio_bus_name[] = "virtio-bus";
+
+ qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_S390_BUS,
+ qdev, virtio_bus_name);
+}
+
+static void virtio_s390_bus_class_init(ObjectClass *klass, void *data)
+{
+ VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
+ BusClass *bus_class = BUS_CLASS(klass);
+
+ bus_class->max_dev = 1;
+ k->notify = virtio_s390_notify;
+ k->device_plugged = virtio_s390_device_plugged;
+}
+
+static const TypeInfo virtio_s390_bus_info = {
+ .name = TYPE_VIRTIO_S390_BUS,
+ .parent = TYPE_VIRTIO_BUS,
+ .instance_size = sizeof(VirtioS390BusState),
+ .class_init = virtio_s390_bus_class_init,
+};
+
+static void s390_virtio_register_types(void)
+{
+ type_register_static(&virtio_s390_bus_info);
+ type_register_static(&s390_virtio_bus_info);
+ type_register_static(&virtio_s390_device_info);
+ type_register_static(&s390_virtio_serial);
+ type_register_static(&s390_virtio_blk);
+ type_register_static(&s390_virtio_net);
+ type_register_static(&s390_virtio_scsi);
+#ifdef CONFIG_VHOST_SCSI
+ type_register_static(&s390_vhost_scsi);
+#endif
+ type_register_static(&s390_virtio_rng);
+ type_register_static(&s390_virtio_bridge_info);
+}
+
+type_init(s390_virtio_register_types)
diff --git a/src/hw/s390x/s390-virtio-bus.h b/src/hw/s390x/s390-virtio-bus.h
new file mode 100644
index 0000000..7ad295e
--- /dev/null
+++ b/src/hw/s390x/s390-virtio-bus.h
@@ -0,0 +1,186 @@
+/*
+ * QEMU S390x VirtIO BUS definitions
+ *
+ * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef HW_S390_VIRTIO_BUS_H
+#define HW_S390_VIRTIO_BUS_H 1
+
+#include <stddef.h>
+
+#include "standard-headers/asm-s390/kvm_virtio.h"
+#include "standard-headers/linux/virtio_ring.h"
+#include "hw/virtio/virtio-blk.h"
+#include "hw/virtio/virtio-net.h"
+#include "hw/virtio/virtio-rng.h"
+#include "hw/virtio/virtio-serial.h"
+#include "hw/virtio/virtio-scsi.h"
+#include "hw/virtio/virtio-bus.h"
+#ifdef CONFIG_VHOST_SCSI
+#include "hw/virtio/vhost-scsi.h"
+#endif
+
+typedef struct kvm_device_desc KvmDeviceDesc;
+
+#define VIRTIO_DEV_OFFS_TYPE offsetof(KvmDeviceDesc, type)
+#define VIRTIO_DEV_OFFS_NUM_VQ offsetof(KvmDeviceDesc, num_vq)
+#define VIRTIO_DEV_OFFS_FEATURE_LEN offsetof(KvmDeviceDesc, feature_len)
+#define VIRTIO_DEV_OFFS_CONFIG_LEN offsetof(KvmDeviceDesc, config_len)
+#define VIRTIO_DEV_OFFS_STATUS offsetof(KvmDeviceDesc, status)
+#define VIRTIO_DEV_OFFS_CONFIG offsetof(KvmDeviceDesc, config)
+
+typedef struct kvm_vqconfig KvmVqConfig;
+#define VIRTIO_VQCONFIG_OFFS_TOKEN offsetof(KvmVqConfig,token) /* 64 bit */
+#define VIRTIO_VQCONFIG_OFFS_ADDRESS offsetof(KvmVqConfig, address) /* 64 bit */
+#define VIRTIO_VQCONFIG_OFFS_NUM offsetof(KvmVqConfig, num) /* 16 bit */
+#define VIRTIO_VQCONFIG_LEN sizeof(KvmVqConfig)
+
+#define VIRTIO_RING_LEN (TARGET_PAGE_SIZE * 3)
+#define VIRTIO_VRING_AVAIL_IDX_OFFS offsetof(struct vring_avail, idx)
+#define VIRTIO_VRING_USED_IDX_OFFS offsetof(struct vring_used, idx)
+#define S390_DEVICE_PAGES 512
+
+#define TYPE_VIRTIO_S390_DEVICE "virtio-s390-device"
+#define VIRTIO_S390_DEVICE(obj) \
+ OBJECT_CHECK(VirtIOS390Device, (obj), TYPE_VIRTIO_S390_DEVICE)
+#define VIRTIO_S390_DEVICE_CLASS(klass) \
+ OBJECT_CLASS_CHECK(VirtIOS390DeviceClass, (klass), TYPE_VIRTIO_S390_DEVICE)
+#define VIRTIO_S390_DEVICE_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(VirtIOS390DeviceClass, (obj), TYPE_VIRTIO_S390_DEVICE)
+
+#define TYPE_S390_VIRTIO_BUS "s390-virtio-bus"
+#define S390_VIRTIO_BUS(obj) \
+ OBJECT_CHECK(VirtIOS390Bus, (obj), TYPE_S390_VIRTIO_BUS)
+
+/* virtio-s390-bus */
+
+typedef struct VirtioBusState VirtioS390BusState;
+typedef struct VirtioBusClass VirtioS390BusClass;
+
+#define TYPE_VIRTIO_S390_BUS "virtio-s390-bus"
+#define VIRTIO_S390_BUS(obj) \
+ OBJECT_CHECK(VirtioS390BusState, (obj), TYPE_VIRTIO_S390_BUS)
+#define VIRTIO_S390_BUS_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(VirtioS390BusClass, obj, TYPE_VIRTIO_S390_BUS)
+#define VIRTIO_S390_BUS_CLASS(klass) \
+ OBJECT_CLASS_CHECK(VirtioS390BusClass, klass, TYPE_VIRTIO_S390_BUS)
+
+
+typedef struct VirtIOS390Device VirtIOS390Device;
+
+typedef struct VirtIOS390DeviceClass {
+ DeviceClass qdev;
+ void (*realize)(VirtIOS390Device *dev, Error **errp);
+} VirtIOS390DeviceClass;
+
+struct VirtIOS390Device {
+ DeviceState qdev;
+ ram_addr_t dev_offs;
+ ram_addr_t feat_offs;
+ uint8_t feat_len;
+ VirtIODevice *vdev;
+ VirtioBusState bus;
+};
+
+typedef struct VirtIOS390Bus {
+ BusState bus;
+
+ VirtIOS390Device *console;
+ ram_addr_t dev_page;
+ ram_addr_t dev_offs;
+ ram_addr_t next_ring;
+} VirtIOS390Bus;
+
+
+void s390_virtio_device_update_status(VirtIOS390Device *dev);
+
+VirtIOS390Bus *s390_virtio_bus_init(ram_addr_t *ram_size);
+
+VirtIOS390Device *s390_virtio_bus_find_vring(VirtIOS390Bus *bus,
+ ram_addr_t mem, int *vq_num);
+VirtIOS390Device *s390_virtio_bus_find_mem(VirtIOS390Bus *bus, ram_addr_t mem);
+void s390_virtio_device_sync(VirtIOS390Device *dev);
+void s390_virtio_reset_idx(VirtIOS390Device *dev);
+
+/* virtio-blk-s390 */
+
+#define TYPE_VIRTIO_BLK_S390 "virtio-blk-s390"
+#define VIRTIO_BLK_S390(obj) \
+ OBJECT_CHECK(VirtIOBlkS390, (obj), TYPE_VIRTIO_BLK_S390)
+
+typedef struct VirtIOBlkS390 {
+ VirtIOS390Device parent_obj;
+ VirtIOBlock vdev;
+} VirtIOBlkS390;
+
+/* virtio-scsi-s390 */
+
+#define TYPE_VIRTIO_SCSI_S390 "virtio-scsi-s390"
+#define VIRTIO_SCSI_S390(obj) \
+ OBJECT_CHECK(VirtIOSCSIS390, (obj), TYPE_VIRTIO_SCSI_S390)
+
+typedef struct VirtIOSCSIS390 {
+ VirtIOS390Device parent_obj;
+ VirtIOSCSI vdev;
+} VirtIOSCSIS390;
+
+/* virtio-serial-s390 */
+
+#define TYPE_VIRTIO_SERIAL_S390 "virtio-serial-s390"
+#define VIRTIO_SERIAL_S390(obj) \
+ OBJECT_CHECK(VirtIOSerialS390, (obj), TYPE_VIRTIO_SERIAL_S390)
+
+typedef struct VirtIOSerialS390 {
+ VirtIOS390Device parent_obj;
+ VirtIOSerial vdev;
+} VirtIOSerialS390;
+
+/* virtio-net-s390 */
+
+#define TYPE_VIRTIO_NET_S390 "virtio-net-s390"
+#define VIRTIO_NET_S390(obj) \
+ OBJECT_CHECK(VirtIONetS390, (obj), TYPE_VIRTIO_NET_S390)
+
+typedef struct VirtIONetS390 {
+ VirtIOS390Device parent_obj;
+ VirtIONet vdev;
+} VirtIONetS390;
+
+/* vhost-scsi-s390 */
+
+#ifdef CONFIG_VHOST_SCSI
+#define TYPE_VHOST_SCSI_S390 "vhost-scsi-s390"
+#define VHOST_SCSI_S390(obj) \
+ OBJECT_CHECK(VHostSCSIS390, (obj), TYPE_VHOST_SCSI_S390)
+
+typedef struct VHostSCSIS390 {
+ VirtIOS390Device parent_obj;
+ VHostSCSI vdev;
+} VHostSCSIS390;
+#endif
+
+/* virtio-rng-s390 */
+
+#define TYPE_VIRTIO_RNG_S390 "virtio-rng-s390"
+#define VIRTIO_RNG_S390(obj) \
+ OBJECT_CHECK(VirtIORNGS390, (obj), TYPE_VIRTIO_RNG_S390)
+
+typedef struct VirtIORNGS390 {
+ VirtIOS390Device parent_obj;
+ VirtIORNG vdev;
+} VirtIORNGS390;
+
+#endif
diff --git a/src/hw/s390x/s390-virtio-ccw.c b/src/hw/s390x/s390-virtio-ccw.c
new file mode 100644
index 0000000..5a52ff2
--- /dev/null
+++ b/src/hw/s390x/s390-virtio-ccw.c
@@ -0,0 +1,318 @@
+/*
+ * virtio ccw machine
+ *
+ * Copyright 2012 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "hw/boards.h"
+#include "exec/address-spaces.h"
+#include "s390-virtio.h"
+#include "hw/s390x/sclp.h"
+#include "hw/s390x/s390_flic.h"
+#include "ioinst.h"
+#include "css.h"
+#include "virtio-ccw.h"
+#include "qemu/config-file.h"
+#include "s390-pci-bus.h"
+#include "hw/s390x/storage-keys.h"
+#include "hw/compat.h"
+
+#define TYPE_S390_CCW_MACHINE "s390-ccw-machine"
+
+#define S390_CCW_MACHINE(obj) \
+ OBJECT_CHECK(S390CcwMachineState, (obj), TYPE_S390_CCW_MACHINE)
+
+typedef struct S390CcwMachineState {
+ /*< private >*/
+ MachineState parent_obj;
+
+ /*< public >*/
+ bool aes_key_wrap;
+ bool dea_key_wrap;
+} S390CcwMachineState;
+
+static const char *const reset_dev_types[] = {
+ "virtual-css-bridge",
+ "s390-sclp-event-facility",
+ "s390-flic",
+ "diag288",
+};
+
+void subsystem_reset(void)
+{
+ DeviceState *dev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(reset_dev_types); i++) {
+ dev = DEVICE(object_resolve_path_type("", reset_dev_types[i], NULL));
+ if (dev) {
+ qdev_reset_all(dev);
+ }
+ }
+}
+
+static int virtio_ccw_hcall_notify(const uint64_t *args)
+{
+ uint64_t subch_id = args[0];
+ uint64_t queue = args[1];
+ SubchDev *sch;
+ int cssid, ssid, schid, m;
+
+ if (ioinst_disassemble_sch_ident(subch_id, &m, &cssid, &ssid, &schid)) {
+ return -EINVAL;
+ }
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (!sch || !css_subch_visible(sch)) {
+ return -EINVAL;
+ }
+ if (queue >= VIRTIO_CCW_QUEUE_MAX) {
+ return -EINVAL;
+ }
+ virtio_queue_notify(virtio_ccw_get_vdev(sch), queue);
+ return 0;
+}
+
+static int virtio_ccw_hcall_early_printk(const uint64_t *args)
+{
+ uint64_t mem = args[0];
+
+ if (mem < ram_size) {
+ /* Early printk */
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void virtio_ccw_register_hcalls(void)
+{
+ s390_register_virtio_hypercall(KVM_S390_VIRTIO_CCW_NOTIFY,
+ virtio_ccw_hcall_notify);
+ /* Tolerate early printk. */
+ s390_register_virtio_hypercall(KVM_S390_VIRTIO_NOTIFY,
+ virtio_ccw_hcall_early_printk);
+}
+
+void s390_memory_init(ram_addr_t mem_size)
+{
+ MemoryRegion *sysmem = get_system_memory();
+ MemoryRegion *ram = g_new(MemoryRegion, 1);
+
+ /* allocate RAM for core */
+ memory_region_allocate_system_memory(ram, NULL, "s390.ram", mem_size);
+ memory_region_add_subregion(sysmem, 0, ram);
+
+ /* Initialize storage key device */
+ s390_skeys_init();
+}
+
+static void ccw_init(MachineState *machine)
+{
+ int ret;
+ VirtualCssBus *css_bus;
+ DeviceState *dev;
+
+ s390_sclp_init();
+ s390_memory_init(machine->ram_size);
+
+ /* get a BUS */
+ css_bus = virtual_css_bus_init();
+ s390_init_ipl_dev(machine->kernel_filename, machine->kernel_cmdline,
+ machine->initrd_filename, "s390-ccw.img", true);
+ s390_flic_init();
+
+ dev = qdev_create(NULL, TYPE_S390_PCI_HOST_BRIDGE);
+ object_property_add_child(qdev_get_machine(), TYPE_S390_PCI_HOST_BRIDGE,
+ OBJECT(dev), NULL);
+ qdev_init_nofail(dev);
+
+ /* register hypercalls */
+ virtio_ccw_register_hcalls();
+
+ /* init CPUs */
+ s390_init_cpus(machine->cpu_model);
+
+ if (kvm_enabled()) {
+ kvm_s390_enable_css_support(s390_cpu_addr2state(0));
+ }
+ /*
+ * Create virtual css and set it as default so that non mcss-e
+ * enabled guests only see virtio devices.
+ */
+ ret = css_create_css_image(VIRTUAL_CSSID, true);
+ assert(ret == 0);
+
+ /* Create VirtIO network adapters */
+ s390_create_virtio_net(BUS(css_bus), "virtio-net-ccw");
+
+ /* Register savevm handler for guest TOD clock */
+ register_savevm(NULL, "todclock", 0, 1,
+ gtod_save, gtod_load, kvm_state);
+}
+
+static void ccw_machine_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+ NMIClass *nc = NMI_CLASS(oc);
+
+ mc->init = ccw_init;
+ mc->reset = s390_machine_reset;
+ mc->block_default_type = IF_VIRTIO;
+ mc->no_cdrom = 1;
+ mc->no_floppy = 1;
+ mc->no_serial = 1;
+ mc->no_parallel = 1;
+ mc->no_sdcard = 1;
+ mc->use_sclp = 1;
+ mc->max_cpus = 255;
+ nc->nmi_monitor_handler = s390_nmi;
+}
+
+static inline bool machine_get_aes_key_wrap(Object *obj, Error **errp)
+{
+ S390CcwMachineState *ms = S390_CCW_MACHINE(obj);
+
+ return ms->aes_key_wrap;
+}
+
+static inline void machine_set_aes_key_wrap(Object *obj, bool value,
+ Error **errp)
+{
+ S390CcwMachineState *ms = S390_CCW_MACHINE(obj);
+
+ ms->aes_key_wrap = value;
+}
+
+static inline bool machine_get_dea_key_wrap(Object *obj, Error **errp)
+{
+ S390CcwMachineState *ms = S390_CCW_MACHINE(obj);
+
+ return ms->dea_key_wrap;
+}
+
+static inline void machine_set_dea_key_wrap(Object *obj, bool value,
+ Error **errp)
+{
+ S390CcwMachineState *ms = S390_CCW_MACHINE(obj);
+
+ ms->dea_key_wrap = value;
+}
+
+static inline void s390_machine_initfn(Object *obj)
+{
+ object_property_add_bool(obj, "aes-key-wrap",
+ machine_get_aes_key_wrap,
+ machine_set_aes_key_wrap, NULL);
+ object_property_set_description(obj, "aes-key-wrap",
+ "enable/disable AES key wrapping using the CPACF wrapping key",
+ NULL);
+ object_property_set_bool(obj, true, "aes-key-wrap", NULL);
+
+ object_property_add_bool(obj, "dea-key-wrap",
+ machine_get_dea_key_wrap,
+ machine_set_dea_key_wrap, NULL);
+ object_property_set_description(obj, "dea-key-wrap",
+ "enable/disable DEA key wrapping using the CPACF wrapping key",
+ NULL);
+ object_property_set_bool(obj, true, "dea-key-wrap", NULL);
+}
+
+static const TypeInfo ccw_machine_info = {
+ .name = TYPE_S390_CCW_MACHINE,
+ .parent = TYPE_MACHINE,
+ .abstract = true,
+ .instance_size = sizeof(S390CcwMachineState),
+ .instance_init = s390_machine_initfn,
+ .class_init = ccw_machine_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_NMI },
+ { }
+ },
+};
+
+#define CCW_COMPAT_2_4 \
+ HW_COMPAT_2_4 \
+ {\
+ .driver = TYPE_S390_SKEYS,\
+ .property = "migration-enabled",\
+ .value = "off",\
+ },{\
+ .driver = "virtio-blk-ccw",\
+ .property = "max_revision",\
+ .value = "0",\
+ },{\
+ .driver = "virtio-balloon-ccw",\
+ .property = "max_revision",\
+ .value = "0",\
+ },{\
+ .driver = "virtio-serial-ccw",\
+ .property = "max_revision",\
+ .value = "0",\
+ },{\
+ .driver = "virtio-9p-ccw",\
+ .property = "max_revision",\
+ .value = "0",\
+ },{\
+ .driver = "virtio-rng-ccw",\
+ .property = "max_revision",\
+ .value = "0",\
+ },{\
+ .driver = "virtio-net-ccw",\
+ .property = "max_revision",\
+ .value = "0",\
+ },{\
+ .driver = "virtio-scsi-ccw",\
+ .property = "max_revision",\
+ .value = "0",\
+ },{\
+ .driver = "vhost-scsi-ccw",\
+ .property = "max_revision",\
+ .value = "0",\
+ },
+
+static void ccw_machine_2_4_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+ static GlobalProperty compat_props[] = {
+ CCW_COMPAT_2_4
+ { /* end of list */ }
+ };
+
+ mc->desc = "VirtIO-ccw based S390 machine v2.4";
+ mc->compat_props = compat_props;
+}
+
+static const TypeInfo ccw_machine_2_4_info = {
+ .name = MACHINE_TYPE_NAME("s390-ccw-virtio-2.4"),
+ .parent = TYPE_S390_CCW_MACHINE,
+ .class_init = ccw_machine_2_4_class_init,
+};
+
+static void ccw_machine_2_5_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+
+ mc->alias = "s390-ccw-virtio";
+ mc->desc = "VirtIO-ccw based S390 machine v2.5";
+ mc->is_default = 1;
+}
+
+static const TypeInfo ccw_machine_2_5_info = {
+ .name = MACHINE_TYPE_NAME("s390-ccw-virtio-2.5"),
+ .parent = TYPE_S390_CCW_MACHINE,
+ .class_init = ccw_machine_2_5_class_init,
+};
+
+static void ccw_machine_register_types(void)
+{
+ type_register_static(&ccw_machine_info);
+ type_register_static(&ccw_machine_2_4_info);
+ type_register_static(&ccw_machine_2_5_info);
+}
+
+type_init(ccw_machine_register_types)
diff --git a/src/hw/s390x/s390-virtio-hcall.c b/src/hw/s390x/s390-virtio-hcall.c
new file mode 100644
index 0000000..c7bdc20
--- /dev/null
+++ b/src/hw/s390x/s390-virtio-hcall.c
@@ -0,0 +1,40 @@
+/*
+ * Support for virtio hypercalls on s390
+ *
+ * Copyright 2012 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "cpu.h"
+#include "hw/s390x/s390-virtio.h"
+
+#define MAX_DIAG_SUBCODES 255
+
+static s390_virtio_fn s390_diag500_table[MAX_DIAG_SUBCODES];
+
+void s390_register_virtio_hypercall(uint64_t code, s390_virtio_fn fn)
+{
+ assert(code < MAX_DIAG_SUBCODES);
+ assert(!s390_diag500_table[code]);
+
+ s390_diag500_table[code] = fn;
+}
+
+int s390_virtio_hypercall(CPUS390XState *env)
+{
+ s390_virtio_fn fn;
+
+ if (env->regs[1] < MAX_DIAG_SUBCODES) {
+ fn = s390_diag500_table[env->regs[1]];
+ if (fn) {
+ env->regs[2] = fn(&env->regs[2]);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
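+
+/*
+ * For illustration: with the dispatch above, a guest issues one of these
+ * hypercalls via DIAGNOSE 0x500 with the subcode (e.g.
+ * KVM_S390_VIRTIO_CCW_NOTIFY) in gr1 and any arguments starting at gr2;
+ * the handler's return value is passed back to the guest in gr2.
+ */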
diff --git a/src/hw/s390x/s390-virtio.c b/src/hw/s390x/s390-virtio.c
new file mode 100644
index 0000000..ae55760
--- /dev/null
+++ b/src/hw/s390x/s390-virtio.c
@@ -0,0 +1,373 @@
+/*
+ * QEMU S390 virtio target
+ *
+ * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
+ * Copyright IBM Corp 2012
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * Contributions after 2012-10-29 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ *
+ * You should have received a copy of the GNU (Lesser) General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw/hw.h"
+#include "qapi/qmp/qerror.h"
+#include "qemu/error-report.h"
+#include "sysemu/block-backend.h"
+#include "sysemu/blockdev.h"
+#include "sysemu/sysemu.h"
+#include "net/net.h"
+#include "hw/boards.h"
+#include "hw/loader.h"
+#include "hw/virtio/virtio.h"
+#include "sysemu/kvm.h"
+#include "exec/address-spaces.h"
+#include "sysemu/qtest.h"
+
+#include "hw/s390x/s390-virtio-bus.h"
+#include "hw/s390x/sclp.h"
+#include "hw/s390x/s390_flic.h"
+#include "hw/s390x/s390-virtio.h"
+#include "hw/s390x/storage-keys.h"
+#include "hw/s390x/ipl.h"
+#include "cpu.h"
+
+//#define DEBUG_S390
+
+#ifdef DEBUG_S390
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+#define MAX_BLK_DEVS 10
+#define ZIPL_FILENAME "s390-zipl.rom"
+#define S390_MACHINE "s390-virtio"
+#define TYPE_S390_MACHINE MACHINE_TYPE_NAME(S390_MACHINE)
+
+#define S390_TOD_CLOCK_VALUE_MISSING 0x00
+#define S390_TOD_CLOCK_VALUE_PRESENT 0x01
+
+static VirtIOS390Bus *s390_bus;
+static S390CPU **ipi_states;
+
+S390CPU *s390_cpu_addr2state(uint16_t cpu_addr)
+{
+ if (cpu_addr >= smp_cpus) {
+ return NULL;
+ }
+
+ return ipi_states[cpu_addr];
+}
+
+static int s390_virtio_hcall_notify(const uint64_t *args)
+{
+ uint64_t mem = args[0];
+ int r = 0, i;
+
+ if (mem > ram_size) {
+ VirtIOS390Device *dev = s390_virtio_bus_find_vring(s390_bus, mem, &i);
+ if (dev) {
+ /*
+ * Older kernels will use the virtqueue before setting DRIVER_OK.
+ * In this case the feature bits are not yet up to date, meaning
+ * that several funny things can happen, e.g. the guest thinks
+ * EVENT_IDX is on and QEMU thinks it is off. Let's force a feature
+ * and status sync.
+ */
+ if (!(dev->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ s390_virtio_device_update_status(dev);
+ }
+ virtio_queue_notify(dev->vdev, i);
+ } else {
+ r = -EINVAL;
+ }
+ } else {
+ /* Early printk */
+ }
+ return r;
+}
+
+static int s390_virtio_hcall_reset(const uint64_t *args)
+{
+ uint64_t mem = args[0];
+ VirtIOS390Device *dev;
+
+ dev = s390_virtio_bus_find_mem(s390_bus, mem);
+ if (dev == NULL) {
+ return -EINVAL;
+ }
+ virtio_reset(dev->vdev);
+ address_space_stb(&address_space_memory,
+ dev->dev_offs + VIRTIO_DEV_OFFS_STATUS, 0,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ s390_virtio_device_sync(dev);
+ s390_virtio_reset_idx(dev);
+
+ return 0;
+}
+
+static int s390_virtio_hcall_set_status(const uint64_t *args)
+{
+ uint64_t mem = args[0];
+ int r = 0;
+ VirtIOS390Device *dev;
+
+ dev = s390_virtio_bus_find_mem(s390_bus, mem);
+ if (dev) {
+ s390_virtio_device_update_status(dev);
+ } else {
+ r = -EINVAL;
+ }
+ return r;
+}
+
+static void s390_virtio_register_hcalls(void)
+{
+ s390_register_virtio_hypercall(KVM_S390_VIRTIO_NOTIFY,
+ s390_virtio_hcall_notify);
+ s390_register_virtio_hypercall(KVM_S390_VIRTIO_RESET,
+ s390_virtio_hcall_reset);
+ s390_register_virtio_hypercall(KVM_S390_VIRTIO_SET_STATUS,
+ s390_virtio_hcall_set_status);
+}
+
+void s390_init_ipl_dev(const char *kernel_filename,
+ const char *kernel_cmdline,
+ const char *initrd_filename,
+ const char *firmware,
+ bool enforce_bios)
+{
+ Object *new = object_new(TYPE_S390_IPL);
+ DeviceState *dev = DEVICE(new);
+
+ if (kernel_filename) {
+ qdev_prop_set_string(dev, "kernel", kernel_filename);
+ }
+ if (initrd_filename) {
+ qdev_prop_set_string(dev, "initrd", initrd_filename);
+ }
+ qdev_prop_set_string(dev, "cmdline", kernel_cmdline);
+ qdev_prop_set_string(dev, "firmware", firmware);
+ qdev_prop_set_bit(dev, "enforce_bios", enforce_bios);
+ object_property_add_child(qdev_get_machine(), TYPE_S390_IPL,
+ new, NULL);
+ object_unref(new);
+ qdev_init_nofail(dev);
+}
+
+void s390_init_cpus(const char *cpu_model)
+{
+ int i;
+
+ if (cpu_model == NULL) {
+ cpu_model = "host";
+ }
+
+ ipi_states = g_malloc(sizeof(S390CPU *) * smp_cpus);
+
+ for (i = 0; i < smp_cpus; i++) {
+ S390CPU *cpu;
+ CPUState *cs;
+
+ cpu = cpu_s390x_init(cpu_model);
+ cs = CPU(cpu);
+
+ ipi_states[i] = cpu;
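+        /* All CPUs start stopped; the IPL CPU is started at machine reset. */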
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ }
+}
+
+
+void s390_create_virtio_net(BusState *bus, const char *name)
+{
+ int i;
+
+ for (i = 0; i < nb_nics; i++) {
+ NICInfo *nd = &nd_table[i];
+ DeviceState *dev;
+
+ if (!nd->model) {
+ nd->model = g_strdup("virtio");
+ }
+
+ if (strcmp(nd->model, "virtio")) {
+ fprintf(stderr, "S390 only supports VirtIO nics\n");
+ exit(1);
+ }
+
+ dev = qdev_create(bus, name);
+ qdev_set_nic_properties(dev, nd);
+ qdev_init_nofail(dev);
+ }
+}
+
+void gtod_save(QEMUFile *f, void *opaque)
+{
+ uint64_t tod_low;
+ uint8_t tod_high;
+ int r;
+
+ r = s390_get_clock(&tod_high, &tod_low);
+ if (r) {
+ fprintf(stderr, "WARNING: Unable to get guest clock for migration. "
+ "Error code %d. Guest clock will not be migrated "
+ "which could cause the guest to hang.\n", r);
+ qemu_put_byte(f, S390_TOD_CLOCK_VALUE_MISSING);
+ return;
+ }
+
+ qemu_put_byte(f, S390_TOD_CLOCK_VALUE_PRESENT);
+ qemu_put_byte(f, tod_high);
+ qemu_put_be64(f, tod_low);
+}
+
+int gtod_load(QEMUFile *f, void *opaque, int version_id)
+{
+ uint64_t tod_low;
+ uint8_t tod_high;
+ int r;
+
+ if (qemu_get_byte(f) == S390_TOD_CLOCK_VALUE_MISSING) {
+ fprintf(stderr, "WARNING: Guest clock was not migrated. This could "
+ "cause the guest to hang.\n");
+ return 0;
+ }
+
+ tod_high = qemu_get_byte(f);
+ tod_low = qemu_get_be64(f);
+
+ r = s390_set_clock(&tod_high, &tod_low);
+ if (r) {
+ fprintf(stderr, "WARNING: Unable to set guest clock value. "
+ "s390_get_clock returned error %d. This could cause "
+ "the guest to hang.\n", r);
+ }
+
+ return 0;
+}
+
+/* S390 hardware initialisation */
+static void s390_init(MachineState *machine)
+{
+ ram_addr_t my_ram_size;
+ void *virtio_region;
+ hwaddr virtio_region_len;
+ hwaddr virtio_region_start;
+
+ if (!qtest_enabled()) {
+ error_printf("WARNING\n"
+ "The s390-virtio machine (non-ccw) is deprecated.\n"
+ "It will be removed in 2.6. Please use s390-ccw-virtio\n");
+ }
+
+ if (machine->ram_slots) {
+ error_report("Memory hotplug not supported by the selected machine.");
+ exit(EXIT_FAILURE);
+ }
+ s390_sclp_init();
+ my_ram_size = machine->ram_size;
+
+ /* get a BUS */
+ s390_bus = s390_virtio_bus_init(&my_ram_size);
+ s390_init_ipl_dev(machine->kernel_filename, machine->kernel_cmdline,
+ machine->initrd_filename, ZIPL_FILENAME, false);
+ s390_flic_init();
+
+ /* register hypercalls */
+ s390_virtio_register_hcalls();
+
+ /* allocate RAM */
+ s390_memory_init(my_ram_size);
+
+    /*
+     * Clear the device pages that s390_virtio_bus_init reserved above the
+     * guest-visible RAM.
+     */
+ virtio_region_len = my_ram_size - ram_size;
+ virtio_region_start = ram_size;
+ virtio_region = cpu_physical_memory_map(virtio_region_start,
+ &virtio_region_len, true);
+ memset(virtio_region, 0, virtio_region_len);
+ cpu_physical_memory_unmap(virtio_region, virtio_region_len, 1,
+ virtio_region_len);
+
+ /* init CPUs */
+ s390_init_cpus(machine->cpu_model);
+
+ /* Create VirtIO network adapters */
+ s390_create_virtio_net((BusState *)s390_bus, "virtio-net-s390");
+
+ /* Register savevm handler for guest TOD clock */
+ register_savevm(NULL, "todclock", 0, 1, gtod_save, gtod_load, NULL);
+}
+
+void s390_nmi(NMIState *n, int cpu_index, Error **errp)
+{
+ CPUState *cs = qemu_get_cpu(cpu_index);
+
+ if (s390_cpu_restart(S390_CPU(cs))) {
+ error_setg(errp, QERR_UNSUPPORTED);
+ }
+}
+
+void s390_machine_reset(void)
+{
+ S390CPU *ipl_cpu = S390_CPU(qemu_get_cpu(0));
+
+ qemu_devices_reset();
+ s390_cmma_reset();
+ s390_crypto_reset();
+
+ /* all cpus are stopped - configure and start the ipl cpu only */
+ s390_ipl_prepare_cpu(ipl_cpu);
+ s390_cpu_set_state(CPU_STATE_OPERATING, ipl_cpu);
+}
+
+static void s390_machine_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+ NMIClass *nc = NMI_CLASS(oc);
+
+ mc->alias = "s390";
+ mc->desc = "VirtIO based S390 machine (deprecated)";
+ mc->init = s390_init;
+ mc->reset = s390_machine_reset;
+ mc->block_default_type = IF_VIRTIO;
+ mc->max_cpus = 255;
+ mc->no_serial = 1;
+ mc->no_parallel = 1;
+ mc->use_virtcon = 1;
+ mc->no_floppy = 1;
+ mc->no_cdrom = 1;
+ mc->no_sdcard = 1;
+ nc->nmi_monitor_handler = s390_nmi;
+}
+
+static const TypeInfo s390_machine_info = {
+ .name = TYPE_S390_MACHINE,
+ .parent = TYPE_MACHINE,
+ .class_init = s390_machine_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_NMI },
+ { }
+ },
+};
+
+static void s390_machine_register_types(void)
+{
+ type_register_static(&s390_machine_info);
+}
+
+type_init(s390_machine_register_types)
diff --git a/src/hw/s390x/s390-virtio.h b/src/hw/s390x/s390-virtio.h
new file mode 100644
index 0000000..eebce8e
--- /dev/null
+++ b/src/hw/s390x/s390-virtio.h
@@ -0,0 +1,32 @@
+/*
+ * Virtio interfaces for s390
+ *
+ * Copyright 2012 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390_VIRTIO_H
+#define HW_S390_VIRTIO_H 1
+
+#include "hw/nmi.h"
+#include "standard-headers/asm-s390/kvm_virtio.h"
+#include "standard-headers/asm-s390/virtio-ccw.h"
+
+typedef int (*s390_virtio_fn)(const uint64_t *args);
+void s390_register_virtio_hypercall(uint64_t code, s390_virtio_fn fn);
+
+void s390_init_cpus(const char *cpu_model);
+void s390_init_ipl_dev(const char *kernel_filename,
+ const char *kernel_cmdline,
+ const char *initrd_filename,
+ const char *firmware,
+ bool enforce_bios);
+void s390_create_virtio_net(BusState *bus, const char *name);
+void s390_nmi(NMIState *n, int cpu_index, Error **errp);
+void s390_machine_reset(void);
+void s390_memory_init(ram_addr_t mem_size);
+#endif
diff --git a/src/hw/s390x/sclp.c b/src/hw/s390x/sclp.c
new file mode 100644
index 0000000..a061b49
--- /dev/null
+++ b/src/hw/s390x/sclp.c
@@ -0,0 +1,615 @@
+/*
+ * SCLP Support
+ *
+ * Copyright IBM, Corp. 2012
+ *
+ * Authors:
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ * Heinz Graalfs <graalfs@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "cpu.h"
+#include "sysemu/kvm.h"
+#include "exec/memory.h"
+#include "sysemu/sysemu.h"
+#include "exec/address-spaces.h"
+#include "hw/boards.h"
+#include "hw/s390x/sclp.h"
+#include "hw/s390x/event-facility.h"
+#include "hw/s390x/s390-pci-bus.h"
+
+static inline SCLPDevice *get_sclp_device(void)
+{
+ return SCLP(object_resolve_path_type("", TYPE_SCLP, NULL));
+}
+
+/* Provide information about the configuration, CPUs and storage */
+static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb)
+{
+ ReadInfo *read_info = (ReadInfo *) sccb;
+ MachineState *machine = MACHINE(qdev_get_machine());
+ sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
+ CPUState *cpu;
+ int cpu_count = 0;
+ int i = 0;
+ int rnsize, rnmax;
+ int slots = MIN(machine->ram_slots, s390_get_memslot_count(kvm_state));
+
+ CPU_FOREACH(cpu) {
+ cpu_count++;
+ }
+
+ /* CPU information */
+ read_info->entries_cpu = cpu_to_be16(cpu_count);
+ read_info->offset_cpu = cpu_to_be16(offsetof(ReadInfo, entries));
+ read_info->highest_cpu = cpu_to_be16(max_cpus);
+
+ for (i = 0; i < cpu_count; i++) {
+ read_info->entries[i].address = i;
+ read_info->entries[i].type = 0;
+ }
+
+ read_info->facilities = cpu_to_be64(SCLP_HAS_CPU_INFO |
+ SCLP_HAS_PCI_RECONFIG);
+
+ /* Memory Hotplug is only supported for the ccw machine type */
+ if (mhd) {
+ mhd->standby_subregion_size = MEM_SECTION_SIZE;
+ /* Deduct the memory slot already used for core */
+ if (slots > 0) {
+ while ((mhd->standby_subregion_size * (slots - 1)
+ < mhd->standby_mem_size)) {
+ mhd->standby_subregion_size = mhd->standby_subregion_size << 1;
+ }
+ }
+ /*
+ * Initialize mapping of guest standby memory sections indicating which
+ * are and are not online. Assume all standby memory begins offline.
+ */
+ if (mhd->standby_state_map == 0) {
+ if (mhd->standby_mem_size % mhd->standby_subregion_size) {
+ mhd->standby_state_map = g_malloc0((mhd->standby_mem_size /
+ mhd->standby_subregion_size + 1) *
+ (mhd->standby_subregion_size /
+ MEM_SECTION_SIZE));
+ } else {
+ mhd->standby_state_map = g_malloc0(mhd->standby_mem_size /
+ MEM_SECTION_SIZE);
+ }
+ }
+ mhd->padded_ram_size = ram_size + mhd->pad_size;
+ mhd->rzm = 1 << mhd->increment_size;
+
+ read_info->facilities |= cpu_to_be64(SCLP_FC_ASSIGN_ATTACH_READ_STOR);
+ }
+
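+    /*
+     * An increment size of up to 128 MB fits into the one-byte rnsize
+     * field; larger values go into rnsize2, with rnsize set to zero so
+     * the guest knows to use the extended field. rnmax and rnmax2 split
+     * the maximum increment number analogously.
+     */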
+ rnsize = 1 << (sclp->increment_size - 20);
+ if (rnsize <= 128) {
+ read_info->rnsize = rnsize;
+ } else {
+ read_info->rnsize = 0;
+ read_info->rnsize2 = cpu_to_be32(rnsize);
+ }
+
+ rnmax = machine->maxram_size >> sclp->increment_size;
+ if (rnmax < 0x10000) {
+ read_info->rnmax = cpu_to_be16(rnmax);
+ } else {
+ read_info->rnmax = cpu_to_be16(0);
+ read_info->rnmax2 = cpu_to_be64(rnmax);
+ }
+
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
+}
+
+static void read_storage_element0_info(SCLPDevice *sclp, SCCB *sccb)
+{
+ int i, assigned;
+ int subincrement_id = SCLP_STARTING_SUBINCREMENT_ID;
+ ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
+ sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
+
+ if (!mhd) {
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
+ return;
+ }
+
+ if ((ram_size >> mhd->increment_size) >= 0x10000) {
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
+ return;
+ }
+
+ /* Return information regarding core memory */
+ storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
+ assigned = ram_size >> mhd->increment_size;
+ storage_info->assigned = cpu_to_be16(assigned);
+
+ for (i = 0; i < assigned; i++) {
+ storage_info->entries[i] = cpu_to_be32(subincrement_id);
+ subincrement_id += SCLP_INCREMENT_UNIT;
+ }
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
+}
+
+static void read_storage_element1_info(SCLPDevice *sclp, SCCB *sccb)
+{
+ ReadStorageElementInfo *storage_info = (ReadStorageElementInfo *) sccb;
+ sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
+
+ if (!mhd) {
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
+ return;
+ }
+
+ if ((mhd->standby_mem_size >> mhd->increment_size) >= 0x10000) {
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_SCCB_BOUNDARY_VIOLATION);
+ return;
+ }
+
+ /* Return information regarding standby memory */
+ storage_info->max_id = cpu_to_be16(mhd->standby_mem_size ? 1 : 0);
+ storage_info->assigned = cpu_to_be16(mhd->standby_mem_size >>
+ mhd->increment_size);
+ storage_info->standby = cpu_to_be16(mhd->standby_mem_size >>
+ mhd->increment_size);
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_STANDBY_READ_COMPLETION);
+}
+
+static void attach_storage_element(SCLPDevice *sclp, SCCB *sccb,
+ uint16_t element)
+{
+ int i, assigned, subincrement_id;
+ AttachStorageElement *attach_info = (AttachStorageElement *) sccb;
+ sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
+
+ if (!mhd) {
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
+ return;
+ }
+
+ if (element != 1) {
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
+ return;
+ }
+
+ assigned = mhd->standby_mem_size >> mhd->increment_size;
+ attach_info->assigned = cpu_to_be16(assigned);
+ subincrement_id = ((ram_size >> mhd->increment_size) << 16)
+ + SCLP_STARTING_SUBINCREMENT_ID;
+ for (i = 0; i < assigned; i++) {
+ attach_info->entries[i] = cpu_to_be32(subincrement_id);
+ subincrement_id += SCLP_INCREMENT_UNIT;
+ }
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
+}
+
+static void assign_storage(SCLPDevice *sclp, SCCB *sccb)
+{
+ MemoryRegion *mr = NULL;
+ uint64_t this_subregion_size;
+ AssignStorage *assign_info = (AssignStorage *) sccb;
+ sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
+ ram_addr_t assign_addr;
+ MemoryRegion *sysmem = get_system_memory();
+
+ if (!mhd) {
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
+ return;
+ }
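+    /* Storage increment numbers are 1-based; rzm is the increment size. */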
+ assign_addr = (assign_info->rn - 1) * mhd->rzm;
+
+ if ((assign_addr % MEM_SECTION_SIZE == 0) &&
+ (assign_addr >= mhd->padded_ram_size)) {
+ /* Re-use existing memory region if found */
+ mr = memory_region_find(sysmem, assign_addr, 1).mr;
+ memory_region_unref(mr);
+ if (!mr) {
+
+ MemoryRegion *standby_ram = g_new(MemoryRegion, 1);
+
+ /* offset to align to standby_subregion_size for allocation */
+ ram_addr_t offset = assign_addr -
+ (assign_addr - mhd->padded_ram_size)
+ % mhd->standby_subregion_size;
+
+ /* strlen("standby.ram") + 4 (Max of KVM_MEMORY_SLOTS) + NULL */
+ char id[16];
+ snprintf(id, 16, "standby.ram%d",
+ (int)((offset - mhd->padded_ram_size) /
+ mhd->standby_subregion_size) + 1);
+
+ /* Allocate a subregion of the calculated standby_subregion_size */
+ if (offset + mhd->standby_subregion_size >
+ mhd->padded_ram_size + mhd->standby_mem_size) {
+ this_subregion_size = mhd->padded_ram_size +
+ mhd->standby_mem_size - offset;
+ } else {
+ this_subregion_size = mhd->standby_subregion_size;
+ }
+
+ memory_region_init_ram(standby_ram, NULL, id, this_subregion_size,
+ &error_fatal);
+ /* This is a hack to make memory hotunplug work again. Once we have
+ * subdevices, we have to unparent them when unassigning memory,
+ * instead of doing it via the ref count of the MemoryRegion. */
+ object_ref(OBJECT(standby_ram));
+ object_unparent(OBJECT(standby_ram));
+ vmstate_register_ram_global(standby_ram);
+ memory_region_add_subregion(sysmem, offset, standby_ram);
+ }
+ /* The specified subregion is no longer in standby */
+ mhd->standby_state_map[(assign_addr - mhd->padded_ram_size)
+ / MEM_SECTION_SIZE] = 1;
+ }
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
+}
+
+static void unassign_storage(SCLPDevice *sclp, SCCB *sccb)
+{
+ MemoryRegion *mr = NULL;
+ AssignStorage *assign_info = (AssignStorage *) sccb;
+ sclpMemoryHotplugDev *mhd = get_sclp_memory_hotplug_dev();
+ ram_addr_t unassign_addr;
+ MemoryRegion *sysmem = get_system_memory();
+
+ if (!mhd) {
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
+ return;
+ }
+ unassign_addr = (assign_info->rn - 1) * mhd->rzm;
+
+ /* if the addr is a multiple of 256 MB */
+ if ((unassign_addr % MEM_SECTION_SIZE == 0) &&
+ (unassign_addr >= mhd->padded_ram_size)) {
+ mhd->standby_state_map[(unassign_addr -
+ mhd->padded_ram_size) / MEM_SECTION_SIZE] = 0;
+
+ /* find the specified memory region and destroy it */
+ mr = memory_region_find(sysmem, unassign_addr, 1).mr;
+ memory_region_unref(mr);
+ if (mr) {
+ int i;
+ int is_removable = 1;
+ ram_addr_t map_offset = (unassign_addr - mhd->padded_ram_size -
+ (unassign_addr - mhd->padded_ram_size)
+ % mhd->standby_subregion_size);
+ /* Mark all affected subregions as 'standby' once again */
+ for (i = 0;
+ i < (mhd->standby_subregion_size / MEM_SECTION_SIZE);
+ i++) {
+
+ if (mhd->standby_state_map[i + map_offset / MEM_SECTION_SIZE]) {
+ is_removable = 0;
+ break;
+ }
+ }
+ if (is_removable) {
+ memory_region_del_subregion(sysmem, mr);
+ object_unref(OBJECT(mr));
+ }
+ }
+ }
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_COMPLETION);
+}
+
+/* Provide information about the CPU */
+static void sclp_read_cpu_info(SCLPDevice *sclp, SCCB *sccb)
+{
+ ReadCpuInfo *cpu_info = (ReadCpuInfo *) sccb;
+ CPUState *cpu;
+ int cpu_count = 0;
+ int i = 0;
+
+ CPU_FOREACH(cpu) {
+ cpu_count++;
+ }
+
+ cpu_info->nr_configured = cpu_to_be16(cpu_count);
+ cpu_info->offset_configured = cpu_to_be16(offsetof(ReadCpuInfo, entries));
+ cpu_info->nr_standby = cpu_to_be16(0);
+
+    /* The standby area follows the configured entries of 16 bytes each */
+    cpu_info->offset_standby = cpu_to_be16(offsetof(ReadCpuInfo, entries)
+                                           + cpu_count * sizeof(CPUEntry));
+
+ for (i = 0; i < cpu_count; i++) {
+ cpu_info->entries[i].address = i;
+ cpu_info->entries[i].type = 0;
+ }
+
+ sccb->h.response_code = cpu_to_be16(SCLP_RC_NORMAL_READ_COMPLETION);
+}
+
+static void sclp_execute(SCLPDevice *sclp, SCCB *sccb, uint32_t code)
+{
+ SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
+ SCLPEventFacility *ef = sclp->event_facility;
+ SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);
+
+ switch (code & SCLP_CMD_CODE_MASK) {
+ case SCLP_CMDW_READ_SCP_INFO:
+ case SCLP_CMDW_READ_SCP_INFO_FORCED:
+ sclp_c->read_SCP_info(sclp, sccb);
+ break;
+ case SCLP_CMDW_READ_CPU_INFO:
+ sclp_c->read_cpu_info(sclp, sccb);
+ break;
+ case SCLP_READ_STORAGE_ELEMENT_INFO:
+ if (code & 0xff00) {
+ sclp_c->read_storage_element1_info(sclp, sccb);
+ } else {
+ sclp_c->read_storage_element0_info(sclp, sccb);
+ }
+ break;
+ case SCLP_ATTACH_STORAGE_ELEMENT:
+ sclp_c->attach_storage_element(sclp, sccb, (code & 0xff00) >> 8);
+ break;
+ case SCLP_ASSIGN_STORAGE:
+ sclp_c->assign_storage(sclp, sccb);
+ break;
+ case SCLP_UNASSIGN_STORAGE:
+ sclp_c->unassign_storage(sclp, sccb);
+ break;
+ case SCLP_CMDW_CONFIGURE_PCI:
+ s390_pci_sclp_configure(1, sccb);
+ break;
+ case SCLP_CMDW_DECONFIGURE_PCI:
+ s390_pci_sclp_configure(0, sccb);
+ break;
+ default:
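+        /* Anything else is handled by the SCLP event facility. */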
+ efc->command_handler(ef, sccb, code);
+ break;
+ }
+}
+
+int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code)
+{
+ SCLPDevice *sclp = get_sclp_device();
+ SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
+ int r = 0;
+ SCCB work_sccb;
+
+ hwaddr sccb_len = sizeof(SCCB);
+
+    /* first some basic sanity checks; failures raise program checks */
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ r = -PGM_PRIVILEGED;
+ goto out;
+ }
+ if (cpu_physical_memory_is_io(sccb)) {
+ r = -PGM_ADDRESSING;
+ goto out;
+ }
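+    /*
+     * The SCCB must be 8-byte aligned, below 2 GB, and must not overlap
+     * the lowcore or the current prefix area.
+     */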
+ if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa
+ || (sccb & ~0x7ffffff8UL) != 0) {
+ r = -PGM_SPECIFICATION;
+ goto out;
+ }
+
+ /*
+ * we want to work on a private copy of the sccb, to prevent guests
+ * from playing dirty tricks by modifying the memory content after
+ * the host has checked the values
+ */
+ cpu_physical_memory_read(sccb, &work_sccb, sccb_len);
+
+ /* Valid sccb sizes */
+ if (be16_to_cpu(work_sccb.h.length) < sizeof(SCCBHeader) ||
+ be16_to_cpu(work_sccb.h.length) > SCCB_SIZE) {
+ r = -PGM_SPECIFICATION;
+ goto out;
+ }
+
+ sclp_c->execute(sclp, (SCCB *)&work_sccb, code);
+
+ cpu_physical_memory_write(sccb, &work_sccb,
+ be16_to_cpu(work_sccb.h.length));
+
+ sclp_c->service_interrupt(sclp, sccb);
+
+out:
+ return r;
+}
+
+static void service_interrupt(SCLPDevice *sclp, uint32_t sccb)
+{
+ SCLPEventFacility *ef = sclp->event_facility;
+ SCLPEventFacilityClass *efc = EVENT_FACILITY_GET_CLASS(ef);
+
+ uint32_t param = sccb & ~3;
+
+ /* Indicate whether an event is still pending */
+ param |= efc->event_pending(ef) ? 1 : 0;
+
+ if (!param) {
+ /* No need to send an interrupt, there's nothing to be notified about */
+ return;
+ }
+ s390_sclp_extint(param);
+}
+
+void sclp_service_interrupt(uint32_t sccb)
+{
+ SCLPDevice *sclp = get_sclp_device();
+ SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
+
+ sclp_c->service_interrupt(sclp, sccb);
+}
+
+/* qemu object creation and initialization functions */
+
+void s390_sclp_init(void)
+{
+ Object *new = object_new(TYPE_SCLP);
+
+ object_property_add_child(qdev_get_machine(), TYPE_SCLP, new,
+ NULL);
+ object_unref(OBJECT(new));
+ qdev_init_nofail(DEVICE(new));
+}
+
+static void sclp_realize(DeviceState *dev, Error **errp)
+{
+ MachineState *machine = MACHINE(qdev_get_machine());
+ SCLPDevice *sclp = SCLP(dev);
+ Error *l_err = NULL;
+ uint64_t hw_limit;
+ int ret;
+
+ object_property_set_bool(OBJECT(sclp->event_facility), true, "realized",
+ &l_err);
+ if (l_err) {
+ goto error;
+ }
+
+ ret = s390_set_memory_limit(machine->maxram_size, &hw_limit);
+ if (ret == -E2BIG) {
+ error_setg(&l_err, "qemu: host supports a maximum of %" PRIu64 " GB",
+ hw_limit >> 30);
+ goto error;
+ } else if (ret) {
+ error_setg(&l_err, "qemu: setting the guest size failed");
+ goto error;
+ }
+ return;
+error:
+ assert(l_err);
+ error_propagate(errp, l_err);
+}
+
+static void sclp_memory_init(SCLPDevice *sclp)
+{
+ MachineState *machine = MACHINE(qdev_get_machine());
+ ram_addr_t initial_mem = machine->ram_size;
+ ram_addr_t max_mem = machine->maxram_size;
+ ram_addr_t standby_mem = max_mem - initial_mem;
+ ram_addr_t pad_mem = 0;
+ int increment_size = 20;
+
+ /* The storage increment size is a multiple of 1M and is a power of 2.
+ * The number of storage increments must be MAX_STORAGE_INCREMENTS or fewer.
+ * The variable 'increment_size' is an exponent of 2 that can be
+ * used to calculate the size (in bytes) of an increment. */
+ while ((initial_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
+ increment_size++;
+ }
+ if (machine->ram_slots) {
+ while ((standby_mem >> increment_size) > MAX_STORAGE_INCREMENTS) {
+ increment_size++;
+ }
+ }
+ sclp->increment_size = increment_size;
+
+ /* The core and standby memory areas need to be aligned with
+ * the increment size. In effect, this can cause the
+ * user-specified memory size to be rounded down to align
+ * with the nearest increment boundary. */
+ initial_mem = initial_mem >> increment_size << increment_size;
+ standby_mem = standby_mem >> increment_size << increment_size;
+
+    /* If the size of ram is not on a MEM_SECTION_SIZE boundary,
+     * calculate the pad size necessary to force this boundary. */
+ if (machine->ram_slots && standby_mem) {
+ sclpMemoryHotplugDev *mhd = init_sclp_memory_hotplug_dev();
+
+ if (initial_mem % MEM_SECTION_SIZE) {
+ pad_mem = MEM_SECTION_SIZE - initial_mem % MEM_SECTION_SIZE;
+ }
+ mhd->increment_size = increment_size;
+ mhd->pad_size = pad_mem;
+ mhd->standby_mem_size = standby_mem;
+ }
+ machine->ram_size = initial_mem;
+ machine->maxram_size = initial_mem + pad_mem + standby_mem;
+ /* let's propagate the changed ram size into the global variable. */
+ ram_size = initial_mem;
+}
+
+static void sclp_init(Object *obj)
+{
+ SCLPDevice *sclp = SCLP(obj);
+ Object *new;
+
+ new = object_new(TYPE_SCLP_EVENT_FACILITY);
+ object_property_add_child(obj, TYPE_SCLP_EVENT_FACILITY, new, NULL);
+ /* qdev_device_add searches the sysbus for TYPE_SCLP_EVENTS_BUS */
+ qdev_set_parent_bus(DEVICE(new), sysbus_get_default());
+ object_unref(new);
+ sclp->event_facility = EVENT_FACILITY(new);
+
+ sclp_memory_init(sclp);
+}
+
+static void sclp_class_init(ObjectClass *oc, void *data)
+{
+ SCLPDeviceClass *sc = SCLP_CLASS(oc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ dc->desc = "SCLP (Service-Call Logical Processor)";
+ dc->realize = sclp_realize;
+ dc->hotpluggable = false;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+
+ sc->read_SCP_info = read_SCP_info;
+ sc->read_storage_element0_info = read_storage_element0_info;
+ sc->read_storage_element1_info = read_storage_element1_info;
+ sc->attach_storage_element = attach_storage_element;
+ sc->assign_storage = assign_storage;
+ sc->unassign_storage = unassign_storage;
+ sc->read_cpu_info = sclp_read_cpu_info;
+ sc->execute = sclp_execute;
+ sc->service_interrupt = service_interrupt;
+}
+
+static const TypeInfo sclp_info = {
+ .name = TYPE_SCLP,
+ .parent = TYPE_DEVICE,
+ .instance_init = sclp_init,
+ .instance_size = sizeof(SCLPDevice),
+ .class_init = sclp_class_init,
+ .class_size = sizeof(SCLPDeviceClass),
+};
+
+sclpMemoryHotplugDev *init_sclp_memory_hotplug_dev(void)
+{
+ DeviceState *dev;
+ dev = qdev_create(NULL, TYPE_SCLP_MEMORY_HOTPLUG_DEV);
+ object_property_add_child(qdev_get_machine(),
+ TYPE_SCLP_MEMORY_HOTPLUG_DEV,
+ OBJECT(dev), NULL);
+ qdev_init_nofail(dev);
+ return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
+ TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
+}
+
+sclpMemoryHotplugDev *get_sclp_memory_hotplug_dev(void)
+{
+ return SCLP_MEMORY_HOTPLUG_DEV(object_resolve_path(
+ TYPE_SCLP_MEMORY_HOTPLUG_DEV, NULL));
+}
+
+static void sclp_memory_hotplug_dev_class_init(ObjectClass *klass,
+ void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo sclp_memory_hotplug_dev_info = {
+ .name = TYPE_SCLP_MEMORY_HOTPLUG_DEV,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(sclpMemoryHotplugDev),
+ .class_init = sclp_memory_hotplug_dev_class_init,
+};
+
+static void register_types(void)
+{
+ type_register_static(&sclp_memory_hotplug_dev_info);
+ type_register_static(&sclp_info);
+}
+type_init(register_types)
diff --git a/src/hw/s390x/sclpcpu.c b/src/hw/s390x/sclpcpu.c
new file mode 100644
index 0000000..322eb31
--- /dev/null
+++ b/src/hw/s390x/sclpcpu.c
@@ -0,0 +1,100 @@
+/*
+ * SCLP event type
+ * Signal CPU - Trigger SCLP interrupt for system CPU configure or
+ * de-configure
+ *
+ * Copyright IBM, Corp. 2013
+ *
+ * Authors:
+ * Thang Pham <thang.pham@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ *
+ */
+#include "sysemu/sysemu.h"
+#include "hw/s390x/sclp.h"
+#include "hw/s390x/event-facility.h"
+#include "cpu.h"
+#include "sysemu/cpus.h"
+#include "sysemu/kvm.h"
+
+typedef struct ConfigMgtData {
+ EventBufferHeader ebh;
+ uint8_t reserved;
+ uint8_t event_qualifier;
+} QEMU_PACKED ConfigMgtData;
+
+#define EVENT_QUAL_CPU_CHANGE 1
+
+void raise_irq_cpu_hotplug(void)
+{
+ Object *obj = object_resolve_path_type("", TYPE_SCLP_CPU_HOTPLUG, NULL);
+
+ SCLP_EVENT(obj)->event_pending = true;
+
+ /* Trigger SCLP read operation */
+ sclp_service_interrupt(0);
+}
+
+static unsigned int send_mask(void)
+{
+ return SCLP_EVENT_MASK_CONFIG_MGT_DATA;
+}
+
+static unsigned int receive_mask(void)
+{
+ return 0;
+}
+
+static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr,
+ int *slen)
+{
+ ConfigMgtData *cdata = (ConfigMgtData *) evt_buf_hdr;
+ if (*slen < sizeof(ConfigMgtData)) {
+ return 0;
+ }
+
+    /* There is nothing to read if no event is pending */
+    if (!event->event_pending) {
+        return 0;
+    }
+    /* The event is delivered with this read, so it is no longer pending */
+    event->event_pending = false;
+
+ /* Event header data */
+ cdata->ebh.length = cpu_to_be16(sizeof(ConfigMgtData));
+ cdata->ebh.type = SCLP_EVENT_CONFIG_MGT_DATA;
+ cdata->ebh.flags |= SCLP_EVENT_BUFFER_ACCEPTED;
+
+ /* Trigger a rescan of CPUs by setting event qualifier */
+ cdata->event_qualifier = EVENT_QUAL_CPU_CHANGE;
+ *slen -= sizeof(ConfigMgtData);
+
+ return 1;
+}
+
+static void cpu_class_init(ObjectClass *oc, void *data)
+{
+ SCLPEventClass *k = SCLP_EVENT_CLASS(oc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ k->get_send_mask = send_mask;
+ k->get_receive_mask = receive_mask;
+ k->read_event_data = read_event_data;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo sclp_cpu_info = {
+ .name = TYPE_SCLP_CPU_HOTPLUG,
+ .parent = TYPE_SCLP_EVENT,
+ .instance_size = sizeof(SCLPEvent),
+ .class_init = cpu_class_init,
+ .class_size = sizeof(SCLPEventClass),
+};
+
+static void sclp_cpu_register_types(void)
+{
+ type_register_static(&sclp_cpu_info);
+}
+
+type_init(sclp_cpu_register_types)
diff --git a/src/hw/s390x/sclpquiesce.c b/src/hw/s390x/sclpquiesce.c
new file mode 100644
index 0000000..15b06e1
--- /dev/null
+++ b/src/hw/s390x/sclpquiesce.c
@@ -0,0 +1,142 @@
+/*
+ * SCLP event type
+ * Signal Quiesce - trigger system powerdown request
+ *
+ * Copyright IBM, Corp. 2012
+ *
+ * Authors:
+ * Heinz Graalfs <graalfs@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ *
+ */
+#include <hw/qdev.h>
+#include "sysemu/sysemu.h"
+#include "hw/s390x/sclp.h"
+#include "hw/s390x/event-facility.h"
+
+typedef struct SignalQuiesce {
+ EventBufferHeader ebh;
+ uint16_t timeout;
+ uint8_t unit;
+} QEMU_PACKED SignalQuiesce;
+
+static bool can_handle_event(uint8_t type)
+{
+ return type == SCLP_EVENT_SIGNAL_QUIESCE;
+}
+
+static unsigned int send_mask(void)
+{
+ return SCLP_EVENT_MASK_SIGNAL_QUIESCE;
+}
+
+static unsigned int receive_mask(void)
+{
+ return 0;
+}
+
+static int read_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr,
+ int *slen)
+{
+ SignalQuiesce *sq = (SignalQuiesce *) evt_buf_hdr;
+
+ if (*slen < sizeof(SignalQuiesce)) {
+ return 0;
+ }
+
+ if (!event->event_pending) {
+ return 0;
+ }
+ event->event_pending = false;
+
+ sq->ebh.length = cpu_to_be16(sizeof(SignalQuiesce));
+ sq->ebh.type = SCLP_EVENT_SIGNAL_QUIESCE;
+ sq->ebh.flags |= SCLP_EVENT_BUFFER_ACCEPTED;
+    /*
+     * system_powerdown does not have a timeout. Fortunately, the
+     * timeout value is currently ignored by Linux anyway.
+     */
+    sq->timeout = cpu_to_be16(0);
+    sq->unit = 0;
+ *slen -= sizeof(SignalQuiesce);
+
+ return 1;
+}
+
+static const VMStateDescription vmstate_sclpquiesce = {
+ .name = TYPE_SCLP_QUIESCE,
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(event_pending, SCLPEvent),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+typedef struct QuiesceNotifier QuiesceNotifier;
+
+static struct QuiesceNotifier {
+ Notifier notifier;
+ SCLPEvent *event;
+} qn;
+
+static void quiesce_powerdown_req(Notifier *n, void *opaque)
+{
+ QuiesceNotifier *qn = container_of(n, QuiesceNotifier, notifier);
+ SCLPEvent *event = qn->event;
+
+ event->event_pending = true;
+ /* trigger SCLP read operation */
+ sclp_service_interrupt(0);
+}
+
+static int quiesce_init(SCLPEvent *event)
+{
+ qn.notifier.notify = quiesce_powerdown_req;
+ qn.event = event;
+
+ qemu_register_powerdown_notifier(&qn.notifier);
+
+ return 0;
+}
+
+static void quiesce_reset(DeviceState *dev)
+{
+ SCLPEvent *event = SCLP_EVENT(dev);
+
+ event->event_pending = false;
+}
+
+static void quiesce_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SCLPEventClass *k = SCLP_EVENT_CLASS(klass);
+
+ dc->reset = quiesce_reset;
+ dc->vmsd = &vmstate_sclpquiesce;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ k->init = quiesce_init;
+
+ k->get_send_mask = send_mask;
+ k->get_receive_mask = receive_mask;
+ k->can_handle_event = can_handle_event;
+ k->read_event_data = read_event_data;
+ k->write_event_data = NULL;
+}
+
+static const TypeInfo sclp_quiesce_info = {
+ .name = TYPE_SCLP_QUIESCE,
+ .parent = TYPE_SCLP_EVENT,
+ .instance_size = sizeof(SCLPEvent),
+ .class_init = quiesce_class_init,
+ .class_size = sizeof(SCLPEventClass),
+};
+
+static void register_types(void)
+{
+ type_register_static(&sclp_quiesce_info);
+}
+
+type_init(register_types)
diff --git a/src/hw/s390x/virtio-ccw.c b/src/hw/s390x/virtio-ccw.c
new file mode 100644
index 0000000..63da303
--- /dev/null
+++ b/src/hw/s390x/virtio-ccw.c
@@ -0,0 +1,1988 @@
+/*
+ * virtio ccw target implementation
+ *
+ * Copyright 2012,2015 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Pierre Morel <pmorel@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "hw/hw.h"
+#include "sysemu/block-backend.h"
+#include "sysemu/blockdev.h"
+#include "sysemu/sysemu.h"
+#include "net/net.h"
+#include "hw/virtio/virtio.h"
+#include "hw/virtio/virtio-serial.h"
+#include "hw/virtio/virtio-net.h"
+#include "hw/sysbus.h"
+#include "qemu/bitops.h"
+#include "qemu/error-report.h"
+#include "hw/virtio/virtio-access.h"
+#include "hw/virtio/virtio-bus.h"
+#include "hw/s390x/adapter.h"
+#include "hw/s390x/s390_flic.h"
+
+#include "ioinst.h"
+#include "css.h"
+#include "virtio-ccw.h"
+#include "trace.h"
+
+static QTAILQ_HEAD(, IndAddr) indicator_addresses =
+ QTAILQ_HEAD_INITIALIZER(indicator_addresses);
+
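+/*
+ * Indicator locations may be shared between devices: registering the same
+ * guest address twice returns the same refcounted IndAddr entry.
+ */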
+static IndAddr *get_indicator(hwaddr ind_addr, int len)
+{
+ IndAddr *indicator;
+
+ QTAILQ_FOREACH(indicator, &indicator_addresses, sibling) {
+ if (indicator->addr == ind_addr) {
+ indicator->refcnt++;
+ return indicator;
+ }
+ }
+ indicator = g_new0(IndAddr, 1);
+ indicator->addr = ind_addr;
+ indicator->len = len;
+ indicator->refcnt = 1;
+ QTAILQ_INSERT_TAIL(&indicator_addresses, indicator, sibling);
+ return indicator;
+}
+
+static int s390_io_adapter_map(AdapterInfo *adapter, uint64_t map_addr,
+ bool do_map)
+{
+ S390FLICState *fs = s390_get_flic();
+ S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
+
+ return fsc->io_adapter_map(fs, adapter->adapter_id, map_addr, do_map);
+}
+
+static void release_indicator(AdapterInfo *adapter, IndAddr *indicator)
+{
+ assert(indicator->refcnt > 0);
+ indicator->refcnt--;
+ if (indicator->refcnt > 0) {
+ return;
+ }
+ QTAILQ_REMOVE(&indicator_addresses, indicator, sibling);
+ if (indicator->map) {
+ s390_io_adapter_map(adapter, indicator->map, false);
+ }
+ g_free(indicator);
+}
+
+static int map_indicator(AdapterInfo *adapter, IndAddr *indicator)
+{
+ int ret;
+
+ if (indicator->map) {
+ return 0; /* already mapped is not an error */
+ }
+ indicator->map = indicator->addr;
+ ret = s390_io_adapter_map(adapter, indicator->map, true);
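+    /* -ENOSYS just means the FLIC cannot map adapter routes; not fatal. */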
+ if ((ret != 0) && (ret != -ENOSYS)) {
+ goto out_err;
+ }
+ return 0;
+
+out_err:
+ indicator->map = 0;
+ return ret;
+}
+
+static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
+ VirtioCcwDevice *dev);
+
+static void virtual_css_bus_reset(BusState *qbus)
+{
+ /* This should actually be modelled via the generic css */
+ css_reset();
+}
+
+
+static void virtual_css_bus_class_init(ObjectClass *klass, void *data)
+{
+ BusClass *k = BUS_CLASS(klass);
+
+ k->reset = virtual_css_bus_reset;
+}
+
+static const TypeInfo virtual_css_bus_info = {
+ .name = TYPE_VIRTUAL_CSS_BUS,
+ .parent = TYPE_BUS,
+ .instance_size = sizeof(VirtualCssBus),
+ .class_init = virtual_css_bus_class_init,
+};
+
+VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch)
+{
+ VirtIODevice *vdev = NULL;
+ VirtioCcwDevice *dev = sch->driver_data;
+
+ if (dev) {
+ vdev = virtio_bus_get_device(&dev->bus);
+ }
+ return vdev;
+}
+
+static int virtio_ccw_set_guest2host_notifier(VirtioCcwDevice *dev, int n,
+ bool assign, bool set_handler)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+ EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
+ int r = 0;
+ SubchDev *sch = dev->sch;
+ uint32_t sch_id = (css_build_subchannel_id(sch) << 16) | sch->schid;
+
+ if (assign) {
+ r = event_notifier_init(notifier, 1);
+ if (r < 0) {
+ error_report("%s: unable to init event notifier: %d", __func__, r);
+ return r;
+ }
+ virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
+ r = s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
+ if (r < 0) {
+ error_report("%s: unable to assign ioeventfd: %d", __func__, r);
+ virtio_queue_set_host_notifier_fd_handler(vq, false, false);
+ event_notifier_cleanup(notifier);
+ return r;
+ }
+ } else {
+ virtio_queue_set_host_notifier_fd_handler(vq, false, false);
+ s390_assign_subch_ioeventfd(notifier, sch_id, n, assign);
+ event_notifier_cleanup(notifier);
+ }
+ return r;
+}
+
+static void virtio_ccw_start_ioeventfd(VirtioCcwDevice *dev)
+{
+ VirtIODevice *vdev;
+ int n, r;
+
+ if (!(dev->flags & VIRTIO_CCW_FLAG_USE_IOEVENTFD) ||
+ dev->ioeventfd_disabled ||
+ dev->ioeventfd_started) {
+ return;
+ }
+ vdev = virtio_bus_get_device(&dev->bus);
+ for (n = 0; n < VIRTIO_CCW_QUEUE_MAX; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+ r = virtio_ccw_set_guest2host_notifier(dev, n, true, true);
+ if (r < 0) {
+ goto assign_error;
+ }
+ }
+ dev->ioeventfd_started = true;
+ return;
+
+ assign_error:
+ while (--n >= 0) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+ r = virtio_ccw_set_guest2host_notifier(dev, n, false, false);
+ assert(r >= 0);
+ }
+ dev->ioeventfd_started = false;
+ /* Disable ioeventfd for this device. */
+ dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
+ error_report("%s: failed. Fallback to userspace (slower).", __func__);
+}
+
+static void virtio_ccw_stop_ioeventfd(VirtioCcwDevice *dev)
+{
+ VirtIODevice *vdev;
+ int n, r;
+
+ if (!dev->ioeventfd_started) {
+ return;
+ }
+ vdev = virtio_bus_get_device(&dev->bus);
+ for (n = 0; n < VIRTIO_CCW_QUEUE_MAX; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+ r = virtio_ccw_set_guest2host_notifier(dev, n, false, false);
+ assert(r >= 0);
+ }
+ dev->ioeventfd_started = false;
+}
+
+VirtualCssBus *virtual_css_bus_init(void)
+{
+ VirtualCssBus *cbus;
+ BusState *bus;
+ DeviceState *dev;
+
+ /* Create bridge device */
+ dev = qdev_create(NULL, "virtual-css-bridge");
+ qdev_init_nofail(dev);
+
+ /* Create bus on bridge device */
+ bus = qbus_create(TYPE_VIRTUAL_CSS_BUS, dev, "virtual-css");
+ cbus = VIRTUAL_CSS_BUS(bus);
+
+ /* Enable hotplugging */
+ qbus_set_hotplug_handler(bus, dev, &error_abort);
+
+ return cbus;
+}
+
+/* Communication blocks used by several channel commands. */
+typedef struct VqInfoBlockLegacy {
+ uint64_t queue;
+ uint32_t align;
+ uint16_t index;
+ uint16_t num;
+} QEMU_PACKED VqInfoBlockLegacy;
+
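+/* Virtio-1 layout: separate descriptor, avail and used ring addresses. */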
+typedef struct VqInfoBlock {
+ uint64_t desc;
+ uint32_t res0;
+ uint16_t index;
+ uint16_t num;
+ uint64_t avail;
+ uint64_t used;
+} QEMU_PACKED VqInfoBlock;
+
+typedef struct VqConfigBlock {
+ uint16_t index;
+ uint16_t num_max;
+} QEMU_PACKED VqConfigBlock;
+
+typedef struct VirtioFeatDesc {
+ uint32_t features;
+ uint8_t index;
+} QEMU_PACKED VirtioFeatDesc;
+
+typedef struct VirtioThinintInfo {
+ hwaddr summary_indicator;
+ hwaddr device_indicator;
+ uint64_t ind_bit;
+ uint8_t isc;
+} QEMU_PACKED VirtioThinintInfo;
+
+typedef struct VirtioRevInfo {
+ uint16_t revision;
+ uint16_t length;
+ uint8_t data[0];
+} QEMU_PACKED VirtioRevInfo;
+
+/* Specify where the virtqueues for the subchannel are in guest memory. */
+static int virtio_ccw_set_vqs(SubchDev *sch, VqInfoBlock *info,
+ VqInfoBlockLegacy *linfo)
+{
+ VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
+ uint16_t index = info ? info->index : linfo->index;
+ uint16_t num = info ? info->num : linfo->num;
+ uint64_t desc = info ? info->desc : linfo->queue;
+
+ if (index >= VIRTIO_CCW_QUEUE_MAX) {
+ return -EINVAL;
+ }
+
+ /* Current code in virtio.c relies on 4K alignment. */
+ if (linfo && desc && (linfo->align != 4096)) {
+ return -EINVAL;
+ }
+
+ if (!vdev) {
+ return -EINVAL;
+ }
+
+ if (info) {
+ virtio_queue_set_rings(vdev, index, desc, info->avail, info->used);
+ } else {
+ virtio_queue_set_addr(vdev, index, desc);
+ }
+ if (!desc) {
+ virtio_queue_set_vector(vdev, index, VIRTIO_NO_VECTOR);
+ } else {
+ if (info) {
+ /* virtio-1 allows changing the ring size. */
+ if (virtio_queue_get_num(vdev, index) < num) {
+ /* Fail if we exceed the maximum number. */
+ return -EINVAL;
+ }
+ virtio_queue_set_num(vdev, index, num);
+ } else if (virtio_queue_get_num(vdev, index) > num) {
+ /* Fail if we don't have a big enough queue. */
+ return -EINVAL;
+ }
+        /* For legacy devices, we ignore a possibly increased num for compatibility. */
+ virtio_queue_set_vector(vdev, index, index);
+ }
+    /* an out-of-range vector marks config change notifications */
+ vdev->config_vector = VIRTIO_CCW_QUEUE_MAX;
+ return 0;
+}
+
+static void virtio_ccw_reset_virtio(VirtioCcwDevice *dev, VirtIODevice *vdev)
+{
+ virtio_ccw_stop_ioeventfd(dev);
+ virtio_reset(vdev);
+ if (dev->indicators) {
+ release_indicator(&dev->routes.adapter, dev->indicators);
+ dev->indicators = NULL;
+ }
+ if (dev->indicators2) {
+ release_indicator(&dev->routes.adapter, dev->indicators2);
+ dev->indicators2 = NULL;
+ }
+ if (dev->summary_indicator) {
+ release_indicator(&dev->routes.adapter, dev->summary_indicator);
+ dev->summary_indicator = NULL;
+ }
+ dev->sch->thinint_active = false;
+}
+
+static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
+ bool is_legacy)
+{
+ int ret;
+ VqInfoBlock info;
+ VqInfoBlockLegacy linfo;
+ size_t info_len = is_legacy ? sizeof(linfo) : sizeof(info);
+
+ if (check_len) {
+ if (ccw.count != info_len) {
+ return -EINVAL;
+ }
+ } else if (ccw.count < info_len) {
+ /* Can't execute command. */
+ return -EINVAL;
+ }
+ if (!ccw.cda) {
+ return -EFAULT;
+ }
+ if (is_legacy) {
+ linfo.queue = address_space_ldq_be(&address_space_memory, ccw.cda,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ linfo.align = address_space_ldl_be(&address_space_memory,
+ ccw.cda + sizeof(linfo.queue),
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ linfo.index = address_space_lduw_be(&address_space_memory,
+ ccw.cda + sizeof(linfo.queue)
+ + sizeof(linfo.align),
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ linfo.num = address_space_lduw_be(&address_space_memory,
+ ccw.cda + sizeof(linfo.queue)
+ + sizeof(linfo.align)
+ + sizeof(linfo.index),
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
+ } else {
+ info.desc = address_space_ldq_be(&address_space_memory, ccw.cda,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ info.index = address_space_lduw_be(&address_space_memory,
+ ccw.cda + sizeof(info.desc)
+ + sizeof(info.res0),
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ info.num = address_space_lduw_be(&address_space_memory,
+ ccw.cda + sizeof(info.desc)
+ + sizeof(info.res0)
+ + sizeof(info.index),
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ info.avail = address_space_ldq_be(&address_space_memory,
+ ccw.cda + sizeof(info.desc)
+ + sizeof(info.res0)
+ + sizeof(info.index)
+ + sizeof(info.num),
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ info.used = address_space_ldq_be(&address_space_memory,
+ ccw.cda + sizeof(info.desc)
+ + sizeof(info.res0)
+ + sizeof(info.index)
+ + sizeof(info.num)
+ + sizeof(info.avail),
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ ret = virtio_ccw_set_vqs(sch, &info, NULL);
+ }
+ sch->curr_status.scsw.count = 0;
+ return ret;
+}
+
+static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
+{
+ int ret;
+ VirtioRevInfo revinfo;
+ uint8_t status;
+ VirtioFeatDesc features;
+ void *config;
+ hwaddr indicators;
+ VqConfigBlock vq_config;
+ VirtioCcwDevice *dev = sch->driver_data;
+ VirtIODevice *vdev = virtio_ccw_get_vdev(sch);
+ bool check_len;
+ int len;
+ hwaddr hw_len;
+ VirtioThinintInfo *thinint;
+
+ if (!dev) {
+ return -EINVAL;
+ }
+
+ trace_virtio_ccw_interpret_ccw(sch->cssid, sch->ssid, sch->schid,
+ ccw.cmd_code);
+ check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));
+
+ /* Look at the command. */
+ switch (ccw.cmd_code) {
+ case CCW_CMD_SET_VQ:
+ ret = virtio_ccw_handle_set_vq(sch, ccw, check_len, dev->revision < 1);
+ break;
+ case CCW_CMD_VDEV_RESET:
+ virtio_ccw_reset_virtio(dev, vdev);
+ ret = 0;
+ break;
+ case CCW_CMD_READ_FEAT:
+ if (check_len) {
+ if (ccw.count != sizeof(features)) {
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ccw.count < sizeof(features)) {
+ /* Can't execute command. */
+ ret = -EINVAL;
+ break;
+ }
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ features.index = address_space_ldub(&address_space_memory,
+ ccw.cda
+ + sizeof(features.features),
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ if (features.index == 0) {
+ if (dev->revision >= 1) {
+ /* Don't offer legacy features for modern devices. */
+ features.features = (uint32_t)
+ (vdev->host_features & ~VIRTIO_LEGACY_FEATURES);
+ } else {
+ features.features = (uint32_t)vdev->host_features;
+ }
+ } else if ((features.index == 1) && (dev->revision >= 1)) {
+ /*
+ * Only offer feature bits beyond 31 if the guest has
+ * negotiated at least revision 1.
+ */
+ features.features = (uint32_t)(vdev->host_features >> 32);
+ } else {
+ /* Return zeroes if the guest supports more feature bits. */
+ features.features = 0;
+ }
+ address_space_stl_le(&address_space_memory, ccw.cda,
+ features.features, MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ sch->curr_status.scsw.count = ccw.count - sizeof(features);
+ ret = 0;
+ }
+ break;
+ case CCW_CMD_WRITE_FEAT:
+ if (check_len) {
+ if (ccw.count != sizeof(features)) {
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ccw.count < sizeof(features)) {
+ /* Can't execute command. */
+ ret = -EINVAL;
+ break;
+ }
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ features.index = address_space_ldub(&address_space_memory,
+ ccw.cda
+ + sizeof(features.features),
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ features.features = address_space_ldl_le(&address_space_memory,
+ ccw.cda,
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ if (features.index == 0) {
+ virtio_set_features(vdev,
+ (vdev->guest_features & 0xffffffff00000000ULL) |
+ features.features);
+ } else if ((features.index == 1) && (dev->revision >= 1)) {
+ /*
+ * If the guest did not negotiate at least revision 1,
+ * we did not offer it any feature bits beyond 31. Such a
+ * guest passing us any bit here is therefore buggy.
+ */
+ virtio_set_features(vdev,
+ (vdev->guest_features & 0x00000000ffffffffULL) |
+ ((uint64_t)features.features << 32));
+ } else {
+ /*
+ * If the guest supports more feature bits, assert that it
+ * passes us zeroes for those we don't support.
+ */
+ if (features.features) {
+ fprintf(stderr, "Guest bug: features[%i]=%x (expected 0)\n",
+ features.index, features.features);
+ /* XXX: do a unit check here? */
+ }
+ }
+ sch->curr_status.scsw.count = ccw.count - sizeof(features);
+ ret = 0;
+ }
+ break;
+ case CCW_CMD_READ_CONF:
+ if (check_len) {
+ if (ccw.count > vdev->config_len) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ len = MIN(ccw.count, vdev->config_len);
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ virtio_bus_get_vdev_config(&dev->bus, vdev->config);
+ /* XXX config space endianness */
+ cpu_physical_memory_write(ccw.cda, vdev->config, len);
+ sch->curr_status.scsw.count = ccw.count - len;
+ ret = 0;
+ }
+ break;
+ case CCW_CMD_WRITE_CONF:
+ if (check_len) {
+ if (ccw.count > vdev->config_len) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ len = MIN(ccw.count, vdev->config_len);
+ hw_len = len;
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ config = cpu_physical_memory_map(ccw.cda, &hw_len, 0);
+ if (!config) {
+ ret = -EFAULT;
+ } else {
+ len = hw_len;
+ /* XXX config space endianness */
+ memcpy(vdev->config, config, len);
+ cpu_physical_memory_unmap(config, hw_len, 0, hw_len);
+ virtio_bus_set_vdev_config(&dev->bus, vdev->config);
+ sch->curr_status.scsw.count = ccw.count - len;
+ ret = 0;
+ }
+ }
+ break;
+ case CCW_CMD_WRITE_STATUS:
+ if (check_len) {
+ if (ccw.count != sizeof(status)) {
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ccw.count < sizeof(status)) {
+ /* Can't execute command. */
+ ret = -EINVAL;
+ break;
+ }
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ status = address_space_ldub(&address_space_memory, ccw.cda,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ virtio_ccw_stop_ioeventfd(dev);
+ }
+ if (virtio_set_status(vdev, status) == 0) {
+ if (vdev->status == 0) {
+ virtio_ccw_reset_virtio(dev, vdev);
+ }
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+ virtio_ccw_start_ioeventfd(dev);
+ }
+ sch->curr_status.scsw.count = ccw.count - sizeof(status);
+ ret = 0;
+ } else {
+ /* Trigger a command reject. */
+ ret = -ENOSYS;
+ }
+ }
+ break;
+ case CCW_CMD_SET_IND:
+ if (check_len) {
+ if (ccw.count != sizeof(indicators)) {
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ccw.count < sizeof(indicators)) {
+ /* Can't execute command. */
+ ret = -EINVAL;
+ break;
+ }
+ if (sch->thinint_active) {
+ /* Trigger a command reject. */
+ ret = -ENOSYS;
+ break;
+ }
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ indicators = address_space_ldq_be(&address_space_memory, ccw.cda,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ dev->indicators = get_indicator(indicators, sizeof(uint64_t));
+ sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
+ ret = 0;
+ }
+ break;
+ case CCW_CMD_SET_CONF_IND:
+ if (check_len) {
+ if (ccw.count != sizeof(indicators)) {
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ccw.count < sizeof(indicators)) {
+ /* Can't execute command. */
+ ret = -EINVAL;
+ break;
+ }
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ indicators = address_space_ldq_be(&address_space_memory, ccw.cda,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
+ sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
+ ret = 0;
+ }
+ break;
+ case CCW_CMD_READ_VQ_CONF:
+ if (check_len) {
+ if (ccw.count != sizeof(vq_config)) {
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ccw.count < sizeof(vq_config)) {
+ /* Can't execute command. */
+ ret = -EINVAL;
+ break;
+ }
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else {
+ vq_config.index = address_space_lduw_be(&address_space_memory,
+ ccw.cda,
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ if (vq_config.index >= VIRTIO_CCW_QUEUE_MAX) {
+ ret = -EINVAL;
+ break;
+ }
+ vq_config.num_max = virtio_queue_get_num(vdev,
+ vq_config.index);
+ address_space_stw_be(&address_space_memory,
+ ccw.cda + sizeof(vq_config.index),
+ vq_config.num_max,
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
+ ret = 0;
+ }
+ break;
+ case CCW_CMD_SET_IND_ADAPTER:
+ if (check_len) {
+ if (ccw.count != sizeof(*thinint)) {
+ ret = -EINVAL;
+ break;
+ }
+ } else if (ccw.count < sizeof(*thinint)) {
+ /* Can't execute command. */
+ ret = -EINVAL;
+ break;
+ }
+ len = sizeof(*thinint);
+ hw_len = len;
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ } else if (dev->indicators && !sch->thinint_active) {
+ /* Trigger a command reject. */
+ ret = -ENOSYS;
+ } else {
+ thinint = cpu_physical_memory_map(ccw.cda, &hw_len, 0);
+ if (!thinint) {
+ ret = -EFAULT;
+ } else {
+ uint64_t ind_bit = ldq_be_p(&thinint->ind_bit);
+
+ len = hw_len;
+ dev->summary_indicator =
+ get_indicator(ldq_be_p(&thinint->summary_indicator),
+ sizeof(uint8_t));
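+                /* The indicator area must be big enough to cover bit ind_bit. */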
+ dev->indicators =
+ get_indicator(ldq_be_p(&thinint->device_indicator),
+ ind_bit / 8 + 1);
+ dev->thinint_isc = thinint->isc;
+ dev->routes.adapter.ind_offset = ind_bit;
+ dev->routes.adapter.summary_offset = 7;
+ cpu_physical_memory_unmap(thinint, hw_len, 0, hw_len);
+ ret = css_register_io_adapter(CSS_IO_ADAPTER_VIRTIO,
+ dev->thinint_isc, true, false,
+ &dev->routes.adapter.adapter_id);
+ assert(ret == 0);
+ sch->thinint_active = ((dev->indicators != NULL) &&
+ (dev->summary_indicator != NULL));
+ sch->curr_status.scsw.count = ccw.count - len;
+ ret = 0;
+ }
+ }
+ break;
+ case CCW_CMD_SET_VIRTIO_REV:
+ len = sizeof(revinfo);
+ if (ccw.count < len) {
+ ret = -EINVAL;
+ break;
+ }
+ if (!ccw.cda) {
+ ret = -EFAULT;
+ break;
+ }
+ revinfo.revision =
+ address_space_lduw_be(&address_space_memory, ccw.cda,
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ revinfo.length =
+ address_space_lduw_be(&address_space_memory,
+ ccw.cda + sizeof(revinfo.revision),
+ MEMTXATTRS_UNSPECIFIED, NULL);
+ if (ccw.count < len + revinfo.length ||
+ (check_len && ccw.count > len + revinfo.length)) {
+ ret = -EINVAL;
+ break;
+ }
+ /*
+ * Once we start to support revisions with additional data, we'll
+ * need to fetch it here. Nothing to do for now, though.
+ */
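+        /* The revision may be set only once, and only to a supported value. */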
+ if (dev->revision >= 0 ||
+ revinfo.revision > virtio_ccw_rev_max(dev)) {
+ ret = -ENOSYS;
+ break;
+ }
+ ret = 0;
+ dev->revision = revinfo.revision;
+ break;
+ default:
+ ret = -ENOSYS;
+ break;
+ }
+ return ret;
+}
+
+static void virtio_sch_disable_cb(SubchDev *sch)
+{
+ VirtioCcwDevice *dev = sch->driver_data;
+
+ dev->revision = -1;
+}
+
+static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
+{
+ unsigned int cssid = 0;
+ unsigned int ssid = 0;
+ unsigned int schid;
+ unsigned int devno;
+ bool have_devno = false;
+ bool found = false;
+ SubchDev *sch;
+ int num;
+ Error *err = NULL;
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
+
+ sch = g_malloc0(sizeof(SubchDev));
+
+ sch->driver_data = dev;
+ dev->sch = sch;
+
+ dev->indicators = NULL;
+
+ /* Initialize subchannel structure. */
+ sch->channel_prog = 0x0;
+ sch->last_cmd_valid = false;
+ sch->thinint_active = false;
+ /*
+ * Use a device number if provided. Otherwise, fall back to subchannel
+ * number.
+ */
+ if (dev->bus_id) {
+ num = sscanf(dev->bus_id, "%x.%x.%04x", &cssid, &ssid, &devno);
+ if (num == 3) {
+ if ((cssid > MAX_CSSID) || (ssid > MAX_SSID)) {
+ error_setg(errp, "Invalid cssid or ssid: cssid %x, ssid %x",
+ cssid, ssid);
+ goto out_err;
+ }
+ /* Enforce use of virtual cssid. */
+ if (cssid != VIRTUAL_CSSID) {
+ error_setg(errp, "cssid %x not valid for virtio devices",
+ cssid);
+ goto out_err;
+ }
+ if (css_devno_used(cssid, ssid, devno)) {
+ error_setg(errp, "Device %x.%x.%04x already exists",
+ cssid, ssid, devno);
+ goto out_err;
+ }
+ sch->cssid = cssid;
+ sch->ssid = ssid;
+ sch->devno = devno;
+ have_devno = true;
+ } else {
+ error_setg(errp, "Malformed devno parameter '%s'", dev->bus_id);
+ goto out_err;
+ }
+ }
+
+ /* Find the next free id. */
+ if (have_devno) {
+ for (schid = 0; schid <= MAX_SCHID; schid++) {
+ if (!css_find_subch(1, cssid, ssid, schid)) {
+ sch->schid = schid;
+ css_subch_assign(cssid, ssid, schid, devno, sch);
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ error_setg(errp, "No free subchannel found for %x.%x.%04x",
+ cssid, ssid, devno);
+ goto out_err;
+ }
+ trace_virtio_ccw_new_device(cssid, ssid, schid, devno,
+ "user-configured");
+ } else {
+ cssid = VIRTUAL_CSSID;
+ for (ssid = 0; ssid <= MAX_SSID; ssid++) {
+ for (schid = 0; schid <= MAX_SCHID; schid++) {
+ if (!css_find_subch(1, cssid, ssid, schid)) {
+ sch->cssid = cssid;
+ sch->ssid = ssid;
+ sch->schid = schid;
+ devno = schid;
+ /*
+ * If the devno is already taken, look further in this
+ * subchannel set.
+ */
+ while (css_devno_used(cssid, ssid, devno)) {
+ if (devno == MAX_SCHID) {
+ devno = 0;
+ } else if (devno == schid - 1) {
+ error_setg(errp, "No free devno found");
+ goto out_err;
+ } else {
+ devno++;
+ }
+ }
+ sch->devno = devno;
+ css_subch_assign(cssid, ssid, schid, devno, sch);
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ break;
+ }
+ }
+ if (!found) {
+ error_setg(errp, "Virtual channel subsystem is full!");
+ goto out_err;
+ }
+ trace_virtio_ccw_new_device(cssid, ssid, schid, devno,
+ "auto-configured");
+ }
+
+ /* Build initial schib. */
+ css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);
+
+ sch->ccw_cb = virtio_ccw_cb;
+ sch->disable_cb = virtio_sch_disable_cb;
+
+ /* Build senseid data. */
+ memset(&sch->id, 0, sizeof(SenseId));
+ sch->id.reserved = 0xff;
+ sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
+
+ dev->revision = -1;
+
+ if (k->realize) {
+ k->realize(dev, &err);
+ }
+ if (err) {
+ error_propagate(errp, err);
+ css_subch_assign(cssid, ssid, schid, devno, NULL);
+ goto out_err;
+ }
+
+ return;
+
+out_err:
+ dev->sch = NULL;
+ g_free(sch);
+}
+
+static int virtio_ccw_exit(VirtioCcwDevice *dev)
+{
+ SubchDev *sch = dev->sch;
+
+ if (sch) {
+ css_subch_assign(sch->cssid, sch->ssid, sch->schid, sch->devno, NULL);
+ g_free(sch);
+ }
+ if (dev->indicators) {
+ release_indicator(&dev->routes.adapter, dev->indicators);
+ dev->indicators = NULL;
+ }
+ return 0;
+}
+
+static void virtio_ccw_net_realize(VirtioCcwDevice *ccw_dev, Error **errp)
+{
+ DeviceState *qdev = DEVICE(ccw_dev);
+ VirtIONetCcw *dev = VIRTIO_NET_CCW(ccw_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ Error *err = NULL;
+
+ virtio_net_set_netclient_name(&dev->vdev, qdev->id,
+ object_get_typename(OBJECT(qdev)));
+ qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ }
+}
+
+static void virtio_ccw_net_instance_init(Object *obj)
+{
+ VirtIONetCcw *dev = VIRTIO_NET_CCW(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_NET);
+ object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
+ "bootindex", &error_abort);
+}
+
+static void virtio_ccw_blk_realize(VirtioCcwDevice *ccw_dev, Error **errp)
+{
+ VirtIOBlkCcw *dev = VIRTIO_BLK_CCW(ccw_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ Error *err = NULL;
+
+ qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ }
+}
+
+static void virtio_ccw_blk_instance_init(Object *obj)
+{
+ VirtIOBlkCcw *dev = VIRTIO_BLK_CCW(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_BLK);
+    object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev), "iothread",
+                              &error_abort);
+ object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev),
+ "bootindex", &error_abort);
+}
+
+static void virtio_ccw_serial_realize(VirtioCcwDevice *ccw_dev, Error **errp)
+{
+ VirtioSerialCcw *dev = VIRTIO_SERIAL_CCW(ccw_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ DeviceState *proxy = DEVICE(ccw_dev);
+ Error *err = NULL;
+ char *bus_name;
+
+ /*
+ * For command line compatibility, this sets the virtio-serial-device bus
+ * name as before.
+ */
+ if (proxy->id) {
+ bus_name = g_strdup_printf("%s.0", proxy->id);
+ virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
+ g_free(bus_name);
+ }
+
+ qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ }
+}
+
+static void virtio_ccw_serial_instance_init(Object *obj)
+{
+ VirtioSerialCcw *dev = VIRTIO_SERIAL_CCW(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_SERIAL);
+}
+
+static void virtio_ccw_balloon_realize(VirtioCcwDevice *ccw_dev, Error **errp)
+{
+ VirtIOBalloonCcw *dev = VIRTIO_BALLOON_CCW(ccw_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ Error *err = NULL;
+
+ qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ }
+}
+
+static void virtio_ccw_balloon_instance_init(Object *obj)
+{
+ VirtIOBalloonCcw *dev = VIRTIO_BALLOON_CCW(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_BALLOON);
+ object_property_add_alias(obj, "guest-stats", OBJECT(&dev->vdev),
+ "guest-stats", &error_abort);
+ object_property_add_alias(obj, "guest-stats-polling-interval",
+ OBJECT(&dev->vdev),
+ "guest-stats-polling-interval", &error_abort);
+}
+
+static void virtio_ccw_scsi_realize(VirtioCcwDevice *ccw_dev, Error **errp)
+{
+ VirtIOSCSICcw *dev = VIRTIO_SCSI_CCW(ccw_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ DeviceState *qdev = DEVICE(ccw_dev);
+ Error *err = NULL;
+ char *bus_name;
+
+ /*
+ * For command line compatibility, this sets the virtio-scsi-device bus
+ * name as before.
+ */
+ if (qdev->id) {
+ bus_name = g_strdup_printf("%s.0", qdev->id);
+ virtio_device_set_child_bus_name(VIRTIO_DEVICE(vdev), bus_name);
+ g_free(bus_name);
+ }
+
+ qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ }
+}
+
+static void virtio_ccw_scsi_instance_init(Object *obj)
+{
+ VirtIOSCSICcw *dev = VIRTIO_SCSI_CCW(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_SCSI);
+ object_property_add_alias(obj, "iothread", OBJECT(&dev->vdev), "iothread",
+ &error_abort);
+}
+
+#ifdef CONFIG_VHOST_SCSI
+static void vhost_ccw_scsi_realize(VirtioCcwDevice *ccw_dev, Error **errp)
+{
+ VHostSCSICcw *dev = VHOST_SCSI_CCW(ccw_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ Error *err = NULL;
+
+ qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ }
+}
+
+static void vhost_ccw_scsi_instance_init(Object *obj)
+{
+ VHostSCSICcw *dev = VHOST_SCSI_CCW(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VHOST_SCSI);
+}
+#endif
+
+static void virtio_ccw_rng_realize(VirtioCcwDevice *ccw_dev, Error **errp)
+{
+ VirtIORNGCcw *dev = VIRTIO_RNG_CCW(ccw_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ Error *err = NULL;
+
+ qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ object_property_set_link(OBJECT(dev),
+ OBJECT(dev->vdev.conf.rng), "rng",
+ NULL);
+}
+
+/* DeviceState to VirtioCcwDevice. Note: used on datapath,
+ * be careful and test performance if you change this.
+ */
+static inline VirtioCcwDevice *to_virtio_ccw_dev_fast(DeviceState *d)
+{
+ return container_of(d, VirtioCcwDevice, parent_obj);
+}
+
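+/*
+ * Atomically OR @to_be_set into the indicator byte at guest address
+ * @ind_loc and return the previous value, so that callers can tell
+ * whether they were the first to set a bit.
+ */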
+static uint8_t virtio_set_ind_atomic(SubchDev *sch, uint64_t ind_loc,
+ uint8_t to_be_set)
+{
+ uint8_t ind_old, ind_new;
+ hwaddr len = 1;
+ uint8_t *ind_addr;
+
+ ind_addr = cpu_physical_memory_map(ind_loc, &len, 1);
+ if (!ind_addr) {
+ error_report("%s(%x.%x.%04x): unable to access indicator",
+ __func__, sch->cssid, sch->ssid, sch->schid);
+ return -1;
+ }
+ do {
+ ind_old = *ind_addr;
+ ind_new = ind_old | to_be_set;
+ } while (atomic_cmpxchg(ind_addr, ind_old, ind_new) != ind_old);
+ cpu_physical_memory_unmap(ind_addr, len, 1, len);
+
+ return ind_old;
+}
+
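+/*
+ * Inject an interrupt for @vector. Queue vectors either set a per-queue
+ * bit in the adapter indicator area and raise an adapter interrupt
+ * (thin interrupts), or set a bit in the classic indicators and deliver
+ * a conditional I/O interrupt. Vectors at or above VIRTIO_CCW_QUEUE_MAX
+ * signal a configuration change through indicators2.
+ */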
+static void virtio_ccw_notify(DeviceState *d, uint16_t vector)
+{
+ VirtioCcwDevice *dev = to_virtio_ccw_dev_fast(d);
+ SubchDev *sch = dev->sch;
+ uint64_t indicators;
+
+ if (vector >= 128) {
+ return;
+ }
+
+ if (vector < VIRTIO_CCW_QUEUE_MAX) {
+ if (!dev->indicators) {
+ return;
+ }
+ if (sch->thinint_active) {
+ /*
+ * In the adapter interrupt case, indicators points to a
+             * memory area that may be (way) larger than 64 bits, and
+             * ind_bit indicates the start of the indicators in
+             * big-endian notation.
+ */
+ uint64_t ind_bit = dev->routes.adapter.ind_offset;
+
+ virtio_set_ind_atomic(sch, dev->indicators->addr +
+ (ind_bit + vector) / 8,
+ 0x80 >> ((ind_bit + vector) % 8));
+ if (!virtio_set_ind_atomic(sch, dev->summary_indicator->addr,
+ 0x01)) {
+ css_adapter_interrupt(dev->thinint_isc);
+ }
+ } else {
+ indicators = address_space_ldq(&address_space_memory,
+ dev->indicators->addr,
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ indicators |= 1ULL << vector;
+ address_space_stq(&address_space_memory, dev->indicators->addr,
+ indicators, MEMTXATTRS_UNSPECIFIED, NULL);
+ css_conditional_io_interrupt(sch);
+ }
+ } else {
+ if (!dev->indicators2) {
+ return;
+ }
+ vector = 0;
+ indicators = address_space_ldq(&address_space_memory,
+ dev->indicators2->addr,
+ MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ indicators |= 1ULL << vector;
+ address_space_stq(&address_space_memory, dev->indicators2->addr,
+ indicators, MEMTXATTRS_UNSPECIFIED, NULL);
+ css_conditional_io_interrupt(sch);
+ }
+}
+
+static void virtio_ccw_reset(DeviceState *d)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
+
+ virtio_ccw_reset_virtio(dev, vdev);
+ css_reset_sch(dev->sch);
+}
+
+static void virtio_ccw_vmstate_change(DeviceState *d, bool running)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+
+ if (running) {
+ virtio_ccw_start_ioeventfd(dev);
+ } else {
+ virtio_ccw_stop_ioeventfd(dev);
+ }
+}
+
+static bool virtio_ccw_query_guest_notifiers(DeviceState *d)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+
+ return !!(dev->sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA);
+}
+
+static int virtio_ccw_set_host_notifier(DeviceState *d, int n, bool assign)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+
+ /* Stop using the generic ioeventfd, we are doing eventfd handling
+ * ourselves below */
+ dev->ioeventfd_disabled = assign;
+ if (assign) {
+ virtio_ccw_stop_ioeventfd(dev);
+ }
+ return virtio_ccw_set_guest2host_notifier(dev, n, assign, false);
+}
+
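+/*
+ * Map the guest's summary and queue indicators so that they can be used
+ * as an adapter interrupt source; only valid with thin interrupts.
+ */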
+static int virtio_ccw_get_mappings(VirtioCcwDevice *dev)
+{
+ int r;
+
+ if (!dev->sch->thinint_active) {
+ return -EINVAL;
+ }
+
+ r = map_indicator(&dev->routes.adapter, dev->summary_indicator);
+ if (r) {
+ return r;
+ }
+ r = map_indicator(&dev->routes.adapter, dev->indicators);
+ if (r) {
+ return r;
+ }
+ dev->routes.adapter.summary_addr = dev->summary_indicator->map;
+ dev->routes.adapter.ind_addr = dev->indicators->map;
+
+ return 0;
+}
+
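+/*
+ * Count the active queues and register one adapter route per queue with
+ * the FLIC, so that irqfds can later be attached to the resulting gsis.
+ */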
+static int virtio_ccw_setup_irqroutes(VirtioCcwDevice *dev, int nvqs)
+{
+ int i;
+ VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
+ int ret;
+ S390FLICState *fs = s390_get_flic();
+ S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
+
+ ret = virtio_ccw_get_mappings(dev);
+ if (ret) {
+ return ret;
+ }
+ for (i = 0; i < nvqs; i++) {
+ if (!virtio_queue_get_num(vdev, i)) {
+ break;
+ }
+ }
+ dev->routes.num_routes = i;
+ return fsc->add_adapter_routes(fs, &dev->routes);
+}
+
+static void virtio_ccw_release_irqroutes(VirtioCcwDevice *dev, int nvqs)
+{
+ S390FLICState *fs = s390_get_flic();
+ S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
+
+ fsc->release_adapter_routes(fs, &dev->routes);
+}
+
+static int virtio_ccw_add_irqfd(VirtioCcwDevice *dev, int n)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+ EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
+
+ return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, notifier, NULL,
+ dev->routes.gsi[n]);
+}
+
+static void virtio_ccw_remove_irqfd(VirtioCcwDevice *dev, int n)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+ EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
+ int ret;
+
+ ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, notifier,
+ dev->routes.gsi[n]);
+ assert(ret == 0);
+}
+
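+/*
+ * Wire up (or tear down) the guest notifier for queue @n, either via a
+ * KVM irqfd when thin interrupts are in use or via a userspace event
+ * handler otherwise.
+ */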
+static int virtio_ccw_set_guest_notifier(VirtioCcwDevice *dev, int n,
+ bool assign, bool with_irqfd)
+{
+ VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
+ VirtQueue *vq = virtio_get_queue(vdev, n);
+ EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+
+ if (assign) {
+ int r = event_notifier_init(notifier, 0);
+
+ if (r < 0) {
+ return r;
+ }
+ virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
+ if (with_irqfd) {
+ r = virtio_ccw_add_irqfd(dev, n);
+ if (r) {
+ virtio_queue_set_guest_notifier_fd_handler(vq, false,
+ with_irqfd);
+ return r;
+ }
+ }
+ /*
+ * We do not support individual masking for channel devices, so we
+ * need to manually trigger any guest masking callbacks here.
+ */
+ if (k->guest_notifier_mask) {
+ k->guest_notifier_mask(vdev, n, false);
+ }
+ /* get lost events and re-inject */
+ if (k->guest_notifier_pending &&
+ k->guest_notifier_pending(vdev, n)) {
+ event_notifier_set(notifier);
+ }
+ } else {
+ if (k->guest_notifier_mask) {
+ k->guest_notifier_mask(vdev, n, true);
+ }
+ if (with_irqfd) {
+ virtio_ccw_remove_irqfd(dev, n);
+ }
+ virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
+ event_notifier_cleanup(notifier);
+ }
+ return 0;
+}
+
+static int virtio_ccw_set_guest_notifiers(DeviceState *d, int nvqs,
+ bool assigned)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
+ bool with_irqfd = dev->sch->thinint_active && kvm_irqfds_enabled();
+ int r, n;
+
+ if (with_irqfd && assigned) {
+ /* irq routes need to be set up before assigning irqfds */
+ r = virtio_ccw_setup_irqroutes(dev, nvqs);
+ if (r < 0) {
+ goto irqroute_error;
+ }
+ }
+ for (n = 0; n < nvqs; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ break;
+ }
+ r = virtio_ccw_set_guest_notifier(dev, n, assigned, with_irqfd);
+ if (r < 0) {
+ goto assign_error;
+ }
+ }
+ if (with_irqfd && !assigned) {
+ /* release irq routes after irqfds have been released */
+ virtio_ccw_release_irqroutes(dev, nvqs);
+ }
+ return 0;
+
+assign_error:
+ while (--n >= 0) {
+ virtio_ccw_set_guest_notifier(dev, n, !assigned, false);
+ }
+irqroute_error:
+ if (with_irqfd && assigned) {
+ virtio_ccw_release_irqroutes(dev, nvqs);
+ }
+ return r;
+}
+
+static void virtio_ccw_save_queue(DeviceState *d, int n, QEMUFile *f)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
+
+ qemu_put_be16(f, virtio_queue_vector(vdev, n));
+}
+
+static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
+ uint16_t vector;
+
+ qemu_get_be16s(f, &vector);
+    virtio_queue_set_vector(vdev, n, vector);
+
+ return 0;
+}
+
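+/*
+ * The config section carries the subchannel state followed by three
+ * (len, addr) indicator pairs, the config vector, the adapter interrupt
+ * offset, the ISC and the negotiated revision, in that order.
+ */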
+static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+ SubchDev *s = dev->sch;
+ VirtIODevice *vdev = virtio_ccw_get_vdev(s);
+
+ subch_device_save(s, f);
+ if (dev->indicators != NULL) {
+ qemu_put_be32(f, dev->indicators->len);
+ qemu_put_be64(f, dev->indicators->addr);
+ } else {
+ qemu_put_be32(f, 0);
+ qemu_put_be64(f, 0UL);
+ }
+ if (dev->indicators2 != NULL) {
+ qemu_put_be32(f, dev->indicators2->len);
+ qemu_put_be64(f, dev->indicators2->addr);
+ } else {
+ qemu_put_be32(f, 0);
+ qemu_put_be64(f, 0UL);
+ }
+ if (dev->summary_indicator != NULL) {
+ qemu_put_be32(f, dev->summary_indicator->len);
+ qemu_put_be64(f, dev->summary_indicator->addr);
+ } else {
+ qemu_put_be32(f, 0);
+ qemu_put_be64(f, 0UL);
+ }
+ qemu_put_be16(f, vdev->config_vector);
+ qemu_put_be64(f, dev->routes.adapter.ind_offset);
+ qemu_put_byte(f, dev->thinint_isc);
+ qemu_put_be32(f, dev->revision);
+}
+
+static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+ SubchDev *s = dev->sch;
+ VirtIODevice *vdev = virtio_ccw_get_vdev(s);
+ int len;
+
+ s->driver_data = dev;
+ subch_device_load(s, f);
+ len = qemu_get_be32(f);
+ if (len != 0) {
+ dev->indicators = get_indicator(qemu_get_be64(f), len);
+ } else {
+ qemu_get_be64(f);
+ dev->indicators = NULL;
+ }
+ len = qemu_get_be32(f);
+ if (len != 0) {
+ dev->indicators2 = get_indicator(qemu_get_be64(f), len);
+ } else {
+ qemu_get_be64(f);
+ dev->indicators2 = NULL;
+ }
+ len = qemu_get_be32(f);
+ if (len != 0) {
+ dev->summary_indicator = get_indicator(qemu_get_be64(f), len);
+ } else {
+ qemu_get_be64(f);
+ dev->summary_indicator = NULL;
+ }
+ qemu_get_be16s(f, &vdev->config_vector);
+ dev->routes.adapter.ind_offset = qemu_get_be64(f);
+ dev->thinint_isc = qemu_get_byte(f);
+ dev->revision = qemu_get_be32(f);
+ if (s->thinint_active) {
+ return css_register_io_adapter(CSS_IO_ADAPTER_VIRTIO,
+ dev->thinint_isc, true, false,
+ &dev->routes.adapter.adapter_id);
+ }
+
+ return 0;
+}
+
+/* This is called by virtio-bus just after the device is plugged. */
+static void virtio_ccw_device_plugged(DeviceState *d, Error **errp)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
+ SubchDev *sch = dev->sch;
+ int n = virtio_get_num_queues(vdev);
+
+    if (n > VIRTIO_CCW_QUEUE_MAX) {
+        error_setg(errp, "The number of virtqueues %d "
+                   "exceeds ccw limit %d", n,
+                   VIRTIO_CCW_QUEUE_MAX);
+ return;
+ }
+
+ if (!kvm_eventfds_enabled()) {
+ dev->flags &= ~VIRTIO_CCW_FLAG_USE_IOEVENTFD;
+ }
+
+ sch->id.cu_model = virtio_bus_get_vdev_id(&dev->bus);
+
+ if (dev->max_rev >= 1) {
+ virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
+ }
+
+ css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
+ d->hotplugged, 1);
+}
+
+static void virtio_ccw_post_plugged(DeviceState *d, Error **errp)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+ VirtIODevice *vdev = virtio_bus_get_device(&dev->bus);
+
+ if (!virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ /* A backend didn't support modern virtio. */
+ dev->max_rev = 0;
+ }
+}
+
+static void virtio_ccw_device_unplugged(DeviceState *d)
+{
+ VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d);
+
+ virtio_ccw_stop_ioeventfd(dev);
+}
+
+/**************** Virtio-ccw Bus Device Descriptions *******************/
+
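+/*
+ * All proxy devices share a "devno" property taking cssid.ssid.devno
+ * notation (e.g. "fe.0.1234", an illustrative value); when it is
+ * omitted, a free subchannel and device number are picked automatically.
+ */
+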
+static Property virtio_ccw_net_properties[] = {
+ DEFINE_PROP_STRING("devno", VirtioCcwDevice, bus_id),
+ DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
+ VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
+ VIRTIO_CCW_MAX_REV),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_ccw_net_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->realize = virtio_ccw_net_realize;
+ k->exit = virtio_ccw_exit;
+ dc->reset = virtio_ccw_reset;
+ dc->props = virtio_ccw_net_properties;
+ set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
+}
+
+static const TypeInfo virtio_ccw_net = {
+ .name = TYPE_VIRTIO_NET_CCW,
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VirtIONetCcw),
+ .instance_init = virtio_ccw_net_instance_init,
+ .class_init = virtio_ccw_net_class_init,
+};
+
+static Property virtio_ccw_blk_properties[] = {
+ DEFINE_PROP_STRING("devno", VirtioCcwDevice, bus_id),
+ DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
+ VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
+ VIRTIO_CCW_MAX_REV),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_ccw_blk_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->realize = virtio_ccw_blk_realize;
+ k->exit = virtio_ccw_exit;
+ dc->reset = virtio_ccw_reset;
+ dc->props = virtio_ccw_blk_properties;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo virtio_ccw_blk = {
+ .name = TYPE_VIRTIO_BLK_CCW,
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VirtIOBlkCcw),
+ .instance_init = virtio_ccw_blk_instance_init,
+ .class_init = virtio_ccw_blk_class_init,
+};
+
+static Property virtio_ccw_serial_properties[] = {
+ DEFINE_PROP_STRING("devno", VirtioCcwDevice, bus_id),
+ DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
+ VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
+ VIRTIO_CCW_MAX_REV),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_ccw_serial_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->realize = virtio_ccw_serial_realize;
+ k->exit = virtio_ccw_exit;
+ dc->reset = virtio_ccw_reset;
+ dc->props = virtio_ccw_serial_properties;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+}
+
+static const TypeInfo virtio_ccw_serial = {
+ .name = TYPE_VIRTIO_SERIAL_CCW,
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VirtioSerialCcw),
+ .instance_init = virtio_ccw_serial_instance_init,
+ .class_init = virtio_ccw_serial_class_init,
+};
+
+static Property virtio_ccw_balloon_properties[] = {
+ DEFINE_PROP_STRING("devno", VirtioCcwDevice, bus_id),
+ DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
+ VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
+ VIRTIO_CCW_MAX_REV),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_ccw_balloon_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->realize = virtio_ccw_balloon_realize;
+ k->exit = virtio_ccw_exit;
+ dc->reset = virtio_ccw_reset;
+ dc->props = virtio_ccw_balloon_properties;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo virtio_ccw_balloon = {
+ .name = TYPE_VIRTIO_BALLOON_CCW,
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VirtIOBalloonCcw),
+ .instance_init = virtio_ccw_balloon_instance_init,
+ .class_init = virtio_ccw_balloon_class_init,
+};
+
+static Property virtio_ccw_scsi_properties[] = {
+ DEFINE_PROP_STRING("devno", VirtioCcwDevice, bus_id),
+ DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
+ VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
+ VIRTIO_CCW_MAX_REV),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_ccw_scsi_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->realize = virtio_ccw_scsi_realize;
+ k->exit = virtio_ccw_exit;
+ dc->reset = virtio_ccw_reset;
+ dc->props = virtio_ccw_scsi_properties;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo virtio_ccw_scsi = {
+ .name = TYPE_VIRTIO_SCSI_CCW,
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VirtIOSCSICcw),
+ .instance_init = virtio_ccw_scsi_instance_init,
+ .class_init = virtio_ccw_scsi_class_init,
+};
+
+#ifdef CONFIG_VHOST_SCSI
+static Property vhost_ccw_scsi_properties[] = {
+ DEFINE_PROP_STRING("devno", VirtioCcwDevice, bus_id),
+ DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
+ VIRTIO_CCW_MAX_REV),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vhost_ccw_scsi_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->realize = vhost_ccw_scsi_realize;
+ k->exit = virtio_ccw_exit;
+ dc->reset = virtio_ccw_reset;
+ dc->props = vhost_ccw_scsi_properties;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static const TypeInfo vhost_ccw_scsi = {
+ .name = TYPE_VHOST_SCSI_CCW,
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VHostSCSICcw),
+ .instance_init = vhost_ccw_scsi_instance_init,
+ .class_init = vhost_ccw_scsi_class_init,
+};
+#endif
+
+static void virtio_ccw_rng_instance_init(Object *obj)
+{
+ VirtIORNGCcw *dev = VIRTIO_RNG_CCW(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_RNG);
+ object_property_add_alias(obj, "rng", OBJECT(&dev->vdev),
+ "rng", &error_abort);
+}
+
+static Property virtio_ccw_rng_properties[] = {
+ DEFINE_PROP_STRING("devno", VirtioCcwDevice, bus_id),
+ DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
+ VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
+ VIRTIO_CCW_MAX_REV),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_ccw_rng_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->realize = virtio_ccw_rng_realize;
+ k->exit = virtio_ccw_exit;
+ dc->reset = virtio_ccw_reset;
+ dc->props = virtio_ccw_rng_properties;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo virtio_ccw_rng = {
+ .name = TYPE_VIRTIO_RNG_CCW,
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(VirtIORNGCcw),
+ .instance_init = virtio_ccw_rng_instance_init,
+ .class_init = virtio_ccw_rng_class_init,
+};
+
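+/* Common realize for all proxies: create the virtio bus in place, then
+ * realize the ccw transport device. */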
+static void virtio_ccw_busdev_realize(DeviceState *dev, Error **errp)
+{
+ VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
+
+ virtio_ccw_bus_new(&_dev->bus, sizeof(_dev->bus), _dev);
+ virtio_ccw_device_realize(_dev, errp);
+}
+
+static int virtio_ccw_busdev_exit(DeviceState *dev)
+{
+ VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
+ VirtIOCCWDeviceClass *_info = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
+
+ return _info->exit(_dev);
+}
+
+static void virtio_ccw_busdev_unplug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ VirtioCcwDevice *_dev = (VirtioCcwDevice *)dev;
+ SubchDev *sch = _dev->sch;
+
+ virtio_ccw_stop_ioeventfd(_dev);
+
+ /*
+ * We should arrive here only for device_del, since we don't support
+ * direct hot(un)plug of channels, but only through virtio.
+ */
+ assert(sch != NULL);
+ /* Subchannel is now disabled and no longer valid. */
+ sch->curr_status.pmcw.flags &= ~(PMCW_FLAGS_MASK_ENA |
+ PMCW_FLAGS_MASK_DNV);
+
+ css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid, 1, 0);
+
+ object_unparent(OBJECT(dev));
+}
+
+static void virtio_ccw_device_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->realize = virtio_ccw_busdev_realize;
+ dc->exit = virtio_ccw_busdev_exit;
+ dc->bus_type = TYPE_VIRTUAL_CSS_BUS;
+}
+
+static const TypeInfo virtio_ccw_device_info = {
+ .name = TYPE_VIRTIO_CCW_DEVICE,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(VirtioCcwDevice),
+ .class_init = virtio_ccw_device_class_init,
+ .class_size = sizeof(VirtIOCCWDeviceClass),
+ .abstract = true,
+};
+
+/***************** Virtual-css Bus Bridge Device ********************/
+/* Only required to have the virtio bus as a child of the system bus */
+
+static int virtual_css_bridge_init(SysBusDevice *dev)
+{
+ /* nothing */
+ return 0;
+}
+
+static void virtual_css_bridge_class_init(ObjectClass *klass, void *data)
+{
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+ HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ k->init = virtual_css_bridge_init;
+ hc->unplug = virtio_ccw_busdev_unplug;
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
+}
+
+static const TypeInfo virtual_css_bridge_info = {
+ .name = "virtual-css-bridge",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(SysBusDevice),
+ .class_init = virtual_css_bridge_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_HOTPLUG_HANDLER },
+ { }
+ }
+};
+
+/* virtio-ccw-bus */
+
+static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size,
+ VirtioCcwDevice *dev)
+{
+ DeviceState *qdev = DEVICE(dev);
+ char virtio_bus_name[] = "virtio-bus";
+
+ qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_CCW_BUS,
+ qdev, virtio_bus_name);
+}
+
+static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data)
+{
+ VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
+ BusClass *bus_class = BUS_CLASS(klass);
+
+ bus_class->max_dev = 1;
+ k->notify = virtio_ccw_notify;
+ k->vmstate_change = virtio_ccw_vmstate_change;
+ k->query_guest_notifiers = virtio_ccw_query_guest_notifiers;
+ k->set_host_notifier = virtio_ccw_set_host_notifier;
+ k->set_guest_notifiers = virtio_ccw_set_guest_notifiers;
+ k->save_queue = virtio_ccw_save_queue;
+ k->load_queue = virtio_ccw_load_queue;
+ k->save_config = virtio_ccw_save_config;
+ k->load_config = virtio_ccw_load_config;
+ k->device_plugged = virtio_ccw_device_plugged;
+ k->post_plugged = virtio_ccw_post_plugged;
+ k->device_unplugged = virtio_ccw_device_unplugged;
+}
+
+static const TypeInfo virtio_ccw_bus_info = {
+ .name = TYPE_VIRTIO_CCW_BUS,
+ .parent = TYPE_VIRTIO_BUS,
+ .instance_size = sizeof(VirtioCcwBusState),
+ .class_init = virtio_ccw_bus_class_init,
+};
+
+#ifdef CONFIG_VIRTFS
+static Property virtio_ccw_9p_properties[] = {
+ DEFINE_PROP_STRING("devno", VirtioCcwDevice, bus_id),
+ DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags,
+ VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("max_revision", VirtioCcwDevice, max_rev,
+ VIRTIO_CCW_MAX_REV),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_ccw_9p_realize(VirtioCcwDevice *ccw_dev, Error **errp)
+{
+ V9fsCCWState *dev = VIRTIO_9P_CCW(ccw_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+ Error *err = NULL;
+
+ qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus));
+ object_property_set_bool(OBJECT(vdev), true, "realized", &err);
+ if (err) {
+ error_propagate(errp, err);
+ }
+}
+
+static void virtio_ccw_9p_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass);
+
+ k->exit = virtio_ccw_exit;
+ k->realize = virtio_ccw_9p_realize;
+ dc->reset = virtio_ccw_reset;
+ dc->props = virtio_ccw_9p_properties;
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
+}
+
+static void virtio_ccw_9p_instance_init(Object *obj)
+{
+ V9fsCCWState *dev = VIRTIO_9P_CCW(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VIRTIO_9P);
+}
+
+static const TypeInfo virtio_ccw_9p_info = {
+ .name = TYPE_VIRTIO_9P_CCW,
+ .parent = TYPE_VIRTIO_CCW_DEVICE,
+ .instance_size = sizeof(V9fsCCWState),
+ .instance_init = virtio_ccw_9p_instance_init,
+ .class_init = virtio_ccw_9p_class_init,
+};
+#endif
+
+static void virtio_ccw_register(void)
+{
+ type_register_static(&virtio_ccw_bus_info);
+ type_register_static(&virtual_css_bus_info);
+ type_register_static(&virtio_ccw_device_info);
+ type_register_static(&virtio_ccw_serial);
+ type_register_static(&virtio_ccw_blk);
+ type_register_static(&virtio_ccw_net);
+ type_register_static(&virtio_ccw_balloon);
+ type_register_static(&virtio_ccw_scsi);
+#ifdef CONFIG_VHOST_SCSI
+ type_register_static(&vhost_ccw_scsi);
+#endif
+ type_register_static(&virtio_ccw_rng);
+ type_register_static(&virtual_css_bridge_info);
+#ifdef CONFIG_VIRTFS
+ type_register_static(&virtio_ccw_9p_info);
+#endif
+}
+
+type_init(virtio_ccw_register)
diff --git a/src/hw/s390x/virtio-ccw.h b/src/hw/s390x/virtio-ccw.h
new file mode 100644
index 0000000..7ab8367
--- /dev/null
+++ b/src/hw/s390x/virtio-ccw.h
@@ -0,0 +1,218 @@
+/*
+ * virtio ccw target definitions
+ *
+ * Copyright 2012,2015 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Pierre Morel <pmorel@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef HW_S390X_VIRTIO_CCW_H
+#define HW_S390X_VIRTIO_CCW_H
+
+#include <hw/virtio/virtio-blk.h>
+#include <hw/virtio/virtio-net.h>
+#include <hw/virtio/virtio-serial.h>
+#include <hw/virtio/virtio-scsi.h>
+#ifdef CONFIG_VHOST_SCSI
+#include <hw/virtio/vhost-scsi.h>
+#endif
+#include <hw/virtio/virtio-balloon.h>
+#include <hw/virtio/virtio-rng.h>
+#include <hw/virtio/virtio-bus.h>
+#include <hw/s390x/s390_flic.h>
+
+#define VIRTUAL_CSSID 0xfe
+
+#define VIRTIO_CCW_CU_TYPE 0x3832
+#define VIRTIO_CCW_CHPID_TYPE 0x32
+
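+/* Command codes for the virtio-specific channel commands. */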
+#define CCW_CMD_SET_VQ 0x13
+#define CCW_CMD_VDEV_RESET 0x33
+#define CCW_CMD_READ_FEAT 0x12
+#define CCW_CMD_WRITE_FEAT 0x11
+#define CCW_CMD_READ_CONF 0x22
+#define CCW_CMD_WRITE_CONF 0x21
+#define CCW_CMD_WRITE_STATUS 0x31
+#define CCW_CMD_SET_IND 0x43
+#define CCW_CMD_SET_CONF_IND 0x53
+#define CCW_CMD_READ_VQ_CONF 0x32
+#define CCW_CMD_SET_IND_ADAPTER 0x73
+#define CCW_CMD_SET_VIRTIO_REV 0x83
+
+#define TYPE_VIRTIO_CCW_DEVICE "virtio-ccw-device"
+#define VIRTIO_CCW_DEVICE(obj) \
+ OBJECT_CHECK(VirtioCcwDevice, (obj), TYPE_VIRTIO_CCW_DEVICE)
+#define VIRTIO_CCW_DEVICE_CLASS(klass) \
+ OBJECT_CLASS_CHECK(VirtIOCCWDeviceClass, (klass), TYPE_VIRTIO_CCW_DEVICE)
+#define VIRTIO_CCW_DEVICE_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(VirtIOCCWDeviceClass, (obj), TYPE_VIRTIO_CCW_DEVICE)
+
+typedef struct VirtioBusState VirtioCcwBusState;
+typedef struct VirtioBusClass VirtioCcwBusClass;
+
+#define TYPE_VIRTIO_CCW_BUS "virtio-ccw-bus"
+#define VIRTIO_CCW_BUS(obj) \
+     OBJECT_CHECK(VirtioCcwBusState, (obj), TYPE_VIRTIO_CCW_BUS)
+#define VIRTIO_CCW_BUS_GET_CLASS(obj) \
+     OBJECT_GET_CLASS(VirtioCcwBusClass, (obj), TYPE_VIRTIO_CCW_BUS)
+#define VIRTIO_CCW_BUS_CLASS(klass) \
+ OBJECT_CLASS_CHECK(VirtioCcwBusClass, klass, TYPE_VIRTIO_CCW_BUS)
+
+typedef struct VirtioCcwDevice VirtioCcwDevice;
+
+typedef struct VirtIOCCWDeviceClass {
+ DeviceClass parent_class;
+ void (*realize)(VirtioCcwDevice *dev, Error **errp);
+ int (*exit)(VirtioCcwDevice *dev);
+} VirtIOCCWDeviceClass;
+
+/* Performance improves when virtqueue kick processing is decoupled from the
+ * vcpu thread using ioeventfd for some devices. */
+#define VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT 1
+#define VIRTIO_CCW_FLAG_USE_IOEVENTFD (1 << VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT)
+
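+/*
+ * A guest-provided indicator location: its guest address, the host
+ * mapping used for adapter routes, and a reference count shared by all
+ * users of the same address.
+ */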
+typedef struct IndAddr {
+ hwaddr addr;
+ uint64_t map;
+ unsigned long refcnt;
+ int len;
+ QTAILQ_ENTRY(IndAddr) sibling;
+} IndAddr;
+
+struct VirtioCcwDevice {
+ DeviceState parent_obj;
+ SubchDev *sch;
+ char *bus_id;
+ int revision;
+ uint32_t max_rev;
+ VirtioBusState bus;
+ bool ioeventfd_started;
+ bool ioeventfd_disabled;
+ uint32_t flags;
+ uint8_t thinint_isc;
+ AdapterRoutes routes;
+ /* Guest provided values: */
+ IndAddr *indicators;
+ IndAddr *indicators2;
+ IndAddr *summary_indicator;
+ uint64_t ind_bit;
+};
+
+/* The maximum virtio revision we support. */
+#define VIRTIO_CCW_MAX_REV 1
+static inline int virtio_ccw_rev_max(VirtioCcwDevice *dev)
+{
+ return dev->max_rev;
+}
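+
+/*
+ * Revision 0 is legacy virtio; revision 1 and up enable
+ * VIRTIO_F_VERSION_1. A device's revision stays -1 until the guest
+ * negotiates one via CCW_CMD_SET_VIRTIO_REV.
+ */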
+
+/* virtual css bus type */
+typedef struct VirtualCssBus {
+ BusState parent_obj;
+} VirtualCssBus;
+
+#define TYPE_VIRTUAL_CSS_BUS "virtual-css-bus"
+#define VIRTUAL_CSS_BUS(obj) \
+ OBJECT_CHECK(VirtualCssBus, (obj), TYPE_VIRTUAL_CSS_BUS)
+
+/* virtio-scsi-ccw */
+
+#define TYPE_VIRTIO_SCSI_CCW "virtio-scsi-ccw"
+#define VIRTIO_SCSI_CCW(obj) \
+ OBJECT_CHECK(VirtIOSCSICcw, (obj), TYPE_VIRTIO_SCSI_CCW)
+
+typedef struct VirtIOSCSICcw {
+ VirtioCcwDevice parent_obj;
+ VirtIOSCSI vdev;
+} VirtIOSCSICcw;
+
+#ifdef CONFIG_VHOST_SCSI
+/* vhost-scsi-ccw */
+
+#define TYPE_VHOST_SCSI_CCW "vhost-scsi-ccw"
+#define VHOST_SCSI_CCW(obj) \
+ OBJECT_CHECK(VHostSCSICcw, (obj), TYPE_VHOST_SCSI_CCW)
+
+typedef struct VHostSCSICcw {
+ VirtioCcwDevice parent_obj;
+ VHostSCSI vdev;
+} VHostSCSICcw;
+#endif
+
+/* virtio-blk-ccw */
+
+#define TYPE_VIRTIO_BLK_CCW "virtio-blk-ccw"
+#define VIRTIO_BLK_CCW(obj) \
+ OBJECT_CHECK(VirtIOBlkCcw, (obj), TYPE_VIRTIO_BLK_CCW)
+
+typedef struct VirtIOBlkCcw {
+ VirtioCcwDevice parent_obj;
+ VirtIOBlock vdev;
+} VirtIOBlkCcw;
+
+/* virtio-balloon-ccw */
+
+#define TYPE_VIRTIO_BALLOON_CCW "virtio-balloon-ccw"
+#define VIRTIO_BALLOON_CCW(obj) \
+ OBJECT_CHECK(VirtIOBalloonCcw, (obj), TYPE_VIRTIO_BALLOON_CCW)
+
+typedef struct VirtIOBalloonCcw {
+ VirtioCcwDevice parent_obj;
+ VirtIOBalloon vdev;
+} VirtIOBalloonCcw;
+
+/* virtio-serial-ccw */
+
+#define TYPE_VIRTIO_SERIAL_CCW "virtio-serial-ccw"
+#define VIRTIO_SERIAL_CCW(obj) \
+ OBJECT_CHECK(VirtioSerialCcw, (obj), TYPE_VIRTIO_SERIAL_CCW)
+
+typedef struct VirtioSerialCcw {
+ VirtioCcwDevice parent_obj;
+ VirtIOSerial vdev;
+} VirtioSerialCcw;
+
+/* virtio-net-ccw */
+
+#define TYPE_VIRTIO_NET_CCW "virtio-net-ccw"
+#define VIRTIO_NET_CCW(obj) \
+ OBJECT_CHECK(VirtIONetCcw, (obj), TYPE_VIRTIO_NET_CCW)
+
+typedef struct VirtIONetCcw {
+ VirtioCcwDevice parent_obj;
+ VirtIONet vdev;
+} VirtIONetCcw;
+
+/* virtio-rng-ccw */
+
+#define TYPE_VIRTIO_RNG_CCW "virtio-rng-ccw"
+#define VIRTIO_RNG_CCW(obj) \
+ OBJECT_CHECK(VirtIORNGCcw, (obj), TYPE_VIRTIO_RNG_CCW)
+
+typedef struct VirtIORNGCcw {
+ VirtioCcwDevice parent_obj;
+ VirtIORNG vdev;
+} VirtIORNGCcw;
+
+VirtualCssBus *virtual_css_bus_init(void);
+void virtio_ccw_device_update_status(SubchDev *sch);
+VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch);
+
+#ifdef CONFIG_VIRTFS
+#include "hw/9pfs/virtio-9p.h"
+
+#define TYPE_VIRTIO_9P_CCW "virtio-9p-ccw"
+#define VIRTIO_9P_CCW(obj) \
+ OBJECT_CHECK(V9fsCCWState, (obj), TYPE_VIRTIO_9P_CCW)
+
+typedef struct V9fsCCWState {
+ VirtioCcwDevice parent_obj;
+ V9fsState vdev;
+} V9fsCCWState;
+
+#endif /* CONFIG_VIRTFS */
+
+#endif