author     Dr. David Alan Gilbert <dgilbert@redhat.com>  2015-11-05 18:11:08 +0000
committer  Juan Quintela <quintela@redhat.com>           2015-11-10 15:00:27 +0100
commit     6c595cdee116dc46b0d4d7d632a426681ae66ad9 (patch)
tree       98026fbdc9644b65f56a4f62aae5271bb7178b42 /migration
parent     1e2d90ebc54531c416a6765849308c8476d98f2d (diff)
Page request: Process incoming page request
On receiving MIG_RPCOMM_REQ_PAGES look up the address and queue the page.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Amit Shah <amit.shah@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
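The patch adds the producer side of a page-request queue: each incoming request becomes a MigrationSrcPageRequest that is appended, under src_page_req_mutex, to a QSIMPLEQ hanging off the MigrationState. As a rough stand-alone illustration of that pattern (not the QEMU API; the names page_req, queue_page_request and the fixed-size rbname buffer are invented for the sketch), a mutex-protected FIFO in plain C might look like this:

/* Sketch of a mutex-protected page-request FIFO, modelled loosely on the
 * MigrationSrcPageRequest list this patch adds.  Names are illustrative. */
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct page_req {
    char rbname[64];           /* which RAMBlock the request is for */
    uint64_t offset;           /* offset from the start of that block */
    uint64_t len;              /* length in bytes */
    struct page_req *next;
};

static struct page_req *req_head, *req_tail;            /* simple FIFO */
static pthread_mutex_t req_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Producer side: called once per incoming request; returns 0 on success. */
static int queue_page_request(const char *rbname, uint64_t offset, uint64_t len)
{
    struct page_req *r = calloc(1, sizeof(*r));

    if (!r) {
        return -1;
    }
    snprintf(r->rbname, sizeof(r->rbname), "%s", rbname);
    r->offset = offset;
    r->len = len;

    pthread_mutex_lock(&req_mutex);
    if (req_tail) {
        req_tail->next = r;
    } else {
        req_head = r;
    }
    req_tail = r;
    pthread_mutex_unlock(&req_mutex);
    return 0;
}

int main(void)
{
    queue_page_request("pc.ram", 0x200000, 4096);
    queue_page_request("pc.ram", 0x201000, 4096);

    /* Drain and print, as a stand-in for the RAM save loop. */
    pthread_mutex_lock(&req_mutex);
    for (struct page_req *r = req_head; r; ) {
        struct page_req *next = r->next;
        printf("%s +0x%" PRIx64 " len 0x%" PRIx64 "\n",
               r->rbname, r->offset, r->len);
        free(r);
        r = next;
    }
    req_head = req_tail = NULL;
    pthread_mutex_unlock(&req_mutex);
    return 0;
}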
Diffstat (limited to 'migration')
-rw-r--r--  migration/migration.c  31
-rw-r--r--  migration/ram.c        85
2 files changed, 115 insertions(+), 1 deletion(-)
diff --git a/migration/migration.c b/migration/migration.c
index 6ccdeb8..7d64cd3 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -21,6 +21,7 @@
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
+#include "qapi/util.h"
#include "qemu/sockets.h"
#include "qemu/rcu.h"
#include "migration/block.h"
@@ -28,9 +29,10 @@
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"
-#include "qapi/util.h"
#include "qapi-event.h"
#include "qom/cpu.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
#define MAX_THROTTLE (32 << 20) /* Migration transfer speed throttling */
@@ -72,6 +74,7 @@ static PostcopyState incoming_postcopy_state;
/* For outgoing */
MigrationState *migrate_get_current(void)
{
+ static bool once;
static MigrationState current_migration = {
.state = MIGRATION_STATUS_NONE,
.bandwidth_limit = MAX_THROTTLE,
@@ -89,6 +92,10 @@ MigrationState *migrate_get_current(void)
DEFAULT_MIGRATE_X_CPU_THROTTLE_INCREMENT,
};
+ if (!once) {
+ qemu_mutex_init(&current_migration.src_page_req_mutex);
+ once = true;
+ }
return &current_migration;
}
@@ -771,6 +778,8 @@ static void migrate_fd_cleanup(void *opaque)
qemu_bh_delete(s->cleanup_bh);
s->cleanup_bh = NULL;
+ flush_page_queue(s);
+
if (s->file) {
trace_migrate_fd_cleanup();
qemu_mutex_unlock_iothread();
@@ -903,6 +912,8 @@ MigrationState *migrate_init(const MigrationParams *params)
s->bandwidth_limit = bandwidth_limit;
migrate_set_state(s, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
+ QSIMPLEQ_INIT(&s->src_page_requests);
+
s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
return s;
}
@@ -1193,7 +1204,25 @@ static struct rp_cmd_args {
static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
ram_addr_t start, size_t len)
{
+ long our_host_ps = getpagesize();
+
trace_migrate_handle_rp_req_pages(rbname, start, len);
+
+ /*
+ * Since we currently insist on matching page sizes, just sanity check
+ * we're being asked for whole host pages.
+ */
+ if (start & (our_host_ps-1) ||
+ (len & (our_host_ps-1))) {
+ error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
+ " len: %zd", __func__, start, len);
+ mark_source_rp_bad(ms);
+ return;
+ }
+
+ if (ram_save_queue_pages(ms, rbname, start, len)) {
+ mark_source_rp_bad(ms);
+ }
}
/*
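The sanity check in migrate_handle_rp_req_pages relies on the host page size being a power of two, so masking with (page size - 1) extracts the remainder modulo the page size; any non-zero bits mean the request does not cover whole host pages. A minimal stand-alone sketch of that check (the helper name is_host_page_aligned is hypothetical) could be:

/* Stand-alone illustration of the host-page alignment check used above.
 * With a power-of-two page size, (x & (ps - 1)) == x % ps. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static bool is_host_page_aligned(uint64_t start, uint64_t len)
{
    long ps = sysconf(_SC_PAGESIZE);   /* same value getpagesize() returns */

    return !(start & (uint64_t)(ps - 1)) && !(len & (uint64_t)(ps - 1));
}

int main(void)
{
    printf("0x200000/0x1000 aligned: %d\n", is_host_page_aligned(0x200000, 0x1000));
    printf("0x200123/0x1000 aligned: %d\n", is_host_page_aligned(0x200123, 0x1000));
    return 0;
}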
diff --git a/migration/ram.c b/migration/ram.c
index 2e27b26..8302d09 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1016,6 +1016,91 @@ static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss,
}
/**
+ * flush_page_queue: Flush any remaining pages in the ram request queue
+ * it should be empty at the end anyway, but in error cases there may be
+ * some left.
+ *
+ * ms: MigrationState
+ */
+void flush_page_queue(MigrationState *ms)
+{
+ struct MigrationSrcPageRequest *mspr, *next_mspr;
+ /* This queue generally should be empty - but in the case of a failed
+ * migration might have some droppings in.
+ */
+ rcu_read_lock();
+ QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) {
+ memory_region_unref(mspr->rb->mr);
+ QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
+ g_free(mspr);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * Queue the pages for transmission, e.g. a request from postcopy destination
+ * ms: MigrationStatus in which the queue is held
+ * rbname: The RAMBlock the request is for - may be NULL (to mean reuse last)
+ * start: Offset from the start of the RAMBlock
+ * len: Length (in bytes) to send
+ * Return: 0 on success
+ */
+int ram_save_queue_pages(MigrationState *ms, const char *rbname,
+ ram_addr_t start, ram_addr_t len)
+{
+ RAMBlock *ramblock;
+
+ rcu_read_lock();
+ if (!rbname) {
+ /* Reuse last RAMBlock */
+ ramblock = ms->last_req_rb;
+
+ if (!ramblock) {
+ /*
+ * Shouldn't happen, we can't reuse the last RAMBlock if
+ * it's the 1st request.
+ */
+ error_report("ram_save_queue_pages no previous block");
+ goto err;
+ }
+ } else {
+ ramblock = qemu_ram_block_by_name(rbname);
+
+ if (!ramblock) {
+ /* We shouldn't be asked for a non-existent RAMBlock */
+ error_report("ram_save_queue_pages no block '%s'", rbname);
+ goto err;
+ }
+ ms->last_req_rb = ramblock;
+ }
+ trace_ram_save_queue_pages(ramblock->idstr, start, len);
+ if (start+len > ramblock->used_length) {
+ error_report("%s request overrun start=%zx len=%zx blocklen=%zx",
+ __func__, start, len, ramblock->used_length);
+ goto err;
+ }
+
+ struct MigrationSrcPageRequest *new_entry =
+ g_malloc0(sizeof(struct MigrationSrcPageRequest));
+ new_entry->rb = ramblock;
+ new_entry->offset = start;
+ new_entry->len = len;
+
+ memory_region_ref(ramblock->mr);
+ qemu_mutex_lock(&ms->src_page_req_mutex);
+ QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req);
+ qemu_mutex_unlock(&ms->src_page_req_mutex);
+ rcu_read_unlock();
+
+ return 0;
+
+err:
+ rcu_read_unlock();
+ return -1;
+}
+
+
+/**
* ram_find_and_save_block: Finds a dirty page and sends it to f
*
* Called within an RCU critical section.
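This diff only adds the code that queues requests (plus the error-path cleanup in flush_page_queue); how the entries are later popped by the RAM save path is outside this patch. Assuming a consumer that takes the same mutex, removes the head entry, and drops the memory-region reference that was taken at queue time, a rough sketch (block, block_unref and pop_page_request are invented stand-ins, not QEMU functions) might be:

/* Hypothetical consumer-side sketch: pop one queued request under the mutex
 * and release the reference taken when it was queued.  Not part of this diff. */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct block {                 /* stand-in for a RAMBlock and its refcount */
    unsigned refcount;
};

struct page_req {              /* same shape as the queue entries above */
    struct block *rb;
    uint64_t offset;
    uint64_t len;
    struct page_req *next;
};

static struct page_req *req_head, *req_tail;
static pthread_mutex_t req_mutex = PTHREAD_MUTEX_INITIALIZER;

static void block_unref(struct block *b)
{
    if (b && --b->refcount == 0) {
        free(b);
    }
}

/* Pop the oldest pending request, or return NULL if the queue is empty. */
static struct page_req *pop_page_request(void)
{
    struct page_req *r;

    pthread_mutex_lock(&req_mutex);
    r = req_head;
    if (r) {
        req_head = r->next;
        if (!req_head) {
            req_tail = NULL;
        }
    }
    pthread_mutex_unlock(&req_mutex);
    return r;
}

int main(void)
{
    struct page_req *r;

    while ((r = pop_page_request()) != NULL) {
        /* ... send pages [offset, offset + len) from r->rb here ... */
        block_unref(r->rb);    /* drop the reference taken at queue time */
        free(r);
    }
    return 0;
}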