author     Peter Maydell <peter.maydell@linaro.org>  2015-06-12 18:04:14 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2015-06-12 18:04:14 +0100
commit     8aeaa055f5d3d4e87bf870892ba301eae57bdc1d (patch)
tree       dae0e0c12a82bae3828894b240ff15b6101b1404 /tests
parent     0a2df857a7038c75379cc575de5d4be4c0ac629e (diff)
parent     2db33f88d2b340c049c576ad75d442e4b6ffe768 (diff)
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging
# gpg: Signature made Fri Jun 12 15:57:47 2015 BST using RSA key ID 81AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"

* remotes/stefanha/tags/block-pull-request:
  qemu-iotests: expand test 093 to support group throttling
  throttle: Update throttle infrastructure copyright
  throttle: add the name of the ThrottleGroup to BlockDeviceInfo
  throttle: acquire the ThrottleGroup lock in bdrv_swap()
  throttle: Add throttle group support
  throttle: Add throttle group infrastructure tests
  throttle: Add throttle group infrastructure
  throttle: Extract timers from ThrottleState into a separate structure
  raw-posix: Fix .bdrv_co_get_block_status() for unaligned image size
  Revert "iothread: release iothread around aio_poll"

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'tests')
-rwxr-xr-x  tests/qemu-iotests/093    93
-rw-r--r--  tests/test-aio.c          19
-rw-r--r--  tests/test-throttle.c    163
3 files changed, 190 insertions, 85 deletions
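
The core refactoring in this pull request splits the timer machinery out of
ThrottleState into a new ThrottleTimers structure, which is what allows several
drives to share one ThrottleState as a group. A minimal sketch of the new
lifecycle, using only the calls exercised by the updated test-throttle.c below
(callback bodies and the wrapper function name are illustrative):

#include "block/aio.h"
#include "qemu/throttle.h"

static ThrottleState ts;   /* shared leaky-bucket state (one per group) */
static ThrottleTimers tt;  /* timers, now a separate struct (one per drive) */

static void read_timer_cb(void *opaque)  { /* restart queued reads */ }
static void write_timer_cb(void *opaque) { /* restart queued writes */ }

static void throttle_lifecycle_sketch(AioContext *ctx, ThrottleConfig *cfg)
{
    /* state and timers are now initialized separately */
    throttle_init(&ts);
    throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL,
                         read_timer_cb, write_timer_cb, &ts);

    /* throttle_config() now takes both structures */
    throttle_config(&ts, &tt, cfg);

    /* accounting still operates on the shared state alone */
    throttle_account(&ts, false, 512);   /* a 512-byte read */

    /* teardown is now on the timers, not the state */
    throttle_timers_destroy(&tt);
}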
diff --git a/tests/qemu-iotests/093 b/tests/qemu-iotests/093
index b9096a5..c0e9e2b 100755
--- a/tests/qemu-iotests/093
+++ b/tests/qemu-iotests/093
@@ -3,6 +3,7 @@
# Tests for IO throttling
#
# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2015 Igalia, S.L.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -22,6 +23,7 @@ import iotests
class ThrottleTestCase(iotests.QMPTestCase):
test_img = "null-aio://"
+ max_drives = 3
def blockstats(self, device):
result = self.vm.qmp("query-blockstats")
@@ -32,26 +34,31 @@ class ThrottleTestCase(iotests.QMPTestCase):
raise Exception("Device not found for blockstats: %s" % device)
def setUp(self):
- self.vm = iotests.VM().add_drive(self.test_img)
+ self.vm = iotests.VM()
+ for i in range(0, self.max_drives):
+ self.vm.add_drive(self.test_img)
self.vm.launch()
def tearDown(self):
self.vm.shutdown()
- def do_test_throttle(self, seconds, params):
+ def do_test_throttle(self, ndrives, seconds, params):
def check_limit(limit, num):
# IO throttling algorithm is discrete, allow 10% error so the test
# is more robust
return limit == 0 or \
- (num < seconds * limit * 1.1
- and num > seconds * limit * 0.9)
+ (num < seconds * limit * 1.1 / ndrives
+ and num > seconds * limit * 0.9 / ndrives)
nsec_per_sec = 1000000000
- params['device'] = 'drive0'
+ params['group'] = 'test'
- result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
- self.assert_qmp(result, 'return', {})
+ # Set the I/O throttling parameters to all drives
+ for i in range(0, ndrives):
+ params['device'] = 'drive%d' % i
+ result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
+ self.assert_qmp(result, 'return', {})
# Set vm clock to a known value
ns = seconds * nsec_per_sec
@@ -66,32 +73,60 @@ class ThrottleTestCase(iotests.QMPTestCase):
params['iops'] / 2,
params['iops_rd'])
rd_nr *= seconds * 2
+ rd_nr /= ndrives
wr_nr = max(params['bps'] / rq_size / 2,
params['bps_wr'] / rq_size,
params['iops'] / 2,
params['iops_wr'])
wr_nr *= seconds * 2
+ wr_nr /= ndrives
+
+ # Send I/O requests to all drives
for i in range(rd_nr):
- self.vm.hmp_qemu_io("drive0", "aio_read %d %d" % (i * rq_size, rq_size))
- for i in range(wr_nr):
- self.vm.hmp_qemu_io("drive0", "aio_write %d %d" % (i * rq_size, rq_size))
+ for drive in range(0, ndrives):
+ self.vm.hmp_qemu_io("drive%d" % drive, "aio_read %d %d" %
+ (i * rq_size, rq_size))
- start_rd_bytes, start_rd_iops, start_wr_bytes, start_wr_iops = self.blockstats('drive0')
+ for i in range(wr_nr):
+ for drive in range(0, ndrives):
+ self.vm.hmp_qemu_io("drive%d" % drive, "aio_write %d %d" %
+ (i * rq_size, rq_size))
+
+ # We'll store the I/O stats for each drive in these arrays
+ start_rd_bytes = [0] * ndrives
+ start_rd_iops = [0] * ndrives
+ start_wr_bytes = [0] * ndrives
+ start_wr_iops = [0] * ndrives
+ end_rd_bytes = [0] * ndrives
+ end_rd_iops = [0] * ndrives
+ end_wr_bytes = [0] * ndrives
+ end_wr_iops = [0] * ndrives
+
+ # Read the stats before advancing the clock
+ for i in range(0, ndrives):
+ start_rd_bytes[i], start_rd_iops[i], start_wr_bytes[i], \
+ start_wr_iops[i] = self.blockstats('drive%d' % i)
self.vm.qtest("clock_step %d" % ns)
- end_rd_bytes, end_rd_iops, end_wr_bytes, end_wr_iops = self.blockstats('drive0')
-
- rd_bytes = end_rd_bytes - start_rd_bytes
- rd_iops = end_rd_iops - start_rd_iops
- wr_bytes = end_wr_bytes - start_wr_bytes
- wr_iops = end_wr_iops - start_wr_iops
- self.assertTrue(check_limit(params['bps'], rd_bytes + wr_bytes))
- self.assertTrue(check_limit(params['bps_rd'], rd_bytes))
- self.assertTrue(check_limit(params['bps_wr'], wr_bytes))
- self.assertTrue(check_limit(params['iops'], rd_iops + wr_iops))
- self.assertTrue(check_limit(params['iops_rd'], rd_iops))
- self.assertTrue(check_limit(params['iops_wr'], wr_iops))
+ # Read the stats after advancing the clock
+ for i in range(0, ndrives):
+ end_rd_bytes[i], end_rd_iops[i], end_wr_bytes[i], \
+ end_wr_iops[i] = self.blockstats('drive%d' % i)
+
+ # Check that the I/O is within the limits and evenly distributed
+ for i in range(0, ndrives):
+ rd_bytes = end_rd_bytes[i] - start_rd_bytes[i]
+ rd_iops = end_rd_iops[i] - start_rd_iops[i]
+ wr_bytes = end_wr_bytes[i] - start_wr_bytes[i]
+ wr_iops = end_wr_iops[i] - start_wr_iops[i]
+
+ self.assertTrue(check_limit(params['bps'], rd_bytes + wr_bytes))
+ self.assertTrue(check_limit(params['bps_rd'], rd_bytes))
+ self.assertTrue(check_limit(params['bps_wr'], wr_bytes))
+ self.assertTrue(check_limit(params['iops'], rd_iops + wr_iops))
+ self.assertTrue(check_limit(params['iops_rd'], rd_iops))
+ self.assertTrue(check_limit(params['iops_wr'], wr_iops))
def test_all(self):
params = {"bps": 4096,
@@ -101,11 +136,13 @@ class ThrottleTestCase(iotests.QMPTestCase):
"iops_rd": 10,
"iops_wr": 10,
}
- # Pick each out of all possible params and test
- for tk in params:
- limits = dict([(k, 0) for k in params])
- limits[tk] = params[tk]
- self.do_test_throttle(5, limits)
+ # Repeat the test with different numbers of drives
+ for ndrives in range(1, self.max_drives + 1):
+ # Pick each out of all possible params and test
+ for tk in params:
+ limits = dict([(k, 0) for k in params])
+ limits[tk] = params[tk] * ndrives
+ self.do_test_throttle(ndrives, 5, limits)
class ThrottleTestCoroutine(ThrottleTestCase):
test_img = "null-co://"
diff --git a/tests/test-aio.c b/tests/test-aio.c
index 4b0cb45..a7cb5c9 100644
--- a/tests/test-aio.c
+++ b/tests/test-aio.c
@@ -107,7 +107,6 @@ static void test_notify(void)
typedef struct {
QemuMutex start_lock;
- EventNotifier notifier;
bool thread_acquired;
} AcquireTestData;
@@ -119,8 +118,6 @@ static void *test_acquire_thread(void *opaque)
qemu_mutex_lock(&data->start_lock);
qemu_mutex_unlock(&data->start_lock);
- g_usleep(500000);
- event_notifier_set(&data->notifier);
aio_context_acquire(ctx);
aio_context_release(ctx);
@@ -129,19 +126,20 @@ static void *test_acquire_thread(void *opaque)
return NULL;
}
-static void dummy_notifier_read(EventNotifier *n)
+static void dummy_notifier_read(EventNotifier *unused)
{
- event_notifier_test_and_clear(n);
+ g_assert(false); /* should never be invoked */
}
static void test_acquire(void)
{
QemuThread thread;
+ EventNotifier notifier;
AcquireTestData data;
/* Dummy event notifier ensures aio_poll() will block */
- event_notifier_init(&data.notifier, false);
- aio_set_event_notifier(ctx, &data.notifier, dummy_notifier_read);
+ event_notifier_init(&notifier, false);
+ aio_set_event_notifier(ctx, &notifier, dummy_notifier_read);
g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */
qemu_mutex_init(&data.start_lock);
@@ -155,13 +153,12 @@ static void test_acquire(void)
/* Block in aio_poll(), let other thread kick us and acquire context */
aio_context_acquire(ctx);
qemu_mutex_unlock(&data.start_lock); /* let the thread run */
- g_assert(aio_poll(ctx, true));
- g_assert(!data.thread_acquired);
+ g_assert(!aio_poll(ctx, true));
aio_context_release(ctx);
qemu_thread_join(&thread);
- aio_set_event_notifier(ctx, &data.notifier, NULL);
- event_notifier_cleanup(&data.notifier);
+ aio_set_event_notifier(ctx, &notifier, NULL);
+ event_notifier_cleanup(&notifier);
g_assert(data.thread_acquired);
}
diff --git a/tests/test-throttle.c b/tests/test-throttle.c
index d8ba415..0168445 100644
--- a/tests/test-throttle.c
+++ b/tests/test-throttle.c
@@ -1,10 +1,12 @@
/*
* Throttle infrastructure tests
*
- * Copyright Nodalink, SARL. 2013
+ * Copyright Nodalink, EURL. 2013-2014
+ * Copyright Igalia, S.L. 2015
*
* Authors:
- * Benoît Canet <benoit.canet@irqsave.net>
+ * Benoît Canet <benoit.canet@nodalink.com>
+ * Alberto Garcia <berto@igalia.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2 or later.
* See the COPYING.LIB file in the top-level directory.
@@ -15,11 +17,13 @@
#include "block/aio.h"
#include "qemu/throttle.h"
#include "qemu/error-report.h"
+#include "block/throttle-groups.h"
static AioContext *ctx;
static LeakyBucket bkt;
static ThrottleConfig cfg;
static ThrottleState ts;
+static ThrottleTimers tt;
/* useful function */
static bool double_cmp(double x, double y)
@@ -103,17 +107,19 @@ static void test_init(void)
{
int i;
- /* fill the structure with crap */
+ /* fill the structures with crap */
memset(&ts, 1, sizeof(ts));
+ memset(&tt, 1, sizeof(tt));
- /* init the structure */
- throttle_init(&ts, ctx, QEMU_CLOCK_VIRTUAL,
- read_timer_cb, write_timer_cb, &ts);
+ /* init structures */
+ throttle_init(&ts);
+ throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL,
+ read_timer_cb, write_timer_cb, &ts);
/* check initialized fields */
- g_assert(ts.clock_type == QEMU_CLOCK_VIRTUAL);
- g_assert(ts.timers[0]);
- g_assert(ts.timers[1]);
+ g_assert(tt.clock_type == QEMU_CLOCK_VIRTUAL);
+ g_assert(tt.timers[0]);
+ g_assert(tt.timers[1]);
/* check other fields were cleared */
g_assert(!ts.previous_leak);
@@ -124,17 +130,18 @@ static void test_init(void)
g_assert(!ts.cfg.buckets[i].level);
}
- throttle_destroy(&ts);
+ throttle_timers_destroy(&tt);
}
static void test_destroy(void)
{
int i;
- throttle_init(&ts, ctx, QEMU_CLOCK_VIRTUAL,
- read_timer_cb, write_timer_cb, &ts);
- throttle_destroy(&ts);
+ throttle_init(&ts);
+ throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL,
+ read_timer_cb, write_timer_cb, &ts);
+ throttle_timers_destroy(&tt);
for (i = 0; i < 2; i++) {
- g_assert(!ts.timers[i]);
+ g_assert(!tt.timers[i]);
}
}
@@ -170,11 +177,12 @@ static void test_config_functions(void)
orig_cfg.op_size = 1;
- throttle_init(&ts, ctx, QEMU_CLOCK_VIRTUAL,
- read_timer_cb, write_timer_cb, &ts);
+ throttle_init(&ts);
+ throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL,
+ read_timer_cb, write_timer_cb, &ts);
/* structure reset by throttle_init; previous_leak should be null */
g_assert(!ts.previous_leak);
- throttle_config(&ts, &orig_cfg);
+ throttle_config(&ts, &tt, &orig_cfg);
/* has previous leak been initialized by throttle_config ? */
g_assert(ts.previous_leak);
@@ -182,7 +190,7 @@ static void test_config_functions(void)
/* get back the fixed configuration */
throttle_get_config(&ts, &final_cfg);
- throttle_destroy(&ts);
+ throttle_timers_destroy(&tt);
g_assert(final_cfg.buckets[THROTTLE_BPS_TOTAL].avg == 153);
g_assert(final_cfg.buckets[THROTTLE_BPS_READ].avg == 56);
@@ -323,43 +331,47 @@ static void test_is_valid(void)
static void test_have_timer(void)
{
- /* zero the structure */
+ /* zero structures */
memset(&ts, 0, sizeof(ts));
+ memset(&tt, 0, sizeof(tt));
/* no timer set should return false */
- g_assert(!throttle_have_timer(&ts));
+ g_assert(!throttle_timers_are_initialized(&tt));
- /* init the structure */
- throttle_init(&ts, ctx, QEMU_CLOCK_VIRTUAL,
- read_timer_cb, write_timer_cb, &ts);
+ /* init structures */
+ throttle_init(&ts);
+ throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL,
+ read_timer_cb, write_timer_cb, &ts);
/* timer set by init should return true */
- g_assert(throttle_have_timer(&ts));
+ g_assert(throttle_timers_are_initialized(&tt));
- throttle_destroy(&ts);
+ throttle_timers_destroy(&tt);
}
static void test_detach_attach(void)
{
- /* zero the structure */
+ /* zero structures */
memset(&ts, 0, sizeof(ts));
+ memset(&tt, 0, sizeof(tt));
/* init the structure */
- throttle_init(&ts, ctx, QEMU_CLOCK_VIRTUAL,
- read_timer_cb, write_timer_cb, &ts);
+ throttle_init(&ts);
+ throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL,
+ read_timer_cb, write_timer_cb, &ts);
/* timer set by init should return true */
- g_assert(throttle_have_timer(&ts));
+ g_assert(throttle_timers_are_initialized(&tt));
/* timer should no longer exist after detaching */
- throttle_detach_aio_context(&ts);
- g_assert(!throttle_have_timer(&ts));
+ throttle_timers_detach_aio_context(&tt);
+ g_assert(!throttle_timers_are_initialized(&tt));
/* timer should exist again after attaching */
- throttle_attach_aio_context(&ts, ctx);
- g_assert(throttle_have_timer(&ts));
+ throttle_timers_attach_aio_context(&tt, ctx);
+ g_assert(throttle_timers_are_initialized(&tt));
- throttle_destroy(&ts);
+ throttle_timers_destroy(&tt);
}
static bool do_test_accounting(bool is_ops, /* are we testing bps or ops */
@@ -387,9 +399,10 @@ static bool do_test_accounting(bool is_ops, /* are we testing bps or ops */
cfg.op_size = op_size;
- throttle_init(&ts, ctx, QEMU_CLOCK_VIRTUAL,
- read_timer_cb, write_timer_cb, &ts);
- throttle_config(&ts, &cfg);
+ throttle_init(&ts);
+ throttle_timers_init(&tt, ctx, QEMU_CLOCK_VIRTUAL,
+ read_timer_cb, write_timer_cb, &ts);
+ throttle_config(&ts, &tt, &cfg);
/* account a read */
throttle_account(&ts, false, size);
@@ -414,7 +427,7 @@ static bool do_test_accounting(bool is_ops, /* are we testing bps or ops */
return false;
}
- throttle_destroy(&ts);
+ throttle_timers_destroy(&tt);
return true;
}
@@ -490,23 +503,80 @@ static void test_accounting(void)
(64.0 / 13)));
}
+static void test_groups(void)
+{
+ ThrottleConfig cfg1, cfg2;
+ BlockDriverState *bdrv1, *bdrv2, *bdrv3;
+
+ bdrv1 = bdrv_new();
+ bdrv2 = bdrv_new();
+ bdrv3 = bdrv_new();
+
+ g_assert(bdrv1->throttle_state == NULL);
+ g_assert(bdrv2->throttle_state == NULL);
+ g_assert(bdrv3->throttle_state == NULL);
+
+ throttle_group_register_bs(bdrv1, "bar");
+ throttle_group_register_bs(bdrv2, "foo");
+ throttle_group_register_bs(bdrv3, "bar");
+
+ g_assert(bdrv1->throttle_state != NULL);
+ g_assert(bdrv2->throttle_state != NULL);
+ g_assert(bdrv3->throttle_state != NULL);
+
+ g_assert(!strcmp(throttle_group_get_name(bdrv1), "bar"));
+ g_assert(!strcmp(throttle_group_get_name(bdrv2), "foo"));
+ g_assert(bdrv1->throttle_state == bdrv3->throttle_state);
+
+ /* Setting the config of a group member affects the whole group */
+ memset(&cfg1, 0, sizeof(cfg1));
+ cfg1.buckets[THROTTLE_BPS_READ].avg = 500000;
+ cfg1.buckets[THROTTLE_BPS_WRITE].avg = 285000;
+ cfg1.buckets[THROTTLE_OPS_READ].avg = 20000;
+ cfg1.buckets[THROTTLE_OPS_WRITE].avg = 12000;
+ throttle_group_config(bdrv1, &cfg1);
+
+ throttle_group_get_config(bdrv1, &cfg1);
+ throttle_group_get_config(bdrv3, &cfg2);
+ g_assert(!memcmp(&cfg1, &cfg2, sizeof(cfg1)));
+
+ cfg2.buckets[THROTTLE_BPS_READ].avg = 4547;
+ cfg2.buckets[THROTTLE_BPS_WRITE].avg = 1349;
+ cfg2.buckets[THROTTLE_OPS_READ].avg = 123;
+ cfg2.buckets[THROTTLE_OPS_WRITE].avg = 86;
+ throttle_group_config(bdrv3, &cfg1);
+
+ throttle_group_get_config(bdrv1, &cfg1);
+ throttle_group_get_config(bdrv3, &cfg2);
+ g_assert(!memcmp(&cfg1, &cfg2, sizeof(cfg1)));
+
+ throttle_group_unregister_bs(bdrv1);
+ throttle_group_unregister_bs(bdrv2);
+ throttle_group_unregister_bs(bdrv3);
+
+ g_assert(bdrv1->throttle_state == NULL);
+ g_assert(bdrv2->throttle_state == NULL);
+ g_assert(bdrv3->throttle_state == NULL);
+}
+
int main(int argc, char **argv)
{
- GSource *src;
Error *local_error = NULL;
- init_clocks();
+ qemu_init_main_loop(&local_error);
+ ctx = qemu_get_aio_context();
- ctx = aio_context_new(&local_error);
if (!ctx) {
error_report("Failed to create AIO Context: '%s'",
- error_get_pretty(local_error));
- error_free(local_error);
+ local_error ? error_get_pretty(local_error) :
+ "Failed to initialize the QEMU main loop");
+ if (local_error) {
+ error_free(local_error);
+ }
exit(1);
}
- src = aio_get_g_source(ctx);
- g_source_attach(src, NULL);
- g_source_unref(src);
+
+ bdrv_init();
do {} while (g_main_context_iteration(NULL, false));
@@ -523,6 +593,7 @@ int main(int argc, char **argv)
g_test_add_func("/throttle/config/is_valid", test_is_valid);
g_test_add_func("/throttle/config_functions", test_config_functions);
g_test_add_func("/throttle/accounting", test_accounting);
+ g_test_add_func("/throttle/groups", test_groups);
return g_test_run();
}
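
The new test_groups() above doubles as documentation for the group API: drives
registered under the same group name share a single throttle_state, so
configuring the group through any member reconfigures all of them. A condensed
usage sketch, assuming the same setup main() performs (qemu_init_main_loop()
and bdrv_init()) and whatever block-layer header declares BlockDriverState:

#include <string.h>
#include "block/throttle-groups.h"

static void throttle_group_sketch(void)
{
    BlockDriverState *a = bdrv_new();
    BlockDriverState *b = bdrv_new();
    ThrottleConfig cfg;

    /* both drives join the group named "bar" and share one state */
    throttle_group_register_bs(a, "bar");
    throttle_group_register_bs(b, "bar");

    memset(&cfg, 0, sizeof(cfg));
    cfg.buckets[THROTTLE_BPS_READ].avg = 500000;
    throttle_group_config(a, &cfg);      /* set through one member... */
    throttle_group_get_config(b, &cfg);  /* ...read back through the other */

    throttle_group_unregister_bs(a);
    throttle_group_unregister_bs(b);
}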