path: root/util/bitops.c
author     Peter Lieven <pl@kamp.de>            2013-03-26 10:58:34 +0100
committer  Juan Quintela <quintela@redhat.com>  2013-03-26 13:32:32 +0100
commit     49f676a00ab540fac1d2008be26434cf85607722 (patch)
tree       cce8a4ec1cca6877846d177c9efc17fc26aee98d /util/bitops.c
parent     56ded708ec38e4cb75a7c7357480ca34c0dc6875 (diff)
download   hqemu-49f676a00ab540fac1d2008be26434cf85607722.zip
           hqemu-49f676a00ab540fac1d2008be26434cf85607722.tar.gz
bitops: unroll while loop in find_next_bit()
This patch adopts the loop unrolling idea of bitmap_is_zero() to speed up the skipping of large areas of zeros in find_next_bit(). This routine is used extensively to find dirty pages during live migration. Testing only the find_next_bit() performance on a zeroed bitfield, the loop unrolling decreased execution time by approx. 50% on x86_64.

Signed-off-by: Peter Lieven <pl@kamp.de>
Signed-off-by: Juan Quintela <quintela@redhat.com>
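For illustration only, the following is a minimal, self-contained sketch of the same unrolling idea outside of QEMU's bitops code. The helper name scan_for_set_bit and its word-index return convention are hypothetical and not part of this patch; the loop structure simply mirrors the four-word skip added in the diff below.

#include <stddef.h>

/*
 * Hypothetical helper: return the index of the first word of 'addr'
 * (nwords words long) that contains a set bit, or nwords if all words
 * are zero.  Runs of zero words are skipped four at a time.
 */
static size_t scan_for_set_bit(const unsigned long *addr, size_t nwords)
{
    size_t i = 0;

    /* Unrolled fast path: look at four words per iteration. */
    while (nwords - i >= 4) {
        unsigned long d0 = addr[i];
        unsigned long d1 = addr[i + 1];
        unsigned long d2 = addr[i + 2];
        unsigned long d3 = addr[i + 3];

        if (d0) {
            return i;           /* set bit in the first of the four words */
        }
        if (d1 | d2 | d3) {
            break;              /* set bit in one of the next three words */
        }
        i += 4;                 /* all four words were zero, skip them */
    }

    /* Slow path: finish word by word. */
    while (i < nwords) {
        if (addr[i]) {
            return i;
        }
        i++;
    }
    return nwords;              /* no set bit found */
}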
Diffstat (limited to 'util/bitops.c')
-rw-r--r--  util/bitops.c  18
1 file changed, 17 insertions, 1 deletion
diff --git a/util/bitops.c b/util/bitops.c
index e72237a..227c38b 100644
--- a/util/bitops.c
+++ b/util/bitops.c
@@ -42,7 +42,23 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
         size -= BITS_PER_LONG;
         result += BITS_PER_LONG;
     }
-    while (size & ~(BITS_PER_LONG-1)) {
+    while (size >= 4*BITS_PER_LONG) {
+        unsigned long d1, d2, d3;
+        tmp = *p;
+        d1 = *(p+1);
+        d2 = *(p+2);
+        d3 = *(p+3);
+        if (tmp) {
+            goto found_middle;
+        }
+        if (d1 | d2 | d3) {
+            break;
+        }
+        p += 4;
+        result += 4*BITS_PER_LONG;
+        size -= 4*BITS_PER_LONG;
+    }
+    while (size >= BITS_PER_LONG) {
         if ((tmp = *(p++))) {
             goto found_middle;
         }