Diffstat (limited to 'block')
-rw-r--r--  block/bochs.c            109
-rw-r--r--  block/cloop.c             81
-rw-r--r--  block/curl.c               5
-rw-r--r--  block/dmg.c              275
-rw-r--r--  block/parallels.c         14
-rw-r--r--  block/qcow2-cluster.c     11
-rw-r--r--  block/qcow2-refcount.c   111
-rw-r--r--  block/qcow2-snapshot.c    50
-rw-r--r--  block/qcow2.c            145
-rw-r--r--  block/qcow2.h             52
-rw-r--r--  block/vdi.c               37
-rw-r--r--  block/vhdx.c              12
-rw-r--r--  block/vpc.c               32
-rw-r--r--  block/vvfat.c              2
14 files changed, 640 insertions(+), 296 deletions(-)
diff --git a/block/bochs.c b/block/bochs.c
index 4d6403f..826ec12 100644
--- a/block/bochs.c
+++ b/block/bochs.c
@@ -39,56 +39,41 @@
// not allocated: 0xffffffff
// always little-endian
-struct bochs_header_v1 {
- char magic[32]; // "Bochs Virtual HD Image"
- char type[16]; // "Redolog"
- char subtype[16]; // "Undoable" / "Volatile" / "Growing"
- uint32_t version;
- uint32_t header; // size of header
-
- union {
- struct {
- uint32_t catalog; // num of entries
- uint32_t bitmap; // bitmap size
- uint32_t extent; // extent size
- uint64_t disk; // disk size
- char padding[HEADER_SIZE - 64 - 8 - 20];
- } redolog;
- char padding[HEADER_SIZE - 64 - 8];
- } extra;
-};
-
-// always little-endian
struct bochs_header {
- char magic[32]; // "Bochs Virtual HD Image"
- char type[16]; // "Redolog"
- char subtype[16]; // "Undoable" / "Volatile" / "Growing"
+ char magic[32]; /* "Bochs Virtual HD Image" */
+ char type[16]; /* "Redolog" */
+ char subtype[16]; /* "Undoable" / "Volatile" / "Growing" */
uint32_t version;
- uint32_t header; // size of header
+ uint32_t header; /* size of header */
+
+ uint32_t catalog; /* num of entries */
+ uint32_t bitmap; /* bitmap size */
+ uint32_t extent; /* extent size */
union {
- struct {
- uint32_t catalog; // num of entries
- uint32_t bitmap; // bitmap size
- uint32_t extent; // extent size
- uint32_t reserved; // for ???
- uint64_t disk; // disk size
- char padding[HEADER_SIZE - 64 - 8 - 24];
- } redolog;
- char padding[HEADER_SIZE - 64 - 8];
+ struct {
+ uint32_t reserved; /* for ??? */
+ uint64_t disk; /* disk size */
+ char padding[HEADER_SIZE - 64 - 20 - 12];
+ } QEMU_PACKED redolog;
+ struct {
+ uint64_t disk; /* disk size */
+ char padding[HEADER_SIZE - 64 - 20 - 8];
+ } QEMU_PACKED redolog_v1;
+ char padding[HEADER_SIZE - 64 - 20];
} extra;
-};
+} QEMU_PACKED;
typedef struct BDRVBochsState {
CoMutex lock;
uint32_t *catalog_bitmap;
- int catalog_size;
+ uint32_t catalog_size;
- int data_offset;
+ uint32_t data_offset;
- int bitmap_blocks;
- int extent_blocks;
- int extent_size;
+ uint32_t bitmap_blocks;
+ uint32_t extent_blocks;
+ uint32_t extent_size;
} BDRVBochsState;
static int bochs_probe(const uint8_t *buf, int buf_size, const char *filename)
@@ -112,9 +97,8 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVBochsState *s = bs->opaque;
- int i;
+ uint32_t i;
struct bochs_header bochs;
- struct bochs_header_v1 header_v1;
int ret;
bs->read_only = 1; // no write support yet
@@ -134,13 +118,19 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
}
if (le32_to_cpu(bochs.version) == HEADER_V1) {
- memcpy(&header_v1, &bochs, sizeof(bochs));
- bs->total_sectors = le64_to_cpu(header_v1.extra.redolog.disk) / 512;
+ bs->total_sectors = le64_to_cpu(bochs.extra.redolog_v1.disk) / 512;
} else {
- bs->total_sectors = le64_to_cpu(bochs.extra.redolog.disk) / 512;
+ bs->total_sectors = le64_to_cpu(bochs.extra.redolog.disk) / 512;
+ }
+
+ /* Limit to 1M entries to avoid unbounded allocation. This is what is
+ * needed for the largest image that bximage can create (~8 TB). */
+ s->catalog_size = le32_to_cpu(bochs.catalog);
+ if (s->catalog_size > 0x100000) {
+ error_setg(errp, "Catalog size is too large");
+ return -EFBIG;
}
- s->catalog_size = le32_to_cpu(bochs.extra.redolog.catalog);
s->catalog_bitmap = g_malloc(s->catalog_size * 4);
ret = bdrv_pread(bs->file, le32_to_cpu(bochs.header), s->catalog_bitmap,
@@ -154,10 +144,24 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
s->data_offset = le32_to_cpu(bochs.header) + (s->catalog_size * 4);
- s->bitmap_blocks = 1 + (le32_to_cpu(bochs.extra.redolog.bitmap) - 1) / 512;
- s->extent_blocks = 1 + (le32_to_cpu(bochs.extra.redolog.extent) - 1) / 512;
+ s->bitmap_blocks = 1 + (le32_to_cpu(bochs.bitmap) - 1) / 512;
+ s->extent_blocks = 1 + (le32_to_cpu(bochs.extent) - 1) / 512;
+
+ s->extent_size = le32_to_cpu(bochs.extent);
+ if (s->extent_size == 0) {
+ error_setg(errp, "Extent size may not be zero");
+ return -EINVAL;
+ } else if (s->extent_size > 0x800000) {
+ error_setg(errp, "Extent size %" PRIu32 " is too large",
+ s->extent_size);
+ return -EINVAL;
+ }
- s->extent_size = le32_to_cpu(bochs.extra.redolog.extent);
+ if (s->catalog_size < bs->total_sectors / s->extent_size) {
+ error_setg(errp, "Catalog size is too small for this disk size");
+ ret = -EINVAL;
+ goto fail;
+ }
qemu_co_mutex_init(&s->lock);
return 0;
@@ -170,8 +174,8 @@ fail:
static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
{
BDRVBochsState *s = bs->opaque;
- int64_t offset = sector_num * 512;
- int64_t extent_index, extent_offset, bitmap_offset;
+ uint64_t offset = sector_num * 512;
+ uint64_t extent_index, extent_offset, bitmap_offset;
char bitmap_entry;
// seek to sector
@@ -182,8 +186,9 @@ static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
return -1; /* not allocated */
}
- bitmap_offset = s->data_offset + (512 * s->catalog_bitmap[extent_index] *
- (s->extent_blocks + s->bitmap_blocks));
+ bitmap_offset = s->data_offset +
+ (512 * (uint64_t) s->catalog_bitmap[extent_index] *
+ (s->extent_blocks + s->bitmap_blocks));
/* read in bitmap for current extent */
if (bdrv_pread(bs->file, bitmap_offset + (extent_offset / 8),
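The bochs hunks above only work in a particular order: cap catalog_size before the g_malloc, reject a zero or oversized extent_size before dividing by it, then require the catalog to cover the whole disk. The following is a minimal standalone sketch of that ordering, reusing the patch's limits (1M catalog entries, 8 MB extents) but with a hypothetical bochs_validate() helper and plain errno returns instead of QEMU's Error API; it is illustrative, not part of the patch.

    #include <stdint.h>
    #include <errno.h>

    #define BOCHS_CATALOG_MAX   0x100000u   /* 1M entries, as in the patch */
    #define BOCHS_EXTENT_MAX    0x800000u   /* 8 MB extents, as in the patch */

    /* Hypothetical helper: validate untrusted header fields before they are
     * used to size an allocation or as a divisor. */
    int bochs_validate(uint32_t catalog_size, uint32_t extent_size,
                       uint64_t total_sectors)
    {
        if (catalog_size > BOCHS_CATALOG_MAX) {
            return -EFBIG;      /* caps the catalog_bitmap allocation */
        }
        if (extent_size == 0 || extent_size > BOCHS_EXTENT_MAX) {
            return -EINVAL;     /* avoids division by zero and huge extents */
        }
        /* Same conservative coverage check as the patch: the catalog must be
         * able to describe the whole disk. */
        if (catalog_size < total_sectors / extent_size) {
            return -EINVAL;
        }
        return 0;
    }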
diff --git a/block/cloop.c b/block/cloop.c
index b907023..b6ad50f 100644
--- a/block/cloop.c
+++ b/block/cloop.c
@@ -26,6 +26,9 @@
#include "qemu/module.h"
#include <zlib.h>
+/* Maximum compressed block size */
+#define MAX_BLOCK_SIZE (64 * 1024 * 1024)
+
typedef struct BDRVCloopState {
CoMutex lock;
uint32_t block_size;
@@ -68,6 +71,26 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
return ret;
}
s->block_size = be32_to_cpu(s->block_size);
+ if (s->block_size % 512) {
+ error_setg(errp, "block_size %u must be a multiple of 512",
+ s->block_size);
+ return -EINVAL;
+ }
+ if (s->block_size == 0) {
+ error_setg(errp, "block_size cannot be zero");
+ return -EINVAL;
+ }
+
+ /* cloop's create_compressed_fs.c warns about block sizes beyond 256 KB but
+ * we can accept more. Prevent ridiculous values like 4 GB - 1 since we
+ * need a buffer this big.
+ */
+ if (s->block_size > MAX_BLOCK_SIZE) {
+ error_setg(errp, "block_size %u must be %u MB or less",
+ s->block_size,
+ MAX_BLOCK_SIZE / (1024 * 1024));
+ return -EINVAL;
+ }
ret = bdrv_pread(bs->file, 128 + 4, &s->n_blocks, 4);
if (ret < 0) {
@@ -76,7 +99,23 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
s->n_blocks = be32_to_cpu(s->n_blocks);
/* read offsets */
- offsets_size = s->n_blocks * sizeof(uint64_t);
+ if (s->n_blocks > (UINT32_MAX - 1) / sizeof(uint64_t)) {
+ /* Prevent integer overflow */
+ error_setg(errp, "n_blocks %u must be %zu or less",
+ s->n_blocks,
+ (UINT32_MAX - 1) / sizeof(uint64_t));
+ return -EINVAL;
+ }
+ offsets_size = (s->n_blocks + 1) * sizeof(uint64_t);
+ if (offsets_size > 512 * 1024 * 1024) {
+ /* Prevent ridiculous offsets_size which causes memory allocation to
+ * fail or overflows bdrv_pread() size. In practice the 512 MB
+ * offsets[] limit supports 16 TB images at 256 KB block size.
+ */
+ error_setg(errp, "image requires too many offsets, "
+ "try increasing block size");
+ return -EINVAL;
+ }
s->offsets = g_malloc(offsets_size);
ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size);
@@ -84,13 +123,37 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
goto fail;
}
- for(i=0;i<s->n_blocks;i++) {
+ for (i = 0; i < s->n_blocks + 1; i++) {
+ uint64_t size;
+
s->offsets[i] = be64_to_cpu(s->offsets[i]);
- if (i > 0) {
- uint32_t size = s->offsets[i] - s->offsets[i - 1];
- if (size > max_compressed_block_size) {
- max_compressed_block_size = size;
- }
+ if (i == 0) {
+ continue;
+ }
+
+ if (s->offsets[i] < s->offsets[i - 1]) {
+ error_setg(errp, "offsets not monotonically increasing at "
+ "index %u, image file is corrupt", i);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ size = s->offsets[i] - s->offsets[i - 1];
+
+ /* Compressed blocks should be smaller than the uncompressed block size
+ * but maybe compression performed poorly so the compressed block is
+ * actually bigger. Clamp down on unrealistic values to prevent
+ * ridiculous s->compressed_block allocation.
+ */
+ if (size > 2 * MAX_BLOCK_SIZE) {
+ error_setg(errp, "invalid compressed block size at index %u, "
+ "image file is corrupt", i);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (size > max_compressed_block_size) {
+ max_compressed_block_size = size;
}
}
@@ -180,9 +243,7 @@ static coroutine_fn int cloop_co_read(BlockDriverState *bs, int64_t sector_num,
static void cloop_close(BlockDriverState *bs)
{
BDRVCloopState *s = bs->opaque;
- if (s->n_blocks > 0) {
- g_free(s->offsets);
- }
+ g_free(s->offsets);
g_free(s->compressed_block);
g_free(s->uncompressed_block);
inflateEnd(&s->zstream);
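The subtle point in the cloop hunk is that offsets_size is (n_blocks + 1) * sizeof(uint64_t), so both the +1 and the multiplication have to be guarded before they happen. Below is a standalone sketch of that guard with a tiny usage example; cloop_offsets_size() and CLOOP_OFFSETS_LIMIT are hypothetical names, and the limits mirror the patch.

    #include <stdint.h>
    #include <stdio.h>
    #include <errno.h>

    #define CLOOP_OFFSETS_LIMIT (512 * 1024 * 1024)  /* 512 MB, as in the patch */

    /* Returns the byte size of the offsets[] table, or 0 on rejection.
     * The range check runs first so (n_blocks + 1) * 8 cannot wrap. */
    uint64_t cloop_offsets_size(uint32_t n_blocks, int *err)
    {
        if (n_blocks > (UINT32_MAX - 1) / sizeof(uint64_t)) {
            *err = -EINVAL;                  /* would overflow 32-bit math */
            return 0;
        }
        uint64_t size = (uint64_t)(n_blocks + 1) * sizeof(uint64_t);
        if (size > CLOOP_OFFSETS_LIMIT) {
            *err = -EINVAL;                  /* unreasonably large table */
            return 0;
        }
        *err = 0;
        return size;
    }

    int main(void)
    {
        int err;
        printf("%llu\n", (unsigned long long)cloop_offsets_size(1024, &err));
        if (cloop_offsets_size(UINT32_MAX, &err) == 0) {
            printf("rejected: %d\n", err);
        }
        return 0;
    }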
diff --git a/block/curl.c b/block/curl.c
index 3494c6d..1b9b1f6 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -157,6 +157,11 @@ static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
if (!s || !s->orig_buf)
goto read_end;
+ if (s->buf_off >= s->buf_len) {
+ /* buffer full, read nothing */
+ return 0;
+ }
+ realsize = MIN(realsize, s->buf_len - s->buf_off);
memcpy(s->orig_buf + s->buf_off, ptr, realsize);
s->buf_off += realsize;
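The curl fix is a plain clamp: a write callback must never copy more than the space left in its destination buffer and should report how much it actually accepted. A generic sketch of that pattern follows; it is independent of libcurl, and bounded_write() is a hypothetical helper whose shape merely mirrors a size * nmemb style callback.

    #include <stddef.h>
    #include <string.h>

    /* Illustrative bounded-copy callback: returns the number of bytes actually
     * consumed, never more than the space remaining in dst. size * nmemb is
     * assumed small enough not to overflow size_t. */
    size_t bounded_write(char *dst, size_t dst_len, size_t *dst_off,
                         const void *src, size_t size, size_t nmemb)
    {
        size_t realsize = size * nmemb;

        if (*dst_off >= dst_len) {
            return 0;                        /* buffer full, accept nothing */
        }
        if (realsize > dst_len - *dst_off) {
            realsize = dst_len - *dst_off;   /* clamp to remaining space */
        }
        memcpy(dst + *dst_off, src, realsize);
        *dst_off += realsize;
        return realsize;
    }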
diff --git a/block/dmg.c b/block/dmg.c
index d5e9b1f..856402e 100644
--- a/block/dmg.c
+++ b/block/dmg.c
@@ -27,6 +27,14 @@
#include "qemu/module.h"
#include <zlib.h>
+enum {
+ /* Limit chunk sizes to prevent unreasonable amounts of memory being used
+ * or truncating when converting to 32-bit types
+ */
+ DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
+ DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
+};
+
typedef struct BDRVDMGState {
CoMutex lock;
/* each chunk contains a certain number of sectors,
@@ -92,13 +100,44 @@ static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
return 0;
}
+/* Increase max chunk sizes, if necessary. This function is used to calculate
+ * the buffer sizes needed for compressed/uncompressed chunk I/O.
+ */
+static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
+ uint32_t *max_compressed_size,
+ uint32_t *max_sectors_per_chunk)
+{
+ uint32_t compressed_size = 0;
+ uint32_t uncompressed_sectors = 0;
+
+ switch (s->types[chunk]) {
+ case 0x80000005: /* zlib compressed */
+ compressed_size = s->lengths[chunk];
+ uncompressed_sectors = s->sectorcounts[chunk];
+ break;
+ case 1: /* copy */
+ uncompressed_sectors = (s->lengths[chunk] + 511) / 512;
+ break;
+ case 2: /* zero */
+ uncompressed_sectors = s->sectorcounts[chunk];
+ break;
+ }
+
+ if (compressed_size > *max_compressed_size) {
+ *max_compressed_size = compressed_size;
+ }
+ if (uncompressed_sectors > *max_sectors_per_chunk) {
+ *max_sectors_per_chunk = uncompressed_sectors;
+ }
+}
+
static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVDMGState *s = bs->opaque;
- uint64_t info_begin,info_end,last_in_offset,last_out_offset;
+ uint64_t info_begin, info_end, last_in_offset, last_out_offset;
uint32_t count, tmp;
- uint32_t max_compressed_size=1,max_sectors_per_chunk=1,i;
+ uint32_t max_compressed_size = 1, max_sectors_per_chunk = 1, i;
int64_t offset;
int ret;
@@ -160,37 +199,40 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
goto fail;
}
- if (type == 0x6d697368 && count >= 244) {
- int new_size, chunk_count;
+ if (type == 0x6d697368 && count >= 244) {
+ size_t new_size;
+ uint32_t chunk_count;
offset += 4;
offset += 200;
- chunk_count = (count-204)/40;
- new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
- s->types = g_realloc(s->types, new_size/2);
- s->offsets = g_realloc(s->offsets, new_size);
- s->lengths = g_realloc(s->lengths, new_size);
- s->sectors = g_realloc(s->sectors, new_size);
- s->sectorcounts = g_realloc(s->sectorcounts, new_size);
+ chunk_count = (count - 204) / 40;
+ new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
+ s->types = g_realloc(s->types, new_size / 2);
+ s->offsets = g_realloc(s->offsets, new_size);
+ s->lengths = g_realloc(s->lengths, new_size);
+ s->sectors = g_realloc(s->sectors, new_size);
+ s->sectorcounts = g_realloc(s->sectorcounts, new_size);
for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
ret = read_uint32(bs, offset, &s->types[i]);
if (ret < 0) {
goto fail;
}
- offset += 4;
- if(s->types[i]!=0x80000005 && s->types[i]!=1 && s->types[i]!=2) {
- if(s->types[i]==0xffffffff) {
- last_in_offset = s->offsets[i-1]+s->lengths[i-1];
- last_out_offset = s->sectors[i-1]+s->sectorcounts[i-1];
- }
- chunk_count--;
- i--;
- offset += 36;
- continue;
- }
- offset += 4;
+ offset += 4;
+ if (s->types[i] != 0x80000005 && s->types[i] != 1 &&
+ s->types[i] != 2) {
+ if (s->types[i] == 0xffffffff && i > 0) {
+ last_in_offset = s->offsets[i - 1] + s->lengths[i - 1];
+ last_out_offset = s->sectors[i - 1] +
+ s->sectorcounts[i - 1];
+ }
+ chunk_count--;
+ i--;
+ offset += 36;
+ continue;
+ }
+ offset += 4;
ret = read_uint64(bs, offset, &s->sectors[i]);
if (ret < 0) {
@@ -205,6 +247,14 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
}
offset += 8;
+ if (s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
+ error_report("sector count %" PRIu64 " for chunk %u is "
+ "larger than max (%u)",
+ s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
+ ret = -EINVAL;
+ goto fail;
+ }
+
ret = read_uint64(bs, offset, &s->offsets[i]);
if (ret < 0) {
goto fail;
@@ -218,19 +268,25 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
}
offset += 8;
- if(s->lengths[i]>max_compressed_size)
- max_compressed_size = s->lengths[i];
- if(s->sectorcounts[i]>max_sectors_per_chunk)
- max_sectors_per_chunk = s->sectorcounts[i];
- }
- s->n_chunks+=chunk_count;
- }
+ if (s->lengths[i] > DMG_LENGTHS_MAX) {
+ error_report("length %" PRIu64 " for chunk %u is larger "
+ "than max (%u)",
+ s->lengths[i], i, DMG_LENGTHS_MAX);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ update_max_chunk_size(s, i, &max_compressed_size,
+ &max_sectors_per_chunk);
+ }
+ s->n_chunks += chunk_count;
+ }
}
/* initialize zlib engine */
- s->compressed_chunk = g_malloc(max_compressed_size+1);
- s->uncompressed_chunk = g_malloc(512*max_sectors_per_chunk);
- if(inflateInit(&s->zstream) != Z_OK) {
+ s->compressed_chunk = g_malloc(max_compressed_size + 1);
+ s->uncompressed_chunk = g_malloc(512 * max_sectors_per_chunk);
+ if (inflateInit(&s->zstream) != Z_OK) {
ret = -EINVAL;
goto fail;
}
@@ -252,83 +308,82 @@ fail:
}
static inline int is_sector_in_chunk(BDRVDMGState* s,
- uint32_t chunk_num,int sector_num)
+ uint32_t chunk_num, uint64_t sector_num)
{
- if(chunk_num>=s->n_chunks || s->sectors[chunk_num]>sector_num ||
- s->sectors[chunk_num]+s->sectorcounts[chunk_num]<=sector_num)
- return 0;
- else
- return -1;
+ if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
+ s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
+ return 0;
+ } else {
+ return -1;
+ }
}
-static inline uint32_t search_chunk(BDRVDMGState* s,int sector_num)
+static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
{
/* binary search */
- uint32_t chunk1=0,chunk2=s->n_chunks,chunk3;
- while(chunk1!=chunk2) {
- chunk3 = (chunk1+chunk2)/2;
- if(s->sectors[chunk3]>sector_num)
- chunk2 = chunk3;
- else if(s->sectors[chunk3]+s->sectorcounts[chunk3]>sector_num)
- return chunk3;
- else
- chunk1 = chunk3;
+ uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
+ while (chunk1 != chunk2) {
+ chunk3 = (chunk1 + chunk2) / 2;
+ if (s->sectors[chunk3] > sector_num) {
+ chunk2 = chunk3;
+ } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
+ return chunk3;
+ } else {
+ chunk1 = chunk3;
+ }
}
return s->n_chunks; /* error */
}
-static inline int dmg_read_chunk(BlockDriverState *bs, int sector_num)
+static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
BDRVDMGState *s = bs->opaque;
- if(!is_sector_in_chunk(s,s->current_chunk,sector_num)) {
- int ret;
- uint32_t chunk = search_chunk(s,sector_num);
-
- if(chunk>=s->n_chunks)
- return -1;
-
- s->current_chunk = s->n_chunks;
- switch(s->types[chunk]) {
- case 0x80000005: { /* zlib compressed */
- int i;
-
- /* we need to buffer, because only the chunk as whole can be
- * inflated. */
- i=0;
- do {
- ret = bdrv_pread(bs->file, s->offsets[chunk] + i,
- s->compressed_chunk+i, s->lengths[chunk]-i);
- if(ret<0 && errno==EINTR)
- ret=0;
- i+=ret;
- } while(ret>=0 && ret+i<s->lengths[chunk]);
-
- if (ret != s->lengths[chunk])
- return -1;
-
- s->zstream.next_in = s->compressed_chunk;
- s->zstream.avail_in = s->lengths[chunk];
- s->zstream.next_out = s->uncompressed_chunk;
- s->zstream.avail_out = 512*s->sectorcounts[chunk];
- ret = inflateReset(&s->zstream);
- if(ret != Z_OK)
- return -1;
- ret = inflate(&s->zstream, Z_FINISH);
- if(ret != Z_STREAM_END || s->zstream.total_out != 512*s->sectorcounts[chunk])
- return -1;
- break; }
- case 1: /* copy */
- ret = bdrv_pread(bs->file, s->offsets[chunk],
+ if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
+ int ret;
+ uint32_t chunk = search_chunk(s, sector_num);
+
+ if (chunk >= s->n_chunks) {
+ return -1;
+ }
+
+ s->current_chunk = s->n_chunks;
+ switch (s->types[chunk]) {
+ case 0x80000005: { /* zlib compressed */
+ /* we need to buffer, because only the chunk as whole can be
+ * inflated. */
+ ret = bdrv_pread(bs->file, s->offsets[chunk],
+ s->compressed_chunk, s->lengths[chunk]);
+ if (ret != s->lengths[chunk]) {
+ return -1;
+ }
+
+ s->zstream.next_in = s->compressed_chunk;
+ s->zstream.avail_in = s->lengths[chunk];
+ s->zstream.next_out = s->uncompressed_chunk;
+ s->zstream.avail_out = 512 * s->sectorcounts[chunk];
+ ret = inflateReset(&s->zstream);
+ if (ret != Z_OK) {
+ return -1;
+ }
+ ret = inflate(&s->zstream, Z_FINISH);
+ if (ret != Z_STREAM_END ||
+ s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
+ return -1;
+ }
+ break; }
+ case 1: /* copy */
+ ret = bdrv_pread(bs->file, s->offsets[chunk],
s->uncompressed_chunk, s->lengths[chunk]);
- if (ret != s->lengths[chunk])
- return -1;
- break;
- case 2: /* zero */
- memset(s->uncompressed_chunk, 0, 512*s->sectorcounts[chunk]);
- break;
- }
- s->current_chunk = chunk;
+ if (ret != s->lengths[chunk]) {
+ return -1;
+ }
+ break;
+ case 2: /* zero */
+ memset(s->uncompressed_chunk, 0, 512 * s->sectorcounts[chunk]);
+ break;
+ }
+ s->current_chunk = chunk;
}
return 0;
}
@@ -339,12 +394,14 @@ static int dmg_read(BlockDriverState *bs, int64_t sector_num,
BDRVDMGState *s = bs->opaque;
int i;
- for(i=0;i<nb_sectors;i++) {
- uint32_t sector_offset_in_chunk;
- if(dmg_read_chunk(bs, sector_num+i) != 0)
- return -1;
- sector_offset_in_chunk = sector_num+i-s->sectors[s->current_chunk];
- memcpy(buf+i*512,s->uncompressed_chunk+sector_offset_in_chunk*512,512);
+ for (i = 0; i < nb_sectors; i++) {
+ uint32_t sector_offset_in_chunk;
+ if (dmg_read_chunk(bs, sector_num + i) != 0) {
+ return -1;
+ }
+ sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
+ memcpy(buf + i * 512,
+ s->uncompressed_chunk + sector_offset_in_chunk * 512, 512);
}
return 0;
}
@@ -376,12 +433,12 @@ static void dmg_close(BlockDriverState *bs)
}
static BlockDriver bdrv_dmg = {
- .format_name = "dmg",
- .instance_size = sizeof(BDRVDMGState),
- .bdrv_probe = dmg_probe,
- .bdrv_open = dmg_open,
- .bdrv_read = dmg_co_read,
- .bdrv_close = dmg_close,
+ .format_name = "dmg",
+ .instance_size = sizeof(BDRVDMGState),
+ .bdrv_probe = dmg_probe,
+ .bdrv_open = dmg_open,
+ .bdrv_read = dmg_co_read,
+ .bdrv_close = dmg_close,
};
static void bdrv_dmg_init(void)
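The dmg caps exist so that the later 32-bit buffer arithmetic (512 * sectorcount for the uncompressed buffer, chunk lengths used as read sizes) can neither truncate nor balloon. Here is a small standalone sketch of checking one chunk record against those caps; dmg_check_chunk() is a hypothetical name and the constants mirror the patch.

    #include <stdint.h>
    #include <errno.h>

    enum {
        LENGTHS_MAX      = 64 * 1024 * 1024,    /* 64 MB, as in the patch */
        SECTORCOUNTS_MAX = LENGTHS_MAX / 512,
    };

    /* Accept a chunk only if its byte length and sector count stay within the
     * caps; then 512 * sectorcount and the compressed-chunk allocation both
     * fit comfortably in 32 bits. */
    int dmg_check_chunk(uint64_t length, uint64_t sectorcount)
    {
        if (length > LENGTHS_MAX) {
            return -EINVAL;
        }
        if (sectorcount > SECTORCOUNTS_MAX) {
            return -EINVAL;
        }
        return 0;
    }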
diff --git a/block/parallels.c b/block/parallels.c
index 3f588f5..1a5bd35 100644
--- a/block/parallels.c
+++ b/block/parallels.c
@@ -49,9 +49,9 @@ typedef struct BDRVParallelsState {
CoMutex lock;
uint32_t *catalog_bitmap;
- int catalog_size;
+ unsigned int catalog_size;
- int tracks;
+ unsigned int tracks;
} BDRVParallelsState;
static int parallels_probe(const uint8_t *buf, int buf_size, const char *filename)
@@ -93,8 +93,18 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
bs->total_sectors = le32_to_cpu(ph.nb_sectors);
s->tracks = le32_to_cpu(ph.tracks);
+ if (s->tracks == 0) {
+ error_setg(errp, "Invalid image: Zero sectors per track");
+ ret = -EINVAL;
+ goto fail;
+ }
s->catalog_size = le32_to_cpu(ph.catalog_entries);
+ if (s->catalog_size > INT_MAX / 4) {
+ error_setg(errp, "Catalog too large");
+ ret = -EFBIG;
+ goto fail;
+ }
s->catalog_bitmap = g_malloc(s->catalog_size * 4);
ret = bdrv_pread(bs->file, 64, s->catalog_bitmap, s->catalog_size * 4);
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index 9499df9..60a6910 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -55,7 +55,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
}
}
- if (new_l1_size > INT_MAX) {
+ if (new_l1_size > INT_MAX / sizeof(uint64_t)) {
return -EFBIG;
}
@@ -359,15 +359,6 @@ static int coroutine_fn copy_sectors(BlockDriverState *bs,
struct iovec iov;
int n, ret;
- /*
- * If this is the last cluster and it is only partially used, we must only
- * copy until the end of the image, or bdrv_check_request will fail for the
- * bdrv_read/write calls below.
- */
- if (start_sect + n_end > bs->total_sectors) {
- n_end = bs->total_sectors - start_sect;
- }
-
n = n_end - n_start;
if (n <= 0) {
return 0;
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
index 4a2df5f..a37ee45 100644
--- a/block/qcow2-refcount.c
+++ b/block/qcow2-refcount.c
@@ -28,7 +28,7 @@
#include "qemu/range.h"
#include "qapi/qmp/types.h"
-static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size);
+static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size);
static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
int64_t offset, int64_t length,
int addend, enum qcow2_discard_type type);
@@ -40,8 +40,10 @@ static int QEMU_WARN_UNUSED_RESULT update_refcount(BlockDriverState *bs,
int qcow2_refcount_init(BlockDriverState *bs)
{
BDRVQcowState *s = bs->opaque;
- int ret, refcount_table_size2, i;
+ unsigned int refcount_table_size2, i;
+ int ret;
+ assert(s->refcount_table_size <= INT_MAX / sizeof(uint64_t));
refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
s->refcount_table = g_malloc(refcount_table_size2);
if (s->refcount_table_size > 0) {
@@ -87,7 +89,7 @@ static int load_refcount_block(BlockDriverState *bs,
static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
{
BDRVQcowState *s = bs->opaque;
- int refcount_table_index, block_index;
+ uint64_t refcount_table_index, block_index;
int64_t refcount_block_offset;
int ret;
uint16_t *refcount_block;
@@ -192,10 +194,11 @@ static int alloc_refcount_block(BlockDriverState *bs,
* they can describe them themselves.
*
* - We need to consider that at this point we are inside update_refcounts
- * and doing the initial refcount increase. This means that some clusters
- * have already been allocated by the caller, but their refcount isn't
- * accurate yet. free_cluster_index tells us where this allocation ends
- * as long as we don't overwrite it by freeing clusters.
+ * and potentially doing an initial refcount increase. This means that
+ * some clusters have already been allocated by the caller, but their
+ * refcount isn't accurate yet. If we allocate clusters for metadata, we
+ * need to return -EAGAIN to signal the caller that it needs to restart
+ * the search for free clusters.
*
* - alloc_clusters_noref and qcow2_free_clusters may load a different
* refcount block into the cache
@@ -280,7 +283,10 @@ static int alloc_refcount_block(BlockDriverState *bs,
}
s->refcount_table[refcount_table_index] = new_block;
- return 0;
+
+ /* The new refcount block may be where the caller intended to put its
+ * data, so let it restart the search. */
+ return -EAGAIN;
}
ret = qcow2_cache_put(bs, s->refcount_block_cache, (void**) refcount_block);
@@ -303,8 +309,11 @@ static int alloc_refcount_block(BlockDriverState *bs,
/* Calculate the number of refcount blocks needed so far */
uint64_t refcount_block_clusters = 1 << (s->cluster_bits - REFCOUNT_SHIFT);
- uint64_t blocks_used = (s->free_cluster_index +
- refcount_block_clusters - 1) / refcount_block_clusters;
+ uint64_t blocks_used = DIV_ROUND_UP(cluster_index, refcount_block_clusters);
+
+ if (blocks_used > QCOW_MAX_REFTABLE_SIZE / sizeof(uint64_t)) {
+ return -EFBIG;
+ }
/* And now we need at least one block more for the new metadata */
uint64_t table_size = next_refcount_table_size(s, blocks_used + 1);
@@ -337,8 +346,6 @@ static int alloc_refcount_block(BlockDriverState *bs,
uint16_t *new_blocks = g_malloc0(blocks_clusters * s->cluster_size);
uint64_t *new_table = g_malloc0(table_size * sizeof(uint64_t));
- assert(meta_offset >= (s->free_cluster_index * s->cluster_size));
-
/* Fill the new refcount table */
memcpy(new_table, s->refcount_table,
s->refcount_table_size * sizeof(uint64_t));
@@ -401,18 +408,19 @@ static int alloc_refcount_block(BlockDriverState *bs,
s->refcount_table_size = table_size;
s->refcount_table_offset = table_offset;
- /* Free old table. Remember, we must not change free_cluster_index */
- uint64_t old_free_cluster_index = s->free_cluster_index;
+ /* Free old table. */
qcow2_free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t),
QCOW2_DISCARD_OTHER);
- s->free_cluster_index = old_free_cluster_index;
ret = load_refcount_block(bs, new_block, (void**) refcount_block);
if (ret < 0) {
return ret;
}
- return 0;
+ /* If we were trying to do the initial refcount update for some cluster
+ * allocation, we might have used the same clusters to store newly
+ * allocated metadata. Make the caller search some new space. */
+ return -EAGAIN;
fail_table:
g_free(new_table);
@@ -627,15 +635,16 @@ int qcow2_update_cluster_refcount(BlockDriverState *bs,
/* return < 0 if error */
-static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size)
+static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size)
{
BDRVQcowState *s = bs->opaque;
- int i, nb_clusters, refcount;
+ uint64_t i, nb_clusters;
+ int refcount;
nb_clusters = size_to_clusters(s, size);
retry:
for(i = 0; i < nb_clusters; i++) {
- int64_t next_cluster_index = s->free_cluster_index++;
+ uint64_t next_cluster_index = s->free_cluster_index++;
refcount = get_refcount(bs, next_cluster_index);
if (refcount < 0) {
@@ -652,18 +661,21 @@ retry:
return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
}
-int64_t qcow2_alloc_clusters(BlockDriverState *bs, int64_t size)
+int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size)
{
int64_t offset;
int ret;
BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
- offset = alloc_clusters_noref(bs, size);
- if (offset < 0) {
- return offset;
- }
+ do {
+ offset = alloc_clusters_noref(bs, size);
+ if (offset < 0) {
+ return offset;
+ }
+
+ ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
+ } while (ret == -EAGAIN);
- ret = update_refcount(bs, offset, size, 1, QCOW2_DISCARD_NEVER);
if (ret < 0) {
return ret;
}
@@ -676,7 +688,6 @@ int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
{
BDRVQcowState *s = bs->opaque;
uint64_t cluster_index;
- uint64_t old_free_cluster_index;
uint64_t i;
int refcount, ret;
@@ -685,30 +696,28 @@ int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
return 0;
}
- /* Check how many clusters there are free */
- cluster_index = offset >> s->cluster_bits;
- for(i = 0; i < nb_clusters; i++) {
- refcount = get_refcount(bs, cluster_index++);
+ do {
+ /* Check how many clusters there are free */
+ cluster_index = offset >> s->cluster_bits;
+ for(i = 0; i < nb_clusters; i++) {
+ refcount = get_refcount(bs, cluster_index++);
- if (refcount < 0) {
- return refcount;
- } else if (refcount != 0) {
- break;
+ if (refcount < 0) {
+ return refcount;
+ } else if (refcount != 0) {
+ break;
+ }
}
- }
- /* And then allocate them */
- old_free_cluster_index = s->free_cluster_index;
- s->free_cluster_index = cluster_index + i;
+ /* And then allocate them */
+ ret = update_refcount(bs, offset, i << s->cluster_bits, 1,
+ QCOW2_DISCARD_NEVER);
+ } while (ret == -EAGAIN);
- ret = update_refcount(bs, offset, i << s->cluster_bits, 1,
- QCOW2_DISCARD_NEVER);
if (ret < 0) {
return ret;
}
- s->free_cluster_index = old_free_cluster_index;
-
return i;
}
@@ -1011,8 +1020,7 @@ static void inc_refcounts(BlockDriverState *bs,
int64_t offset, int64_t size)
{
BDRVQcowState *s = bs->opaque;
- int64_t start, last, cluster_offset;
- int k;
+ uint64_t start, last, cluster_offset, k;
if (size <= 0)
return;
@@ -1022,11 +1030,7 @@ static void inc_refcounts(BlockDriverState *bs,
for(cluster_offset = start; cluster_offset <= last;
cluster_offset += s->cluster_size) {
k = cluster_offset >> s->cluster_bits;
- if (k < 0) {
- fprintf(stderr, "ERROR: invalid cluster offset=0x%" PRIx64 "\n",
- cluster_offset);
- res->corruptions++;
- } else if (k >= refcount_table_size) {
+ if (k >= refcount_table_size) {
fprintf(stderr, "Warning: cluster offset=0x%" PRIx64 " is after "
"the end of the image file, can't properly check refcounts.\n",
cluster_offset);
@@ -1469,14 +1473,19 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix)
{
BDRVQcowState *s = bs->opaque;
- int64_t size, i, highest_cluster;
- int nb_clusters, refcount1, refcount2;
+ int64_t size, i, highest_cluster, nb_clusters;
+ int refcount1, refcount2;
QCowSnapshot *sn;
uint16_t *refcount_table;
int ret;
size = bdrv_getlength(bs->file);
nb_clusters = size_to_clusters(s, size);
+ if (nb_clusters > INT_MAX) {
+ res->check_errors++;
+ return -EFBIG;
+ }
+
refcount_table = g_malloc0(nb_clusters * sizeof(uint16_t));
res->bfi.total_clusters =
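The refcount rework replaces the free_cluster_index save/restore games with a retry protocol: when allocating a refcount block had to consume the very clusters the caller was about to use, the allocator returns -EAGAIN and the caller simply searches again. The sketch below shows only that loop shape, with toy stand-ins for the allocator and the refcount update; none of it is QEMU code.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-ins: pick_free_offset hands out 64 KB clusters in order;
     * try_commit fails with -EAGAIN once, pretending its own metadata
     * allocation consumed the tentative clusters. */
    static int64_t next_free = 0x10000;

    static int64_t pick_free_offset(uint64_t size)
    {
        int64_t off = next_free;
        next_free += size;
        return off;
    }

    static int try_commit(int64_t offset, uint64_t size)
    {
        static int collided;
        (void)offset; (void)size;
        return collided++ ? 0 : -EAGAIN;
    }

    /* The retry shape used by qcow2_alloc_clusters after the patch: keep
     * re-picking an offset until the refcount update stops saying -EAGAIN. */
    static int64_t alloc_clusters(uint64_t size)
    {
        int64_t offset;
        int ret;

        do {
            offset = pick_free_offset(size);
            if (offset < 0) {
                return offset;
            }
            ret = try_commit(offset, size);
        } while (ret == -EAGAIN);

        return ret < 0 ? ret : offset;
    }

    int main(void)
    {
        printf("allocated at 0x%llx\n",
               (unsigned long long)alloc_clusters(0x10000));
        return 0;
    }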
diff --git a/block/qcow2-snapshot.c b/block/qcow2-snapshot.c
index 2fc6320..0aa9def 100644
--- a/block/qcow2-snapshot.c
+++ b/block/qcow2-snapshot.c
@@ -26,31 +26,6 @@
#include "block/block_int.h"
#include "block/qcow2.h"
-typedef struct QEMU_PACKED QCowSnapshotHeader {
- /* header is 8 byte aligned */
- uint64_t l1_table_offset;
-
- uint32_t l1_size;
- uint16_t id_str_size;
- uint16_t name_size;
-
- uint32_t date_sec;
- uint32_t date_nsec;
-
- uint64_t vm_clock_nsec;
-
- uint32_t vm_state_size;
- uint32_t extra_data_size; /* for extension */
- /* extra data follows */
- /* id_str follows */
- /* name follows */
-} QCowSnapshotHeader;
-
-typedef struct QEMU_PACKED QCowSnapshotExtraData {
- uint64_t vm_state_size_large;
- uint64_t disk_size;
-} QCowSnapshotExtraData;
-
void qcow2_free_snapshots(BlockDriverState *bs)
{
BDRVQcowState *s = bs->opaque;
@@ -141,8 +116,14 @@ int qcow2_read_snapshots(BlockDriverState *bs)
}
offset += name_size;
sn->name[name_size] = '\0';
+
+ if (offset - s->snapshots_offset > QCOW_MAX_SNAPSHOTS_SIZE) {
+ ret = -EFBIG;
+ goto fail;
+ }
}
+ assert(offset - s->snapshots_offset <= INT_MAX);
s->snapshots_size = offset - s->snapshots_offset;
return 0;
@@ -163,7 +144,7 @@ static int qcow2_write_snapshots(BlockDriverState *bs)
uint32_t nb_snapshots;
uint64_t snapshots_offset;
} QEMU_PACKED header_data;
- int64_t offset, snapshots_offset;
+ int64_t offset, snapshots_offset = 0;
int ret;
/* compute the size of the snapshots */
@@ -175,7 +156,14 @@ static int qcow2_write_snapshots(BlockDriverState *bs)
offset += sizeof(extra);
offset += strlen(sn->id_str);
offset += strlen(sn->name);
+
+ if (offset > QCOW_MAX_SNAPSHOTS_SIZE) {
+ ret = -EFBIG;
+ goto fail;
+ }
}
+
+ assert(offset <= INT_MAX);
snapshots_size = offset;
/* Allocate space for the new snapshot list */
@@ -357,6 +345,10 @@ int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
uint64_t *l1_table = NULL;
int64_t l1_table_offset;
+ if (s->nb_snapshots >= QCOW_MAX_SNAPSHOTS) {
+ return -EFBIG;
+ }
+
memset(sn, 0, sizeof(*sn));
/* Generate an ID if it wasn't passed */
@@ -701,7 +693,11 @@ int qcow2_snapshot_load_tmp(BlockDriverState *bs,
sn = &s->snapshots[snapshot_index];
/* Allocate and read in the snapshot's L1 table */
- new_l1_bytes = s->l1_size * sizeof(uint64_t);
+ if (sn->l1_size > QCOW_MAX_L1_SIZE) {
+ error_setg(errp, "Snapshot L1 table too large");
+ return -EFBIG;
+ }
+ new_l1_bytes = sn->l1_size * sizeof(uint64_t);
new_l1_table = g_malloc0(align_offset(new_l1_bytes, 512));
ret = bdrv_pread(bs->file, sn->l1_table_offset, new_l1_table, new_l1_bytes);
diff --git a/block/qcow2.c b/block/qcow2.c
index b9dc960..333e26d 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -329,6 +329,32 @@ static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result,
return ret;
}
+static int validate_table_offset(BlockDriverState *bs, uint64_t offset,
+ uint64_t entries, size_t entry_len)
+{
+ BDRVQcowState *s = bs->opaque;
+ uint64_t size;
+
+ /* Use signed INT64_MAX as the maximum even for uint64_t header fields,
+ * because values will be passed to qemu functions taking int64_t. */
+ if (entries > INT64_MAX / entry_len) {
+ return -EINVAL;
+ }
+
+ size = entries * entry_len;
+
+ if (INT64_MAX - size < offset) {
+ return -EINVAL;
+ }
+
+ /* Tables must be cluster aligned */
+ if (offset & (s->cluster_size - 1)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static QemuOptsList qcow2_runtime_opts = {
.name = "qcow2",
.head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head),
@@ -419,7 +445,8 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
BDRVQcowState *s = bs->opaque;
- int len, i, ret = 0;
+ unsigned int len, i;
+ int ret = 0;
QCowHeader header;
QemuOpts *opts;
Error *local_err = NULL;
@@ -460,6 +487,18 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
s->qcow_version = header.version;
+ /* Initialise cluster size */
+ if (header.cluster_bits < MIN_CLUSTER_BITS ||
+ header.cluster_bits > MAX_CLUSTER_BITS) {
+ error_setg(errp, "Unsupported cluster size: 2^%i", header.cluster_bits);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ s->cluster_bits = header.cluster_bits;
+ s->cluster_size = 1 << s->cluster_bits;
+ s->cluster_sectors = 1 << (s->cluster_bits - 9);
+
/* Initialise version 3 header fields */
if (header.version == 2) {
header.incompatible_features = 0;
@@ -473,6 +512,18 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
be64_to_cpus(&header.autoclear_features);
be32_to_cpus(&header.refcount_order);
be32_to_cpus(&header.header_length);
+
+ if (header.header_length < 104) {
+ error_setg(errp, "qcow2 header too short");
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ if (header.header_length > s->cluster_size) {
+ error_setg(errp, "qcow2 header exceeds cluster size");
+ ret = -EINVAL;
+ goto fail;
}
if (header.header_length > sizeof(header)) {
@@ -487,6 +538,12 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
}
}
+ if (header.backing_file_offset > s->cluster_size) {
+ error_setg(errp, "Invalid backing file offset");
+ ret = -EINVAL;
+ goto fail;
+ }
+
if (header.backing_file_offset) {
ext_end = header.backing_file_offset;
} else {
@@ -506,6 +563,7 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
s->incompatible_features &
~QCOW2_INCOMPAT_MASK);
ret = -ENOTSUP;
+ g_free(feature_table);
goto fail;
}
@@ -529,12 +587,6 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
}
s->refcount_order = header.refcount_order;
- if (header.cluster_bits < MIN_CLUSTER_BITS ||
- header.cluster_bits > MAX_CLUSTER_BITS) {
- error_setg(errp, "Unsupported cluster size: 2^%i", header.cluster_bits);
- ret = -EINVAL;
- goto fail;
- }
if (header.crypt_method > QCOW_CRYPT_AES) {
error_setg(errp, "Unsupported encryption method: %i",
header.crypt_method);
@@ -545,23 +597,52 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
if (s->crypt_method_header) {
bs->encrypted = 1;
}
- s->cluster_bits = header.cluster_bits;
- s->cluster_size = 1 << s->cluster_bits;
- s->cluster_sectors = 1 << (s->cluster_bits - 9);
+
s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
s->l2_size = 1 << s->l2_bits;
bs->total_sectors = header.size / 512;
s->csize_shift = (62 - (s->cluster_bits - 8));
s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
+
s->refcount_table_offset = header.refcount_table_offset;
s->refcount_table_size =
header.refcount_table_clusters << (s->cluster_bits - 3);
- s->snapshots_offset = header.snapshots_offset;
- s->nb_snapshots = header.nb_snapshots;
+ if (header.refcount_table_clusters > qcow2_max_refcount_clusters(s)) {
+ error_setg(errp, "Reference count table too large");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = validate_table_offset(bs, s->refcount_table_offset,
+ s->refcount_table_size, sizeof(uint64_t));
+ if (ret < 0) {
+ error_setg(errp, "Invalid reference count table offset");
+ goto fail;
+ }
+
+ /* Snapshot table offset/length */
+ if (header.nb_snapshots > QCOW_MAX_SNAPSHOTS) {
+ error_setg(errp, "Too many snapshots");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = validate_table_offset(bs, header.snapshots_offset,
+ header.nb_snapshots,
+ sizeof(QCowSnapshotHeader));
+ if (ret < 0) {
+ error_setg(errp, "Invalid snapshot table offset");
+ goto fail;
+ }
/* read the level 1 table */
+ if (header.l1_size > QCOW_MAX_L1_SIZE) {
+ error_setg(errp, "Active L1 table too large");
+ ret = -EFBIG;
+ goto fail;
+ }
s->l1_size = header.l1_size;
l1_vm_state_index = size_to_l1(s, header.size);
@@ -579,7 +660,16 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
ret = -EINVAL;
goto fail;
}
+
+ ret = validate_table_offset(bs, header.l1_table_offset,
+ header.l1_size, sizeof(uint64_t));
+ if (ret < 0) {
+ error_setg(errp, "Invalid L1 table offset");
+ goto fail;
+ }
s->l1_table_offset = header.l1_table_offset;
+
+
if (s->l1_size > 0) {
s->l1_table = g_malloc0(
align_offset(s->l1_size * sizeof(uint64_t), 512));
@@ -625,8 +715,10 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
/* read the backing file name */
if (header.backing_file_offset != 0) {
len = header.backing_file_size;
- if (len > 1023) {
- len = 1023;
+ if (len > MIN(1023, s->cluster_size - header.backing_file_offset)) {
+ error_setg(errp, "Backing file name too long");
+ ret = -EINVAL;
+ goto fail;
}
ret = bdrv_pread(bs->file, header.backing_file_offset,
bs->backing_file, len);
@@ -637,6 +729,10 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
bs->backing_file[len] = '\0';
}
+ /* Internal snapshots */
+ s->snapshots_offset = header.snapshots_offset;
+ s->nb_snapshots = header.nb_snapshots;
+
ret = qcow2_read_snapshots(bs);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read snapshots");
@@ -745,6 +841,9 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
if (s->l2_table_cache) {
qcow2_cache_destroy(bs, s->l2_table_cache);
}
+ if (s->refcount_block_cache) {
+ qcow2_cache_destroy(bs, s->refcount_block_cache);
+ }
g_free(s->cluster_cache);
qemu_vfree(s->cluster_data);
return ret;
@@ -1432,7 +1531,9 @@ static int preallocate(BlockDriverState *bs)
return ret;
}
- if (meta != NULL) {
+ while (meta) {
+ QCowL2Meta *next = meta->next;
+
ret = qcow2_alloc_cluster_link_l2(bs, meta);
if (ret < 0) {
qcow2_free_any_clusters(bs, meta->alloc_offset,
@@ -1443,6 +1544,9 @@ static int preallocate(BlockDriverState *bs)
/* There are no dependent requests, but we need to remove our
* request from the list of in-flight requests */
QLIST_REMOVE(meta, next_in_flight);
+
+ g_free(meta);
+ meta = next;
}
/* TODO Preallocate data if requested */
@@ -1500,7 +1604,7 @@ static int qcow2_create2(const char *filename, int64_t total_size,
*/
BlockDriverState* bs;
QCowHeader *header;
- uint8_t* refcount_table;
+ uint64_t* refcount_table;
Error *local_err = NULL;
int ret;
@@ -1552,9 +1656,10 @@ static int qcow2_create2(const char *filename, int64_t total_size,
goto out;
}
- /* Write an empty refcount table */
- refcount_table = g_malloc0(cluster_size);
- ret = bdrv_pwrite(bs, cluster_size, refcount_table, cluster_size);
+ /* Write a refcount table with one refcount block */
+ refcount_table = g_malloc0(2 * cluster_size);
+ refcount_table[0] = cpu_to_be64(2 * cluster_size);
+ ret = bdrv_pwrite(bs, cluster_size, refcount_table, 2 * cluster_size);
g_free(refcount_table);
if (ret < 0) {
@@ -1579,7 +1684,7 @@ static int qcow2_create2(const char *filename, int64_t total_size,
goto out;
}
- ret = qcow2_alloc_clusters(bs, 2 * cluster_size);
+ ret = qcow2_alloc_clusters(bs, 3 * cluster_size);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 "
"header and refcount table");
diff --git a/block/qcow2.h b/block/qcow2.h
index 0b0eac8..b49424b 100644
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -38,6 +38,19 @@
#define QCOW_CRYPT_AES 1
#define QCOW_MAX_CRYPT_CLUSTERS 32
+#define QCOW_MAX_SNAPSHOTS 65536
+
+/* 8 MB refcount table is enough for 2 PB images at 64k cluster size
+ * (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
+#define QCOW_MAX_REFTABLE_SIZE 0x800000
+
+/* 32 MB L1 table is enough for 2 PB images at 64k cluster size
+ * (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
+#define QCOW_MAX_L1_SIZE 0x2000000
+
+/* Allow for an average of 1k per snapshot table entry, should be plenty of
+ * space for snapshot names and IDs */
+#define QCOW_MAX_SNAPSHOTS_SIZE (1024 * QCOW_MAX_SNAPSHOTS)
/* indicate that the refcount of the referenced cluster is exactly one. */
#define QCOW_OFLAG_COPIED (1ULL << 63)
@@ -97,6 +110,32 @@ typedef struct QCowHeader {
uint32_t header_length;
} QEMU_PACKED QCowHeader;
+typedef struct QEMU_PACKED QCowSnapshotHeader {
+ /* header is 8 byte aligned */
+ uint64_t l1_table_offset;
+
+ uint32_t l1_size;
+ uint16_t id_str_size;
+ uint16_t name_size;
+
+ uint32_t date_sec;
+ uint32_t date_nsec;
+
+ uint64_t vm_clock_nsec;
+
+ uint32_t vm_state_size;
+ uint32_t extra_data_size; /* for extension */
+ /* extra data follows */
+ /* id_str follows */
+ /* name follows */
+} QCowSnapshotHeader;
+
+typedef struct QEMU_PACKED QCowSnapshotExtraData {
+ uint64_t vm_state_size_large;
+ uint64_t disk_size;
+} QCowSnapshotExtraData;
+
+
typedef struct QCowSnapshot {
uint64_t l1_table_offset;
uint32_t l1_size;
@@ -191,8 +230,8 @@ typedef struct BDRVQcowState {
uint64_t *refcount_table;
uint64_t refcount_table_offset;
uint32_t refcount_table_size;
- int64_t free_cluster_index;
- int64_t free_byte_offset;
+ uint64_t free_cluster_index;
+ uint64_t free_byte_offset;
CoMutex lock;
@@ -202,7 +241,7 @@ typedef struct BDRVQcowState {
AES_KEY aes_decrypt_key;
uint64_t snapshots_offset;
int snapshots_size;
- int nb_snapshots;
+ unsigned int nb_snapshots;
QCowSnapshot *snapshots;
int flags;
@@ -383,6 +422,11 @@ static inline int64_t qcow2_vm_state_offset(BDRVQcowState *s)
return (int64_t)s->l1_vm_state_index << (s->cluster_bits + s->l2_bits);
}
+static inline uint64_t qcow2_max_refcount_clusters(BDRVQcowState *s)
+{
+ return QCOW_MAX_REFTABLE_SIZE >> s->cluster_bits;
+}
+
static inline int qcow2_get_cluster_type(uint64_t l2_entry)
{
if (l2_entry & QCOW_OFLAG_COMPRESSED) {
@@ -431,7 +475,7 @@ void qcow2_refcount_close(BlockDriverState *bs);
int qcow2_update_cluster_refcount(BlockDriverState *bs, int64_t cluster_index,
int addend, enum qcow2_discard_type type);
-int64_t qcow2_alloc_clusters(BlockDriverState *bs, int64_t size);
+int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size);
int qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
int nb_clusters);
int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size);
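The QCOW_MAX_REFTABLE_SIZE comment can be verified with a little arithmetic: an 8 MB reftable holds 1M 8-byte entries, and each refcount block (one cluster of 16-bit refcounts) covers cluster_size / 2 clusters, so 64 KiB clusters give 1M * 32768 * 64 KiB = 2 PiB and 512-byte clusters give 128 GiB. A tiny sketch of that calculation, assuming the default 2-byte refcount width; plain arithmetic, not QEMU code.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_REFTABLE_SIZE 0x800000ULL      /* 8 MB, as in qcow2.h */

    /* Image size addressable by a reftable of MAX_REFTABLE_SIZE bytes, with
     * 8-byte reftable entries and 2-byte refcounts (refcount_order 4). */
    uint64_t max_image_bytes(uint32_t cluster_size)
    {
        uint64_t reftable_entries  = MAX_REFTABLE_SIZE / 8;
        uint64_t refcounts_per_blk = cluster_size / 2;
        return reftable_entries * refcounts_per_blk * cluster_size;
    }

    int main(void)
    {
        printf("512 B clusters:  %llu GiB\n",
               (unsigned long long)(max_image_bytes(512) >> 30));
        printf("64 KiB clusters: %llu PiB\n",
               (unsigned long long)(max_image_bytes(65536) >> 50));
        return 0;
    }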
diff --git a/block/vdi.c b/block/vdi.c
index ac9a025..820cd37 100644
--- a/block/vdi.c
+++ b/block/vdi.c
@@ -120,6 +120,11 @@ typedef unsigned char uuid_t[16];
#define VDI_IS_ALLOCATED(X) ((X) < VDI_DISCARDED)
+/* max blocks in image is (0xffffffff / 4) */
+#define VDI_BLOCKS_IN_IMAGE_MAX 0x3fffffff
+#define VDI_DISK_SIZE_MAX ((uint64_t)VDI_BLOCKS_IN_IMAGE_MAX * \
+ (uint64_t)DEFAULT_CLUSTER_SIZE)
+
#if !defined(CONFIG_UUID)
static inline void uuid_generate(uuid_t out)
{
@@ -385,6 +390,14 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
vdi_header_print(&header);
#endif
+ if (header.disk_size > VDI_DISK_SIZE_MAX) {
+ error_setg(errp, "Unsupported VDI image size (size is 0x%" PRIx64
+ ", max supported is 0x%" PRIx64 ")",
+ header.disk_size, VDI_DISK_SIZE_MAX);
+ ret = -ENOTSUP;
+ goto fail;
+ }
+
if (header.disk_size % SECTOR_SIZE != 0) {
/* 'VBoxManage convertfromraw' can create images with odd disk sizes.
We accept them but round the disk size to the next multiple of
@@ -420,9 +433,9 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
header.sector_size, SECTOR_SIZE);
ret = -ENOTSUP;
goto fail;
- } else if (header.block_size != 1 * MiB) {
- error_setg(errp, "unsupported VDI image (sector size %u is not %u)",
- header.block_size, 1 * MiB);
+ } else if (header.block_size != DEFAULT_CLUSTER_SIZE) {
+ error_setg(errp, "unsupported VDI image (block size %u is not %u)",
+ header.block_size, DEFAULT_CLUSTER_SIZE);
ret = -ENOTSUP;
goto fail;
} else if (header.disk_size >
@@ -441,6 +454,12 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
error_setg(errp, "unsupported VDI image (non-NULL parent UUID)");
ret = -ENOTSUP;
goto fail;
+ } else if (header.blocks_in_image > VDI_BLOCKS_IN_IMAGE_MAX) {
+ error_setg(errp, "unsupported VDI image "
+ "(too many blocks %u, max is %u)",
+ header.blocks_in_image, VDI_BLOCKS_IN_IMAGE_MAX);
+ ret = -ENOTSUP;
+ goto fail;
}
bs->total_sectors = header.disk_size / SECTOR_SIZE;
@@ -689,11 +708,20 @@ static int vdi_create(const char *filename, QEMUOptionParameter *options,
options++;
}
+ if (bytes > VDI_DISK_SIZE_MAX) {
+ result = -ENOTSUP;
+ error_setg(errp, "Unsupported VDI image size (size is 0x%" PRIx64
+ ", max supported is 0x%" PRIx64 ")",
+ bytes, VDI_DISK_SIZE_MAX);
+ goto exit;
+ }
+
fd = qemu_open(filename,
O_WRONLY | O_CREAT | O_TRUNC | O_BINARY | O_LARGEFILE,
0644);
if (fd < 0) {
- return -errno;
+ result = -errno;
+ goto exit;
}
/* We need enough blocks to store the given disk size,
@@ -754,6 +782,7 @@ static int vdi_create(const char *filename, QEMUOptionParameter *options,
result = -errno;
}
+exit:
return result;
}
diff --git a/block/vhdx.c b/block/vhdx.c
index 5390ba6..509baaf 100644
--- a/block/vhdx.c
+++ b/block/vhdx.c
@@ -780,12 +780,20 @@ static int vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s)
le32_to_cpus(&s->logical_sector_size);
le32_to_cpus(&s->physical_sector_size);
- if (s->logical_sector_size == 0 || s->params.block_size == 0) {
+ if (s->params.block_size < VHDX_BLOCK_SIZE_MIN ||
+ s->params.block_size > VHDX_BLOCK_SIZE_MAX) {
ret = -EINVAL;
goto exit;
}
- /* both block_size and sector_size are guaranteed powers of 2 */
+ /* only 2 supported sector sizes */
+ if (s->logical_sector_size != 512 && s->logical_sector_size != 4096) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /* Both block_size and sector_size are guaranteed powers of 2, below.
+ Due to range checks above, s->sectors_per_block can never be < 256 */
s->sectors_per_block = s->params.block_size / s->logical_sector_size;
s->chunk_ratio = (VHDX_MAX_SECTORS_PER_BLOCK) *
(uint64_t)s->logical_sector_size /
diff --git a/block/vpc.c b/block/vpc.c
index 82bf248..2e25f57 100644
--- a/block/vpc.c
+++ b/block/vpc.c
@@ -45,6 +45,8 @@ enum vhd_type {
// Seconds since Jan 1, 2000 0:00:00 (UTC)
#define VHD_TIMESTAMP_BASE 946684800
+#define VHD_MAX_SECTORS (65535LL * 255 * 255)
+
// always big-endian
typedef struct vhd_footer {
char creator[8]; // "conectix"
@@ -164,6 +166,7 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
VHDDynDiskHeader *dyndisk_header;
uint8_t buf[HEADER_SIZE];
uint32_t checksum;
+ uint64_t computed_size;
int disk_type = VHD_DYNAMIC;
int ret;
@@ -222,7 +225,7 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
}
/* Allow a maximum disk size of approximately 2 TB */
- if (bs->total_sectors >= 65535LL * 255 * 255) {
+ if (bs->total_sectors >= VHD_MAX_SECTORS) {
ret = -EFBIG;
goto fail;
}
@@ -242,10 +245,31 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
}
s->block_size = be32_to_cpu(dyndisk_header->block_size);
+ if (!is_power_of_2(s->block_size) || s->block_size < BDRV_SECTOR_SIZE) {
+ error_setg(errp, "Invalid block size %" PRIu32, s->block_size);
+ ret = -EINVAL;
+ goto fail;
+ }
s->bitmap_size = ((s->block_size / (8 * 512)) + 511) & ~511;
s->max_table_entries = be32_to_cpu(dyndisk_header->max_table_entries);
- s->pagetable = g_malloc(s->max_table_entries * 4);
+
+ if ((bs->total_sectors * 512) / s->block_size > 0xffffffffU) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ if (s->max_table_entries > (VHD_MAX_SECTORS * 512) / s->block_size) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ computed_size = (uint64_t) s->max_table_entries * s->block_size;
+ if (computed_size < bs->total_sectors * 512) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ s->pagetable = qemu_blockalign(bs, s->max_table_entries * 4);
s->bat_offset = be64_to_cpu(dyndisk_header->table_offset);
@@ -298,7 +322,7 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
return 0;
fail:
- g_free(s->pagetable);
+ qemu_vfree(s->pagetable);
#ifdef CACHE
g_free(s->pageentry_u8);
#endif
@@ -833,7 +857,7 @@ static int vpc_has_zero_init(BlockDriverState *bs)
static void vpc_close(BlockDriverState *bs)
{
BDRVVPCState *s = bs->opaque;
- g_free(s->pagetable);
+ qemu_vfree(s->pagetable);
#ifdef CACHE
g_free(s->pageentry_u8);
#endif
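The vpc checks reduce to three relations: the block size must be a power of two of at least one sector, the number of blocks must fit a 32-bit BAT index, and the BAT must be able to cover the advertised disk. A compact standalone sketch of those relations; vhd_check_geometry() is a hypothetical name, the constants mirror the patch.

    #include <stdint.h>
    #include <errno.h>

    #define VHD_MAX_SECTORS (65535LL * 255 * 255)

    /* Hypothetical validator mirroring the vpc_open() checks. */
    int vhd_check_geometry(uint64_t total_sectors, uint32_t block_size,
                           uint32_t max_table_entries)
    {
        if (block_size < 512 || (block_size & (block_size - 1))) {
            return -EINVAL;        /* block size must be a power of 2 >= 512 */
        }
        if ((total_sectors * 512) / block_size > 0xffffffffULL) {
            return -EINVAL;        /* more blocks than a 32-bit BAT can index */
        }
        if (max_table_entries > (VHD_MAX_SECTORS * 512) / block_size) {
            return -EINVAL;        /* BAT larger than the format allows */
        }
        if ((uint64_t)max_table_entries * block_size < total_sectors * 512) {
            return -EINVAL;        /* BAT too small to cover the disk */
        }
        return 0;
    }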
diff --git a/block/vvfat.c b/block/vvfat.c
index f966ea5..1978c9e 100644
--- a/block/vvfat.c
+++ b/block/vvfat.c
@@ -1119,6 +1119,7 @@ DLOG(if (stderr == NULL) {
if (!s->fat_type) {
s->fat_type = 16;
}
+ s->first_sectors_number = 0x40;
cyls = s->fat_type == 12 ? 64 : 1024;
heads = 16;
secs = 63;
@@ -1146,7 +1147,6 @@ DLOG(if (stderr == NULL) {
s->current_cluster=0xffffffff;
- s->first_sectors_number=0x40;
/* read only is the default for safety */
bs->read_only = 1;
s->qcow = s->write_target = NULL;