summaryrefslogtreecommitdiffstats
path: root/lib/libarchive
diff options
context:
space:
mode:
authorkientzle <kientzle@FreeBSD.org>2005-02-12 23:00:31 +0000
committerkientzle <kientzle@FreeBSD.org>2005-02-12 23:00:31 +0000
commit51b8ea9c30e4fd636efc24f54fa5c846d03c4641 (patch)
treee55d7536a65a5e1887dba9cb44dabd4ff2cf13b8 /lib/libarchive
parent31107ff9be526b7216a47437a397dbc4ab9cdbce (diff)
downloadFreeBSD-src-51b8ea9c30e4fd636efc24f54fa5c846d03c4641.zip
FreeBSD-src-51b8ea9c30e4fd636efc24f54fa5c846d03c4641.tar.gz
Performance optimization, code clarification, and bug workaround.
When reading the bodies of Zip archive entries, request a minimum of 1 byte, rather than a minimum of the full entry size. This is faster (since it does not force the decompression layer to combine reads) and works around a bug in the "none" decompression handler (which I'm testing a separate fix for now). I've also renamed "bytes_read" to "bytes_avail" in several places to more accurately reflect that the value returned from (a->compression_read_ahead) is the number of bytes available, not necessarily the number of bytes requested.
Diffstat (limited to 'lib/libarchive')
-rw-r--r--lib/libarchive/archive_read_support_format_zip.c64
1 file changed, 36 insertions, 28 deletions
diff --git a/lib/libarchive/archive_read_support_format_zip.c b/lib/libarchive/archive_read_support_format_zip.c
index 081acaa..072c0cd 100644
--- a/lib/libarchive/archive_read_support_format_zip.c
+++ b/lib/libarchive/archive_read_support_format_zip.c
@@ -31,6 +31,7 @@ __FBSDID("$FreeBSD$");
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
+#include <time.h>
#ifdef HAVE_ZLIB_H
#include <zlib.h>
#endif
@@ -336,7 +337,7 @@ zip_read_data_none(struct archive *a, const void **buff,
size_t *size, off_t *offset)
{
struct zip *zip;
- ssize_t bytes_read;
+ ssize_t bytes_avail;
zip = *(a->pformat_data);
@@ -346,18 +347,22 @@ zip_read_data_none(struct archive *a, const void **buff,
*offset = zip->entry_offset;
return (ARCHIVE_EOF);
}
-
- bytes_read = (a->compression_read_ahead)(a, buff,
- zip->entry_bytes_remaining);
- if (bytes_read <= 0) {
+ /*
+ * Note: '1' here is a performance optimization.
+ * Recall that the decompression layer returns a count of
+ * available bytes; asking for more than that forces the
+ * decompressor to combine reads by copying data.
+ */
+ bytes_avail = (a->compression_read_ahead)(a, buff, 1);
+ if (bytes_avail <= 0) {
archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated ZIP file data");
return (ARCHIVE_FATAL);
}
- if (bytes_read > zip->entry_bytes_remaining)
- bytes_read = zip->entry_bytes_remaining;
- (a->compression_read_consume)(a, bytes_read);
- *size = bytes_read;
+ if (bytes_avail > zip->entry_bytes_remaining)
+ bytes_avail = zip->entry_bytes_remaining;
+ (a->compression_read_consume)(a, bytes_avail);
+ *size = bytes_avail;
*offset = zip->entry_offset;
zip->entry_offset += *size;
zip->entry_bytes_remaining -= *size;
@@ -370,7 +375,7 @@ zip_read_data_deflate(struct archive *a, const void **buff,
size_t *size, off_t *offset)
{
struct zip *zip;
- ssize_t bytes_read;
+ ssize_t bytes_avail;
const void *compressed_buff;
int r;
@@ -399,16 +404,20 @@ zip_read_data_deflate(struct archive *a, const void **buff,
}
}
- /* Read the next block of compressed data. */
- bytes_read = (a->compression_read_ahead)(a, &compressed_buff,
- zip->entry_bytes_remaining);
- if (bytes_read <= 0) {
+ /*
+ * Note: '1' here is a performance optimization.
+ * Recall that the decompression layer returns a count of
+ * available bytes; asking for more than that forces the
+ * decompressor to combine reads by copying data.
+ */
+ bytes_avail = (a->compression_read_ahead)(a, &compressed_buff, 1);
+ if (bytes_avail <= 0) {
archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated ZIP file body");
return (ARCHIVE_FATAL);
}
- if (bytes_read > zip->entry_bytes_remaining)
- bytes_read = zip->entry_bytes_remaining;
+ if (bytes_avail > zip->entry_bytes_remaining)
+ bytes_avail = zip->entry_bytes_remaining;
/*
* A bug in zlib.h: stream.next_in should be marked 'const'
@@ -417,7 +426,7 @@ zip_read_data_deflate(struct archive *a, const void **buff,
* cast to remove 'const'.
*/
zip->stream.next_in = (void *)(uintptr_t)(const void *)compressed_buff;
- zip->stream.avail_in = bytes_read;
+ zip->stream.avail_in = bytes_avail;
zip->stream.total_in = 0;
zip->stream.next_out = zip->uncompressed_buffer;
zip->stream.avail_out = zip->uncompressed_buffer_size;
@@ -441,9 +450,9 @@ zip_read_data_deflate(struct archive *a, const void **buff,
}
/* Consume as much as the compressor actually used. */
- bytes_read = zip->stream.total_in;
- (a->compression_read_consume)(a, bytes_read);
- zip->entry_bytes_remaining -= bytes_read;
+ bytes_avail = zip->stream.total_in;
+ (a->compression_read_consume)(a, bytes_avail);
+ zip->entry_bytes_remaining -= bytes_avail;
*offset = zip->entry_offset;
@@ -471,7 +480,7 @@ zip_read_data_skip(struct archive *a, const void **buff,
size_t *size, off_t *offset)
{
struct zip *zip;
- ssize_t bytes_read;
+ ssize_t bytes_avail;
zip = *(a->pformat_data);
@@ -483,17 +492,16 @@ zip_read_data_skip(struct archive *a, const void **buff,
/* Skip body of entry. */
while (zip->entry_bytes_remaining > 0) {
- bytes_read = (a->compression_read_ahead)(a, buff,
- zip->entry_bytes_remaining);
- if (bytes_read <= 0) {
+ bytes_avail = (a->compression_read_ahead)(a, buff, 1);
+ if (bytes_avail <= 0) {
archive_set_error(a, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated ZIP file body");
return (ARCHIVE_FATAL);
}
- if (bytes_read > zip->entry_bytes_remaining)
- bytes_read = zip->entry_bytes_remaining;
- (a->compression_read_consume)(a, bytes_read);
- zip->entry_bytes_remaining -= bytes_read;
+ if (bytes_avail > zip->entry_bytes_remaining)
+ bytes_avail = zip->entry_bytes_remaining;
+ (a->compression_read_consume)(a, bytes_avail);
+ zip->entry_bytes_remaining -= bytes_avail;
}
return (ARCHIVE_OK);
}
OpenPOWER on IntegriCloud