author    kan <kan@FreeBSD.org>    2007-05-19 01:19:51 +0000
committer kan <kan@FreeBSD.org>    2007-05-19 01:19:51 +0000
commit    1f9ea4d0a40cca64d60cf4dab152349da7b9dddf (patch)
tree      0cb530c9c38af219e6dda2994c078b6b2b9ad853 /contrib/gcc/ggc-zone.c
parent    4895159b2b4f648051c1f139faa7b6dc50c2bfcb (diff)
GCC 4.2.0 release.
Diffstat (limited to 'contrib/gcc/ggc-zone.c')
-rw-r--r--    contrib/gcc/ggc-zone.c    2251
1 file changed, 1616 insertions, 635 deletions
diff --git a/contrib/gcc/ggc-zone.c b/contrib/gcc/ggc-zone.c
index 355414f..1d6edfb 100644
--- a/contrib/gcc/ggc-zone.c
+++ b/contrib/gcc/ggc-zone.c
@@ -1,9 +1,10 @@
/* "Bag-of-pages" zone garbage collector for the GNU compiler.
- Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
Free Software Foundation, Inc.
- Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
- (dberlin@dberlin.org)
+ Contributed by Richard Henderson (rth@redhat.com) and Daniel Berlin
+ (dberlin@dberlin.org). Rewritten by Daniel Jacobowitz
+ <dan@codesourcery.com>.
This file is part of GCC.
@@ -19,8 +20,8 @@ for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 59 Temple Place - Suite 330, Boston, MA
-02111-1307, USA. */
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
#include "config.h"
#include "system.h"
@@ -51,6 +52,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#define VALGRIND_MALLOCLIKE_BLOCK(w,x,y,z)
#define VALGRIND_FREELIKE_BLOCK(x,y)
#endif
+
/* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
file open. Prefer either to valloc. */
#ifdef HAVE_MMAP_ANON
@@ -64,56 +66,58 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
# define MAP_ANONYMOUS MAP_ANON
# endif
# define USING_MMAP
-
#endif
#ifdef HAVE_MMAP_DEV_ZERO
-
# include <sys/mman.h>
# ifndef MAP_FAILED
# define MAP_FAILED -1
# endif
# define USING_MMAP
-
#endif
#ifndef USING_MMAP
-#error "Zone collector requires mmap"
+#error Zone collector requires mmap
#endif
#if (GCC_VERSION < 3001)
#define prefetch(X) ((void) X)
+#define prefetchw(X) ((void) X)
#else
#define prefetch(X) __builtin_prefetch (X)
+#define prefetchw(X) __builtin_prefetch (X, 1, 3)
#endif
-/* NOTES:
+/* FUTURE NOTES:
+
If we track inter-zone pointers, we can mark single zones at a
time.
+
If we have a zone where we guarantee no inter-zone pointers, we
could mark that zone separately.
+
The garbage zone should not be marked, and we should return 1 in
ggc_set_mark for any object in the garbage zone, which cuts off
marking quickly. */
-/* Stategy:
+
+/* Strategy:
This garbage-collecting allocator segregates objects into zones.
It also segregates objects into "large" and "small" bins. Large
- objects are greater or equal to page size.
+ objects are greater than page size.
- Pages for small objects are broken up into chunks, each of which
- are described by a struct alloc_chunk. One can walk over all
- chunks on the page by adding the chunk size to the chunk's data
- address. The free space for a page exists in the free chunk bins.
-
- Each page-entry also has a context depth, which is used to track
- pushing and popping of allocation contexts. Only objects allocated
- in the current (highest-numbered) context may be collected.
+ Pages for small objects are broken up into chunks. The page has
+ a bitmap which marks the start position of each chunk (whether
+ allocated or free). Free chunks are on one of the zone's free
+ lists and contain a pointer to the next free chunk. Chunks in
+ most of the free lists have a fixed size determined by the
+ free list. Chunks in the "other" sized free list have their size
+ stored right after their chain pointer.
Empty pages (of all sizes) are kept on a single page cache list,
and are considered first when new pages are required; they are
deallocated at the start of the next collection if they haven't
- been recycled by then. */
+ been recycled by then. The free page list is currently per-zone. */
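
A minimal sketch of the free-list binning just described, with hypothetical
stand-ins (the ex_* names and constants are ours; the real FREE_BIN_DELTA
and NUM_FREE_BINS are defined further down in this patch):

#include <stddef.h>

#define EX_BIN_DELTA 8            /* stand-in for FREE_BIN_DELTA */
#define EX_NUM_BINS  64           /* stand-in for NUM_FREE_BINS */

struct ex_chunk { struct ex_chunk *next_free; size_t size; };

static struct ex_chunk *ex_bins[EX_NUM_BINS + 1];

/* Push a free block of SIZE bytes (assumed a nonzero multiple of
   EX_BIN_DELTA) onto its bin.  Bins 1..EX_NUM_BINS hold fixed-size
   chunks; anything larger goes to the "other" bin 0, where the size
   must be recorded in the chunk itself.  */
static void
ex_free (void *ptr, size_t size)
{
  struct ex_chunk *c = ptr;
  size_t bin = size / EX_BIN_DELTA;
  if (bin > EX_NUM_BINS)
    {
      bin = 0;
      c->size = size;             /* only bin 0 stores sizes */
    }
  c->next_free = ex_bins[bin];
  ex_bins[bin] = c;
}
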
/* Define GGC_DEBUG_LEVEL to print debugging information.
0: No debugging output.
@@ -126,78 +130,89 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#ifndef HOST_BITS_PER_PTR
#define HOST_BITS_PER_PTR HOST_BITS_PER_LONG
#endif
-#ifdef COOKIE_CHECKING
-#define CHUNK_MAGIC 0x95321123
-#define DEADCHUNK_MAGIC 0x12817317
-#endif
-
-/* This structure manages small chunks. When the chunk is free, it's
- linked with other chunks via free_next. When the chunk is allocated,
- the data starts at u. Large chunks are allocated one at a time to
- their own page, and so don't come in here.
- The "type" field is a placeholder for a future change to do
- generational collection. At present it is 0 when free and
- and 1 when allocated. */
+/* This structure manages small free chunks. The SIZE field is only
+ initialized if the chunk is in the "other" sized free list. Large
+ chunks are allocated one at a time to their own page, and so don't
+ come in here. */
struct alloc_chunk {
-#ifdef COOKIE_CHECKING
- unsigned int magic;
-#endif
- unsigned int type:1;
- unsigned int typecode:14;
- unsigned int large:1;
- unsigned int size:15;
- unsigned int mark:1;
- union {
- struct alloc_chunk *next_free;
- char data[1];
+ struct alloc_chunk *next_free;
+ unsigned int size;
+};
- /* Make sure the data is sufficiently aligned. */
- HOST_WIDEST_INT align_i;
-#ifdef HAVE_LONG_DOUBLE
- long double align_d;
-#else
- double align_d;
+/* The size of the fixed-size portion of a small page descriptor. */
+#define PAGE_OVERHEAD (offsetof (struct small_page_entry, alloc_bits))
+
+/* The collector's idea of the page size. This must be a power of two
+ no larger than the system page size, because pages must be aligned
+ to this amount and are tracked at this granularity in the page
+ table. We choose a size at compile time for efficiency.
+
+ We could make a better guess at compile time if PAGE_SIZE is a
+ constant in system headers, and PAGE_SHIFT is defined... */
+#define GGC_PAGE_SIZE 4096
+#define GGC_PAGE_MASK (GGC_PAGE_SIZE - 1)
+#define GGC_PAGE_SHIFT 12
+
+#if 0
+/* Alternative definitions which use the runtime page size. */
+#define GGC_PAGE_SIZE G.pagesize
+#define GGC_PAGE_MASK G.page_mask
+#define GGC_PAGE_SHIFT G.lg_pagesize
#endif
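
Because GGC_PAGE_SIZE is a compile-time power of two, the page base and
in-page offset of any GC pointer fall out of a single mask.  A small
sketch assuming the 4096-byte default (the ex_* names are ours, not the
patch's):

#include <stdint.h>
#include <stddef.h>

#define EX_PAGE_SIZE 4096
#define EX_PAGE_MASK (EX_PAGE_SIZE - 1)

/* The start of the page containing P. */
static char *
ex_page_base (const void *p)
{
  return (char *) ((uintptr_t) p & ~(uintptr_t) EX_PAGE_MASK);
}

/* P's byte offset within its page. */
static size_t
ex_page_offset (const void *p)
{
  return (size_t) ((uintptr_t) p & EX_PAGE_MASK);
}
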
- } u;
-} __attribute__ ((packed));
-#define CHUNK_OVERHEAD (offsetof (struct alloc_chunk, u))
+/* The size of a small page managed by the garbage collector. This
+ must currently be GGC_PAGE_SIZE, but with a few changes could
+ be any multiple of it to reduce certain kinds of overhead. */
+#define SMALL_PAGE_SIZE GGC_PAGE_SIZE
-/* We maintain several bins of free lists for chunks for very small
- objects. We never exhaustively search other bins -- if we don't
- find one of the proper size, we allocate from the "larger" bin. */
+/* Free bin information. These numbers may be in need of re-tuning.
+ In general, decreasing the number of free bins would seem to
+ increase the time it takes to allocate... */
-/* Decreasing the number of free bins increases the time it takes to allocate.
- Similar with increasing max_free_bin_size without increasing num_free_bins.
+/* FIXME: We can't use anything but MAX_ALIGNMENT for the bin size
+ today. */
- After much histogramming of allocation sizes and time spent on gc,
- on a PowerPC G4 7450 - 667 mhz, and a Pentium 4 - 2.8ghz,
- these were determined to be the optimal values. */
#define NUM_FREE_BINS 64
-#define MAX_FREE_BIN_SIZE 256
-#define FREE_BIN_DELTA (MAX_FREE_BIN_SIZE / NUM_FREE_BINS)
-#define SIZE_BIN_UP(SIZE) (((SIZE) + FREE_BIN_DELTA - 1) / FREE_BIN_DELTA)
+#define FREE_BIN_DELTA MAX_ALIGNMENT
#define SIZE_BIN_DOWN(SIZE) ((SIZE) / FREE_BIN_DELTA)
-/* Marker used as chunk->size for a large object. Should correspond
- to the size of the bitfield above. */
-#define LARGE_OBJECT_SIZE 0x7fff
+/* Allocation and marking parameters. */
+
+/* The smallest allocatable unit to keep track of. */
+#define BYTES_PER_ALLOC_BIT MAX_ALIGNMENT
+
+/* The smallest markable unit. If we require each allocated object
+ to contain at least two allocatable units, we can use half as many
+ bits for the mark bitmap. But this adds considerable complexity
+ to sweeping. */
+#define BYTES_PER_MARK_BIT BYTES_PER_ALLOC_BIT
+
+#define BYTES_PER_MARK_WORD (8 * BYTES_PER_MARK_BIT * sizeof (mark_type))
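
To make the arithmetic concrete: assuming MAX_ALIGNMENT is 8 and a
32-bit mark_type (plausible but illustrative values), the constants
work out as follows:

enum
{
  EX_BYTES_PER_MARK_BIT  = 8,                              /* = MAX_ALIGNMENT */
  EX_BYTES_PER_MARK_WORD = 8 * EX_BYTES_PER_MARK_BIT * 4,  /* = 256 */
  EX_MARK_WORDS_PER_PAGE = 4096 / EX_BYTES_PER_MARK_WORD   /* = 16 */
};

That is, one 32-bit mark word covers 256 bytes, and a 4096-byte page
needs 16 of them.
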
/* We use this structure to determine the alignment required for
- allocations. For power-of-two sized allocations, that's not a
- problem, but it does matter for odd-sized allocations. */
+ allocations.
+
+ There are several things wrong with this estimation of alignment.
+
+ The maximum alignment for a structure is often less than the
+ maximum alignment for a basic data type; for instance, on some
+ targets long long must be aligned to sizeof (int) in a structure
+ and sizeof (long long) in a variable. i386-linux is one example;
+ Darwin is another (sometimes, depending on the compiler in use).
+
+ Also, long double is not included. Nothing in GCC uses long
+ double, so we assume that this is OK. On powerpc-darwin, adding
+ long double would bring the maximum alignment up to 16 bytes,
+ and until we need long double (or to vectorize compiler operations)
+ that's painfully wasteful. This will need to change, some day. */
struct max_alignment {
char c;
union {
HOST_WIDEST_INT i;
-#ifdef HAVE_LONG_DOUBLE
- long double d;
-#else
double d;
-#endif
} u;
};
@@ -205,43 +220,128 @@ struct max_alignment {
#define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
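
The offsetof trick works because the compiler must pad the leading char
out to the union's alignment; a standalone demonstration (the struct
name is ours):

#include <stdio.h>
#include <stddef.h>

struct ex_align
{
  char c;
  union { long long i; double d; } u;
};

int
main (void)
{
  /* The padding after C equals the strictest alignment the union
     members require; on typical LP64 targets this prints 8.  */
  printf ("%zu\n", offsetof (struct ex_align, u));
  return 0;
}
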
-/* Compute the smallest nonnegative number which when added to X gives
- a multiple of F. */
-
-#define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
-
/* Compute the smallest multiple of F that is >= X. */
#define ROUND_UP(x, f) (CEIL (x, f) * (f))
+/* Types to use for the allocation and mark bitmaps. It might be
+ a good idea to add ffsl to libiberty and use unsigned long
+ instead; that could speed us up where long is wider than int. */
-/* A page_entry records the status of an allocation page. */
+typedef unsigned int alloc_type;
+typedef unsigned int mark_type;
+#define alloc_ffs(x) ffs(x)
+
+/* A page_entry records the status of an allocation page. This is the
+ common data between all three kinds of pages - small, large, and
+ PCH. */
typedef struct page_entry
{
- /* The next page-entry with objects of the same size, or NULL if
- this is the last page-entry. */
- struct page_entry *next;
+ /* The address at which the memory is allocated. */
+ char *page;
- /* The number of bytes allocated. (This will always be a multiple
- of the host system page size.) */
- size_t bytes;
+ /* The zone that this page entry belongs to. */
+ struct alloc_zone *zone;
+#ifdef GATHER_STATISTICS
/* How many collections we've survived. */
size_t survived;
-
- /* The address at which the memory is allocated. */
- char *page;
-
- /* Context depth of this page. */
- unsigned short context_depth;
+#endif
/* Does this page contain small objects, or one large object? */
bool large_p;
- /* The zone that this page entry belongs to. */
- struct alloc_zone *zone;
+ /* Is this page part of the loaded PCH? */
+ bool pch_p;
} page_entry;
+/* Additional data needed for small pages. */
+struct small_page_entry
+{
+ struct page_entry common;
+
+ /* The next small page entry, or NULL if this is the last. */
+ struct small_page_entry *next;
+
+ /* If currently marking this zone, a pointer to the mark bits
+ for this page. If we aren't currently marking this zone,
+ this pointer may be stale (pointing to freed memory). */
+ mark_type *mark_bits;
+
+ /* The allocation bitmap. This array extends far enough to have
+ one bit for every BYTES_PER_ALLOC_BIT bytes in the page. */
+ alloc_type alloc_bits[1];
+};
+
+/* Additional data needed for large pages. */
+struct large_page_entry
+{
+ struct page_entry common;
+
+ /* The next large page entry, or NULL if this is the last. */
+ struct large_page_entry *next;
+
+ /* The number of bytes allocated, not including the page entry. */
+ size_t bytes;
+
+ /* The previous page in the list, so that we can unlink this one. */
+ struct large_page_entry *prev;
+
+ /* During marking, is this object marked? */
+ bool mark_p;
+};
+
+/* A two-level tree is used to look up the page-entry for a given
+ pointer. Two chunks of the pointer's bits are extracted to index
+ the first and second levels of the tree, as follows:
+
+ HOST_PAGE_SIZE_BITS
+ 32 | |
+ msb +----------------+----+------+------+ lsb
+ | | |
+ PAGE_L1_BITS |
+ | |
+ PAGE_L2_BITS
+
+ The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
+ pages are aligned on system page boundaries. The next most
+ significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
+ index values in the lookup table, respectively.
+
+ For 32-bit architectures and the settings below, there are no
+ leftover bits. For architectures with wider pointers, the lookup
+ tree points to a list of pages, which must be scanned to find the
+ correct one. */
+
+#define PAGE_L1_BITS (8)
+#define PAGE_L2_BITS (32 - PAGE_L1_BITS - GGC_PAGE_SHIFT)
+#define PAGE_L1_SIZE ((size_t) 1 << PAGE_L1_BITS)
+#define PAGE_L2_SIZE ((size_t) 1 << PAGE_L2_BITS)
+
+#define LOOKUP_L1(p) \
+ (((size_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
+
+#define LOOKUP_L2(p) \
+ (((size_t) (p) >> GGC_PAGE_SHIFT) & ((1 << PAGE_L2_BITS) - 1))
+
+#if HOST_BITS_PER_PTR <= 32
+
+/* On 32-bit hosts, we use a two level page table, as pictured above. */
+typedef page_entry **page_table[PAGE_L1_SIZE];
+
+#else
+
+/* On 64-bit hosts, we use the same two level page tables plus a linked
+ list that disambiguates the top 32-bits. There will almost always be
+ exactly one entry in the list. */
+typedef struct page_table_chain
+{
+ struct page_table_chain *next;
+ size_t high_bits;
+ page_entry **table[PAGE_L1_SIZE];
+} *page_table;
+
+#endif
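
A worked example of the index extraction, assuming the defaults above
(PAGE_L1_BITS = 8, GGC_PAGE_SHIFT = 12, hence PAGE_L2_BITS = 12); the
address is arbitrary:

#include <assert.h>
#include <stddef.h>

int
main (void)
{
  size_t p = 0xb7c4d123;

  size_t l1 = (p >> 24) & 0xff;     /* top 8 bits:   0xb7  */
  size_t l2 = (p >> 12) & 0xfff;    /* next 12 bits: 0xc4d */
  /* The low 12 bits (0x123) are the offset within the page and
     are ignored by the lookup.  */

  assert (l1 == 0xb7 && l2 == 0xc4d);
  return 0;
}
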
/* The global variables. */
static struct globals
@@ -249,52 +349,77 @@ static struct globals
/* The linked list of zones. */
struct alloc_zone *zones;
- /* The system's page size. */
+ /* Lookup table for associating allocation pages with object addresses. */
+ page_table lookup;
+
+ /* The system's page size, and related constants. */
size_t pagesize;
size_t lg_pagesize;
+ size_t page_mask;
+
+ /* The size to allocate for a small page entry. This includes
+ the size of the structure and the size of the allocation
+ bitmap. */
+ size_t small_page_overhead;
- /* A file descriptor open to /dev/zero for reading. */
#if defined (HAVE_MMAP_DEV_ZERO)
+ /* A file descriptor open to /dev/zero for reading. */
int dev_zero_fd;
#endif
+ /* Allocate pages in chunks of this size, to throttle calls to memory
+ allocation routines. The first page is used, the rest go onto the
+ free list. */
+ size_t quire_size;
+
/* The file descriptor for debugging output. */
FILE *debug_file;
} G;
-/* The zone allocation structure. */
+/* A zone allocation structure. There is one of these for every
+ distinct allocation zone. */
struct alloc_zone
{
- /* Name of the zone. */
- const char *name;
-
- /* Linked list of pages in a zone. */
- page_entry *pages;
+ /* The most recent free chunk is saved here, instead of in the linked
+ free list, to decrease list manipulation. It is most likely that we
+ will want this one. */
+ char *cached_free;
+ size_t cached_free_size;
/* Linked lists of free storage. Slots 1 ... NUM_FREE_BINS have chunks of size
FREE_BIN_DELTA. All other chunks are in slot 0. */
struct alloc_chunk *free_chunks[NUM_FREE_BINS + 1];
- /* Bytes currently allocated. */
+ /* The highest bin index which might be non-empty. It may turn out
+ to be empty, in which case we have to search downwards. */
+ size_t high_free_bin;
+
+ /* Bytes currently allocated in this zone. */
size_t allocated;
- /* Bytes currently allocated at the end of the last collection. */
- size_t allocated_last_gc;
+ /* Linked list of the small pages in this zone. */
+ struct small_page_entry *pages;
- /* Total amount of memory mapped. */
- size_t bytes_mapped;
+ /* Doubly linked list of large pages in this zone. */
+ struct large_page_entry *large_pages;
- /* Bit N set if any allocations have been done at context depth N. */
- unsigned long context_depth_allocations;
+ /* If we are currently marking this zone, a pointer to the mark bits. */
+ mark_type *mark_bits;
- /* Bit N set if any collections have been done at context depth N. */
- unsigned long context_depth_collections;
+ /* Name of the zone. */
+ const char *name;
+
+ /* The number of small pages currently allocated in this zone. */
+ size_t n_small_pages;
- /* The current depth in the context stack. */
- unsigned short context_depth;
+ /* Bytes allocated at the end of the last collection. */
+ size_t allocated_last_gc;
+
+ /* Total amount of memory mapped. */
+ size_t bytes_mapped;
/* A cache of free system pages. */
- page_entry *free_pages;
+ struct small_page_entry *free_pages;
/* Next zone in the linked list of zones. */
struct alloc_zone *next_zone;
@@ -304,49 +429,335 @@ struct alloc_zone
/* True if this zone should be destroyed after the next collection. */
bool dead;
+
+#ifdef GATHER_STATISTICS
+ struct
+ {
+ /* Total memory allocated with ggc_alloc. */
+ unsigned long long total_allocated;
+ /* Total overhead for memory to be allocated with ggc_alloc. */
+ unsigned long long total_overhead;
+
+ /* Total allocations and overhead for sizes less than 32, 64 and 128.
+ These sizes are interesting because they are typical cache line
+ sizes. */
+
+ unsigned long long total_allocated_under32;
+ unsigned long long total_overhead_under32;
+
+ unsigned long long total_allocated_under64;
+ unsigned long long total_overhead_under64;
+
+ unsigned long long total_allocated_under128;
+ unsigned long long total_overhead_under128;
+ } stats;
+#endif
} main_zone;
-struct alloc_zone *rtl_zone;
-struct alloc_zone *garbage_zone;
-struct alloc_zone *tree_zone;
+/* Some default zones. */
+struct alloc_zone rtl_zone;
+struct alloc_zone tree_zone;
+struct alloc_zone tree_id_zone;
-/* Allocate pages in chunks of this size, to throttle calls to memory
- allocation routines. The first page is used, the rest go onto the
- free list. This cannot be larger than HOST_BITS_PER_INT for the
- in_use bitmask for page_group. */
-#define GGC_QUIRE_SIZE 16
+/* The PCH zone does not need a normal zone structure, and it does
+ not live on the linked list of zones. */
+struct pch_zone
+{
+ /* The start of the PCH zone. NULL if there is none. */
+ char *page;
+
+ /* The end of the PCH zone. NULL if there is none. */
+ char *end;
+
+ /* The size of the PCH zone. 0 if there is none. */
+ size_t bytes;
+
+ /* The allocation bitmap for the PCH zone. */
+ alloc_type *alloc_bits;
+
+ /* If we are currently marking, the mark bitmap for the PCH zone.
+ When it is first read in, we could avoid marking the PCH,
+ because it will not contain any pointers to GC memory outside
+ of the PCH; however, the PCH is currently mapped as writable,
+ so we must mark it in case new pointers are added. */
+ mark_type *mark_bits;
+} pch_zone;
-static int ggc_allocated_p (const void *);
#ifdef USING_MMAP
static char *alloc_anon (char *, size_t, struct alloc_zone *);
#endif
-static struct page_entry * alloc_small_page ( struct alloc_zone *);
-static struct page_entry * alloc_large_page (size_t, struct alloc_zone *);
-static void free_chunk (struct alloc_chunk *, size_t, struct alloc_zone *);
-static void free_page (struct page_entry *);
+static struct small_page_entry * alloc_small_page (struct alloc_zone *);
+static struct large_page_entry * alloc_large_page (size_t, struct alloc_zone *);
+static void free_chunk (char *, size_t, struct alloc_zone *);
+static void free_small_page (struct small_page_entry *);
+static void free_large_page (struct large_page_entry *);
static void release_pages (struct alloc_zone *);
static void sweep_pages (struct alloc_zone *);
-static void * ggc_alloc_zone_1 (size_t, struct alloc_zone *, short);
static bool ggc_collect_1 (struct alloc_zone *, bool);
-static void check_cookies (void);
+static void new_ggc_zone_1 (struct alloc_zone *, const char *);
+/* Traverse the page table and find the entry for a page.
+ Die (probably) if the object wasn't allocated via GC. */
-/* Returns nonzero if P was allocated in GC'able memory. */
+static inline page_entry *
+lookup_page_table_entry (const void *p)
+{
+ page_entry ***base;
+ size_t L1, L2;
-static inline int
-ggc_allocated_p (const void *p)
+#if HOST_BITS_PER_PTR <= 32
+ base = &G.lookup[0];
+#else
+ page_table table = G.lookup;
+ size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
+ while (table->high_bits != high_bits)
+ table = table->next;
+ base = &table->table[0];
+#endif
+
+ /* Extract the level 1 and 2 indices. */
+ L1 = LOOKUP_L1 (p);
+ L2 = LOOKUP_L2 (p);
+
+ return base[L1][L2];
+}
+
+/* Set the page table entry for the page that starts at P. If ENTRY
+ is NULL, clear the entry. */
+
+static void
+set_page_table_entry (void *p, page_entry *entry)
{
- struct alloc_chunk *chunk;
- chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
-#ifdef COOKIE_CHECKING
- if (chunk->magic != CHUNK_MAGIC)
- abort ();
+ page_entry ***base;
+ size_t L1, L2;
+
+#if HOST_BITS_PER_PTR <= 32
+ base = &G.lookup[0];
+#else
+ page_table table;
+ size_t high_bits = (size_t) p & ~ (size_t) 0xffffffff;
+ for (table = G.lookup; table; table = table->next)
+ if (table->high_bits == high_bits)
+ goto found;
+
+ /* Not found -- allocate a new table. */
+ table = xcalloc (1, sizeof(*table));
+ table->next = G.lookup;
+ table->high_bits = high_bits;
+ G.lookup = table;
+found:
+ base = &table->table[0];
#endif
- if (chunk->type == 1)
- return true;
- return false;
+
+ /* Extract the level 1 and 2 indices. */
+ L1 = LOOKUP_L1 (p);
+ L2 = LOOKUP_L2 (p);
+
+ if (base[L1] == NULL)
+ base[L1] = xcalloc (PAGE_L2_SIZE, sizeof (page_entry *));
+
+ base[L1][L2] = entry;
+}
+
+/* Find the page table entry associated with OBJECT. */
+
+static inline struct page_entry *
+zone_get_object_page (const void *object)
+{
+ return lookup_page_table_entry (object);
+}
+
+/* Find which element of the alloc_bits array OBJECT should be
+ recorded in. */
+static inline unsigned int
+zone_get_object_alloc_word (const void *object)
+{
+ return (((size_t) object & (GGC_PAGE_SIZE - 1))
+ / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
+}
+
+/* Find which bit of the appropriate word in the alloc_bits array
+ OBJECT should be recorded in. */
+static inline unsigned int
+zone_get_object_alloc_bit (const void *object)
+{
+ return (((size_t) object / BYTES_PER_ALLOC_BIT)
+ % (8 * sizeof (alloc_type)));
+}
+
+/* Find which element of the mark_bits array OBJECT should be recorded
+ in. */
+static inline unsigned int
+zone_get_object_mark_word (const void *object)
+{
+ return (((size_t) object & (GGC_PAGE_SIZE - 1))
+ / (8 * sizeof (mark_type) * BYTES_PER_MARK_BIT));
+}
+
+/* Find which bit of the appropriate word in the mark_bits array
+ OBJECT should be recorded in. */
+static inline unsigned int
+zone_get_object_mark_bit (const void *object)
+{
+ return (((size_t) object / BYTES_PER_MARK_BIT)
+ % (8 * sizeof (mark_type)));
+}
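
For example, assuming BYTES_PER_ALLOC_BIT is 8 and a 32-bit alloc_type,
an object at byte offset 0x300 within its page occupies allocation unit
96, which is bit 0 of word 3.  The sketch below just re-derives those
numbers:

#include <assert.h>
#include <stddef.h>

int
main (void)
{
  size_t offset = 0x300;        /* byte offset within the page */
  size_t unit = offset / 8;     /* allocation unit index: 96 */
  size_t word = unit / 32;      /* index into alloc_bits: 3 */
  size_t bit  = unit % 32;      /* bit within that word: 0 */

  assert (word == 3 && bit == 0);
  return 0;
}
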
+
+/* Set the allocation bit corresponding to OBJECT in its page's
+ bitmap. Used to split this object from the preceding one. */
+static inline void
+zone_set_object_alloc_bit (const void *object)
+{
+ struct small_page_entry *page
+ = (struct small_page_entry *) zone_get_object_page (object);
+ unsigned int start_word = zone_get_object_alloc_word (object);
+ unsigned int start_bit = zone_get_object_alloc_bit (object);
+
+ page->alloc_bits[start_word] |= 1L << start_bit;
}
+/* Clear the allocation bit corresponding to OBJECT in PAGE's
+ bitmap. Used to coalesce this object with the preceding
+ one. */
+static inline void
+zone_clear_object_alloc_bit (struct small_page_entry *page,
+ const void *object)
+{
+ unsigned int start_word = zone_get_object_alloc_word (object);
+ unsigned int start_bit = zone_get_object_alloc_bit (object);
+
+ /* Would xor be quicker? */
+ page->alloc_bits[start_word] &= ~(1L << start_bit);
+}
+
+/* Find the size of the object which starts at START_WORD and
+ START_BIT in ALLOC_BITS, which is at most MAX_SIZE bytes.
+ Helper function for ggc_get_size and zone_find_object_size. */
+
+static inline size_t
+zone_object_size_1 (alloc_type *alloc_bits,
+ size_t start_word, size_t start_bit,
+ size_t max_size)
+{
+ size_t size;
+ alloc_type alloc_word;
+ int indx;
+
+ /* Load the first word. */
+ alloc_word = alloc_bits[start_word++];
+
+ /* If that was the last bit in this word, we'll want to continue
+ with the next word. Otherwise, handle the rest of this word. */
+ if (start_bit)
+ {
+ indx = alloc_ffs (alloc_word >> start_bit);
+ if (indx)
+ /* indx is 1-based. We started at the bit after the object's
+ start, but we also ended at the bit after the object's end.
+ It cancels out. */
+ return indx * BYTES_PER_ALLOC_BIT;
+
+ /* The extra 1 accounts for the starting unit, before start_bit. */
+ size = (sizeof (alloc_type) * 8 - start_bit + 1) * BYTES_PER_ALLOC_BIT;
+
+ if (size >= max_size)
+ return max_size;
+
+ alloc_word = alloc_bits[start_word++];
+ }
+ else
+ size = BYTES_PER_ALLOC_BIT;
+
+ while (alloc_word == 0)
+ {
+ size += sizeof (alloc_type) * 8 * BYTES_PER_ALLOC_BIT;
+ if (size >= max_size)
+ return max_size;
+ alloc_word = alloc_bits[start_word++];
+ }
+
+ indx = alloc_ffs (alloc_word);
+ return size + (indx - 1) * BYTES_PER_ALLOC_BIT;
+}
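
The common fast path of this function -- this object's start bit and the
next object's start bit landing in the same bitmap word -- can be shown
in isolation.  A sketch assuming 8-byte allocation units and ffs from
<strings.h>:

#include <strings.h>   /* ffs */
#include <stddef.h>

/* Size of the object whose start bit is START within the single
   bitmap word WORD, assuming the next object also starts in WORD.
   ffs is 1-based, which exactly cancels starting the scan one unit
   past the object's own start bit.  */
static size_t
ex_object_size (unsigned int word, unsigned int start)
{
  return ffs (word >> (start + 1)) * 8;  /* 8 = BYTES_PER_ALLOC_BIT */
}

/* ex_object_size (0x11, 0) == 32: bits 0 and 4 set, so the object
   spans four 8-byte allocation units.  */
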
+
+/* Find the size of OBJECT on small page PAGE. */
+
+static inline size_t
+zone_find_object_size (struct small_page_entry *page,
+ const void *object)
+{
+ const char *object_midptr = (const char *) object + BYTES_PER_ALLOC_BIT;
+ unsigned int start_word = zone_get_object_alloc_word (object_midptr);
+ unsigned int start_bit = zone_get_object_alloc_bit (object_midptr);
+ size_t max_size = (page->common.page + SMALL_PAGE_SIZE
+ - (char *) object);
+
+ return zone_object_size_1 (page->alloc_bits, start_word, start_bit,
+ max_size);
+}
+
+/* Allocate the mark bits for every zone, and set the pointers on each
+ page. */
+static void
+zone_allocate_marks (void)
+{
+ struct alloc_zone *zone;
+
+ for (zone = G.zones; zone; zone = zone->next_zone)
+ {
+ struct small_page_entry *page;
+ mark_type *cur_marks;
+ size_t mark_words, mark_words_per_page;
+#ifdef ENABLE_CHECKING
+ size_t n = 0;
+#endif
+
+ mark_words_per_page
+ = (GGC_PAGE_SIZE + BYTES_PER_MARK_WORD - 1) / BYTES_PER_MARK_WORD;
+ mark_words = zone->n_small_pages * mark_words_per_page;
+ zone->mark_bits = (mark_type *) xcalloc (sizeof (mark_type),
+ mark_words);
+ cur_marks = zone->mark_bits;
+ for (page = zone->pages; page; page = page->next)
+ {
+ page->mark_bits = cur_marks;
+ cur_marks += mark_words_per_page;
+#ifdef ENABLE_CHECKING
+ n++;
+#endif
+ }
+#ifdef ENABLE_CHECKING
+ gcc_assert (n == zone->n_small_pages);
+#endif
+ }
+
+ /* We don't collect the PCH zone, but we do have to mark it
+ (for now). */
+ if (pch_zone.bytes)
+ pch_zone.mark_bits
+ = (mark_type *) xcalloc (sizeof (mark_type),
+ CEIL (pch_zone.bytes, BYTES_PER_MARK_WORD));
+}
+
+/* After marking and sweeping, release the memory used for mark bits. */
+static void
+zone_free_marks (void)
+{
+ struct alloc_zone *zone;
+
+ for (zone = G.zones; zone; zone = zone->next_zone)
+ if (zone->mark_bits)
+ {
+ free (zone->mark_bits);
+ zone->mark_bits = NULL;
+ }
+
+ if (pch_zone.bytes)
+ {
+ free (pch_zone.mark_bits);
+ pch_zone.mark_bits = NULL;
+ }
+}
#ifdef USING_MMAP
/* Allocate SIZE bytes of anonymous memory, preferably near PREF,
@@ -364,7 +775,6 @@ alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE, G.dev_zero_fd, 0);
#endif
- VALGRIND_MALLOCLIKE_BLOCK(page, size, 0, 0);
if (page == (char *) MAP_FAILED)
{
@@ -374,24 +784,23 @@ alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, struct alloc_zone *zone)
/* Remember that we allocated this memory. */
zone->bytes_mapped += size;
+
/* Pretend we don't have access to the allocated pages. We'll enable
access to smaller pieces of the area in ggc_alloc. Discard the
handle to avoid handle leak. */
VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (page, size));
+
return page;
}
#endif
-/* Allocate a new page for allocating objects of size 2^ORDER,
- and return an entry for it. */
+/* Allocate a new page for allocating small objects in ZONE, and
+ return an entry for it. */
-static inline struct page_entry *
+static struct small_page_entry *
alloc_small_page (struct alloc_zone *zone)
{
- struct page_entry *entry;
- char *page;
-
- page = NULL;
+ struct small_page_entry *entry;
/* Check the list of free pages for one we can use. */
entry = zone->free_pages;
@@ -399,80 +808,82 @@ alloc_small_page (struct alloc_zone *zone)
{
/* Recycle the allocated memory from this page ... */
zone->free_pages = entry->next;
- page = entry->page;
-
-
}
-#ifdef USING_MMAP
else
{
/* We want just one page. Allocate a bunch of them and put the
extras on the freelist. (Can only do this optimization with
mmap for backing store.) */
- struct page_entry *e, *f = zone->free_pages;
+ struct small_page_entry *e, *f = zone->free_pages;
int i;
+ char *page;
- page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, zone);
+ page = alloc_anon (NULL, GGC_PAGE_SIZE * G.quire_size, zone);
/* This loop counts down so that the chain will be in ascending
memory order. */
- for (i = GGC_QUIRE_SIZE - 1; i >= 1; i--)
+ for (i = G.quire_size - 1; i >= 1; i--)
{
- e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
- e->bytes = G.pagesize;
- e->page = page + (i << G.lg_pagesize);
+ e = xcalloc (1, G.small_page_overhead);
+ e->common.page = page + (i << GGC_PAGE_SHIFT);
+ e->common.zone = zone;
e->next = f;
f = e;
+ set_page_table_entry (e->common.page, &e->common);
}
zone->free_pages = f;
+
+ entry = xcalloc (1, G.small_page_overhead);
+ entry->common.page = page;
+ entry->common.zone = zone;
+ set_page_table_entry (page, &entry->common);
}
-#endif
- if (entry == NULL)
- entry = (struct page_entry *) xmalloc (sizeof (struct page_entry));
- entry->next = 0;
- entry->bytes = G.pagesize;
- entry->page = page;
- entry->context_depth = zone->context_depth;
- entry->large_p = false;
- entry->zone = zone;
- zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
+ zone->n_small_pages++;
if (GGC_DEBUG_LEVEL >= 2)
fprintf (G.debug_file,
- "Allocating %s page at %p, data %p-%p\n", entry->zone->name,
- (PTR) entry, page, page + G.pagesize - 1);
+ "Allocating %s page at %p, data %p-%p\n",
+ entry->common.zone->name, (PTR) entry, entry->common.page,
+ entry->common.page + SMALL_PAGE_SIZE - 1);
return entry;
}
-/* Compute the smallest multiple of F that is >= X. */
-
-#define ROUND_UP(x, f) (CEIL (x, f) * (f))
/* Allocate a large page of size SIZE in ZONE. */
-static inline struct page_entry *
+static struct large_page_entry *
alloc_large_page (size_t size, struct alloc_zone *zone)
{
- struct page_entry *entry;
+ struct large_page_entry *entry;
char *page;
- size = ROUND_UP (size, 1024);
- page = (char *) xmalloc (size + CHUNK_OVERHEAD + sizeof (struct page_entry));
- entry = (struct page_entry *) (page + size + CHUNK_OVERHEAD);
+ size_t needed_size;
- entry->next = 0;
+ needed_size = size + sizeof (struct large_page_entry);
+ page = xmalloc (needed_size);
+
+ entry = (struct large_page_entry *) page;
+
+ entry->next = NULL;
+ entry->common.page = page + sizeof (struct large_page_entry);
+ entry->common.large_p = true;
+ entry->common.pch_p = false;
+ entry->common.zone = zone;
+#ifdef GATHER_STATISTICS
+ entry->common.survived = 0;
+#endif
+ entry->mark_p = false;
entry->bytes = size;
- entry->page = page;
- entry->context_depth = zone->context_depth;
- entry->large_p = true;
- entry->zone = zone;
- zone->context_depth_allocations |= (unsigned long)1 << zone->context_depth;
+ entry->prev = NULL;
+
+ set_page_table_entry (entry->common.page, &entry->common);
if (GGC_DEBUG_LEVEL >= 2)
fprintf (G.debug_file,
- "Allocating %s large page at %p, data %p-%p\n", entry->zone->name,
- (PTR) entry, page, page + size - 1);
+ "Allocating %s large page at %p, data %p-%p\n",
+ entry->common.zone->name, (PTR) entry, entry->common.page,
+ entry->common.page + SMALL_PAGE_SIZE - 1);
return entry;
}
@@ -481,27 +892,41 @@ alloc_large_page (size_t size, struct alloc_zone *zone)
/* For a page that is no longer needed, put it on the free page list. */
static inline void
-free_page (page_entry *entry)
+free_small_page (struct small_page_entry *entry)
{
if (GGC_DEBUG_LEVEL >= 2)
fprintf (G.debug_file,
- "Deallocating %s page at %p, data %p-%p\n", entry->zone->name, (PTR) entry,
- entry->page, entry->page + entry->bytes - 1);
+ "Deallocating %s page at %p, data %p-%p\n",
+ entry->common.zone->name, (PTR) entry,
+ entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);
- if (entry->large_p)
- {
- free (entry->page);
- VALGRIND_FREELIKE_BLOCK (entry->page, entry->bytes);
- }
- else
- {
- /* Mark the page as inaccessible. Discard the handle to
- avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->page, entry->bytes));
+ gcc_assert (!entry->common.large_p);
- entry->next = entry->zone->free_pages;
- entry->zone->free_pages = entry;
- }
+ /* Mark the page as inaccessible. Discard the handle to
+ avoid handle leak. */
+ VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (entry->common.page,
+ SMALL_PAGE_SIZE));
+
+ entry->next = entry->common.zone->free_pages;
+ entry->common.zone->free_pages = entry;
+ entry->common.zone->n_small_pages--;
+}
+
+/* Release a large page that is no longer needed. */
+
+static inline void
+free_large_page (struct large_page_entry *entry)
+{
+ if (GGC_DEBUG_LEVEL >= 2)
+ fprintf (G.debug_file,
+ "Deallocating %s page at %p, data %p-%p\n",
+ entry->common.zone->name, (PTR) entry,
+ entry->common.page, entry->common.page + SMALL_PAGE_SIZE - 1);
+
+ gcc_assert (entry->common.large_p);
+
+ set_page_table_entry (entry->common.page, NULL);
+ free (entry);
}
/* Release the free page cache to the system. */
@@ -510,7 +935,7 @@ static void
release_pages (struct alloc_zone *zone)
{
#ifdef USING_MMAP
- page_entry *p, *next;
+ struct small_page_entry *p, *next;
char *start;
size_t len;
@@ -519,17 +944,17 @@ release_pages (struct alloc_zone *zone)
while (p)
{
- start = p->page;
+ start = p->common.page;
next = p->next;
- len = p->bytes;
- free (p);
+ len = SMALL_PAGE_SIZE;
+ set_page_table_entry (p->common.page, NULL);
p = next;
- while (p && p->page == start + len)
+ while (p && p->common.page == start + len)
{
next = p->next;
- len += p->bytes;
- free (p);
+ len += SMALL_PAGE_SIZE;
+ set_page_table_entry (p->common.page, NULL);
p = next;
}
@@ -541,73 +966,157 @@ release_pages (struct alloc_zone *zone)
#endif
}
-/* Place CHUNK of size SIZE on the free list for ZONE. */
+/* Place the block at PTR of size SIZE on the free list for ZONE. */
static inline void
-free_chunk (struct alloc_chunk *chunk, size_t size, struct alloc_zone *zone)
+free_chunk (char *ptr, size_t size, struct alloc_zone *zone)
{
+ struct alloc_chunk *chunk = (struct alloc_chunk *) ptr;
size_t bin = 0;
bin = SIZE_BIN_DOWN (size);
- if (bin == 0)
- abort ();
+ gcc_assert (bin != 0);
if (bin > NUM_FREE_BINS)
- bin = 0;
-#ifdef COOKIE_CHECKING
- if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
- abort ();
- chunk->magic = DEADCHUNK_MAGIC;
-#endif
- chunk->u.next_free = zone->free_chunks[bin];
+ {
+ bin = 0;
+ VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
+ chunk->size = size;
+ chunk->next_free = zone->free_chunks[bin];
+ VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk),
+ size - sizeof (struct alloc_chunk)));
+ }
+ else
+ {
+ VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk *)));
+ chunk->next_free = zone->free_chunks[bin];
+ VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (ptr + sizeof (struct alloc_chunk *),
+ size - sizeof (struct alloc_chunk *)));
+ }
+
zone->free_chunks[bin] = chunk;
+ if (bin > zone->high_free_bin)
+ zone->high_free_bin = bin;
if (GGC_DEBUG_LEVEL >= 3)
fprintf (G.debug_file, "Deallocating object, chunk=%p\n", (void *)chunk);
- VALGRIND_DISCARD (VALGRIND_MAKE_READABLE (chunk, sizeof (struct alloc_chunk)));
}
-/* Allocate a chunk of memory of SIZE bytes. */
+/* Allocate a chunk of memory of at least ORIG_SIZE bytes, in ZONE. */
-static void *
-ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
+void *
+ggc_alloc_zone_stat (size_t orig_size, struct alloc_zone *zone
+ MEM_STAT_DECL)
{
- size_t bin = 0;
- size_t lsize = 0;
- struct page_entry *entry;
- struct alloc_chunk *chunk, *lchunk, **pp;
+ size_t bin;
+ size_t csize;
+ struct small_page_entry *entry;
+ struct alloc_chunk *chunk, **pp;
void *result;
+ size_t size = orig_size;
+
+ /* Make sure that zero-sized allocations get a unique and freeable
+ pointer. */
+ if (size == 0)
+ size = MAX_ALIGNMENT;
+ else
+ size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
+
+ /* Try to allocate the object from several different sources. Each
+ of these cases is responsible for setting RESULT and SIZE to
+ describe the allocated block, before jumping to FOUND. If a
+ chunk is split, the allocate bit for the new chunk should also be
+ set.
+
+ Large objects are handled specially. However, they'll just fail
+ the next couple of conditions, so we can wait to check for them
+ below. The large object case is relatively rare (< 1%), so this
+ is a win. */
+
+ /* First try to split the last chunk we allocated. For best
+ fragmentation behavior it would be better to look for a
+ free bin of the appropriate size for a small object. However,
+ we're unlikely (1% - 7%) to find one, and this gives better
+ locality behavior anyway. This case handles the lion's share
+ of all calls to this function. */
+ if (size <= zone->cached_free_size)
+ {
+ result = zone->cached_free;
+
+ zone->cached_free_size -= size;
+ if (zone->cached_free_size)
+ {
+ zone->cached_free += size;
+ zone_set_object_alloc_bit (zone->cached_free);
+ }
+
+ goto found;
+ }
+
+ /* Next, try to find a free bin of the exactly correct size. */
- /* Align size, so that we're assured of aligned allocations. */
- if (size < FREE_BIN_DELTA)
- size = FREE_BIN_DELTA;
- size = (size + MAX_ALIGNMENT - 1) & -MAX_ALIGNMENT;
+ /* We want to round SIZE up, rather than down, but we know it's
+ already aligned to at least FREE_BIN_DELTA, so we can just
+ shift. */
+ bin = SIZE_BIN_DOWN (size);
- /* Large objects are handled specially. */
- if (size >= G.pagesize - 2*CHUNK_OVERHEAD - FREE_BIN_DELTA)
+ if (bin <= NUM_FREE_BINS
+ && (chunk = zone->free_chunks[bin]) != NULL)
{
- size = ROUND_UP (size, 1024);
- entry = alloc_large_page (size, zone);
- entry->survived = 0;
- entry->next = entry->zone->pages;
- entry->zone->pages = entry;
+ /* We have a chunk of the right size. Pull it off the free list
+ and use it. */
- chunk = (struct alloc_chunk *) entry->page;
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
- chunk->large = 1;
- chunk->size = CEIL (size, 1024);
+ zone->free_chunks[bin] = chunk->next_free;
+
+ /* NOTE: SIZE is only guaranteed to be right if MAX_ALIGNMENT
+ == FREE_BIN_DELTA. */
+ result = chunk;
+
+ /* The allocation bits are already set correctly. HIGH_FREE_BIN
+ may now be wrong, if this was the last chunk in the high bin.
+ Rather than fixing it up now, wait until we need to search
+ the free bins. */
goto found;
}
- /* First look for a tiny object already segregated into its own
- size bucket. */
- bin = SIZE_BIN_UP (size);
- if (bin <= NUM_FREE_BINS)
+ /* Next, if there wasn't a chunk of the ideal size, look for a chunk
+ to split. We can find one in the too-big bin, or in the largest
+ sized bin with a chunk in it. Try the largest normal-sized bin
+ first. */
+
+ if (zone->high_free_bin > bin)
{
- chunk = zone->free_chunks[bin];
- if (chunk)
+ /* Find the highest numbered free bin. It will be at or below
+ the watermark. */
+ while (zone->high_free_bin > bin
+ && zone->free_chunks[zone->high_free_bin] == NULL)
+ zone->high_free_bin--;
+
+ if (zone->high_free_bin > bin)
{
- zone->free_chunks[bin] = chunk->u.next_free;
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
+ size_t tbin = zone->high_free_bin;
+ chunk = zone->free_chunks[tbin];
+
+ /* Remove the chunk from its previous bin. */
+ zone->free_chunks[tbin] = chunk->next_free;
+
+ result = (char *) chunk;
+
+ /* Save the rest of the chunk for future allocation. */
+ if (zone->cached_free_size)
+ free_chunk (zone->cached_free, zone->cached_free_size, zone);
+
+ chunk = (struct alloc_chunk *) ((char *) result + size);
+ zone->cached_free = (char *) chunk;
+ zone->cached_free_size = (tbin - bin) * FREE_BIN_DELTA;
+
+ /* Mark the new free chunk as an object, so that we can
+ find the size of the newly allocated object. */
+ zone_set_object_alloc_bit (chunk);
+
+ /* HIGH_FREE_BIN may now be wrong, if this was the last
+ chunk in the high bin. Rather than fixing it up now,
+ wait until we need to search the free bins. */
+
goto found;
}
}
@@ -618,80 +1127,141 @@ ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
chunk = *pp;
while (chunk && chunk->size < size)
{
- pp = &chunk->u.next_free;
+ pp = &chunk->next_free;
chunk = *pp;
}
- /* Failing that, allocate new storage. */
- if (!chunk)
+ if (chunk)
{
- entry = alloc_small_page (zone);
- entry->next = entry->zone->pages;
- entry->zone->pages = entry;
+ /* Remove the chunk from its previous bin. */
+ *pp = chunk->next_free;
- chunk = (struct alloc_chunk *) entry->page;
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
- chunk->size = G.pagesize - CHUNK_OVERHEAD;
- chunk->large = 0;
+ result = (char *) chunk;
+
+ /* Save the rest of the chunk for future allocation, if there's any
+ left over. */
+ csize = chunk->size;
+ if (csize > size)
+ {
+ if (zone->cached_free_size)
+ free_chunk (zone->cached_free, zone->cached_free_size, zone);
+
+ chunk = (struct alloc_chunk *) ((char *) result + size);
+ zone->cached_free = (char *) chunk;
+ zone->cached_free_size = csize - size;
+
+ /* Mark the new free chunk as an object. */
+ zone_set_object_alloc_bit (chunk);
+ }
+
+ goto found;
}
- else
+
+ /* Handle large allocations. We could choose any threshold between
+ GGC_PAGE_SIZE - sizeof (struct large_page_entry) and
+ GGC_PAGE_SIZE. It can't be smaller, because then it wouldn't
+ be guaranteed to have a unique entry in the lookup table. Large
+ allocations will always fall through to here. */
+ if (size > GGC_PAGE_SIZE)
{
- *pp = chunk->u.next_free;
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
- chunk->large = 0;
+ struct large_page_entry *entry = alloc_large_page (size, zone);
+
+#ifdef GATHER_STATISTICS
+ entry->common.survived = 0;
+#endif
+
+ entry->next = zone->large_pages;
+ if (zone->large_pages)
+ zone->large_pages->prev = entry;
+ zone->large_pages = entry;
+
+ result = entry->common.page;
+
+ goto found;
}
- /* Release extra memory from a chunk that's too big. */
- lsize = chunk->size - size;
- if (lsize >= CHUNK_OVERHEAD + FREE_BIN_DELTA)
+
+ /* Failing everything above, allocate a new small page. */
+
+ entry = alloc_small_page (zone);
+ entry->next = zone->pages;
+ zone->pages = entry;
+
+ /* Mark the first chunk in the new page. */
+ entry->alloc_bits[0] = 1;
+
+ result = entry->common.page;
+ if (size < SMALL_PAGE_SIZE)
{
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (chunk, sizeof (struct alloc_chunk)));
- chunk->size = size;
+ if (zone->cached_free_size)
+ free_chunk (zone->cached_free, zone->cached_free_size, zone);
- lsize -= CHUNK_OVERHEAD;
- lchunk = (struct alloc_chunk *)(chunk->u.data + size);
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (lchunk, sizeof (struct alloc_chunk)));
-#ifdef COOKIE_CHECKING
- lchunk->magic = CHUNK_MAGIC;
-#endif
- lchunk->type = 0;
- lchunk->mark = 0;
- lchunk->size = lsize;
- lchunk->large = 0;
- free_chunk (lchunk, lsize, zone);
+ zone->cached_free = (char *) result + size;
+ zone->cached_free_size = SMALL_PAGE_SIZE - size;
+
+ /* Mark the new free chunk as an object. */
+ zone_set_object_alloc_bit (zone->cached_free);
}
- /* Calculate the object's address. */
+
found:
-#ifdef COOKIE_CHECKING
- chunk->magic = CHUNK_MAGIC;
-#endif
- chunk->type = 1;
- chunk->mark = 0;
- chunk->typecode = type;
- result = chunk->u.data;
-#ifdef ENABLE_GC_CHECKING
- /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
- exact same semantics in presence of memory bugs, regardless of
- ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
- handle to avoid handle leak. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
+ /* We could save TYPE in the chunk, but we don't use that for
+ anything yet. If we wanted to, we could do it by adding it
+ either before the beginning of the chunk or after its end,
+ and adjusting the size and pointer appropriately. */
+ /* We'll probably write to this after we return. */
+ prefetchw (result);
+
+#ifdef ENABLE_GC_CHECKING
/* `Poison' the entire allocated object. */
+ VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
memset (result, 0xaf, size);
+ VALGRIND_DISCARD (VALGRIND_MAKE_NOACCESS (result + orig_size,
+ size - orig_size));
#endif
/* Tell Valgrind that the memory is there, but its content isn't
defined. The bytes at the end of the object are still marked
inaccessible. */
- VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, size));
+ VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (result, orig_size));
/* Keep track of how many bytes are being allocated. This
information is used in deciding when to collect. */
- zone->allocated += size + CHUNK_OVERHEAD;
+ zone->allocated += size;
+
+ timevar_ggc_mem_total += size;
+
+#ifdef GATHER_STATISTICS
+ ggc_record_overhead (orig_size, size - orig_size, result PASS_MEM_STAT);
+
+ {
+ size_t object_size = size;
+ size_t overhead = object_size - orig_size;
+
+ zone->stats.total_overhead += overhead;
+ zone->stats.total_allocated += object_size;
+
+ if (orig_size <= 32)
+ {
+ zone->stats.total_overhead_under32 += overhead;
+ zone->stats.total_allocated_under32 += object_size;
+ }
+ if (orig_size <= 64)
+ {
+ zone->stats.total_overhead_under64 += overhead;
+ zone->stats.total_allocated_under64 += object_size;
+ }
+ if (orig_size <= 128)
+ {
+ zone->stats.total_overhead_under128 += overhead;
+ zone->stats.total_allocated_under128 += object_size;
+ }
+ }
+#endif
if (GGC_DEBUG_LEVEL >= 3)
- fprintf (G.debug_file, "Allocating object, chunk=%p size=%lu at %p\n",
- (void *)chunk, (unsigned long) size, result);
+ fprintf (G.debug_file, "Allocating object, size=%lu at %p\n",
+ (unsigned long) size, result);
return result;
}
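
The size rounding at the top of ggc_alloc_zone_stat depends on
MAX_ALIGNMENT being a power of two; a worked instance with an assumed
alignment of 8:

#include <stddef.h>

static size_t
ex_round_up (size_t size)
{
  return (size + 8 - 1) & (size_t) -8;  /* 8 = MAX_ALIGNMENT */
}

/* ex_round_up (37) == 40 and ex_round_up (40) == 40: -8 is the mask
   ...111111000, so the low three bits are cleared after the bump.  */
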
@@ -700,38 +1270,91 @@ ggc_alloc_zone_1 (size_t size, struct alloc_zone *zone, short type)
for that type. */
void *
-ggc_alloc_typed (enum gt_types_enum gte, size_t size)
+ggc_alloc_typed_stat (enum gt_types_enum gte, size_t size
+ MEM_STAT_DECL)
{
switch (gte)
{
case gt_ggc_e_14lang_tree_node:
- return ggc_alloc_zone_1 (size, tree_zone, gte);
+ return ggc_alloc_zone_pass_stat (size, &tree_zone);
case gt_ggc_e_7rtx_def:
- return ggc_alloc_zone_1 (size, rtl_zone, gte);
+ return ggc_alloc_zone_pass_stat (size, &rtl_zone);
case gt_ggc_e_9rtvec_def:
- return ggc_alloc_zone_1 (size, rtl_zone, gte);
+ return ggc_alloc_zone_pass_stat (size, &rtl_zone);
default:
- return ggc_alloc_zone_1 (size, &main_zone, gte);
+ return ggc_alloc_zone_pass_stat (size, &main_zone);
}
}
/* Normal ggc_alloc simply allocates into the main zone. */
void *
-ggc_alloc (size_t size)
+ggc_alloc_stat (size_t size MEM_STAT_DECL)
{
- return ggc_alloc_zone_1 (size, &main_zone, -1);
+ return ggc_alloc_zone_pass_stat (size, &main_zone);
}
-/* Zone allocation allocates into the specified zone. */
+/* Poison the chunk. */
+#ifdef ENABLE_GC_CHECKING
+#define poison_region(PTR, SIZE) \
+ memset ((PTR), 0xa5, (SIZE))
+#else
+#define poison_region(PTR, SIZE)
+#endif
-void *
-ggc_alloc_zone (size_t size, struct alloc_zone *zone)
+/* Free the object at P. */
+
+void
+ggc_free (void *p)
{
- return ggc_alloc_zone_1 (size, zone, -1);
+ struct page_entry *page;
+
+#ifdef GATHER_STATISTICS
+ ggc_free_overhead (p);
+#endif
+
+ poison_region (p, ggc_get_size (p));
+
+ page = zone_get_object_page (p);
+
+ if (page->large_p)
+ {
+ struct large_page_entry *large_page
+ = (struct large_page_entry *) page;
+
+ /* Remove the page from the linked list. */
+ if (large_page->prev)
+ large_page->prev->next = large_page->next;
+ else
+ {
+ gcc_assert (large_page->common.zone->large_pages == large_page);
+ large_page->common.zone->large_pages = large_page->next;
+ }
+ if (large_page->next)
+ large_page->next->prev = large_page->prev;
+
+ large_page->common.zone->allocated -= large_page->bytes;
+
+ /* Release the memory associated with this object. */
+ free_large_page (large_page);
+ }
+ else if (page->pch_p)
+ /* Don't do anything. We won't allocate a new object from the
+ PCH zone so there's no point in releasing anything. */
+ ;
+ else
+ {
+ size_t size = ggc_get_size (p);
+
+ page->zone->allocated -= size;
+
+ /* Add the chunk to the free list. We don't bother with coalescing,
+ since we are likely to want a chunk of this size again. */
+ free_chunk (p, size, page->zone);
+ }
}
/* If P is not marked, mark it and return false. Otherwise return true.
@@ -741,16 +1364,42 @@ ggc_alloc_zone (size_t size, struct alloc_zone *zone)
int
ggc_set_mark (const void *p)
{
- struct alloc_chunk *chunk;
+ struct page_entry *page;
+ const char *ptr = (const char *) p;
- chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
-#ifdef COOKIE_CHECKING
- if (chunk->magic != CHUNK_MAGIC)
- abort ();
-#endif
- if (chunk->mark)
- return 1;
- chunk->mark = 1;
+ page = zone_get_object_page (p);
+
+ if (page->pch_p)
+ {
+ size_t mark_word, mark_bit, offset;
+ offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
+ mark_word = offset / (8 * sizeof (mark_type));
+ mark_bit = offset % (8 * sizeof (mark_type));
+
+ if (pch_zone.mark_bits[mark_word] & (1 << mark_bit))
+ return 1;
+ pch_zone.mark_bits[mark_word] |= (1 << mark_bit);
+ }
+ else if (page->large_p)
+ {
+ struct large_page_entry *large_page
+ = (struct large_page_entry *) page;
+
+ if (large_page->mark_p)
+ return 1;
+ large_page->mark_p = true;
+ }
+ else
+ {
+ struct small_page_entry *small_page
+ = (struct small_page_entry *) page;
+
+ if (small_page->mark_bits[zone_get_object_mark_word (p)]
+ & (1 << zone_get_object_mark_bit (p)))
+ return 1;
+ small_page->mark_bits[zone_get_object_mark_word (p)]
+ |= (1 << zone_get_object_mark_bit (p));
+ }
if (GGC_DEBUG_LEVEL >= 4)
fprintf (G.debug_file, "Marking %p\n", p);
@@ -765,14 +1414,36 @@ ggc_set_mark (const void *p)
int
ggc_marked_p (const void *p)
{
- struct alloc_chunk *chunk;
+ struct page_entry *page;
+ const char *ptr = p;
- chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
-#ifdef COOKIE_CHECKING
- if (chunk->magic != CHUNK_MAGIC)
- abort ();
-#endif
- return chunk->mark;
+ page = zone_get_object_page (p);
+
+ if (page->pch_p)
+ {
+ size_t mark_word, mark_bit, offset;
+ offset = (ptr - pch_zone.page) / BYTES_PER_MARK_BIT;
+ mark_word = offset / (8 * sizeof (mark_type));
+ mark_bit = offset % (8 * sizeof (mark_type));
+
+ return (pch_zone.mark_bits[mark_word] & (1 << mark_bit)) != 0;
+ }
+
+ if (page->large_p)
+ {
+ struct large_page_entry *large_page
+ = (struct large_page_entry *) page;
+
+ return large_page->mark_p;
+ }
+ else
+ {
+ struct small_page_entry *small_page
+ = (struct small_page_entry *) page;
+
+ return 0 != (small_page->mark_bits[zone_get_object_mark_word (p)]
+ & (1 << zone_get_object_mark_bit (p)));
+ }
}
/* Return the size of the gc-able object P. */
@@ -780,38 +1451,67 @@ ggc_marked_p (const void *p)
size_t
ggc_get_size (const void *p)
{
- struct alloc_chunk *chunk;
+ struct page_entry *page;
+ const char *ptr = (const char *) p;
- chunk = (struct alloc_chunk *) ((char *)p - CHUNK_OVERHEAD);
-#ifdef COOKIE_CHECKING
- if (chunk->magic != CHUNK_MAGIC)
- abort ();
-#endif
- if (chunk->large)
- return chunk->size * 1024;
+ page = zone_get_object_page (p);
+
+ if (page->pch_p)
+ {
+ size_t alloc_word, alloc_bit, offset, max_size;
+ offset = (ptr - pch_zone.page) / BYTES_PER_ALLOC_BIT + 1;
+ alloc_word = offset / (8 * sizeof (alloc_type));
+ alloc_bit = offset % (8 * sizeof (alloc_type));
+ max_size = pch_zone.bytes - (ptr - pch_zone.page);
+ return zone_object_size_1 (pch_zone.alloc_bits, alloc_word, alloc_bit,
+ max_size);
+ }
- return chunk->size;
+ if (page->large_p)
+ return ((struct large_page_entry *)page)->bytes;
+ else
+ return zone_find_object_size ((struct small_page_entry *) page, p);
}
/* Initialize the ggc-zone-mmap allocator. */
void
init_ggc (void)
{
+ /* The allocation size must be greater than BYTES_PER_MARK_BIT, and
+ a multiple of both BYTES_PER_ALLOC_BIT and FREE_BIN_DELTA, for
+ the current assumptions to hold. */
+
+ gcc_assert (FREE_BIN_DELTA == MAX_ALIGNMENT);
+
/* Set up the main zone by hand. */
main_zone.name = "Main zone";
G.zones = &main_zone;
/* Allocate the default zones. */
- rtl_zone = new_ggc_zone ("RTL zone");
- tree_zone = new_ggc_zone ("Tree zone");
- garbage_zone = new_ggc_zone ("Garbage zone");
+ new_ggc_zone_1 (&rtl_zone, "RTL zone");
+ new_ggc_zone_1 (&tree_zone, "Tree zone");
+ new_ggc_zone_1 (&tree_id_zone, "Tree identifier zone");
G.pagesize = getpagesize();
G.lg_pagesize = exact_log2 (G.pagesize);
+ G.page_mask = ~(G.pagesize - 1);
+
+ /* Require the system page size to be a multiple of GGC_PAGE_SIZE. */
+ gcc_assert ((G.pagesize & (GGC_PAGE_SIZE - 1)) == 0);
+
+ /* Allocate 16 system pages at a time. */
+ G.quire_size = 16 * G.pagesize / GGC_PAGE_SIZE;
+
+ /* Calculate the size of the allocation bitmap and other overhead. */
+ /* Right now we allocate bits for the page header and bitmap. These
+ are wasted, but a little tricky to eliminate. */
+ G.small_page_overhead
+ = PAGE_OVERHEAD + (GGC_PAGE_SIZE / BYTES_PER_ALLOC_BIT / 8);
+ /* G.small_page_overhead = ROUND_UP (G.small_page_overhead, MAX_ALIGNMENT); */
+
#ifdef HAVE_MMAP_DEV_ZERO
G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
- if (G.dev_zero_fd == -1)
- abort ();
+ gcc_assert (G.dev_zero_fd != -1);
#endif
#if 0
@@ -828,36 +1528,49 @@ init_ggc (void)
hork badly if we tried to use it. */
{
char *p = alloc_anon (NULL, G.pagesize, &main_zone);
- struct page_entry *e;
+ struct small_page_entry *e;
if ((size_t)p & (G.pagesize - 1))
{
/* How losing. Discard this one and try another. If we still
can't get something useful, give up. */
p = alloc_anon (NULL, G.pagesize, &main_zone);
- if ((size_t)p & (G.pagesize - 1))
- abort ();
+ gcc_assert (!((size_t)p & (G.pagesize - 1)));
}
- /* We have a good page, might as well hold onto it... */
- e = (struct page_entry *) xmalloc (sizeof (struct page_entry));
- e->bytes = G.pagesize;
- e->page = p;
- e->next = main_zone.free_pages;
- main_zone.free_pages = e;
+ if (GGC_PAGE_SIZE == G.pagesize)
+ {
+ /* We have a good page, might as well hold onto it... */
+ e = xcalloc (1, G.small_page_overhead);
+ e->common.page = p;
+ e->common.zone = &main_zone;
+ e->next = main_zone.free_pages;
+ set_page_table_entry (e->common.page, &e->common);
+ main_zone.free_pages = e;
+ }
+ else
+ {
+ munmap (p, G.pagesize);
+ }
}
#endif
}
/* Start a new GGC zone. */
-struct alloc_zone *
-new_ggc_zone (const char * name)
+static void
+new_ggc_zone_1 (struct alloc_zone *new_zone, const char * name)
{
- struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
new_zone->name = name;
new_zone->next_zone = G.zones->next_zone;
G.zones->next_zone = new_zone;
+}
+
+struct alloc_zone *
+new_ggc_zone (const char * name)
+{
+ struct alloc_zone *new_zone = xcalloc (1, sizeof (struct alloc_zone));
+ new_ggc_zone_1 (new_zone, name);
return new_zone;
}
@@ -868,185 +1581,183 @@ destroy_ggc_zone (struct alloc_zone * dead_zone)
struct alloc_zone *z;
for (z = G.zones; z && z->next_zone != dead_zone; z = z->next_zone)
- /* Just find that zone. */ ;
+ /* Just find that zone. */
+ continue;
-#ifdef ENABLE_CHECKING
/* We should have found the zone in the list. Anything else is fatal. */
- if (!z)
- abort ();
-#endif
+ gcc_assert (z);
/* z is dead, baby. z is dead. */
- z->dead= true;
-}
-
-/* Increment the `GC context'. Objects allocated in an outer context
- are never freed, eliminating the need to register their roots. */
-
-void
-ggc_push_context (void)
-{
- struct alloc_zone *zone;
- for (zone = G.zones; zone; zone = zone->next_zone)
- ++(zone->context_depth);
- /* Die on wrap. */
- if (main_zone.context_depth >= HOST_BITS_PER_LONG)
- abort ();
-}
-
-/* Decrement the `GC context'. All objects allocated since the
- previous ggc_push_context are migrated to the outer context. */
-
-static void
-ggc_pop_context_1 (struct alloc_zone *zone)
-{
- unsigned long omask;
- unsigned depth;
- page_entry *p;
-
- depth = --(zone->context_depth);
- omask = (unsigned long)1 << (depth + 1);
-
- if (!((zone->context_depth_allocations | zone->context_depth_collections) & omask))
- return;
-
- zone->context_depth_allocations |= (zone->context_depth_allocations & omask) >> 1;
- zone->context_depth_allocations &= omask - 1;
- zone->context_depth_collections &= omask - 1;
-
- /* Any remaining pages in the popped context are lowered to the new
- current context; i.e. objects allocated in the popped context and
- left over are imported into the previous context. */
- for (p = zone->pages; p != NULL; p = p->next)
- if (p->context_depth > depth)
- p->context_depth = depth;
-}
-
-/* Pop all the zone contexts. */
-
-void
-ggc_pop_context (void)
-{
- struct alloc_zone *zone;
- for (zone = G.zones; zone; zone = zone->next_zone)
- ggc_pop_context_1 (zone);
+ z->dead = true;
}
-/* Poison the chunk. */
-#ifdef ENABLE_GC_CHECKING
-#define poison_chunk(CHUNK, SIZE) \
- memset ((CHUNK)->u.data, 0xa5, (SIZE))
-#else
-#define poison_chunk(CHUNK, SIZE)
-#endif
/* Free all empty pages and objects within a page for a given zone */
static void
sweep_pages (struct alloc_zone *zone)
{
- page_entry **pp, *p, *next;
- struct alloc_chunk *chunk, *last_free, *end;
- size_t last_free_size, allocated = 0;
+ struct large_page_entry **lpp, *lp, *lnext;
+ struct small_page_entry **spp, *sp, *snext;
+ char *last_free;
+ size_t allocated = 0;
bool nomarksinpage;
+
/* First, reset the free_chunks lists, since we are going to
re-free free chunks in hopes of coalescing them into large chunks. */
memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
- pp = &zone->pages;
- for (p = zone->pages; p ; p = next)
+ zone->high_free_bin = 0;
+ zone->cached_free = NULL;
+ zone->cached_free_size = 0;
+
+ /* Large pages are all or none affairs. Either they are completely
+ empty, or they are completely full. */
+ lpp = &zone->large_pages;
+ for (lp = zone->large_pages; lp != NULL; lp = lnext)
{
- next = p->next;
- /* Large pages are all or none affairs. Either they are
- completely empty, or they are completely full.
-
- XXX: Should we bother to increment allocated. */
- if (p->large_p)
+ gcc_assert (lp->common.large_p);
+
+ lnext = lp->next;
+
+#ifdef GATHER_STATISTICS
+ /* This page has now survived another collection. */
+ lp->common.survived++;
+#endif
+
+ if (lp->mark_p)
{
- if (((struct alloc_chunk *)p->page)->mark == 1)
- {
- ((struct alloc_chunk *)p->page)->mark = 0;
- }
- else
- {
- *pp = next;
+ lp->mark_p = false;
+ allocated += lp->bytes;
+ lpp = &lp->next;
+ }
+ else
+ {
+ *lpp = lnext;
#ifdef ENABLE_GC_CHECKING
/* Poison the page. */
- memset (p->page, 0xb5, p->bytes);
+ memset (lp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
- free_page (p);
- }
- continue;
+ if (lp->prev)
+ lp->prev->next = lp->next;
+ if (lp->next)
+ lp->next->prev = lp->prev;
+ free_large_page (lp);
}
+ }
+ spp = &zone->pages;
+ for (sp = zone->pages; sp != NULL; sp = snext)
+ {
+ char *object, *last_object;
+ char *end;
+ alloc_type *alloc_word_p;
+ mark_type *mark_word_p;
+
+ gcc_assert (!sp->common.large_p);
+
+ snext = sp->next;
+
+#ifdef GATHER_STATISTICS
/* This page has now survived another collection. */
- p->survived++;
+ sp->common.survived++;
+#endif
- /* Which leaves full and partial pages. Step through all chunks,
- consolidate those that are free and insert them into the free
- lists. Note that consolidation slows down collection
- slightly. */
+ /* Step through all chunks, consolidate those that are free and
+ insert them into the free lists. Note that consolidation
+ slows down collection slightly. */
- chunk = (struct alloc_chunk *)p->page;
- end = (struct alloc_chunk *)(p->page + G.pagesize);
+ last_object = object = sp->common.page;
+ end = sp->common.page + SMALL_PAGE_SIZE;
last_free = NULL;
- last_free_size = 0;
nomarksinpage = true;
+ mark_word_p = sp->mark_bits;
+ alloc_word_p = sp->alloc_bits;
+
+ gcc_assert (BYTES_PER_ALLOC_BIT == BYTES_PER_MARK_BIT);
+
+ object = sp->common.page;
do
{
- prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
- if (chunk->mark || p->context_depth < zone->context_depth)
+ unsigned int i, n;
+ alloc_type alloc_word;
+ mark_type mark_word;
+
+ alloc_word = *alloc_word_p++;
+ mark_word = *mark_word_p++;
+
+ if (mark_word)
+ nomarksinpage = false;
+
+ /* There ought to be some way to do this without looping... */
+ i = 0;
+ while ((n = alloc_ffs (alloc_word)) != 0)
{
- nomarksinpage = false;
- if (last_free)
+ /* Extend the current state for n - 1 bits. We can't
+ shift alloc_word by n, even though it isn't used in the
+ loop, in case only the highest bit was set. */
+ alloc_word >>= n - 1;
+ mark_word >>= n - 1;
+ object += BYTES_PER_MARK_BIT * (n - 1);
+
+ if (mark_word & 1)
{
- last_free->type = 0;
- last_free->size = last_free_size;
- last_free->mark = 0;
- poison_chunk (last_free, last_free_size);
- free_chunk (last_free, last_free_size, zone);
- last_free = NULL;
- }
- if (chunk->mark)
- {
- allocated += chunk->size + CHUNK_OVERHEAD;
- }
- chunk->mark = 0;
- }
- else
- {
- if (last_free)
- {
- last_free_size += CHUNK_OVERHEAD + chunk->size;
+ if (last_free)
+ {
+ VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
+ object
+ - last_free));
+ poison_region (last_free, object - last_free);
+ free_chunk (last_free, object - last_free, zone);
+ last_free = NULL;
+ }
+ else
+ allocated += object - last_object;
+ last_object = object;
}
else
{
- last_free = chunk;
- last_free_size = chunk->size;
+ if (last_free == NULL)
+ {
+ last_free = object;
+ allocated += object - last_object;
+ }
+ else
+ zone_clear_object_alloc_bit (sp, object);
}
+
+ /* Shift to just after the alloc bit we handled. */
+ alloc_word >>= 1;
+ mark_word >>= 1;
+ object += BYTES_PER_MARK_BIT;
+
+ i += n;
}
- chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
+ object += BYTES_PER_MARK_BIT * (8 * sizeof (alloc_type) - i);
}
- while (chunk < end);
+ while (object < end);
if (nomarksinpage)
{
- *pp = next;
+ *spp = snext;
#ifdef ENABLE_GC_CHECKING
+ VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (sp->common.page, SMALL_PAGE_SIZE));
/* Poison the page. */
- memset (p->page, 0xb5, p->bytes);
+ memset (sp->common.page, 0xb5, SMALL_PAGE_SIZE);
#endif
- free_page (p);
+ free_small_page (sp);
continue;
}
else if (last_free)
{
- last_free->type = 0;
- last_free->size = last_free_size;
- last_free->mark = 0;
- poison_chunk (last_free, last_free_size);
- free_chunk (last_free, last_free_size, zone);
+ VALGRIND_DISCARD (VALGRIND_MAKE_WRITABLE (last_free,
+ object - last_free));
+ poison_region (last_free, object - last_free);
+ free_chunk (last_free, object - last_free, zone);
}
- pp = &p->next;
+ else
+ allocated += object - last_object;
+
+ spp = &sp->next;
}
zone->allocated = allocated;
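The run-length scan above leans on alloc_ffs to jump from one object start to the next within a bitmap word. The standalone sketch below shows the same idiom on a single word, using POSIX ffs() and an assumed granularity of 8 bytes per mark bit; it is an illustration of the technique, not code from this patch.

#include <stdio.h>
#include <strings.h>   /* ffs */

#define BYTES_PER_MARK_BIT 8  /* assumed granularity */

static void
scan_word (unsigned int alloc_word, unsigned int mark_word)
{
  size_t offset = 0;
  int n;

  while ((n = ffs (alloc_word)) != 0)
    {
      /* Skip to the object start.  Shift by n - 1 first so that a word
         whose only set bit is the top bit never shifts by the full
         word width, which would be undefined behavior.  */
      alloc_word >>= n - 1;
      mark_word >>= n - 1;
      offset += BYTES_PER_MARK_BIT * (n - 1);

      printf ("object at +%lu: %s\n", (unsigned long) offset,
              (mark_word & 1) ? "marked (live)" : "unmarked (free)");

      /* Step just past the alloc bit we handled.  */
      alloc_word >>= 1;
      mark_word >>= 1;
      offset += BYTES_PER_MARK_BIT;
    }
}

int
main (void)
{
  /* Objects start at bits 0, 3 and 9; only the one at bit 3 is marked,
     so the sweep would free the runs at +0 and +72.  */
  scan_word ((1u << 0) | (1u << 3) | (1u << 9), 1u << 3);
  return 0;
}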
@@ -1060,20 +1771,30 @@ sweep_pages (struct alloc_zone *zone)
static bool
ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
{
- if (!zone->dead)
- {
- /* Avoid frequent unnecessary work by skipping collection if the
- total allocations haven't expanded much since the last
- collection. */
- float allocated_last_gc =
- MAX (zone->allocated_last_gc,
- (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
-
- float min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
+#if 0
+ /* */
+ {
+ int i;
+ for (i = 0; i < NUM_FREE_BINS + 1; i++)
+ {
+ struct alloc_chunk *chunk;
+ int n, tot;
- if (zone->allocated < allocated_last_gc + min_expand)
- return false;
- }
+ n = 0;
+ tot = 0;
+ chunk = zone->free_chunks[i];
+ while (chunk)
+ {
+ n++;
+ tot += chunk->size;
+ chunk = chunk->next_free;
+ }
+ fprintf (stderr, "Bin %d: %d free chunks (%d bytes)\n",
+ i, n, tot);
+ }
+ }
+ /* */
+#endif
if (!quiet_flag)
fprintf (stderr, " {%s GC %luk -> ",
@@ -1087,11 +1808,15 @@ ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
reuse in the interim. */
release_pages (zone);
- /* Indicate that we've seen collections at this context depth. */
- zone->context_depth_collections
- = ((unsigned long)1 << (zone->context_depth + 1)) - 1;
if (need_marking)
- ggc_mark_roots ();
+ {
+ zone_allocate_marks ();
+ ggc_mark_roots ();
+#ifdef GATHER_STATISTICS
+ ggc_prune_overhead_list ();
+#endif
+ }
+
sweep_pages (zone);
zone->was_collected = true;
zone->allocated_last_gc = zone->allocated;
@@ -1101,6 +1826,7 @@ ggc_collect_1 (struct alloc_zone *zone, bool need_marking)
return true;
}
+#ifdef GATHER_STATISTICS
/* Calculate the average page survival rate in terms of number of
collections. */
@@ -1109,46 +1835,22 @@ calculate_average_page_survival (struct alloc_zone *zone)
{
float count = 0.0;
float survival = 0.0;
- page_entry *p;
+ struct small_page_entry *p;
+ struct large_page_entry *lp;
for (p = zone->pages; p; p = p->next)
{
count += 1.0;
- survival += p->survived;
+ survival += p->common.survived;
}
- return survival/count;
-}
-
-/* Check the magic cookies all of the chunks contain, to make sure we
- aren't doing anything stupid, like stomping on alloc_chunk
- structures. */
-
-static inline void
-check_cookies (void)
-{
-#ifdef COOKIE_CHECKING
- page_entry *p;
- struct alloc_zone *zone;
-
- for (zone = G.zones; zone; zone = zone->next_zone)
+ for (lp = zone->large_pages; lp; lp = lp->next)
{
- for (p = zone->pages; p; p = p->next)
- {
- if (!p->large_p)
- {
- struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
- struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
- do
- {
- if (chunk->magic != CHUNK_MAGIC && chunk->magic != DEADCHUNK_MAGIC)
- abort ();
- chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
- }
- while (chunk < end);
- }
- }
+ count += 1.0;
+ survival += lp->common.survived;
}
-#endif
+ return survival/count;
}
+#endif
+
/* Top level collection routine. */
void
@@ -1156,10 +1858,31 @@ ggc_collect (void)
{
struct alloc_zone *zone;
bool marked = false;
- float f;
timevar_push (TV_GC);
- check_cookies ();
+
+ if (!ggc_force_collect)
+ {
+ float allocated_last_gc = 0, allocated = 0, min_expand;
+
+ for (zone = G.zones; zone; zone = zone->next_zone)
+ {
+ allocated_last_gc += zone->allocated_last_gc;
+ allocated += zone->allocated;
+ }
+
+ allocated_last_gc =
+ MAX (allocated_last_gc,
+ (size_t) PARAM_VALUE (GGC_MIN_HEAPSIZE) * 1024);
+ min_expand = allocated_last_gc * PARAM_VALUE (GGC_MIN_EXPAND) / 100;
+
+ if (allocated < allocated_last_gc + min_expand)
+ {
+ timevar_pop (TV_GC);
+ return;
+ }
+ }
+
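To make the skip heuristic concrete, this sketch evaluates it with assumed parameter values: GGC_MIN_HEAPSIZE of 4096 kB and GGC_MIN_EXPAND of 30 percent, which are stand-ins for illustration, not the compiled-in defaults.

#include <stdio.h>

int
main (void)
{
  float allocated_last_gc = 3 * 1024 * 1024;  /* 3 MB live after last GC */
  float allocated = 5 * 1024 * 1024;          /* 5 MB allocated now */
  float min_heapsize = 4096 * 1024;           /* assumed param, in bytes */
  float min_expand_pct = 30;                  /* assumed param */

  /* MAX (allocated_last_gc, GGC_MIN_HEAPSIZE * 1024), as above.  */
  float base = allocated_last_gc > min_heapsize
               ? allocated_last_gc : min_heapsize;
  float min_expand = base * min_expand_pct / 100;

  /* 5 MB is below 4 MB + 1.2 MB, so this collection would be skipped.  */
  printf ("collect: %s\n",
          allocated < base + min_expand ? "skip" : "run");
  return 0;
}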
/* Start by possibly collecting the main zone. */
main_zone.was_collected = false;
marked |= ggc_collect_1 (&main_zone, true);
@@ -1172,6 +1895,8 @@ ggc_collect (void)
marking. So if we mark twice as often as we used to, we'll be
twice as slow. Hopefully we'll avoid this cost when we mark
zone-at-a-time. */
+ /* NOTE drow/2004-07-28: We now always collect the main zone, but
+ keep this code in case the heuristics are further refined. */
if (main_zone.was_collected)
{
@@ -1179,12 +1904,12 @@ ggc_collect (void)
for (zone = main_zone.next_zone; zone; zone = zone->next_zone)
{
- check_cookies ();
zone->was_collected = false;
marked |= ggc_collect_1 (zone, !marked);
}
}
+#ifdef GATHER_STATISTICS
/* Print page survival stats, if someone wants them. */
if (GGC_DEBUG_LEVEL >= 2)
{
@@ -1192,47 +1917,16 @@ ggc_collect (void)
{
if (zone->was_collected)
{
- f = calculate_average_page_survival (zone);
+ float f = calculate_average_page_survival (zone);
printf ("Average page survival in zone `%s' is %f\n",
zone->name, f);
}
}
}
+#endif
- /* Since we don't mark zone at a time right now, marking in any
- zone means marking in every zone. So we have to clear all the
- marks in all the zones that weren't collected already. */
if (marked)
- {
- page_entry *p;
- for (zone = G.zones; zone; zone = zone->next_zone)
- {
- if (zone->was_collected)
- continue;
- for (p = zone->pages; p; p = p->next)
- {
- if (!p->large_p)
- {
- struct alloc_chunk *chunk = (struct alloc_chunk *)p->page;
- struct alloc_chunk *end = (struct alloc_chunk *)(p->page + G.pagesize);
- do
- {
- prefetch ((struct alloc_chunk *)(chunk->u.data + chunk->size));
- if (chunk->mark || p->context_depth < zone->context_depth)
- {
- chunk->mark = 0;
- }
- chunk = (struct alloc_chunk *)(chunk->u.data + chunk->size);
- }
- while (chunk < end);
- }
- else
- {
- ((struct alloc_chunk *)p->page)->mark = 0;
- }
- }
- }
- }
+ zone_free_marks ();
/* Free dead zones. */
for (zone = G.zones; zone && zone->next_zone; zone = zone->next_zone)
@@ -1244,8 +1938,7 @@ ggc_collect (void)
printf ("Zone `%s' is dead and will be freed.\n", dead_zone->name);
/* The zone must be empty. */
- if (dead_zone->allocated != 0)
- abort ();
+ gcc_assert (!dead_zone->allocated);
/* Unchain the dead zone, release all its pages and free it. */
zone->next_zone = zone->next_zone->next_zone;
@@ -1258,23 +1951,206 @@ ggc_collect (void)
}
/* Print allocation statistics. */
+#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
+ ? (x) \
+ : ((x) < 1024*1024*10 \
+ ? (x) / 1024 \
+ : (x) / (1024*1024))))
+#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))
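A quick demonstration of how SCALE and LABEL cooperate: values under 10k print in bytes, under 10M in kilobytes, and anything larger in megabytes. The macros are copied verbatim from above; the sample sizes are arbitrary.

#include <stdio.h>

#define SCALE(x) ((unsigned long) ((x) < 1024*10 \
                  ? (x) \
                  : ((x) < 1024*1024*10 \
                     ? (x) / 1024 \
                     : (x) / (1024*1024))))
#define LABEL(x) ((x) < 1024*10 ? ' ' : ((x) < 1024*1024*10 ? 'k' : 'M'))

int
main (void)
{
  unsigned long sizes[] = { 512, 65536, 1 << 30 };
  int i;

  /* Prints "512 ", "64k" and "1024M" respectively.  */
  for (i = 0; i < 3; i++)
    printf ("%lu -> %lu%c\n", sizes[i], SCALE (sizes[i]), LABEL (sizes[i]));
  return 0;
}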
void
ggc_print_statistics (void)
{
+ struct alloc_zone *zone;
+ struct ggc_statistics stats;
+ size_t total_overhead = 0, total_allocated = 0, total_bytes_mapped = 0;
+ size_t pte_overhead, i;
+
+ /* Clear the statistics. */
+ memset (&stats, 0, sizeof (stats));
+
+ /* Make sure collection will really occur. */
+ ggc_force_collect = true;
+
+ /* Collect and print the statistics common across collectors. */
+ ggc_print_common_statistics (stderr, &stats);
+
+ ggc_force_collect = false;
+
+ /* Release free pages so that we will not count the bytes allocated
+ there as part of the total allocated memory. */
+ for (zone = G.zones; zone; zone = zone->next_zone)
+ release_pages (zone);
+
+ /* Collect some information about the various sizes of
+ allocation. */
+ fprintf (stderr,
+ "Memory still allocated at the end of the compilation process\n");
+
+ fprintf (stderr, "%20s %10s %10s %10s\n",
+ "Zone", "Allocated", "Used", "Overhead");
+ for (zone = G.zones; zone; zone = zone->next_zone)
+ {
+ struct large_page_entry *large_page;
+ size_t overhead, allocated, in_use;
+
+ /* Skip empty zones. */
+ if (!zone->pages && !zone->large_pages)
+ continue;
+
+ allocated = in_use = 0;
+
+ overhead = sizeof (struct alloc_zone);
+
+ for (large_page = zone->large_pages; large_page != NULL;
+ large_page = large_page->next)
+ {
+ allocated += large_page->bytes;
+ in_use += large_page->bytes;
+ overhead += sizeof (struct large_page_entry);
+ }
+
+ /* There's no easy way to walk through the small pages finding
+ used and unused objects. Instead, add all the pages, and
+ subtract out the free list. */
+
+ allocated += GGC_PAGE_SIZE * zone->n_small_pages;
+ in_use += GGC_PAGE_SIZE * zone->n_small_pages;
+ overhead += G.small_page_overhead * zone->n_small_pages;
+
+ for (i = 0; i <= NUM_FREE_BINS; i++)
+ {
+ struct alloc_chunk *chunk = zone->free_chunks[i];
+ while (chunk)
+ {
+ in_use -= ggc_get_size (chunk);
+ chunk = chunk->next_free;
+ }
+ }
+
+ fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n",
+ zone->name,
+ SCALE (allocated), LABEL (allocated),
+ SCALE (in_use), LABEL (in_use),
+ SCALE (overhead), LABEL (overhead));
+
+ gcc_assert (in_use == zone->allocated);
+
+ total_overhead += overhead;
+ total_allocated += zone->allocated;
+ total_bytes_mapped += zone->bytes_mapped;
+ }
+
+ /* Count the size of the page table as best we can. */
+#if HOST_BITS_PER_PTR <= 32
+ pte_overhead = sizeof (G.lookup);
+ for (i = 0; i < PAGE_L1_SIZE; i++)
+ if (G.lookup[i])
+ pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
+#else
+ {
+ page_table table = G.lookup;
+ pte_overhead = 0;
+ while (table)
+ {
+ pte_overhead += sizeof (*table);
+ for (i = 0; i < PAGE_L1_SIZE; i++)
+ if (table->table[i])
+ pte_overhead += PAGE_L2_SIZE * sizeof (struct page_entry *);
+ table = table->next;
+ }
+ }
+#endif
+ fprintf (stderr, "%20s %11s %11s %10lu%c\n", "Page Table",
+ "", "", SCALE (pte_overhead), LABEL (pte_overhead));
+ total_overhead += pte_overhead;
+
+ fprintf (stderr, "%20s %10lu%c %10lu%c %10lu%c\n", "Total",
+ SCALE (total_bytes_mapped), LABEL (total_bytes_mapped),
+ SCALE (total_allocated), LABEL(total_allocated),
+ SCALE (total_overhead), LABEL (total_overhead));
+
+#ifdef GATHER_STATISTICS
+ {
+ unsigned long long all_overhead = 0, all_allocated = 0;
+ unsigned long long all_overhead_under32 = 0, all_allocated_under32 = 0;
+ unsigned long long all_overhead_under64 = 0, all_allocated_under64 = 0;
+ unsigned long long all_overhead_under128 = 0, all_allocated_under128 = 0;
+
+ fprintf (stderr, "\nTotal allocations and overheads during the compilation process\n");
+
+ for (zone = G.zones; zone; zone = zone->next_zone)
+ {
+ all_overhead += zone->stats.total_overhead;
+ all_allocated += zone->stats.total_allocated;
+
+ all_allocated_under32 += zone->stats.total_allocated_under32;
+ all_overhead_under32 += zone->stats.total_overhead_under32;
+
+ all_allocated_under64 += zone->stats.total_allocated_under64;
+ all_overhead_under64 += zone->stats.total_overhead_under64;
+
+ all_allocated_under128 += zone->stats.total_allocated_under128;
+ all_overhead_under128 += zone->stats.total_overhead_under128;
+
+ fprintf (stderr, "%20s: %10lld\n",
+ zone->name, zone->stats.total_allocated);
+ }
+
+ fprintf (stderr, "\n");
+
+ fprintf (stderr, "Total Overhead: %10lld\n",
+ all_overhead);
+ fprintf (stderr, "Total Allocated: %10lld\n",
+ all_allocated);
+
+ fprintf (stderr, "Total Overhead under 32B: %10lld\n",
+ all_overhead_under32);
+ fprintf (stderr, "Total Allocated under 32B: %10lld\n",
+ all_allocated_under32);
+ fprintf (stderr, "Total Overhead under 64B: %10lld\n",
+ all_overhead_under64);
+ fprintf (stderr, "Total Allocated under 64B: %10lld\n",
+ all_allocated_under64);
+ fprintf (stderr, "Total Overhead under 128B: %10lld\n",
+ all_overhead_under128);
+ fprintf (stderr, "Total Allocated under 128B: %10lld\n",
+ all_allocated_under128);
+ }
+#endif
}
+/* Precompiled header support. */
+
+/* For precompiled headers, we sort objects based on their type. We
+ also sort various objects into their own buckets; currently this
+ covers strings and IDENTIFIER_NODE trees. The choices of how
+ to sort buckets have not yet been tuned. */
+
+#define NUM_PCH_BUCKETS (gt_types_enum_last + 3)
+
+#define OTHER_BUCKET (gt_types_enum_last + 0)
+#define IDENTIFIER_BUCKET (gt_types_enum_last + 1)
+#define STRING_BUCKET (gt_types_enum_last + 2)
+
+struct ggc_pch_ondisk
+{
+ size_t total;
+ size_t type_totals[NUM_PCH_BUCKETS];
+};
+
struct ggc_pch_data
{
- struct ggc_pch_ondisk
- {
- unsigned total;
- } d;
+ struct ggc_pch_ondisk d;
size_t base;
- size_t written;
+ size_t orig_base;
+ size_t alloc_size;
+ alloc_type *alloc_bits;
+ size_t type_bases[NUM_PCH_BUCKETS];
+ size_t start_offset;
};
-/* Initialize the PCH datastructure. */
+/* Initialize the PCH data structure. */
struct ggc_pch_data *
init_ggc_pch (void)
@@ -1282,18 +2158,40 @@ init_ggc_pch (void)
return xcalloc (sizeof (struct ggc_pch_data), 1);
}
+/* Return which of the page-aligned buckets the object at X, with type
+ TYPE, should be sorted into in the PCH. Strings will have
+ IS_STRING set and TYPE will be gt_types_enum_last. Other objects
+ of unknown type will also have TYPE equal to gt_types_enum_last. */
+
+static int
+pch_bucket (void *x, enum gt_types_enum type,
+ bool is_string)
+{
+ /* Sort identifiers into their own bucket, to improve locality
+ when searching the identifier hash table. */
+ if (type == gt_ggc_e_14lang_tree_node
+ && TREE_CODE ((tree) x) == IDENTIFIER_NODE)
+ return IDENTIFIER_BUCKET;
+ else if (type == gt_types_enum_last)
+ {
+ if (is_string)
+ return STRING_BUCKET;
+ return OTHER_BUCKET;
+ }
+ return type;
+}
+
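The bucket selection can be mimicked with a toy enum. Everything below (the enum values and the flags) is a stand-in chosen for illustration, since the real gt_types_enum values are generated by gengtype elsewhere.

#include <stdio.h>

enum toy_type { TYPE_TREE, TYPE_RTX, TYPE_LAST };  /* stand-ins */

#define OTHER_BUCKET      (TYPE_LAST + 0)
#define IDENTIFIER_BUCKET (TYPE_LAST + 1)
#define STRING_BUCKET     (TYPE_LAST + 2)

static int
toy_bucket (enum toy_type type, int is_identifier, int is_string)
{
  /* Identifiers get their own bucket for hash-lookup locality.  */
  if (type == TYPE_TREE && is_identifier)
    return IDENTIFIER_BUCKET;
  /* Objects of unknown type are split into strings and the rest.  */
  if (type == TYPE_LAST)
    return is_string ? STRING_BUCKET : OTHER_BUCKET;
  /* Known types keep a bucket per type.  */
  return type;
}

int
main (void)
{
  printf ("identifier -> %d\n", toy_bucket (TYPE_TREE, 1, 0));
  printf ("plain tree -> %d\n", toy_bucket (TYPE_TREE, 0, 0));
  printf ("string     -> %d\n", toy_bucket (TYPE_LAST, 0, 1));
  return 0;
}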
/* Add the size of object X to the size of the PCH data. */
void
ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
- size_t size, bool is_string)
+ size_t size, bool is_string, enum gt_types_enum type)
{
- if (!is_string)
- {
- d->d.total += size + CHUNK_OVERHEAD;
- }
- else
- d->d.total += size;
+ /* NOTE: Right now we don't need to align up the size of any objects.
+ Strings can be unaligned, and everything else is allocated to a
+ MAX_ALIGNMENT boundary already. */
+
+ d->d.type_totals[pch_bucket (x, type, is_string)] += size;
}
/* Return the total size of the PCH data. */
@@ -1301,98 +2199,181 @@ ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
size_t
ggc_pch_total_size (struct ggc_pch_data *d)
{
- return d->d.total;
+ enum gt_types_enum i;
+ size_t alloc_size, total_size;
+
+ total_size = 0;
+ for (i = 0; i < NUM_PCH_BUCKETS; i++)
+ {
+ d->d.type_totals[i] = ROUND_UP (d->d.type_totals[i], GGC_PAGE_SIZE);
+ total_size += d->d.type_totals[i];
+ }
+ d->d.total = total_size;
+
+ /* Include the size of the allocation bitmap. */
+ alloc_size = CEIL (d->d.total, BYTES_PER_ALLOC_BIT * 8);
+ alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
+ d->alloc_size = alloc_size;
+
+ return d->d.total + alloc_size;
}
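Worked numbers for the sizing above, under assumptions: two buckets totalling 5000 and 12000 bytes, GGC_PAGE_SIZE of 4096, one allocation bit per 8 bytes, and MAX_ALIGNMENT of 8. The ROUND_UP and CEIL definitions here are illustrative stand-ins that require power-of-two arguments.

#include <stdio.h>

#define ROUND_UP(x, f)  (((x) + (f) - 1) & ~((size_t)(f) - 1))
#define CEIL(x, y)      (((x) + (y) - 1) / (y))

int
main (void)
{
  size_t totals[] = { 5000, 12000 };
  size_t total = 0, alloc_size;
  int i;

  /* Each bucket is padded to a whole GC page: 8192 + 12288.  */
  for (i = 0; i < 2; i++)
    total += ROUND_UP (totals[i], 4096);

  /* One bit per 8 bytes gives the bitmap size, rounded to alignment.  */
  alloc_size = ROUND_UP (CEIL (total, 8 * 8), 8);

  /* Prints: data 20480 + bitmap 320 = 20800 bytes.  */
  printf ("data %lu + bitmap %lu = %lu bytes\n",
          (unsigned long) total, (unsigned long) alloc_size,
          (unsigned long) (total + alloc_size));
  return 0;
}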
/* Set the base address for the objects in the PCH file. */
void
-ggc_pch_this_base (struct ggc_pch_data *d, void *base)
+ggc_pch_this_base (struct ggc_pch_data *d, void *base_)
{
- d->base = (size_t) base;
+ int i;
+ size_t base = (size_t) base_;
+
+ d->base = d->orig_base = base;
+ for (i = 0; i < NUM_PCH_BUCKETS; i++)
+ {
+ d->type_bases[i] = base;
+ base += d->d.type_totals[i];
+ }
+
+ if (d->alloc_bits == NULL)
+ d->alloc_bits = xcalloc (1, d->alloc_size);
}
/* Allocate a place for object X of size SIZE in the PCH file. */
char *
ggc_pch_alloc_object (struct ggc_pch_data *d, void *x,
- size_t size, bool is_string)
+ size_t size, bool is_string,
+ enum gt_types_enum type)
{
+ size_t alloc_word, alloc_bit;
char *result;
- result = (char *)d->base;
- if (!is_string)
- {
- struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
- if (chunk->large)
- d->base += ggc_get_size (x) + CHUNK_OVERHEAD;
- else
- d->base += chunk->size + CHUNK_OVERHEAD;
- return result + CHUNK_OVERHEAD;
- }
- else
- {
- d->base += size;
- return result;
- }
-
+ int bucket = pch_bucket (x, type, is_string);
+
+ /* Record the start of the object in the allocation bitmap. We
+ can't assert that the allocation bit is previously clear, because
+ strings may violate the invariant that they are at least
+ BYTES_PER_ALLOC_BIT long. This is harmless - ggc_get_size
+ should not be called for strings. */
+ alloc_word = ((d->type_bases[bucket] - d->orig_base)
+ / (8 * sizeof (alloc_type) * BYTES_PER_ALLOC_BIT));
+ alloc_bit = ((d->type_bases[bucket] - d->orig_base)
+ / BYTES_PER_ALLOC_BIT) % (8 * sizeof (alloc_type));
+ d->alloc_bits[alloc_word] |= 1L << alloc_bit;
+
+ /* Place the object at the current pointer for this bucket. */
+ result = (char *) d->type_bases[bucket];
+ d->type_bases[bucket] += size;
+ return result;
}
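The word/bit index computation above reduces to ordinary div/mod arithmetic. This sketch assumes 8 bytes per allocation bit and 32-bit bitmap words; the real alloc_type may be wider.

#include <stdio.h>

int
main (void)
{
  const size_t bytes_per_alloc_bit = 8;              /* assumed */
  const size_t bits_per_word = 8 * sizeof (unsigned int);
  size_t offset = 1000;          /* object's byte offset from the base */

  size_t bit_index = offset / bytes_per_alloc_bit;   /* 125 */
  size_t word = bit_index / bits_per_word;           /* 3 */
  size_t bit  = bit_index % bits_per_word;           /* 29 */

  printf ("offset %lu -> word %lu, bit %lu\n",
          (unsigned long) offset, (unsigned long) word,
          (unsigned long) bit);
  return 0;
}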
/* Prepare to write out the PCH data to file F. */
void
-ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
- FILE *f ATTRIBUTE_UNUSED)
+ggc_pch_prepare_write (struct ggc_pch_data *d,
+ FILE *f)
{
- /* Nothing to do. */
+ /* We seek around a lot while writing. Record where the end
+ of the padding in the PCH file is, so that we can
+ locate each object's offset. */
+ d->start_offset = ftell (f);
}
/* Write out object X of SIZE to file F. */
void
-ggc_pch_write_object (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
- FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
- size_t size, bool is_string)
+ggc_pch_write_object (struct ggc_pch_data *d,
+ FILE *f, void *x, void *newx,
+ size_t size, bool is_string ATTRIBUTE_UNUSED)
{
- if (!is_string)
- {
- struct alloc_chunk *chunk = (struct alloc_chunk *) ((char *)x - CHUNK_OVERHEAD);
- size = ggc_get_size (x);
- if (fwrite (chunk, size + CHUNK_OVERHEAD, 1, f) != 1)
- fatal_error ("can't write PCH file: %m");
- d->written += size + CHUNK_OVERHEAD;
- }
- else
- {
- if (fwrite (x, size, 1, f) != 1)
- fatal_error ("can't write PCH file: %m");
- d->written += size;
- }
- if (d->written == d->d.total
- && fseek (f, ROUND_UP_VALUE (d->d.total, G.pagesize), SEEK_CUR) != 0)
+ if (fseek (f, (size_t) newx - d->orig_base + d->start_offset, SEEK_SET) != 0)
+ fatal_error ("can't seek PCH file: %m");
+
+ if (fwrite (x, size, 1, f) != 1)
fatal_error ("can't write PCH file: %m");
}
void
ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
{
+ /* Write out the allocation bitmap. */
+ if (fseek (f, d->start_offset + d->d.total, SEEK_SET) != 0)
+ fatal_error ("can't seek PCH file: %m");
+
+ if (fwrite (d->alloc_bits, d->alloc_size, 1, f) != 1)
+ fatal_error ("can't write PCH fle: %m");
+
+ /* Done with the PCH, so write out our footer. */
if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
fatal_error ("can't write PCH file: %m");
+
+ free (d->alloc_bits);
free (d);
}
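Putting ggc_pch_prepare_write, ggc_pch_write_object and ggc_pch_finish together, the file layout is padding up to start_offset, then the bucket-sorted object data, then the allocation bitmap, then the on-disk footer. A sketch of the resulting offsets, with assumed sizes:

#include <stdio.h>

int
main (void)
{
  size_t start_offset = 4096;   /* assumed padding before the data */
  size_t total = 20480;         /* assumed d.total (object data) */
  size_t alloc_size = 320;      /* assumed bitmap size */

  /* Objects are written relative to start_offset; the bitmap and the
     footer follow the data, matching the fseek calls above.  */
  printf ("objects at %lu, bitmap at %lu, footer at %lu\n",
          (unsigned long) start_offset,
          (unsigned long) (start_offset + total),
          (unsigned long) (start_offset + total + alloc_size));
  return 0;
}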
+
+/* The PCH file from F has been mapped at ADDR. Read in any
+ additional data from the file and set up the GC state. */
+
void
ggc_pch_read (FILE *f, void *addr)
{
struct ggc_pch_ondisk d;
- struct page_entry *entry;
- struct alloc_zone *pch_zone;
+ size_t alloc_size;
+ struct alloc_zone *zone;
+ struct page_entry *pch_page;
+ char *p;
+
if (fread (&d, sizeof (d), 1, f) != 1)
fatal_error ("can't read PCH file: %m");
- entry = xcalloc (1, sizeof (struct page_entry));
- entry->bytes = d.total;
- entry->page = addr;
- entry->context_depth = 0;
- pch_zone = new_ggc_zone ("PCH zone");
- entry->zone = pch_zone;
- entry->next = entry->zone->pages;
- entry->zone->pages = entry;
+
+ alloc_size = CEIL (d.total, BYTES_PER_ALLOC_BIT * 8);
+ alloc_size = ROUND_UP (alloc_size, MAX_ALIGNMENT);
+
+ pch_zone.bytes = d.total;
+ pch_zone.alloc_bits = (alloc_type *) ((char *) addr + pch_zone.bytes);
+ pch_zone.page = (char *) addr;
+ pch_zone.end = (char *) pch_zone.alloc_bits;
+
+ /* We've just read in a PCH file. So, every object that used to be
+ allocated is now free. */
+ for (zone = G.zones; zone; zone = zone->next_zone)
+ {
+ struct small_page_entry *page, *next_page;
+ struct large_page_entry *large_page, *next_large_page;
+
+ zone->allocated = 0;
+
+ /* Clear the zone's free chunk list. */
+ memset (zone->free_chunks, 0, sizeof (zone->free_chunks));
+ zone->high_free_bin = 0;
+ zone->cached_free = NULL;
+ zone->cached_free_size = 0;
+
+ /* Move all the small pages onto the free list. */
+ for (page = zone->pages; page != NULL; page = next_page)
+ {
+ next_page = page->next;
+ memset (page->alloc_bits, 0,
+ G.small_page_overhead - PAGE_OVERHEAD);
+ free_small_page (page);
+ }
+
+ /* Discard all the large pages. */
+ for (large_page = zone->large_pages; large_page != NULL;
+ large_page = next_large_page)
+ {
+ next_large_page = large_page->next;
+ free_large_page (large_page);
+ }
+
+ zone->pages = NULL;
+ zone->large_pages = NULL;
+ }
+
+ /* Allocate the dummy page entry for the PCH, and set all pages
+ mapped into the PCH to reference it. */
+ pch_page = xcalloc (1, sizeof (struct page_entry));
+ pch_page->page = pch_zone.page;
+ pch_page->pch_p = true;
+
+ for (p = pch_zone.page; p < pch_zone.end; p += GGC_PAGE_SIZE)
+ set_page_table_entry (p, pch_page);
}