author    hboehm <hboehm>                           2009-08-12 02:42:01 +0200
committer Ivan Maidanski <ivmai@mail.ru>            2011-07-26 19:06:45 +0200
commit    ab125444763381f2cab78e20780297473f271ede (patch)
tree      406f7ad1ee9e008e0dc9f45944abf841336d0aa8 /allchblk.c
parent    c56de32b70d0c0d99185a4c044a4616edc1c235a (diff)
2009-08-11 Hans Boehm <Hans.Boehm@hp.com>

	(Replacement for Ivan Maidanski's diff99_cvs. Hopefully fixes the
	same bugs, and then some.)
	* allchblk.c (GC_merge_unmapped): Don't assume that adjacent free
	blocks have different mapping status. Correctly handle gap between
	blocks.
	(GC_split_block): Remove dead code setting hb_flags. Add comment.
	(GC_allochblk): Split blocks also in generational-only mode.
	* os_dep.c (GC_unmap_gap): Don't really use munmap.
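The "gap between blocks" in the entry above exists because the collector unmaps at page granularity: unmapping a block releases only the whole pages strictly inside it, so when two adjacent free blocks are unmapped separately, the page straddling their shared boundary stays mapped. A minimal sketch of that boundary computation, assuming a fixed 4 KiB page size and hypothetical round_up/round_down helpers (stand-ins for the collector's internal alignment logic, not its actual code):

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096u  /* assumed for the sketch; the real code asks the OS */

static uintptr_t round_up(uintptr_t p)
{ return (p + PAGE_SIZE - 1) & ~(uintptr_t)(PAGE_SIZE - 1); }

static uintptr_t round_down(uintptr_t p)
{ return p & ~(uintptr_t)(PAGE_SIZE - 1); }

/* For adjacent free blocks [h, h+size) and [h+size, ...), unmapping  */
/* each block separately leaves the page spanning their boundary      */
/* mapped.  Returns that leftover interval [*lo, *hi); it is empty    */
/* (*lo == *hi) when the boundary is already page-aligned.            */
static void mapped_gap(uintptr_t h, size_t size,
                       uintptr_t *lo, uintptr_t *hi)
{
    uintptr_t boundary = h + size;   /* address of the next block    */
    *lo = round_down(boundary);      /* last mapped page of block 1  */
    *hi = round_up(boundary);        /* first mapped page of block 2 */
}

This is why the merge paths in the diff below call GC_unmap_gap even on the branches where one side was already unmapped: without it, the boundary pages would stay resident inside a block the collector believes is fully unmapped.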
Diffstat (limited to 'allchblk.c')
 allchblk.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/allchblk.c b/allchblk.c
index b723a7f..627ef90 100644
--- a/allchblk.c
+++ b/allchblk.c
@@ -431,27 +431,32 @@ void GC_merge_unmapped(void)
if (0 != nexthdr && HBLK_IS_FREE(nexthdr)
&& (signed_word) (size + (nextsize = nexthdr->hb_sz)) > 0
/* no pot. overflow */) {
- if (IS_MAPPED(hhdr)) {
- GC_ASSERT(!IS_MAPPED(nexthdr));
+ /* Note that we usually try to avoid adjacent free blocks */
+ /* that are either both mapped or both unmapped. But that */
+ /* isn't guaranteed to hold since we remap blocks when we */
+ /* split them, and don't merge at that point. It may also */
+ /* not hold if the merged block would be too big. */
+ if (IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
/* make both consistent, so that we can merge */
if (size > nextsize) {
GC_remap((ptr_t)next, nextsize);
} else {
GC_unmap((ptr_t)h, size);
+ GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
hhdr -> hb_flags |= WAS_UNMAPPED;
}
- } else if (IS_MAPPED(nexthdr)) {
- GC_ASSERT(!IS_MAPPED(hhdr));
+ } else if (IS_MAPPED(nexthdr) && !IS_MAPPED(hhdr)) {
if (size > nextsize) {
GC_unmap((ptr_t)next, nextsize);
+ GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
} else {
GC_remap((ptr_t)h, size);
hhdr -> hb_flags &= ~WAS_UNMAPPED;
hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
}
- } else {
+ } else if (!IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
/* Unmap any gap in the middle */
- GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
+ GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
}
/* If they are both unmapped, we merge, but leave unmapped. */
GC_remove_from_fl(hhdr, i);
@@ -515,6 +520,8 @@ STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
*
* Nhdr is not completely filled in, since it is about to be allocated.
* It may in fact end up on the wrong free list for its size.
+ * That's not a disaster, since n is about to be allocated
+ * by our caller.
* (Hence adding it to a free list is silly. But this path is hopefully
* rare enough that it doesn't matter. The code is cleaner this way.)
*/
@@ -541,11 +548,6 @@ STATIC void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
}
INCR_FREE_BYTES(index, -(signed_word)h_size);
FREE_ASSERT(GC_free_bytes[index] > 0);
-# ifdef GC_ASSERTIONS
- nhdr -> hb_flags &= ~FREE_BLK;
- /* Don't fail test for consecutive */
- /* free blocks in GC_add_to_fl. */
-# endif
# ifdef USE_MUNMAP
hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
# endif
@@ -588,7 +590,7 @@ GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
if (0 != result) return result;
if (GC_use_entire_heap || GC_dont_gc
|| USED_HEAP_SIZE < GC_requested_heapsize
- || TRUE_INCREMENTAL || !GC_should_collect()) {
+ || GC_incremental || !GC_should_collect()) {
/* Should use more of the heap, even if it requires splitting. */
split_limit = N_HBLK_FLS;
} else {
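
The last hunk is the "split blocks also in generational-only mode" change. A sketch of the predicate difference, assuming TRUE_INCREMENTAL was defined in gc_priv.h of this era as (GC_incremental && GC_time_limit != GC_TIME_UNLIMITED); the globals and values below are illustrative stand-ins, not the collector's declarations:

#include <stdbool.h>

#define GC_TIME_UNLIMITED (-1L)        /* stand-in sentinel value          */

static bool GC_incremental = true;     /* incremental/generational GC on   */
static long GC_time_limit = GC_TIME_UNLIMITED;  /* generational-only mode  */

/* Old condition: split free blocks only under *true* incremental     */
/* collection, i.e. with a finite pause-time budget.                   */
static bool old_allows_split(void)
{ return GC_incremental && GC_time_limit != GC_TIME_UNLIMITED; }

/* New condition: any incremental mode, including generational-only    */
/* (unlimited time limit), now reaches the block-splitting path.       */
static bool new_allows_split(void)
{ return GC_incremental; }

With GC_time_limit unlimited, old_allows_split() is false while new_allows_split() is true, which is exactly the configuration the ChangeLog entry calls generational-only mode.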