From d8ecfa39797fc7ab882bb228778bff22f42f4c0d Mon Sep 17 00:00:00 2001
From: Dejice Jacob
Date: Mon, 25 Oct 2021 17:21:32 +0100
Subject: [PATCH] Prevent coalescing memory if cap-bounds do not span full
 length

During a GC sweep, coalescing contiguous memory that spans two separate
OS allocations (e.g. obtained via mmap()) causes memory leakage on CHERI
systems: the capability used to represent the coalesced (expanded) region
has bounds derived from one original allocation syscall, and those bounds
are smaller than the size of the coalesced memory. To avoid handing a
client a capability with invalid bounds, such coalescing is prevented.

1. Add an hblk pointer to the header when re-allocating GC'd memory.
2. Prevent coalescing in either direction when the capability bounds do
   not span the size of the coalesced memory.
3. Derive only valid pointers when coalescing memory.
---
 allchblk.c | 17 ++++++++++++++++-
 headers.c  |  4 ++++
 2 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/allchblk.c b/allchblk.c
index e94e829d8..86ac0b8b3 100644
--- a/allchblk.c
+++ b/allchblk.c
@@ -17,6 +17,9 @@
 #include "private/gc_priv.h"
 
 #include <stdio.h>
+#if defined(__CHERI_PURE_CAPABILITY__)
+# include <cheriintrin.h>
+#endif
 
 #ifdef GC_USE_ENTIRE_HEAP
   int GC_use_entire_heap = TRUE;
@@ -604,6 +607,7 @@ STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
         WARN("Header allocation failed: dropping block\n", 0);
         return(0);
     }
+    rest_hdr -> hb_block = rest;
     rest_hdr -> hb_sz = total_size - bytes;
     rest_hdr -> hb_flags = 0;
 #   ifdef GC_ASSERTIONS
@@ -964,6 +968,13 @@ GC_INNER void GC_freehblk(struct hblk *hbp)
     /* Coalesce with successor, if possible */
       if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)
          && (signed_word)(hhdr -> hb_sz + nexthdr -> hb_sz) > 0
+#        if defined(__CHERI_PURE_CAPABILITY__)
+           /* Bounds of the capability should span the entire coalesced    */
+           /* memory.  Bounds larger than the block size are OK; they are  */
+           /* limited by the imprecision of the original system capability. */
+           && (cheri_base_get(hbp) <= cheri_address_get(next))
+           && (cheri_base_get(hbp) + cheri_length_get(hbp)) >= (cheri_address_get(next) + nexthdr -> hb_sz)
+#        endif
          /* no overflow */) {
         GC_remove_from_fl(nexthdr);
         hhdr -> hb_sz += nexthdr -> hb_sz;
@@ -973,7 +984,11 @@ GC_INNER void GC_freehblk(struct hblk *hbp)
       if (prev /* != NULL */) { /* CPPCHECK */
         prevhdr = HDR(prev);
         if (IS_MAPPED(prevhdr)
-            && (signed_word)(hhdr -> hb_sz + prevhdr -> hb_sz) > 0) {
+            && (signed_word)(hhdr -> hb_sz + prevhdr -> hb_sz) > 0
+#           if defined(__CHERI_PURE_CAPABILITY__)
+              && (cheri_base_get(hbp) <= cheri_address_get(prev))
+#           endif
+            /* no overflow */ ) {
           GC_remove_from_fl(prevhdr);
           prevhdr -> hb_sz += hhdr -> hb_sz;
 #         ifdef USE_MUNMAP
diff --git a/headers.c b/headers.c
index c71449f17..23b12e988 100644
--- a/headers.c
+++ b/headers.c
@@ -390,9 +390,13 @@ GC_INNER struct hblk * GC_prev_block(struct hblk *h)
             } else if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                 j -= (signed_word)hhdr;
             } else {
+                #if !defined(__CHERI_PURE_CAPABILITY__)
                 return((struct hblk *)
                         (((bi -> key << LOG_BOTTOM_SZ) + j)
                                << LOG_HBLKSIZE));
+                #else
+                return((struct hblk *)hhdr->hb_block);
+                #endif
             }
         }
         j = BOTTOM_SZ - 1;
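
Note (illustrative, not part of the patch): a minimal sketch of the successor
bounds test added above, assuming a CHERI purecap compiler that provides
<cheriintrin.h>; the helper name can_coalesce_successor() is hypothetical and
only restates the condition used in GC_freehblk().

    #include <stdbool.h>
    #include <stddef.h>
    #if defined(__CHERI_PURE_CAPABILITY__)
    # include <cheriintrin.h>
    #endif

    /* True only if the capability of the block being freed (hbp) already */
    /* spans the successor block of next_size bytes starting at next, so  */
    /* the coalesced region stays inside the capability's valid bounds.   */
    static bool can_coalesce_successor(void *hbp, void *next, size_t next_size)
    {
    #if defined(__CHERI_PURE_CAPABILITY__)
        return cheri_base_get(hbp) <= cheri_address_get(next)
               && cheri_base_get(hbp) + cheri_length_get(hbp)
                  >= cheri_address_get(next) + next_size;
    #else
        /* Non-CHERI builds coalesce exactly as before. */
        (void)hbp; (void)next; (void)next_size;
        return true;
    #endif
    }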