From b623659cf22e792ca95d9d0ed002374accfc14db Mon Sep 17 00:00:00 2001
From: Paul Guyot
Date: Wed, 23 Aug 2023 08:13:15 +0200
Subject: [PATCH] Fix usage of live in opcodes and no longer preserve registers by default

Many opcodes have a live parameter which is the number of registers that should be preserved by GC operations. Take this into account and, conversely, stop preserving registers by default.

This significantly affects the safety of `memory_ensure_free` when writing nifs. Indeed, arguments (which come from registers) are no longer preserved. However, no platform-specific nif was found that needed to be fixed. If required, calls to `memory_ensure_free` or `memory_ensure_free_opt` should be replaced with calls to `memory_ensure_free_with_roots`.

Signed-off-by: Paul Guyot
---
 CHANGELOG.md                                  |   1 +
 UPDATING.md                                   |   7 +
 src/libAtomVM/bif.c                           | 149 ++++----
 src/libAtomVM/memory.c                        |   7 -
 src/libAtomVM/nifs.c                          |  55 +--
 src/libAtomVM/opcodesswitch.h                 | 319 ++++++++----------
 src/libAtomVM/stacktrace.c                    |  11 +-
 src/libAtomVM/stacktrace.h                    |   9 +-
 .../esp32/components/avm_sys/platform_nifs.c  |   1 +
 9 files changed, 273 insertions(+), 286 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 750101e7c..6b7801d11 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,6 +12,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Fixed support for big endian CPUs (such as some MIPS CPUs).
 - Fixed STM32 not aborting when `AVM_ABORT()` is used
 - Fixed a bug that would leave the STM32 trapped in a loop on hard faults, rather than aborting
+- Fixed interpretation of live for opcodes, thus altering GC semantics for nifs. See also [UPDATING](UPDATING.md).
 
 ### Added
 
diff --git a/UPDATING.md b/UPDATING.md
index 4b335fa7f..ade664117 100644
--- a/UPDATING.md
+++ b/UPDATING.md
@@ -6,6 +6,13 @@
 
 # AtomVM Update Instructions
 
+## v0.6.0-alpha.1 -> v0.6.0-alpha.2
+
+- Registers are no longer preserved by GC by default when invoking nifs, as part of the fix
+of the emulator's interpretation of the live parameter of many opcodes. NIFs may need
+to call `memory_ensure_free_with_roots` and pass their arguments as roots, instead of
+`memory_ensure_free` or `memory_ensure_free_opt`.
+
 ## v0.6.0-alpha.0 -> v0.6.0-alpha.1
 
 - **Libraries (or boot .avm file) from latest version must be used**.
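A note on the migration above: a minimal sketch of a hypothetical NIF written against the new rules. nif_example_make_pair is invented for illustration and is not part of this patch; the calls it makes (memory_ensure_free_with_roots, term_alloc_tuple, term_put_tuple_element) are the same ones this patch uses throughout nifs.c.

    // Hypothetical NIF, shown only to illustrate the migration pattern.
    // Before this patch every x register was a GC root, so argv entries
    // survived memory_ensure_free_opt. Now argv (which aliases the x
    // registers) must be passed explicitly as roots.
    static term nif_example_make_pair(Context *ctx, int argc, term argv[])
    {
        // Pass all argc arguments as roots: if a GC runs, the collector
        // copies the referenced terms and rewrites argv[] in place.
        if (UNLIKELY(memory_ensure_free_with_roots(ctx, TUPLE_SIZE(2), argc, argv,
                MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
            RAISE_ERROR(OUT_OF_MEMORY_ATOM);
        }
        term result = term_alloc_tuple(2, &ctx->heap);
        term_put_tuple_element(result, 0, argv[0]);
        term_put_tuple_element(result, 1, argv[1]);
        return result;
    }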
Standard library from diff --git a/src/libAtomVM/bif.c b/src/libAtomVM/bif.c index f116821ee..88d8f26da 100644 --- a/src/libAtomVM/bif.c +++ b/src/libAtomVM/bif.c @@ -249,6 +249,7 @@ term bif_erlang_map_size_1(Context *ctx, int live, term arg1) UNUSED(live); if (!UNLIKELY(term_is_map(arg1))) { + // We don't need to preserve registers as we're raising if (UNLIKELY(memory_ensure_free_with_roots(ctx, 3, 1, &arg1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } @@ -265,6 +266,7 @@ term bif_erlang_map_size_1(Context *ctx, int live, term arg1) term bif_erlang_map_get_2(Context *ctx, term arg1, term arg2) { if (!UNLIKELY(term_is_map(arg2))) { + // We don't need to preserve registers as we're raising if (UNLIKELY(memory_ensure_free_with_roots(ctx, 3, 1, &arg2, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } @@ -277,6 +279,7 @@ term bif_erlang_map_get_2(Context *ctx, term arg1, term arg2) int pos = term_find_map_pos(arg2, arg1, ctx->global); if (pos == TERM_MAP_NOT_FOUND) { + // We don't need to preserve registers as we're raising if (UNLIKELY(memory_ensure_free_with_roots(ctx, 3, 1, &arg1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } @@ -291,9 +294,9 @@ term bif_erlang_map_get_2(Context *ctx, term arg1, term arg2) return term_get_map_value(arg2, pos); } -static inline term make_boxed_int(Context *ctx, avm_int_t value) +static inline term make_boxed_int(Context *ctx, uint32_t live, avm_int_t value) { - if (UNLIKELY(memory_ensure_free_opt(ctx, BOXED_INT_SIZE, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, BOXED_INT_SIZE, live, ctx->x, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } @@ -301,9 +304,9 @@ static inline term make_boxed_int(Context *ctx, avm_int_t value) } #if BOXED_TERMS_REQUIRED_FOR_INT64 > 1 -static inline term make_boxed_int64(Context *ctx, avm_int64_t value) +static inline term make_boxed_int64(Context *ctx, uint32_t live, avm_int64_t value) { - if (UNLIKELY(memory_ensure_free_opt(ctx, BOXED_INT64_SIZE, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, BOXED_INT64_SIZE, live, ctx->x, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } @@ -311,10 +314,10 @@ static inline term make_boxed_int64(Context *ctx, avm_int64_t value) } #endif -static inline term make_maybe_boxed_int(Context *ctx, avm_int_t value) +static inline term make_maybe_boxed_int(Context *ctx, uint32_t live, avm_int_t value) { if ((value < MIN_NOT_BOXED_INT) || (value > MAX_NOT_BOXED_INT)) { - return make_boxed_int(ctx, value); + return make_boxed_int(ctx, live, value); } else { return term_from_int(value); @@ -322,13 +325,13 @@ static inline term make_maybe_boxed_int(Context *ctx, avm_int_t value) } #if BOXED_TERMS_REQUIRED_FOR_INT64 > 1 -static inline term make_maybe_boxed_int64(Context *ctx, avm_int64_t value) +static inline term make_maybe_boxed_int64(Context *ctx, uint32_t live, avm_int64_t value) { if ((value < AVM_INT_MIN) || (value > AVM_INT_MAX)) { - return make_boxed_int64(ctx, value); + return make_boxed_int64(ctx, live, value); } else if ((value < MIN_NOT_BOXED_INT) || (value > MAX_NOT_BOXED_INT)) { - return make_boxed_int(ctx, value); + return make_boxed_int(ctx, live, value); } else { return term_from_int(value); @@ -336,15 +339,15 @@ static inline term make_maybe_boxed_int64(Context *ctx, avm_int64_t value) } #endif -static term add_overflow_helper(Context *ctx, term arg1, term arg2) +static term 
add_overflow_helper(Context *ctx, uint32_t live, term arg1, term arg2) { avm_int_t val1 = term_to_int(arg1); avm_int_t val2 = term_to_int(arg2); - return make_boxed_int(ctx, val1 + val2); + return make_boxed_int(ctx, live, val1 + val2); } -static term add_boxed_helper(Context *ctx, term arg1, term arg2) +static term add_boxed_helper(Context *ctx, uint32_t live, term arg1, term arg2) { int use_float = 0; int size = 0; @@ -374,7 +377,7 @@ static term add_boxed_helper(Context *ctx, term arg1, term arg2) RAISE_ERROR(BADARITH_ATOM); } - if (UNLIKELY(memory_ensure_free_opt(ctx, FLOAT_SIZE, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, FLOAT_SIZE, live, ctx->x, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } return term_from_float(fresult, &ctx->heap); @@ -394,7 +397,7 @@ static term add_boxed_helper(Context *ctx, term arg1, term arg2) if (BUILTIN_ADD_OVERFLOW_INT(val1, val2, &res)) { #if BOXED_TERMS_REQUIRED_FOR_INT64 == 2 avm_int64_t res64 = (avm_int64_t) val1 + (avm_int64_t) val2; - return make_boxed_int64(ctx, res64); + return make_boxed_int64(ctx, live, res64); #elif BOXED_TERMS_REQUIRED_FOR_INT64 == 1 TRACE("overflow: arg1: " AVM_INT64_FMT ", arg2: " AVM_INT64_FMT "\n", arg1, arg2); @@ -404,7 +407,7 @@ static term add_boxed_helper(Context *ctx, term arg1, term arg2) #endif } - return make_maybe_boxed_int(ctx, res); + return make_maybe_boxed_int(ctx, live, res); } #if BOXED_TERMS_REQUIRED_FOR_INT64 == 2 @@ -419,7 +422,7 @@ static term add_boxed_helper(Context *ctx, term arg1, term arg2) RAISE_ERROR(OVERFLOW_ATOM); } - return make_maybe_boxed_int64(ctx, res); + return make_maybe_boxed_int64(ctx, live, res); } #endif @@ -438,22 +441,22 @@ term bif_erlang_add_2(Context *ctx, int live, term arg1, term arg2) if (!BUILTIN_ADD_OVERFLOW((avm_int_t) (arg1 & ~TERM_INTEGER_TAG), (avm_int_t) (arg2 & ~TERM_INTEGER_TAG), &res)) { return res | TERM_INTEGER_TAG; } else { - return add_overflow_helper(ctx, arg1, arg2); + return add_overflow_helper(ctx, live, arg1, arg2); } } else { - return add_boxed_helper(ctx, arg1, arg2); + return add_boxed_helper(ctx, live, arg1, arg2); } } -static term sub_overflow_helper(Context *ctx, term arg1, term arg2) +static term sub_overflow_helper(Context *ctx, uint32_t live, term arg1, term arg2) { avm_int_t val1 = term_to_int(arg1); avm_int_t val2 = term_to_int(arg2); - return make_boxed_int(ctx, val1 - val2); + return make_boxed_int(ctx, live, val1 - val2); } -static term sub_boxed_helper(Context *ctx, term arg1, term arg2) +static term sub_boxed_helper(Context *ctx, uint32_t live, term arg1, term arg2) { int use_float = 0; int size = 0; @@ -482,7 +485,7 @@ static term sub_boxed_helper(Context *ctx, term arg1, term arg2) if (UNLIKELY(!isfinite(fresult))) { RAISE_ERROR(BADARITH_ATOM); } - if (UNLIKELY(memory_ensure_free_opt(ctx, FLOAT_SIZE, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, FLOAT_SIZE, live, ctx->x, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } return term_from_float(fresult, &ctx->heap); @@ -502,7 +505,7 @@ static term sub_boxed_helper(Context *ctx, term arg1, term arg2) if (BUILTIN_SUB_OVERFLOW_INT(val1, val2, &res)) { #if BOXED_TERMS_REQUIRED_FOR_INT64 == 2 avm_int64_t res64 = (avm_int64_t) val1 - (avm_int64_t) val2; - return make_boxed_int64(ctx, res64); + return make_boxed_int64(ctx, live, res64); #elif BOXED_TERMS_REQUIRED_FOR_INT64 == 1 TRACE("overflow: arg1: " AVM_INT64_FMT ", arg2: " AVM_INT64_FMT "\n", arg1, arg2); @@ 
-512,7 +515,7 @@ static term sub_boxed_helper(Context *ctx, term arg1, term arg2) #endif } - return make_maybe_boxed_int(ctx, res); + return make_maybe_boxed_int(ctx, live, res); } #if BOXED_TERMS_REQUIRED_FOR_INT64 == 2 @@ -527,7 +530,7 @@ static term sub_boxed_helper(Context *ctx, term arg1, term arg2) RAISE_ERROR(OVERFLOW_ATOM); } - return make_maybe_boxed_int64(ctx, res); + return make_maybe_boxed_int64(ctx, live, res); } #endif @@ -546,14 +549,14 @@ term bif_erlang_sub_2(Context *ctx, int live, term arg1, term arg2) if (!BUILTIN_SUB_OVERFLOW((avm_int_t) (arg1 & ~TERM_INTEGER_TAG), (avm_int_t) (arg2 & ~TERM_INTEGER_TAG), &res)) { return res | TERM_INTEGER_TAG; } else { - return sub_overflow_helper(ctx, arg1, arg2); + return sub_overflow_helper(ctx, live, arg1, arg2); } } else { - return sub_boxed_helper(ctx, arg1, arg2); + return sub_boxed_helper(ctx, live, arg1, arg2); } } -static term mul_overflow_helper(Context *ctx, term arg1, term arg2) +static term mul_overflow_helper(Context *ctx, uint32_t live, term arg1, term arg2) { avm_int_t val1 = term_to_int(arg1); avm_int_t val2 = term_to_int(arg2); @@ -564,11 +567,11 @@ static term mul_overflow_helper(Context *ctx, term arg1, term arg2) #endif if (!BUILTIN_MUL_OVERFLOW_INT(val1, val2, &res)) { - return make_boxed_int(ctx, res); + return make_boxed_int(ctx, live, res); #if BOXED_TERMS_REQUIRED_FOR_INT64 == 2 } else if (!BUILTIN_MUL_OVERFLOW_INT64((avm_int64_t) val1, (avm_int64_t) val2, &res64)) { - return make_boxed_int64(ctx, res64); + return make_boxed_int64(ctx, live, res64); #endif } else { @@ -576,7 +579,7 @@ static term mul_overflow_helper(Context *ctx, term arg1, term arg2) } } -static term mul_boxed_helper(Context *ctx, term arg1, term arg2) +static term mul_boxed_helper(Context *ctx, uint32_t live, term arg1, term arg2) { int use_float = 0; int size = 0; @@ -605,7 +608,7 @@ static term mul_boxed_helper(Context *ctx, term arg1, term arg2) if (UNLIKELY(!isfinite(fresult))) { RAISE_ERROR(BADARITH_ATOM); } - if (UNLIKELY(memory_ensure_free_opt(ctx, FLOAT_SIZE, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, FLOAT_SIZE, live, ctx->x, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } return term_from_float(fresult, &ctx->heap); @@ -625,7 +628,7 @@ static term mul_boxed_helper(Context *ctx, term arg1, term arg2) if (BUILTIN_MUL_OVERFLOW_INT(val1, val2, &res)) { #if BOXED_TERMS_REQUIRED_FOR_INT64 == 2 avm_int64_t res64 = (avm_int64_t) val1 * (avm_int64_t) val2; - return make_boxed_int64(ctx, res64); + return make_boxed_int64(ctx, live, res64); #elif BOXED_TERMS_REQUIRED_FOR_INT64 == 1 TRACE("overflow: arg1: " AVM_INT64_FMT ", arg2: " AVM_INT64_FMT "\n", arg1, arg2); @@ -635,7 +638,7 @@ static term mul_boxed_helper(Context *ctx, term arg1, term arg2) #endif } - return make_maybe_boxed_int(ctx, res); + return make_maybe_boxed_int(ctx, live, res); } #if BOXED_TERMS_REQUIRED_FOR_INT64 == 2 @@ -650,7 +653,7 @@ static term mul_boxed_helper(Context *ctx, term arg1, term arg2) RAISE_ERROR(OVERFLOW_ATOM); } - return make_maybe_boxed_int64(ctx, res); + return make_maybe_boxed_int64(ctx, live, res); } #endif @@ -670,14 +673,14 @@ term bif_erlang_mul_2(Context *ctx, int live, term arg1, term arg2) if (!BUILTIN_MUL_OVERFLOW(a, b, &res)) { return res | TERM_INTEGER_TAG; } else { - return mul_overflow_helper(ctx, arg1, arg2); + return mul_overflow_helper(ctx, live, arg1, arg2); } } else { - return mul_boxed_helper(ctx, arg1, arg2); + return mul_boxed_helper(ctx, live, arg1, arg2); } } 
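For orientation, here is how live now threads from a GC BIF opcode down to the allocator, condensed from the erlang:'+'/2 hunks above (a sketch of the existing call chain, not new code):

    // GC_BIF opcodes supply live; it becomes the root count of any GC:
    //
    //   bif_erlang_add_2(ctx, live, arg1, arg2)
    //     -> add_overflow_helper(ctx, live, arg1, arg2)  // small-int overflow
    //        -> make_boxed_int(ctx, live, val1 + val2)
    //           -> memory_ensure_free_with_roots(ctx, BOXED_INT_SIZE,
    //                                            live, ctx->x, MEMORY_CAN_SHRINK)
    //
    // Only x[0]..x[live-1] are treated as roots; a stale term left in a
    // higher register is neither preserved nor updated, which the previous
    // preserve-everything behaviour used to hide.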
-static term div_boxed_helper(Context *ctx, term arg1, term arg2) +static term div_boxed_helper(Context *ctx, uint32_t live, term arg1, term arg2) { int size = 0; if (term_is_boxed_integer(arg1)) { @@ -707,7 +710,7 @@ static term div_boxed_helper(Context *ctx, term arg1, term arg2) } else if (UNLIKELY((val2 == -1) && (val1 == AVM_INT_MIN))) { #if BOXED_TERMS_REQUIRED_FOR_INT64 == 2 - return make_boxed_int64(ctx, -((avm_int64_t) AVM_INT_MIN)); + return make_boxed_int64(ctx, live, -((avm_int64_t) AVM_INT_MIN)); #elif BOXED_TERMS_REQUIRED_FOR_INT64 == 1 TRACE("overflow: arg1: 0x%lx, arg2: 0x%lx\n", arg1, arg2); @@ -715,7 +718,7 @@ static term div_boxed_helper(Context *ctx, term arg1, term arg2) #endif } else { - return make_maybe_boxed_int(ctx, val1 / val2); + return make_maybe_boxed_int(ctx, live, val1 / val2); } } @@ -732,7 +735,7 @@ static term div_boxed_helper(Context *ctx, term arg1, term arg2) RAISE_ERROR(OVERFLOW_ATOM); } else { - return make_maybe_boxed_int64(ctx, val1 / val2); + return make_maybe_boxed_int64(ctx, live, val1 / val2); } } #endif @@ -751,7 +754,7 @@ term bif_erlang_div_2(Context *ctx, int live, term arg1, term arg2) if (operand_b != 0) { avm_int_t res = term_to_int(arg1) / operand_b; if (UNLIKELY(res == -MIN_NOT_BOXED_INT)) { - return make_boxed_int(ctx, -MIN_NOT_BOXED_INT); + return make_boxed_int(ctx, live, -MIN_NOT_BOXED_INT); } else { return term_from_int(res); @@ -761,11 +764,11 @@ term bif_erlang_div_2(Context *ctx, int live, term arg1, term arg2) } } else { - return div_boxed_helper(ctx, arg1, arg2); + return div_boxed_helper(ctx, live, arg1, arg2); } } -static term neg_boxed_helper(Context *ctx, term arg1) +static term neg_boxed_helper(Context *ctx, uint32_t live, term arg1) { if (term_is_float(arg1)) { avm_float_t farg1 = term_conv_to_float(arg1); @@ -773,7 +776,7 @@ static term neg_boxed_helper(Context *ctx, term arg1) if (UNLIKELY(!isfinite(fresult))) { RAISE_ERROR(BADARITH_ATOM); } - if (UNLIKELY(memory_ensure_free_opt(ctx, FLOAT_SIZE, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, FLOAT_SIZE, live, ctx->x, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } return term_from_float(fresult, &ctx->heap); @@ -793,7 +796,7 @@ static term neg_boxed_helper(Context *ctx, term arg1) case AVM_INT_MIN: #if BOXED_TERMS_REQUIRED_FOR_INT64 == 2 - return make_boxed_int64(ctx, -((avm_int64_t) val)); + return make_boxed_int64(ctx, live, -((avm_int64_t) val)); #elif BOXED_TERMS_REQUIRED_FOR_INT64 == 1 TRACE("overflow: val: " AVM_INT_FMT "\n", val); @@ -804,7 +807,7 @@ static term neg_boxed_helper(Context *ctx, term arg1) #endif default: - return make_boxed_int(ctx, -val); + return make_boxed_int(ctx, live, -val); } } @@ -817,7 +820,7 @@ static term neg_boxed_helper(Context *ctx, term arg1) RAISE_ERROR(OVERFLOW_ATOM); } else { - return make_boxed_int64(ctx, -val); + return make_boxed_int64(ctx, live, -val); } } #endif @@ -837,16 +840,16 @@ term bif_erlang_neg_1(Context *ctx, int live, term arg1) if (LIKELY(term_is_integer(arg1))) { avm_int_t int_val = term_to_int(arg1); if (UNLIKELY(int_val == MIN_NOT_BOXED_INT)) { - return make_boxed_int(ctx, -MIN_NOT_BOXED_INT); + return make_boxed_int(ctx, live, -MIN_NOT_BOXED_INT); } else { return term_from_int(-int_val); } } else { - return neg_boxed_helper(ctx, arg1); + return neg_boxed_helper(ctx, live, arg1); } } -static term abs_boxed_helper(Context *ctx, term arg1) +static term abs_boxed_helper(Context *ctx, uint32_t live, term arg1) { if (term_is_float(arg1)) { 
avm_float_t farg1 = term_conv_to_float(arg1); @@ -860,7 +863,7 @@ static term abs_boxed_helper(Context *ctx, term arg1) if (UNLIKELY(!isfinite(fresult))) { RAISE_ERROR(BADARITH_ATOM); } - if (UNLIKELY(memory_ensure_free_opt(ctx, FLOAT_SIZE, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, FLOAT_SIZE, live, ctx->x, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } return term_from_float(fresult, &ctx->heap); @@ -880,7 +883,7 @@ static term abs_boxed_helper(Context *ctx, term arg1) if (val == AVM_INT_MIN) { #if BOXED_TERMS_REQUIRED_FOR_INT64 == 2 - return make_boxed_int64(ctx, -((avm_int64_t) val)); + return make_boxed_int64(ctx, live, -((avm_int64_t) val)); #elif BOXED_TERMS_REQUIRED_FOR_INT64 == 1 TRACE("overflow: val: " AVM_INT_FMT "\n", val); @@ -891,7 +894,7 @@ static term abs_boxed_helper(Context *ctx, term arg1) #endif } else { - return make_boxed_int(ctx, -val); + return make_boxed_int(ctx, live, -val); } } @@ -907,7 +910,7 @@ static term abs_boxed_helper(Context *ctx, term arg1) RAISE_ERROR(OVERFLOW_ATOM); } else { - return make_boxed_int64(ctx, -val); + return make_boxed_int64(ctx, live, -val); } } #endif @@ -929,7 +932,7 @@ term bif_erlang_abs_1(Context *ctx, int live, term arg1) if (int_val < 0) { if (UNLIKELY(int_val == MIN_NOT_BOXED_INT)) { - return make_boxed_int(ctx, -MIN_NOT_BOXED_INT); + return make_boxed_int(ctx, live, -MIN_NOT_BOXED_INT); } else { return term_from_int(-int_val); } @@ -938,11 +941,11 @@ term bif_erlang_abs_1(Context *ctx, int live, term arg1) } } else { - return abs_boxed_helper(ctx, arg1); + return abs_boxed_helper(ctx, live, arg1); } } -static term rem_boxed_helper(Context *ctx, term arg1, term arg2) +static term rem_boxed_helper(Context *ctx, uint32_t live, term arg1, term arg2) { int size = 0; if (term_is_boxed_integer(arg1)) { @@ -971,7 +974,7 @@ static term rem_boxed_helper(Context *ctx, term arg1, term arg2) RAISE_ERROR(BADARITH_ATOM); } - return make_maybe_boxed_int(ctx, val1 % val2); + return make_maybe_boxed_int(ctx, live, val1 % val2); } #if BOXED_TERMS_REQUIRED_FOR_INT64 == 2 @@ -983,7 +986,7 @@ static term rem_boxed_helper(Context *ctx, term arg1, term arg2) RAISE_ERROR(BADARITH_ATOM); } - return make_maybe_boxed_int64(ctx, val1 % val2); + return make_maybe_boxed_int64(ctx, live, val1 % val2); } #endif @@ -1006,7 +1009,7 @@ term bif_erlang_rem_2(Context *ctx, int live, term arg1, term arg2) } } else { - return rem_boxed_helper(ctx, arg1, arg2); + return rem_boxed_helper(ctx, live, arg1, arg2); } } @@ -1028,9 +1031,9 @@ term bif_erlang_ceil_1(Context *ctx, int live, term arg1) #endif #if BOXED_TERMS_REQUIRED_FOR_INT64 > 1 - return make_maybe_boxed_int64(ctx, result); + return make_maybe_boxed_int64(ctx, live, result); #else - return make_maybe_boxed_int(ctx, result); + return make_maybe_boxed_int(ctx, live, result); #endif } @@ -1060,9 +1063,9 @@ term bif_erlang_floor_1(Context *ctx, int live, term arg1) #endif #if BOXED_TERMS_REQUIRED_FOR_INT64 > 1 - return make_maybe_boxed_int64(ctx, result); + return make_maybe_boxed_int64(ctx, live, result); #else - return make_maybe_boxed_int(ctx, result); + return make_maybe_boxed_int(ctx, live, result); #endif } @@ -1092,9 +1095,9 @@ term bif_erlang_round_1(Context *ctx, int live, term arg1) #endif #if BOXED_TERMS_REQUIRED_FOR_INT64 > 1 - return make_maybe_boxed_int64(ctx, result); + return make_maybe_boxed_int64(ctx, live, result); #else - return make_maybe_boxed_int(ctx, result); + return make_maybe_boxed_int(ctx, live, result); #endif } 
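The #if ladder used by the ceil/floor/round hunks above (and trunc below) picks the wider boxing helper only where the platform requires it; a short summary, assuming avm_int_t is the native word as elsewhere in libAtomVM:

    // BOXED_TERMS_REQUIRED_FOR_INT64 == 1 (64-bit builds): every int64
    // result fits one boxed word, so make_maybe_boxed_int(ctx, live, result)
    // covers all cases.
    // BOXED_TERMS_REQUIRED_FOR_INT64 == 2 (32-bit builds): a result wider
    // than avm_int_t needs two boxed words, so these BIFs go through
    // make_maybe_boxed_int64(ctx, live, result) instead.
    // Either way, values within [MIN_NOT_BOXED_INT, MAX_NOT_BOXED_INT] stay
    // immediate terms: no allocation, hence no GC and no use of live.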
@@ -1124,9 +1127,9 @@ term bif_erlang_trunc_1(Context *ctx, int live, term arg1) #endif #if BOXED_TERMS_REQUIRED_FOR_INT64 > 1 - return make_maybe_boxed_int64(ctx, result); + return make_maybe_boxed_int64(ctx, live, result); #else - return make_maybe_boxed_int(ctx, result); + return make_maybe_boxed_int(ctx, live, result); #endif } @@ -1153,9 +1156,9 @@ static inline term bitwise_helper(Context *ctx, int live, term arg1, term arg2, int64_t result = op(a, b); #if BOXED_TERMS_REQUIRED_FOR_INT64 > 1 - return make_maybe_boxed_int64(ctx, result); + return make_maybe_boxed_int64(ctx, live, result); #else - return make_maybe_boxed_int(ctx, result); + return make_maybe_boxed_int(ctx, live, result); #endif } @@ -1216,9 +1219,9 @@ static inline term bitshift_helper(Context *ctx, int live, term arg1, term arg2, int64_t result = op(a, b); #if BOXED_TERMS_REQUIRED_FOR_INT64 > 1 - return make_maybe_boxed_int64(ctx, result); + return make_maybe_boxed_int64(ctx, live, result); #else - return make_maybe_boxed_int(ctx, result); + return make_maybe_boxed_int(ctx, live, result); #endif } diff --git a/src/libAtomVM/memory.c b/src/libAtomVM/memory.c index e21557ab5..2aab5d63f 100644 --- a/src/libAtomVM/memory.c +++ b/src/libAtomVM/memory.c @@ -248,13 +248,6 @@ static enum MemoryGCResult memory_gc(Context *ctx, size_t new_size, size_t num_r term *new_heap = ctx->heap.heap_start; TRACE("- Allocated %i words for new heap at address 0x%p\n", (int) new_size, (void *) new_heap); - TRACE("- Running copy GC on registers\n"); - for (int i = 0; i < MAX_REG; i++) { - term new_root = memory_shallow_copy_term(old_root_fragment, ctx->x[i], &ctx->heap.heap_ptr, true); - ctx->x[i] = new_root; - } - TRACE("- after registers, heap.heap_ptr now is at %p, heap.heap_start = %p\n", (void *) ctx->heap.heap_ptr, (void *) ctx->heap.heap_start); - TRACE("- Running copy GC on stack (stack size: %i)\n", (int) (old_stack_ptr - ctx->e)); term *stack_ptr = new_heap + new_size; while (old_stack_ptr > ctx->e) { diff --git a/src/libAtomVM/nifs.c b/src/libAtomVM/nifs.c index b9b76689c..000828317 100644 --- a/src/libAtomVM/nifs.c +++ b/src/libAtomVM/nifs.c @@ -41,6 +41,7 @@ #include "globalcontext.h" #include "interop.h" #include "mailbox.h" +#include "memory.h" #include "module.h" #include "platform_nifs.h" #include "port.h" @@ -1370,7 +1371,7 @@ static term nif_erlang_concat_2(Context *ctx, int argc, term argv[]) if (UNLIKELY(!proper)) { RAISE_ERROR(BADARG_ATOM); } - if (UNLIKELY(memory_ensure_free_opt(ctx, len * 2, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, len * 2, argc, argv, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } @@ -1639,7 +1640,7 @@ static term nif_erlang_insert_element_3(Context *ctx, int argc, term argv[]) } int new_tuple_size = old_tuple_size + 1; - if (UNLIKELY(memory_ensure_free_opt(ctx, new_tuple_size + 1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, new_tuple_size + 1, 2, argv + 1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } term new_tuple = term_alloc_tuple(new_tuple_size, &ctx->heap); @@ -1677,7 +1678,7 @@ static term nif_erlang_delete_element_2(Context *ctx, int argc, term argv[]) } int new_tuple_size = old_tuple_size - 1; - if (UNLIKELY(memory_ensure_free_opt(ctx, new_tuple_size + 1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, new_tuple_size + 1, 1, argv + 1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } 
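The nifs.c hunks above and below share one shape, relying on memory_ensure_free_with_roots rewriting its roots in place. A sketch of the pattern (bin_size stands in for whatever size the surrounding function computed; the binary_to_list hunk below is the real instance):

    // Take a local copy of the argument, root it, then use the updated copy.
    term value = argv[0];
    if (UNLIKELY(memory_ensure_free_with_roots(ctx, bin_size * 2, 1, &value,
            MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
        RAISE_ERROR(OUT_OF_MEMORY_ATOM);
    }
    // If a GC ran, value now points into the new heap; argv[0] (an x
    // register) may be stale and must not be read again.
    const uint8_t *bin_data = (const uint8_t *) term_binary_data(value);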
term new_tuple = term_alloc_tuple(new_tuple_size, &ctx->heap); @@ -1711,7 +1712,7 @@ static term nif_erlang_setelement_3(Context *ctx, int argc, term argv[]) RAISE_ERROR(BADARG_ATOM); } - if (UNLIKELY(memory_ensure_free_opt(ctx, tuple_size + 1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, tuple_size + 1, 2, argv + 1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } term new_tuple = term_alloc_tuple(tuple_size, &ctx->heap); @@ -1735,7 +1736,7 @@ static term nif_erlang_tuple_to_list_1(Context *ctx, int argc, term argv[]) int tuple_size = term_get_tuple_arity(argv[0]); - if (UNLIKELY(memory_ensure_free_opt(ctx, tuple_size * 2, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, tuple_size * 2, 1, argv, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } @@ -1761,7 +1762,7 @@ static term nif_erlang_list_to_tuple_1(Context *ctx, int argc, term argv[]) RAISE_ERROR(BADARG_ATOM); } - if (UNLIKELY(memory_ensure_free_opt(ctx, TUPLE_SIZE(len), MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, TUPLE_SIZE(len), 1, argv, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } term tuple = term_alloc_tuple(len, &ctx->heap); @@ -1911,11 +1912,11 @@ static term nif_erlang_binary_to_list_1(Context *ctx, int argc, term argv[]) VALIDATE_VALUE(value, term_is_binary); int bin_size = term_binary_size(value); - if (UNLIKELY(memory_ensure_free_opt(ctx, bin_size * 2, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, bin_size * 2, 1, &value, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } - const uint8_t *bin_data = (const uint8_t *) term_binary_data(argv[0]); + const uint8_t *bin_data = (const uint8_t *) term_binary_data(value); term prev = term_nil(); for (int i = bin_size - 1; i >= 0; i--) { @@ -2357,7 +2358,7 @@ static term nif_erlang_list_to_binary_1(Context *ctx, int argc, term argv[]) buf_allocated = false; } - if (UNLIKELY(memory_ensure_free_opt(ctx, term_binary_heap_size(bin_size), MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, term_binary_heap_size(bin_size), 1, argv, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { if (buf_allocated) { free(bin_buf); } @@ -2885,10 +2886,10 @@ static term nif_binary_part_3(Context *ctx, int argc, term argv[]) } size_t size = term_sub_binary_heap_size(bin_term, len); - if (UNLIKELY(memory_ensure_free_opt(ctx, size, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, size, 1, &bin_term, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } - return term_maybe_create_sub_binary(argv[0], pos, len, &ctx->heap, ctx->global); + return term_maybe_create_sub_binary(bin_term, pos, len, &ctx->heap, ctx->global); } static term nif_binary_split_2(Context *ctx, int argc, term argv[]) @@ -2923,7 +2924,7 @@ static term nif_binary_split_2(Context *ctx, int argc, term argv[]) size_t rest_size_in_terms = term_sub_binary_heap_size(bin_term, rest_size); // + 4 which is the result cons - if (UNLIKELY(memory_ensure_free_opt(ctx, tok_size_in_terms + rest_size_in_terms + 4, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, tok_size_in_terms + rest_size_in_terms + 4, 1, argv, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } @@ -2937,7 +2938,7 @@ static term nif_binary_split_2(Context *ctx, int argc, 
term argv[]) return result_list; } else { - if (UNLIKELY(memory_ensure_free_opt(ctx, 2, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, 2, 1, argv, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } @@ -3268,9 +3269,10 @@ static term nif_erlang_monitor(Context *ctx, int argc, term argv[]) { UNUSED(argc); + term object_type = argv[0]; term target_pid = argv[1]; - if (argv[0] != PROCESS_ATOM && argv[0] != PORT_ATOM) { + if (object_type != PROCESS_ATOM && object_type != PORT_ATOM) { RAISE_ERROR(BADARG_ATOM); } @@ -3288,14 +3290,14 @@ static term nif_erlang_monitor(Context *ctx, int argc, term argv[]) term down_message_tuple = term_alloc_tuple(5, &ctx->heap); term_put_tuple_element(down_message_tuple, 0, DOWN_ATOM); term_put_tuple_element(down_message_tuple, 1, ref); - term_put_tuple_element(down_message_tuple, 2, argv[0]); - term_put_tuple_element(down_message_tuple, 3, argv[1]); + term_put_tuple_element(down_message_tuple, 2, object_type); + term_put_tuple_element(down_message_tuple, 3, target_pid); term_put_tuple_element(down_message_tuple, 4, NOPROC_ATOM); mailbox_send(ctx, down_message_tuple); return ref; } - if ((argv[0] == PROCESS_ATOM && target->native_handler != NULL) || (argv[0] == PORT_ATOM && target->native_handler == NULL)) { + if ((object_type == PROCESS_ATOM && target->native_handler != NULL) || (object_type == PORT_ATOM && target->native_handler == NULL)) { RAISE_ERROR(BADARG_ATOM); } term callee_pid = term_from_local_process_id(ctx->process_id); @@ -3922,12 +3924,12 @@ static term base64_encode(Context *ctx, int argc, term argv[], bool return_binar size_t heap_free = return_binary ? term_binary_heap_size(dst_size_with_pad) : 2*dst_size_with_pad; - if (UNLIKELY(memory_ensure_free_opt(ctx, heap_free, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, heap_free, 1, &src, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } // src may have been invalidated by GC - if (term_is_binary(argv[0])) { - src_pos = (uint8_t *) term_binary_data(argv[0]); + if (term_is_binary(src)) { + src_pos = (uint8_t *) term_binary_data(src); } term dst; uint8_t *dst_pos; @@ -4070,7 +4072,7 @@ static term base64_decode(Context *ctx, int argc, term argv[], bool return_binar size_t heap_free = return_binary ? 
term_binary_heap_size(dst_size) : 2*dst_size; - if (UNLIKELY(memory_ensure_free_opt(ctx, heap_free, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, heap_free, 1, &src, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } term dst = term_invalid_term(); @@ -4086,8 +4088,8 @@ static term base64_decode(Context *ctx, int argc, term argv[], bool return_binar } dst_pos = dst_buf; } - if (term_is_binary(argv[0])) { - src_pos = (uint8_t *) term_binary_data(argv[0]); + if (term_is_binary(src)) { + src_pos = (uint8_t *) term_binary_data(src); } size_t n = src_size - pad; for (size_t i = 0; i < n; ++i) { @@ -4265,12 +4267,11 @@ static term nif_maps_next(Context *ctx, int argc, term argv[]) return NONE_ATOM; } - if (UNLIKELY(memory_ensure_free_opt(ctx, 6, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, 6, 1, &iterator, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } // recompute all the terms we need (after possible GC) - iterator = argv[0]; map = term_get_list_tail(iterator); term key = term_get_map_key(map, pos); term value = term_get_map_value(map, pos); @@ -4316,7 +4317,7 @@ static term nif_unicode_characters_to_list(Context *ctx, int argc, term argv[]) free(chars); RAISE_ERROR(BADARG_ATOM); } - if (UNLIKELY(memory_ensure_free(ctx, needed_terms) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, needed_terms, 1, argv, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { free(chars); RAISE_ERROR(OUT_OF_MEMORY_ATOM); } @@ -4376,7 +4377,7 @@ static term nif_unicode_characters_to_binary(Context *ctx, int argc, term argv[] if (UNLIKELY(conv_result == UnicodeBadArg)) { RAISE_ERROR(BADARG_ATOM); } - if (UNLIKELY(memory_ensure_free(ctx, needed_terms) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, needed_terms, 1, argv, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } term result = term_create_uninitialized_binary(len, &ctx->heap, ctx->global); diff --git a/src/libAtomVM/opcodesswitch.h b/src/libAtomVM/opcodesswitch.h index c869fb4da..0b5cefc2c 100644 --- a/src/libAtomVM/opcodesswitch.h +++ b/src/libAtomVM/opcodesswitch.h @@ -229,6 +229,14 @@ typedef dreg_t dreg_gc_safe_t; } \ } +#define DECODE_NIL(decode_pc) \ +{ \ + if ((*(decode_pc)++) != COMPACT_ATOM) { \ + fprintf(stderr, "Unexpected operand, expected nil, got %x\n", (decode_pc)[-1]); \ + AVM_ABORT(); \ + } \ +} + #ifdef ENABLE_TRACE #define DECODE_DEST_REGISTER(dreg, decode_pc) \ @@ -600,6 +608,11 @@ typedef struct decode_pc++; \ } +#define DECODE_NIL(decode_pc) \ +{ \ + decode_pc++; \ +} + #define DECODE_DEST_REGISTER(dreg, decode_pc) \ { \ uint8_t first_byte = *(decode_pc)++; \ @@ -946,7 +959,7 @@ typedef struct PROCESS_MAYBE_TRAP_RETURN_VALUE(return_value); \ x_regs[0] = return_value; \ if (ctx->heap.root->next) { \ - if (UNLIKELY(memory_ensure_free_opt(ctx, 0, MEMORY_FORCE_SHRINK) != MEMORY_GC_OK)) { \ + if (UNLIKELY(memory_ensure_free_with_roots(ctx, 0, 1, x_regs, MEMORY_FORCE_SHRINK) != MEMORY_GC_OK)) { \ RAISE_ERROR(OUT_OF_MEMORY_ATOM); \ } \ } \ @@ -1131,7 +1144,7 @@ COLD_FUNC static void dump(Context *ctx) fprintf(stderr, "\n"); fprintf(stderr, "\nStacktrace:\n"); - term_display(stderr, stacktrace_build(ctx, &ctx->x[2]), ctx); + term_display(stderr, stacktrace_build(ctx, &ctx->x[2], 3), ctx); fprintf(stderr, "\n\n"); { @@ -1174,13 +1187,6 @@ COLD_FUNC static void dump(Context *ctx) ct++; } - fprintf(stderr, "\n\nRegisters\n----------"); - for (int i = 0; i < 16; 
i++) { - fprintf(stderr, "\nx[%i]: ", i); - term_display(stderr, ctx->x[i], ctx); - } - fprintf(stderr, "\n"); - fprintf(stderr, "\n\nMailbox\n--------\n"); mailbox_crashdump(ctx); @@ -1307,12 +1313,12 @@ static term large_integer_to_term(Context *ctx, int num_bytes, const uint8_t *co } } -term make_fun(Context *ctx, const Module *mod, int fun_index) +term make_fun(Context *ctx, const Module *mod, int fun_index, term argv[]) { uint32_t n_freeze = module_get_fun_freeze(mod, fun_index); int size = BOXED_FUN_SIZE + n_freeze; - if (memory_ensure_free_opt(ctx, size, MEMORY_CAN_SHRINK) != MEMORY_GC_OK) { + if (memory_ensure_free_with_roots(ctx, size, n_freeze, argv, MEMORY_CAN_SHRINK) != MEMORY_GC_OK) { return term_invalid_term(); } term *boxed_func = memory_heap_alloc(&ctx->heap, size); @@ -1322,7 +1328,7 @@ term make_fun(Context *ctx, const Module *mod, int fun_index) boxed_func[2] = term_from_int(fun_index); for (uint32_t i = 3; i < n_freeze + 3; i++) { - boxed_func[i] = ctx->x[i - 3]; + boxed_func[i] = argv[i - 3]; } return ((term) boxed_func) | TERM_BOXED_VALUE_TAG; @@ -1747,7 +1753,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) PROCESS_MAYBE_TRAP_RETURN_VALUE_RESTORE_PC(return_value, orig_pc); x_regs[0] = return_value; if (ctx->heap.root->next) { - if (UNLIKELY(memory_ensure_free_opt(ctx, 0, MEMORY_FORCE_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, 0, 1, x_regs, MEMORY_FORCE_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } } @@ -1836,7 +1842,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) ctx->e += (n_words + 1); if (ctx->heap.root->next) { - if (UNLIKELY(memory_ensure_free_opt(ctx, 0, MEMORY_FORCE_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, 0, 1, x_regs, MEMORY_FORCE_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } } @@ -2007,10 +2013,8 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) #endif #ifdef IMPL_EXECUTE_LOOP - context_clean_registers(ctx, live); - if (ctx->heap.root->next || ((ctx->heap.heap_ptr > ctx->e - (stack_need + 1)))) { - if (UNLIKELY(memory_ensure_free_opt(ctx, stack_need + 1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, stack_need + 1, live, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } } @@ -2040,10 +2044,8 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) #endif #ifdef IMPL_EXECUTE_LOOP - context_clean_registers(ctx, live); - if (ctx->heap.root->next || ((ctx->heap.heap_ptr + heap_need) > ctx->e - (stack_need + 1))) { - if (UNLIKELY(memory_ensure_free_opt(ctx, heap_need + stack_need + 1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, heap_need + stack_need + 1, live, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } } @@ -2070,10 +2072,8 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) #endif #ifdef IMPL_EXECUTE_LOOP - context_clean_registers(ctx, live); - if (ctx->heap.root->next || ((ctx->heap.heap_ptr > ctx->e - (stack_need + 1)))) { - if (UNLIKELY(memory_ensure_free_opt(ctx, stack_need + 1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, stack_need + 1, live, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } } @@ -2108,10 +2108,8 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) #endif #ifdef IMPL_EXECUTE_LOOP - context_clean_registers(ctx, live); - if (ctx->heap.root->next || 
((ctx->heap.heap_ptr + heap_need) > ctx->e - (stack_need + 1))) { - if (UNLIKELY(memory_ensure_free_opt(ctx, heap_need + stack_need + 1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, heap_need + stack_need + 1, live, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } } @@ -2139,15 +2137,13 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) size_t heap_free = context_avail_free_memory(ctx); // if we need more heap space than is currently free, then try to GC the needed space if (heap_free < heap_need) { - context_clean_registers(ctx, live_registers); - if (UNLIKELY(memory_ensure_free_opt(ctx, heap_need, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, heap_need, live_registers, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } // otherwise, there is enough space for the needed heap, but there might // more more than necessary. In that case, try to shrink the heap. } else if (heap_free > heap_need * HEAP_NEED_GC_SHRINK_THRESHOLD_COEFF) { - context_clean_registers(ctx, live_registers); - if (UNLIKELY(memory_ensure_free_opt(ctx, heap_need * (HEAP_NEED_GC_SHRINK_THRESHOLD_COEFF / 2), MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, heap_need * (HEAP_NEED_GC_SHRINK_THRESHOLD_COEFF / 2), live_registers, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { TRACE("Unable to ensure free memory. heap_need=%i\n", heap_need); RAISE_ERROR(OUT_OF_MEMORY_ATOM); } @@ -2181,8 +2177,9 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) ctx->cp = ctx->e[n_words]; ctx->e += n_words + 1; DEBUG_DUMP_STACK(ctx); + // Hopefully, we only need x[0] if (ctx->heap.root->next) { - if (UNLIKELY(memory_ensure_free_opt(ctx, 0, MEMORY_FORCE_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, 0, 1, x_regs, MEMORY_FORCE_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } } @@ -2205,7 +2202,6 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) break; } - //TODO: implement send/0 case OP_SEND: { #ifdef IMPL_CODE_LOADER TRACE("send/0\n"); @@ -3043,7 +3039,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) #endif #ifdef IMPL_CODE_LOADER - TRACE("get_list/2\n"); + TRACE("get_list/3\n"); UNUSED(src_value) #endif break; @@ -3172,13 +3168,6 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) #endif case OP_BADMATCH: { - #ifdef IMPL_EXECUTE_LOOP - // We can gc as we are raising - if (UNLIKELY(memory_ensure_free_opt(ctx, TUPLE_SIZE(2), MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { - RAISE_ERROR(OUT_OF_MEMORY_ATOM); - } - #endif - term arg1; DECODE_COMPACT_TERM(arg1, pc) @@ -3190,6 +3179,11 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) #ifdef IMPL_EXECUTE_LOOP TRACE("badmatch/1, v=0x%lx\n", arg1); + // We can gc as we are raising + if (UNLIKELY(memory_ensure_free_with_roots(ctx, TUPLE_SIZE(2), 1, &arg1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + RAISE_ERROR(OUT_OF_MEMORY_ATOM); + } + term new_error_tuple = term_alloc_tuple(2, &ctx->heap); term_put_tuple_element(new_error_tuple, 0, BADMATCH_ATOM); term_put_tuple_element(new_error_tuple, 1, arg1); @@ -3212,13 +3206,6 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) } case OP_CASE_END: { - #ifdef IMPL_EXECUTE_LOOP - // We can gc as we are raising - if (UNLIKELY(memory_ensure_free_opt(ctx, TUPLE_SIZE(2), MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { - RAISE_ERROR(OUT_OF_MEMORY_ATOM); - } - #endif - term arg1; DECODE_COMPACT_TERM(arg1, pc) 
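The badmatch and case_end hunks here move the allocation after the operand decode. A condensed view of the resulting execute-loop code (macros as in the hunks), showing why the order now matters:

    term arg1;
    DECODE_COMPACT_TERM(arg1, pc)
    // GC is acceptable since we are raising, but it may move what arg1
    // references; rooting &arg1 lets the collector rewrite it before the
    // error tuple is built. Under the old preserve-all-registers GC it was
    // safe to collect before decoding; now decode first, then root.
    if (UNLIKELY(memory_ensure_free_with_roots(ctx, TUPLE_SIZE(2), 1, &arg1,
            MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
        RAISE_ERROR(OUT_OF_MEMORY_ATOM);
    }
    term new_error_tuple = term_alloc_tuple(2, &ctx->heap);
    term_put_tuple_element(new_error_tuple, 0, BADMATCH_ATOM);
    term_put_tuple_element(new_error_tuple, 1, arg1);
    RAISE_ERROR(new_error_tuple);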
@@ -3230,6 +3217,11 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) #ifdef IMPL_EXECUTE_LOOP TRACE("case_end/1, v=0x%lx\n", arg1); + // We can gc as we are raising + if (UNLIKELY(memory_ensure_free_with_roots(ctx, TUPLE_SIZE(2), 1, &arg1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + RAISE_ERROR(OUT_OF_MEMORY_ATOM); + } + term new_error_tuple = term_alloc_tuple(2, &ctx->heap); term_put_tuple_element(new_error_tuple, 0, CASE_CLAUSE_ATOM); term_put_tuple_element(new_error_tuple, 1, arg1); @@ -3256,12 +3248,12 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) term fun = x_regs[args_count]; if (UNLIKELY(!term_is_function(fun))) { // We can gc as we are raising - if (UNLIKELY(memory_ensure_free_opt(ctx, TUPLE_SIZE(2), MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, TUPLE_SIZE(2), 1, &fun, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } term new_error_tuple = term_alloc_tuple(2, &ctx->heap); term_put_tuple_element(new_error_tuple, 0, BADFUN_ATOM); - term_put_tuple_element(new_error_tuple, 1, x_regs[args_count]); + term_put_tuple_element(new_error_tuple, 1, fun); RAISE_ERROR(new_error_tuple); } @@ -3325,7 +3317,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) x_regs[0] = return_value; if (ctx->heap.root->next) { - if (UNLIKELY(memory_ensure_free_opt(ctx, 0, MEMORY_FORCE_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, 0, 1, x_regs, MEMORY_FORCE_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } } @@ -3380,7 +3372,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) TRACE("make_fun/2, fun_index=%i\n", fun_index); #ifdef IMPL_EXECUTE_LOOP - term f = make_fun(ctx, mod, fun_index); + term f = make_fun(ctx, mod, fun_index, x_regs); if (term_is_invalid_term(f)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } else { @@ -3433,13 +3425,6 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) } case OP_TRY_CASE_END: { - #ifdef IMPL_EXECUTE_LOOP - // We can gc as we are raising - if (UNLIKELY(memory_ensure_free_opt(ctx, TUPLE_SIZE(2), MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { - RAISE_ERROR(OUT_OF_MEMORY_ATOM); - } - #endif - term arg1; DECODE_COMPACT_TERM(arg1, pc) @@ -3451,6 +3436,11 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) #ifdef IMPL_EXECUTE_LOOP TRACE("try_case_end/1, val=%lx\n", arg1); + // We can gc as we are raising + if (UNLIKELY(memory_ensure_free_with_roots(ctx, TUPLE_SIZE(2), 1, &arg1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + RAISE_ERROR(OUT_OF_MEMORY_ATOM); + } + term new_error_tuple = term_alloc_tuple(2, &ctx->heap); term_put_tuple_element(new_error_tuple, 0, TRY_CLAUSE_ATOM); term_put_tuple_element(new_error_tuple, 1, arg1); @@ -3518,9 +3508,9 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) break; case ERROR_ATOM_INDEX: { - x_regs[2] = stacktrace_build(ctx, &x_regs[2]); + x_regs[2] = stacktrace_build(ctx, &x_regs[2], 3); // MEMORY_CAN_SHRINK because catch_end is classified as gc in beam_ssa_codegen.erl - if (UNLIKELY(memory_ensure_free_opt(ctx, TUPLE_SIZE(2) * 2, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, TUPLE_SIZE(2) * 2, 2, x_regs + 1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } term reason_tuple = term_alloc_tuple(2, &ctx->heap); @@ -3535,7 +3525,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) } case LOWERCASE_EXIT_ATOM_INDEX: { // MEMORY_CAN_SHRINK because catch_end is classified as gc in beam_ssa_codegen.erl - if 
(UNLIKELY(memory_ensure_free_opt(ctx, TUPLE_SIZE(2), MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, TUPLE_SIZE(2), 1, x_regs + 1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } term exit_tuple = term_alloc_tuple(2, &ctx->heap); @@ -3546,7 +3536,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) break; } } -#endif + #endif break; } @@ -3587,9 +3577,8 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) DECODE_COMPACT_TERM(size, pc) uint32_t words; DECODE_LITERAL(words, pc) - uint32_t regs; - UNUSED(regs); - DECODE_LITERAL(regs, pc) + uint32_t live; + DECODE_LITERAL(live, pc) term flags; UNUSED(flags); DECODE_COMPACT_TERM(flags, pc) @@ -3602,7 +3591,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) VERIFY_IS_INTEGER(size, "bs_init2"); avm_int_t size_val = term_to_int(size); - if (UNLIKELY(memory_ensure_free_opt(ctx, words + term_binary_heap_size(size_val), MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, words + term_binary_heap_size(size_val), live, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } term t = term_create_empty_binary(size_val, &ctx->heap, ctx->global); @@ -3615,7 +3604,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) DECODE_DEST_REGISTER(dreg, pc); #ifdef IMPL_EXECUTE_LOOP - TRACE("bs_init2/6, fail=%u size=%li words=%u regs=%u dreg=%c%i\n", (unsigned) fail, size_val, (unsigned) words, (unsigned) regs, T_DEST_REG(dreg)); + TRACE("bs_init2/6, fail=%u size=%li words=%u live=%u dreg=%c%i\n", (unsigned) fail, size_val, (unsigned) words, (unsigned) live, T_DEST_REG(dreg)); WRITE_REGISTER(dreg, t); #endif break; @@ -3628,8 +3617,8 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) DECODE_COMPACT_TERM(size, pc) uint32_t words; DECODE_LITERAL(words, pc) - uint32_t regs; - DECODE_LITERAL(regs, pc) + uint32_t live; + DECODE_LITERAL(live, pc) uint32_t flags_value; DECODE_LITERAL(flags_value, pc) @@ -3649,7 +3638,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) RAISE_ERROR(UNSUPPORTED_ATOM); } - if (UNLIKELY(memory_ensure_free_opt(ctx, words + term_binary_heap_size(size_val / 8), MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, words + term_binary_heap_size(size_val / 8), live, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } term t = term_create_empty_binary(size_val / 8, &ctx->heap, ctx->global); @@ -3662,7 +3651,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) DECODE_DEST_REGISTER(dreg, pc); #ifdef IMPL_EXECUTE_LOOP - TRACE("bs_init_bits/6, fail=%i size=%li words=%i regs=%i dreg=%c%i\n", fail, size_val, words, regs, T_DEST_REG(dreg)); + TRACE("bs_init_bits/6, fail=%i size=%li words=%i live=%u dreg=%c%i\n", fail, size_val, words, (unsigned) live, T_DEST_REG(dreg)); WRITE_REGISTER(dreg, t); #endif break; @@ -4078,9 +4067,9 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) term extra; UNUSED(extra); DECODE_COMPACT_TERM(extra, pc) - term live; + uint32_t live; UNUSED(live); - DECODE_COMPACT_TERM(live, pc) + DECODE_LITERAL(live, pc); uint32_t unit; DECODE_LITERAL(unit, pc); term src; @@ -4114,7 +4103,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) size_t src_size = term_binary_size(src); // TODO: further investigate extra_val - if (UNLIKELY(memory_ensure_free_opt(ctx, src_size + term_binary_heap_size(size_val / 8) + extra_val, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if 
(UNLIKELY(memory_ensure_free_with_roots(ctx, src_size + term_binary_heap_size(size_val / 8) + extra_val, live, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } #endif @@ -4321,24 +4310,21 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) case OP_BS_START_MATCH2: { uint32_t fail; DECODE_LABEL(fail, pc) - #ifdef IMPL_EXECUTE_LOOP - const uint8_t *src_pc = pc; - #endif term src; DECODE_COMPACT_TERM(src, pc); - term arg2; - DECODE_COMPACT_TERM(arg2, pc); + uint32_t live; + DECODE_LITERAL(live, pc); term slots_term; DECODE_COMPACT_TERM(slots_term, pc); #ifdef IMPL_EXECUTE_LOOP int slots = term_to_int(slots_term); // MEMORY_CAN_SHRINK because bs_start_match is classified as gc in beam_ssa_codegen.erl - if (memory_ensure_free_opt(ctx, TERM_BOXED_BIN_MATCH_STATE_SIZE + slots, MEMORY_CAN_SHRINK) != MEMORY_GC_OK) { + x_regs[live] = src; + if (memory_ensure_free_with_roots(ctx, TERM_BOXED_BIN_MATCH_STATE_SIZE + slots, live + 1, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } - - DECODE_COMPACT_TERM(src, src_pc); + src = x_regs[live]; #endif dreg_t dreg; @@ -4349,7 +4335,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) #endif #ifdef IMPL_EXECUTE_LOOP - TRACE("bs_start_match2/5, fail=%i src=0x%lx arg2=0x%lx arg3=0x%lx dreg=%c%i\n", fail, src, arg2, slots_term, T_DEST_REG(dreg)); + TRACE("bs_start_match2/5, fail=%i src=0x%lx live=%u arg3=0x%lx dreg=%c%i\n", fail, src, (unsigned) live, slots_term, T_DEST_REG(dreg)); if (!(term_is_binary(src) || term_is_match_state(src))) { WRITE_REGISTER(dreg, src); pc = mod->labels[fail]; @@ -4365,35 +4351,36 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) #if MAXIMUM_OTP_COMPILER_VERSION >= 22 case OP_BS_START_MATCH3: { - // MEMORY_CAN_SHRINK because bs_start_match is classified as gc in beam_ssa_codegen.erl - #ifdef IMPL_EXECUTE_LOOP - if (memory_ensure_free_opt(ctx, TERM_BOXED_BIN_MATCH_STATE_SIZE, MEMORY_CAN_SHRINK) != MEMORY_GC_OK) { - RAISE_ERROR(OUT_OF_MEMORY_ATOM); - } - #endif - uint32_t fail; DECODE_LABEL(fail, pc) term src; DECODE_COMPACT_TERM(src, pc); - term live; - DECODE_COMPACT_TERM(live, pc); - dreg_t dreg; - DECODE_DEST_REGISTER(dreg, pc); + uint32_t live; + DECODE_LITERAL(live, pc); + dreg_gc_safe_t dreg; + DECODE_DEST_REGISTER_GC_SAFE(dreg, pc); #ifdef IMPL_CODE_LOADER TRACE("bs_start_match3/4\n"); #endif #ifdef IMPL_EXECUTE_LOOP - TRACE("bs_start_match3/4, fail=%i src=0x%lx live=0x%lx dreg=%c%i\n", fail, src, live, T_DEST_REG(dreg)); + // MEMORY_CAN_SHRINK because bs_start_match is classified as gc in beam_ssa_codegen.erl + #ifdef IMPL_EXECUTE_LOOP + x_regs[live] = src; + if (memory_ensure_free_with_roots(ctx, TERM_BOXED_BIN_MATCH_STATE_SIZE, live + 1, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK) { + RAISE_ERROR(OUT_OF_MEMORY_ATOM); + } + src = x_regs[live]; + #endif + TRACE("bs_start_match3/4, fail=%i src=0x%lx live=%u dreg=%c%i\n", fail, src, live, T_DEST_REG_UNSAFE(dreg)); if (!(term_is_binary(src) || term_is_match_state(src))) { - WRITE_REGISTER(dreg, src); + WRITE_REGISTER_GC_SAFE(dreg, src); pc = mod->labels[fail]; } else { term match_state = term_alloc_bin_match_state(src, 0, &ctx->heap); - WRITE_REGISTER(dreg, match_state); + WRITE_REGISTER_GC_SAFE(dreg, match_state); } #endif break; @@ -4404,8 +4391,10 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) DECODE_COMPACT_TERM(src, pc); dreg_t dreg; DECODE_DEST_REGISTER(dreg, pc); - term live; - DECODE_COMPACT_TERM(live, pc); + // TODO: determine why we're not GC-ing here as we have live + 
uint32_t live; + UNUSED(live); + DECODE_LITERAL(live, pc); #ifdef IMPL_CODE_LOADER TRACE("bs_get_position/3\n"); @@ -4414,7 +4403,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) #ifdef IMPL_EXECUTE_LOOP VERIFY_IS_MATCH_STATE(src, "bs_get_position"); - TRACE("bs_get_position/3 src=0x%lx dreg=%c%i live=0x%lx \n", src, T_DEST_REG(dreg), live); + TRACE("bs_get_position/3 src=0x%lx dreg=%c%i live=%u\n", src, T_DEST_REG(dreg), live); avm_int_t offset = term_get_match_state_offset(src); term offset_term = term_from_int(offset); @@ -4426,14 +4415,11 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) case OP_BS_GET_TAIL: { term src; - #ifdef IMPL_EXECUTE_LOOP - const uint8_t *src_pc = pc; - #endif DECODE_COMPACT_TERM(src, pc); dreg_gc_safe_t dreg; DECODE_DEST_REGISTER_GC_SAFE(dreg, pc); - term live; - DECODE_COMPACT_TERM(live, pc); + uint32_t live; + DECODE_LITERAL(live, pc); #ifdef IMPL_CODE_LOADER TRACE("bs_get_tail/3\n"); @@ -4445,7 +4431,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) avm_int_t bs_offset = term_get_match_state_offset(src); term bs_bin = term_get_match_state_binary(src); - TRACE("bs_get_tail/3 src=0x%lx dreg=%c%i live=0x%lx \n", src, T_DEST_REG_GC_SAFE(dreg), live); + TRACE("bs_get_tail/3 src=0x%lx dreg=%c%i live=%u\n", src, T_DEST_REG_GC_SAFE(dreg), live); if (bs_offset == 0) { WRITE_REGISTER_GC_SAFE(dreg, bs_bin); @@ -4460,11 +4446,11 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) size_t new_bin_size = src_size - start_pos; size_t heap_size = term_sub_binary_heap_size(bs_bin, src_size - start_pos); - - if (UNLIKELY(memory_ensure_free_opt(ctx, heap_size, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + x_regs[live] = src; + if (UNLIKELY(memory_ensure_free_with_roots(ctx, heap_size, live + 1, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } - DECODE_COMPACT_TERM(src, src_pc); + src = x_regs[live]; bs_bin = term_get_match_state_binary(src); term t = term_maybe_create_sub_binary(bs_bin, start_pos, new_bin_size, &ctx->heap, ctx->global); WRITE_REGISTER_GC_SAFE(dreg, t); @@ -4709,8 +4695,8 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) DECODE_LABEL(fail, pc) term src; DECODE_COMPACT_TERM(src, pc); - term arg2; - DECODE_COMPACT_TERM(arg2, pc); + uint32_t live; + DECODE_LITERAL(live, pc); term size; DECODE_COMPACT_TERM(size, pc); uint32_t unit; @@ -4728,7 +4714,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) avm_int_t size_val = term_to_int(size); - TRACE("bs_get_integer2/7, fail=%u src=%p size=%u unit=%u flags=%x\n", (unsigned) fail, (void *) src, (unsigned) size_val, (unsigned) unit, (int) flags_value); + TRACE("bs_get_integer2/7, fail=%u src=%p live=%u size=%u unit=%u flags=%x\n", (unsigned) fail, (void *) src, (unsigned) size_val, (unsigned) live, (unsigned) unit, (int) flags_value); avm_int_t increment = size_val * unit; union maybe_unsigned_int64 value; @@ -4765,8 +4751,8 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) const uint8_t *src_pc = pc; #endif DECODE_COMPACT_TERM(src, pc); - term arg2; - DECODE_COMPACT_TERM(arg2, pc); + uint32_t live; + DECODE_LITERAL(live, pc); term size; DECODE_COMPACT_TERM(size, pc); uint32_t unit; @@ -4806,7 +4792,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) RAISE_ERROR(UNSUPPORTED_ATOM); } - TRACE("bs_get_binary2/7, fail=%u src=%p unit=%u\n", (unsigned) fail, (void *) bs_bin, (unsigned) unit); + TRACE("bs_get_binary2/7, fail=%u src=%p live=%u unit=%u\n", (unsigned) fail, (void *) bs_bin, (unsigned) live, (unsigned) unit); if 
((unsigned int) (bs_offset / unit + size_val) > term_binary_size(bs_bin)) { TRACE("bs_get_binary2: insufficient capacity -- bs_offset = %d, size_val = %d\n", (int) bs_offset, (int) size_val); @@ -4815,7 +4801,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) term_set_match_state_offset(src, bs_offset + size_val * unit); size_t heap_size = term_sub_binary_heap_size(bs_bin, size_val); - if (UNLIKELY(memory_ensure_free_opt(ctx, heap_size, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, heap_size, live, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } #endif @@ -4857,12 +4843,9 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) term src_bin = term_get_match_state_binary(src); int len = term_binary_size(src_bin) - offset / 8; size_t heap_size = term_sub_binary_heap_size(src_bin, len); - if (UNLIKELY(memory_ensure_free_opt(ctx, heap_size, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { + if (UNLIKELY(memory_ensure_free_with_roots(ctx, heap_size, 1, &src_bin, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } - // src might be invalid after a GC - src = READ_DEST_REGISTER_GC_SAFE(dreg); - src_bin = term_get_match_state_binary(src); bin = term_maybe_create_sub_binary(src_bin, offset / 8, len, &ctx->heap, ctx->global); } } else { @@ -5277,9 +5260,6 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) uint32_t label; DECODE_LABEL(label, pc) term src; - #ifdef IMPL_EXECUTE_LOOP - const uint8_t *src_pc = pc; - #endif DECODE_COMPACT_TERM(src, pc); dreg_gc_safe_t dreg; DECODE_DEST_REGISTER_GC_SAFE(dreg, pc); @@ -5325,10 +5305,11 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) bool is_shared = new_entries == 0; size_t heap_needed = term_map_size_in_terms_maybe_shared(new_map_size, is_shared); // MEMORY_CAN_SHRINK because put_map is classified as gc in beam_ssa_codegen.erl - if (memory_ensure_free_opt(ctx, heap_needed, MEMORY_CAN_SHRINK) != MEMORY_GC_OK) { + x_regs[live] = src; + if (memory_ensure_free_with_roots(ctx, heap_needed, live + 1, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } - DECODE_COMPACT_TERM(src, src_pc); + src = x_regs[live]; // // // @@ -5408,9 +5389,6 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) uint32_t label; DECODE_LABEL(label, pc) term src; - #ifdef IMPL_EXECUTE_LOOP - const uint8_t *src_pc = pc; - #endif DECODE_COMPACT_TERM(src, pc); dreg_gc_safe_t dreg; DECODE_DEST_REGISTER_GC_SAFE(dreg, pc); @@ -5449,10 +5427,11 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) // size_t src_size = term_get_map_size(src); // MEMORY_CAN_SHRINK because put_map is classified as gc in beam_ssa_codegen.erl - if (memory_ensure_free_opt(ctx, term_map_size_in_terms_maybe_shared(src_size, true), MEMORY_CAN_SHRINK) != MEMORY_GC_OK) { + x_regs[live] = src; + if (memory_ensure_free_with_roots(ctx, term_map_size_in_terms_maybe_shared(src_size, true), live + 1, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK) { RAISE_ERROR(OUT_OF_MEMORY_ATOM); } - DECODE_COMPACT_TERM(src, src_pc); + src = x_regs[live]; // // Create a new map of the same size as src and populate with entries from src // @@ -5883,7 +5862,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) #ifdef IMPL_EXECUTE_LOOP - x_regs[0] = stacktrace_build(ctx, &x_regs[0]); + x_regs[0] = stacktrace_build(ctx, &x_regs[0], 1); #endif break; @@ -5990,19 +5969,26 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb) } case OP_BS_START_MATCH4: { - #ifdef IMPL_EXECUTE_LOOP - // 
-            // MEMORY_CAN_SHRINK because bs_start_match is classified as gc in beam_ssa_codegen.erl
-            if (memory_ensure_free_opt(ctx, TERM_BOXED_BIN_MATCH_STATE_SIZE, MEMORY_CAN_SHRINK) != MEMORY_GC_OK) {
-                RAISE_ERROR(OUT_OF_MEMORY_ATOM);
-            }
-            #endif
-            // fail since OTP 23 might be either 'no_fail', 'resume' or a fail label
-            // we are ignoring this right now, but we might use it for future optimizations.
+            // TODO: figure out what could fail
             term fail;
             DECODE_COMPACT_TERM(fail, pc);
+            #ifdef IMPL_EXECUTE_LOOP
+            if (!term_is_integer(fail) && !term_is_atom(fail)) {
+                fprintf(stderr, "Unexpected fail term ");
+                term_display(stderr, fail, ctx);
+                fprintf(stderr, "\n");
+                AVM_ABORT();
+            }
+            #endif
             uint32_t live;
             DECODE_LITERAL(live, pc);
+            #ifdef IMPL_EXECUTE_LOOP
+            // MEMORY_CAN_SHRINK because bs_start_match is classified as gc in beam_ssa_codegen.erl
+            if (memory_ensure_free_with_roots(ctx, TERM_BOXED_BIN_MATCH_STATE_SIZE, live, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK) {
+                RAISE_ERROR(OUT_OF_MEMORY_ATOM);
+            }
+            #endif
             term src;
             DECODE_COMPACT_TERM(src, pc);
             dreg_t dreg;
@@ -6230,8 +6216,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb)
                 TRACE("bs_create_bin/6: total binary size (%li) is not evenly divisible by 8\n", binary_size);
                 RAISE_ERROR(UNSUPPORTED_ATOM);
             }
-            context_clean_registers(ctx, live);
-            if (UNLIKELY(memory_ensure_free_opt(ctx, alloc + term_binary_heap_size(binary_size / 8), MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
+            if (UNLIKELY(memory_ensure_free_with_roots(ctx, alloc + term_binary_heap_size(binary_size / 8), live, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
                 RAISE_ERROR(OUT_OF_MEMORY_ATOM);
             }
             term t = term_create_empty_binary(binary_size / 8, &ctx->heap, ctx->global);
@@ -6382,9 +6367,6 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb)
             DECODE_COMPACT_TERM(tag, pc)
             unsigned int args_count;
             DECODE_LITERAL(args_count, pc)
-            #ifdef IMPL_EXECUTE_LOOP
-            const uint8_t *fun_pc = pc;
-            #endif
             term fun;
             DECODE_COMPACT_TERM(fun, pc)

@@ -6394,11 +6376,9 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb)
 #ifdef IMPL_EXECUTE_LOOP
             if (UNLIKELY(!term_is_function(fun))) {
                 // We can gc as we are raising
-                if (UNLIKELY(memory_ensure_free_opt(ctx, TUPLE_SIZE(2), MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
+                if (UNLIKELY(memory_ensure_free_with_roots(ctx, TUPLE_SIZE(2), 1, &fun, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
                     RAISE_ERROR(OUT_OF_MEMORY_ATOM);
                 }
-                // Decode the function again after GC was possibly run
-                DECODE_COMPACT_TERM(fun, fun_pc)
                 term new_error_tuple = term_alloc_tuple(2, &ctx->heap);
                 term_put_tuple_element(new_error_tuple, 0, BADFUN_ATOM);
                 term_put_tuple_element(new_error_tuple, 1, fun);
@@ -6413,12 +6393,13 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb)
             TRACE("badrecord/1\n");

 #ifdef IMPL_EXECUTE_LOOP
+            term value;
+            DECODE_COMPACT_TERM(value, pc)
+
             // We can gc as we are raising
-            if (UNLIKELY(memory_ensure_free_opt(ctx, TUPLE_SIZE(2), MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
+            if (UNLIKELY(memory_ensure_free_with_roots(ctx, TUPLE_SIZE(2), 1, &value, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
                 RAISE_ERROR(OUT_OF_MEMORY_ATOM);
             }
-            term value;
-            DECODE_COMPACT_TERM(value, pc)
             term new_error_tuple = term_alloc_tuple(2, &ctx->heap);
             term_put_tuple_element(new_error_tuple, 0, BADRECORD_ATOM);
             term_put_tuple_element(new_error_tuple, 1, value);
@@ -6444,10 +6425,6 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb)
             DECODE_ATOM(hint, pc);
             int size;
             DECODE_LITERAL(size, pc);
-            #ifdef IMPL_EXECUTE_LOOP
-            term dst;
-            dst = term_alloc_tuple(size, &ctx->heap);
-            #endif
             term src;
             DECODE_COMPACT_TERM(src, pc);
             dreg_t dreg;
@@ -6462,6 +6439,9 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb)
             int list_len;
             DECODE_LITERAL(list_len, pc);
 #ifdef IMPL_EXECUTE_LOOP
+            term dst;
+            dst = term_alloc_tuple(size, &ctx->heap);
+
             TRACE("update_record/5 hint=%lu, size=%i, src=%p, dst=%p, updates_len=%d\n", hint, size, (void *)src, (void *)dst, list_len);
             bool reuse = hint == REUSE_ATOM;
             for (int j = 0; j < size; j++) {
@@ -6502,9 +6482,6 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb)

             int fail;
             DECODE_LABEL(fail, pc);
-            #ifdef IMPL_EXECUTE_LOOP
-            const uint8_t *match_pc = pc;
-            #endif
             term match_state;
             DECODE_COMPACT_TERM(match_state, pc);
 #ifdef IMPL_EXECUTE_LOOP
@@ -6561,7 +6538,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb)
                     }

                    case INTEGER_ATOM: {
-                        int live;
+                        uint32_t live;
                         DECODE_LITERAL(live, pc);
                         j++;
                         term flags;
@@ -6604,7 +6581,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb)
                     }

                    case BINARY_ATOM: {
-                        int live;
+                        uint32_t live;
                         DECODE_LITERAL(live, pc);
                         j++;
                         term flags;
@@ -6621,7 +6598,6 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb)
                         DECODE_LITERAL(unit, pc);
                         j++;
 #ifdef IMPL_EXECUTE_LOOP
-                        // context_clean_registers(ctx, live); // TODO: check if needed
                         int matched_bits = size * unit;
                         if (bs_offset % 8 != 0 || matched_bits % 8 != 0) {
                             TRACE("bs_match/3: Unsupported. Offset on binary read must be aligned on byte boundaries.\n");
                             goto bs_match_jump_to_fail;
                         }
                         size_t heap_size = term_sub_binary_heap_size(bs_bin, matched_bits / 8);
-                        if (UNLIKELY(memory_ensure_free_opt(ctx, heap_size, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
+                        x_regs[live] = match_state;
+                        if (UNLIKELY(memory_ensure_free_with_roots(ctx, heap_size, live + 1, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
                             RAISE_ERROR(OUT_OF_MEMORY_ATOM);
                         }
-                        // re-compute match_state as GC could have moved it
-                        const uint8_t *temp = match_pc;
-                        DECODE_COMPACT_TERM(match_state, temp);
+                        match_state = x_regs[live];
                         bs_bin = term_get_match_state_binary(match_state);
                         term t = term_maybe_create_sub_binary(bs_bin, bs_offset / 8, matched_bits / 8, &ctx->heap, ctx->global);
 #endif
@@ -6652,14 +6627,13 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb)
                     }

                    case GET_TAIL_ATOM: {
-                        int live;
+                        uint32_t live;
                         DECODE_LITERAL(live, pc);
                         j++;
                         int unit;
                         DECODE_LITERAL(unit, pc);
                         j++;
 #ifdef IMPL_EXECUTE_LOOP
-                        // context_clean_registers(ctx, live); // TODO: check if needed
                         size_t total_bits = term_binary_size(bs_bin) * 8;
                         size_t tail_bits = total_bits - bs_offset;
                         if (bs_offset % 8 != 0 || tail_bits % 8 != 0) {
                             RAISE_ERROR(BADARG_ATOM);
                         }
                         size_t heap_size = term_sub_binary_heap_size(bs_bin, tail_bits / 8);
-                        if (UNLIKELY(memory_ensure_free_opt(ctx, heap_size, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
+                        x_regs[live] = match_state;
+                        if (UNLIKELY(memory_ensure_free_with_roots(ctx, heap_size, live + 1, x_regs, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
                             RAISE_ERROR(OUT_OF_MEMORY_ATOM);
                         }
-                        // re-compute match_state as GC could have moved it
-                        const uint8_t *temp = match_pc;
-                        DECODE_COMPACT_TERM(match_state, temp);
+                        match_state = x_regs[live];
                         bs_bin = term_get_match_state_binary(match_state);
                         term t = term_maybe_create_sub_binary(bs_bin, bs_offset / 8, tail_bits / 8, &ctx->heap, ctx->global);
 #endif
@@ -6687,8 +6660,8 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb)
                     }

                    case EQUAL_COLON_EQUAL_ATOM: {
-                        term live;
-                        DECODE_COMPACT_TERM(live, pc);
+                        // genop.tab says Live, but the compiler always puts nil
+                        DECODE_NIL(pc);
                         j++;
                         int size;
                         DECODE_LITERAL(size, pc);
@@ -6781,7 +6754,7 @@ HOT_FUNC int scheduler_entry_point(GlobalContext *glb)

             bool throw = ctx->x[0] == THROW_ATOM;
             int exit_reason_tuple_size = (throw ? TUPLE_SIZE(2) : 0) + TUPLE_SIZE(2);
-            if (memory_ensure_free_opt(ctx, exit_reason_tuple_size, MEMORY_CAN_SHRINK) != MEMORY_GC_OK) {
+            if (memory_ensure_free_with_roots(ctx, exit_reason_tuple_size, 1, x_regs + 1, MEMORY_CAN_SHRINK) != MEMORY_GC_OK) {
                 ctx->exit_reason = OUT_OF_MEMORY_ATOM;
             } else {
                 term error_term;
diff --git a/src/libAtomVM/stacktrace.c b/src/libAtomVM/stacktrace.c
index 41ca45f22..e33946509 100644
--- a/src/libAtomVM/stacktrace.c
+++ b/src/libAtomVM/stacktrace.c
@@ -21,6 +21,7 @@
 #include "stacktrace.h"
 #include "defaultatoms.h"
 #include "globalcontext.h"
+#include "memory.h"

 #ifndef AVM_CREATE_STACKTRACES

@@ -32,7 +33,7 @@ term stacktrace_create_raw(Context *ctx, Module *mod, int current_offset, term e
     return exception_class;
 }

-term stacktrace_build(Context *ctx, term *stack_info)
+term stacktrace_build(Context *ctx, term *stack_info, uint32_t live)
 {
     UNUSED(ctx);
     UNUSED(stack_info);
@@ -163,7 +164,8 @@ term stacktrace_create_raw(Context *ctx, Module *mod, int current_offset, term e

     // {num_frames, num_aux_terms, filename_lens, num_mods, [{module, offset}, ...]}
     size_t requested_size = TUPLE_SIZE(6) + num_frames * (2 + TUPLE_SIZE(2));
-    if (UNLIKELY(memory_ensure_free(ctx, requested_size) != MEMORY_GC_OK)) {
+    // We need to preserve x0 and x1 that contain information on the current exception
+    if (UNLIKELY(memory_ensure_free_with_roots(ctx, requested_size, 2, ctx->x, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
         fprintf(stderr, "WARNING: Unable to allocate heap space for raw stacktrace\n");
         return OUT_OF_MEMORY_ATOM;
     }
@@ -254,7 +256,7 @@ static term find_path_created(term module_name, struct ModulePathPair *module_pa
     return term_invalid_term();
 }

-term stacktrace_build(Context *ctx, term *stack_info)
+term stacktrace_build(Context *ctx, term *stack_info, uint32_t live)
 {
     GlobalContext *glb = ctx->global;

@@ -280,12 +282,11 @@ term stacktrace_build(Context *ctx, term *stack_info)
     // [{module, function, arity, [{file, string()}, {line, int}]}, ...]
     //
     size_t requested_size = (TUPLE_SIZE(4) + 2) * num_frames + num_aux_terms * (4 + 2 * TUPLE_SIZE(2)) + 2 * filename_lens;
-    if (UNLIKELY(memory_ensure_free(ctx, requested_size) != MEMORY_GC_OK)) {
+    if (UNLIKELY(memory_ensure_free_with_roots(ctx, requested_size, live, ctx->x, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
         free(module_paths);
         return OUT_OF_MEMORY_ATOM;
     }
-    // Note. Safe to get stacktrace after GC when stack_info comes from x[0]

     term raw_stacktrace = term_get_tuple_element(*stack_info, 4);

     term stacktrace = term_nil();
diff --git a/src/libAtomVM/stacktrace.h b/src/libAtomVM/stacktrace.h
index d8eddb961..6702371b1 100644
--- a/src/libAtomVM/stacktrace.h
+++ b/src/libAtomVM/stacktrace.h
@@ -30,7 +30,14 @@ extern "C" {
 #include "term.h"

 term stacktrace_create_raw(Context *ctx, Module *mod, int current_offset, term exception_class);
-term stacktrace_build(Context *ctx, term *stack_info);
+/**
+ * @brief Build a stack trace
+ * @param ctx context
+ * @param stack_info pointer to stack info tuple
+ * @param live number of x registers to preserve, which should include stack_info
+ * @return the built stack trace
+ */
+term stacktrace_build(Context *ctx, term *stack_info, uint32_t live);
 term stacktrace_exception_class(term stack_info);

 #ifdef __cplusplus
diff --git a/src/platforms/esp32/components/avm_sys/platform_nifs.c b/src/platforms/esp32/components/avm_sys/platform_nifs.c
index 7c501490c..af7994853 100644
--- a/src/platforms/esp32/components/avm_sys/platform_nifs.c
+++ b/src/platforms/esp32/components/avm_sys/platform_nifs.c
@@ -284,6 +284,7 @@ static term nif_esp_partition_write(Context *ctx, int argc, term argv[])
 static term nif_esp_partition_list(Context *ctx, int argc, term argv[])
 {
     UNUSED(argc);
+    UNUSED(argv);

     size_t needed = 0;
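--
Reviewer note, not part of the patch: the UPDATING entry describes the NIF migration in prose; the following minimal sketch illustrates it for a hypothetical NIF (nif_example_make_pair is an invented name). The calls it relies on (memory_ensure_free_with_roots, memory_ensure_free, term_alloc_tuple, TUPLE_SIZE, MEMORY_CAN_SHRINK) are the same ones used in the hunks above. Since registers are no longer preserved by default, the arguments must now be passed as GC roots:

    static term nif_example_make_pair(Context *ctx, int argc, term argv[])
    {
        UNUSED(argc);
        // Before this patch: memory_ensure_free(ctx, TUPLE_SIZE(2)) was enough,
        // because registers (and therefore argv) were preserved by default.
        // After this patch: pass the arguments as roots so that a copying GC
        // updates them in place instead of leaving them dangling.
        if (UNLIKELY(memory_ensure_free_with_roots(ctx, TUPLE_SIZE(2), 2, argv, MEMORY_CAN_SHRINK) != MEMORY_GC_OK)) {
            RAISE_ERROR(OUT_OF_MEMORY_ATOM);
        }
        // argv[0] and argv[1] are valid again here, even if a GC ran.
        term t = term_alloc_tuple(2, &ctx->heap);
        term_put_tuple_element(t, 0, argv[0]);
        term_put_tuple_element(t, 1, argv[1]);
        return t;
    }

The same pattern applies to any platform NIF that previously relied on memory_ensure_free or memory_ensure_free_opt preserving its arguments.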