diff --git a/common.gypi b/common.gypi
index dd89f1c929075e..6f1a7a0e3c7f4d 100644
--- a/common.gypi
+++ b/common.gypi
@@ -36,7 +36,7 @@
 
   # Reset this number to 0 on major V8 upgrades.
   # Increment by one for each non-official patch applied to deps/v8.
-  'v8_embedder_string': '-node.13',
+  'v8_embedder_string': '-node.12',
 
   ##### V8 defaults for Node.js #####
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 7307ced9fc28f8..46dd9fb1aaf72f 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -62,6 +62,7 @@ Andrew Paprocki
 Anna Henningsen
 Antoine du Hamel
 Anton Bikineev
+Archil Sharashenidze
 Bangfu Tao
 Ben Coe
 Ben Newman
@@ -251,6 +252,7 @@ Yi Wang
 Yong Wang
 Youfeng Hao
 Yu Yin
+Yujie Wang
 Yuri Iozzelli
 Yusif Khudhur
 Zac Hansen
diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel
index 23bce0f4bdcbfc..ff69465f0ebf3b 100644
--- a/deps/v8/BUILD.bazel
+++ b/deps/v8/BUILD.bazel
@@ -9,18 +9,14 @@ load(
     "v8_build_config",
     "v8_config",
     "v8_custom_config",
-    "v8_raw_flag",
     "v8_flag",
     "v8_int",
-    "v8_string",
     "v8_library",
-    "v8_torque",
     "v8_mksnapshot",
+    "v8_string",
+    "v8_torque",
 )
-
-# =================================================
-# Configuration settings
-# =================================================
+load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression")
 
 config_setting(
     name = "is_debug",
@@ -29,93 +25,6 @@ config_setting(
     },
 )
 
-config_setting(
-    name = "is_k8",
-    values = {"cpu": "k8"},
-)
-
-config_setting(
-    name = "is_darwin",
-    values = {"cpu": "darwin"},
-)
-
-selects.config_setting_group(
-    name = "is_x64",
-    match_any = [":is_k8", ":is_darwin"],
-)
-
-config_setting(
-    name = "is_arm64",
-    values = {
-        "host_cpu": "k8",
-        "cpu": "arm",
-    },
-)
-
-config_setting(
-    name = "is_ia32",
-    values = {
-        "host_cpu": "k8",
-        "cpu": "x86",
-    },
-)
-
-config_setting(
-    name = "is_arm",
-    values = {
-        "host_cpu": "k8",
-        "cpu": "armeabi-v7a",
-    },
-)
-
-selects.config_setting_group(
-    name = "is_32bits",
-    match_any = [":is_ia32", ":is_arm"],
-)
-
-config_setting(
-    name = "is_linux",
-    constraint_values = ["@platforms//os:linux"],
-)
-
-config_setting(
-    name = "is_android",
-    constraint_values = ["@platforms//os:android"],
-)
-
-config_setting(
-    name = "is_macos",
-    constraint_values = ["@platforms//os:macos"],
-)
-
-selects.config_setting_group(
-    name = "is_posix",
-    match_any = [":is_linux", ":is_android", ":is_macos"],
-)
-
-config_setting(
-    name = "is_linux_x64",
-    constraint_values = ["@platforms//os:linux"],
-    values = {"cpu": "k8"},
-)
-
-config_setting(
-    name = "is_macos_x64",
-    constraint_values = ["@platforms//os:macos"],
-    values = {"cpu": "darwin"},
-)
-
-selects.config_setting_group(
-    name = "is_linux_x64_or_macos_x64",
-    match_any = [":is_linux_x64", ":is_macos_x64"],
-)
-
-config_setting(
-    name = "is_android_x86",
-    constraint_values = ["@platforms//os:android"],
-    values = {"cpu": "x86"},
-)
-
 # =================================================
 # Flags
 # =================================================
@@ -165,6 +74,7 @@ config_setting(
 # v8_control_flow_integrity
 # v8_enable_virtual_memory_cage
 # cppgc_enable_caged_heap
+# cppgc_enable_check_assignments_in_prefinalizers
 # cppgc_enable_object_names
 # cppgc_enable_verify_heap
 # cppgc_enable_young_generation
@@ -179,48 +89,147 @@ config_setting(
 # v8_enable_allocation_folding
 # v8_allocation_site_tracking
 
-v8_string(name = "v8_root", default = ".")
+v8_string(
+    name = "v8_root",
+    default = "third_party/v8/HEAD",
+)
 
 v8_flag(name = "v8_android_log_stdout")
+
 v8_flag(name = "v8_annotate_torque_ir")
"v8_annotate_torque_ir") v8_flag(name = "v8_code_comments") -v8_flag(name = "v8_deprecation_warnings", default = True) -v8_flag(name = "v8_imminent_deprecation_warnings", default = True) +v8_flag( + name = "v8_deprecation_warnings", + default = True, +) + +v8_flag( + name = "v8_imminent_deprecation_warnings", + default = True, +) v8_flag(name = "v8_enable_backtrace") + v8_flag(name = "v8_enable_debug_code") + v8_flag(name = "v8_enable_disassembler") -v8_flag(name = "v8_enable_handle_zapping", default = True) + +v8_flag( + name = "v8_enable_handle_zapping", + default = True, +) + v8_flag(name = "v8_enable_hugepage") + v8_flag(name = "v8_enable_fast_mksnapshot") + v8_flag(name = "v8_enable_future") -v8_flag(name = "v8_enable_i18n_support", default = True) -v8_flag(name = "v8_enable_lazy_source_positions", default = True) -v8_flag(name = "v8_enable_minor_mc", default = True) + +# NOTE: Transitions are not recommended in library targets: +# https://groups.google.com/a/google.com/g/c-toolchain-team/c/W4nmWonD0ow/m/rLGyIL4YIQAJ +# Therefore we create multiple targets with and without ICU, instead of +# implementing the flag v8_enable_i18n_support. + +v8_flag( + name = "v8_enable_lazy_source_positions", + default = True, +) + +v8_flag( + name = "v8_enable_minor_mc", + default = True, +) + v8_flag(name = "v8_enable_object_print") + v8_flag(name = "v8_enable_slow_dchecks") + v8_flag(name = "v8_enable_snapshot_code_comments") + v8_flag(name = "v8_enable_snapshot_native_code_counters") + v8_flag(name = "v8_enable_trace_maps") + v8_flag(name = "v8_enable_v8_checks") + v8_flag(name = "v8_enable_verify_csa") + v8_flag(name = "v8_enable_verify_heap") + v8_flag(name = "v8_enable_verify_predictable") + v8_flag(name = "v8_enable_test_features") -v8_flag(name = "v8_enable_webassembly", default = True) -v8_int(name = "v8_typed_array_max_size_in_heap", default = 64) +v8_flag( + name = "v8_enable_webassembly", + default = True, +) -# Pointer compression, true by default if x64 or arm64. -v8_raw_flag(name = "v8_enable_pointer_compression") +v8_int( + name = "v8_typed_array_max_size_in_heap", + default = 64, +) + +# We use a string flag to create a 3 value-logic. +# If no explicit value for v8_enable_pointer_compression, we set it to 'none'. +v8_string( + name = "v8_enable_pointer_compression", + default = "none", +) + +# Default setting for v8_enable_pointer_compression. +config_setting( + name = "v8_enable_pointer_compression_is_none", + flag_values = { + ":v8_enable_pointer_compression": "none", + }, +) + +# Explicity defined v8_enable_pointer_compression. +config_setting( + name = "v8_enable_pointer_compression_is_true", + flag_values = { + ":v8_enable_pointer_compression": "True", + }, +) + +# Default setting for v8_enable_pointer_compression when target is x64. +selects.config_setting_group( + name = "v8_target_x64_default_pointer_compression", + match_all = [ + ":v8_enable_pointer_compression_is_none", + "@config//:v8_target_x64", + ], +) + +# Default setting for v8_enable_pointer_compression when target is arm64. +selects.config_setting_group( + name = "v8_target_arm64_default_pointer_compression", + match_all = [ + ":v8_enable_pointer_compression_is_none", + "@config//:v8_target_arm64", + ], +) + +# v8_enable_pointer_compression is valid whenever it is explicitly defined +# or we have the default settings for targets x64 and arm64. 
 selects.config_setting_group(
     name = "is_v8_enable_pointer_compression",
-    match_any = [ ":raw_v8_enable_pointer_compression", ":is_x64", ":is_arm64" ],
+    match_any = [
+        ":v8_enable_pointer_compression_is_true",
+        ":v8_target_x64_default_pointer_compression",
+        ":v8_target_arm64_default_pointer_compression",
+    ],
 )
+
 # Pointer cage, true by default if v8_enable_pointer_compression.
-v8_flag(name = "v8_enable_pointer_compression_shared_cage", default = True)
+v8_flag(
+    name = "v8_enable_pointer_compression_shared_cage",
+    default = True,
+)
+
 # Enable shared cage if v8_enable_pointer_compression
 # and v8_enable_pointer_compression_shared_cage.
 selects.config_setting_group(
@@ -230,6 +239,7 @@ selects.config_setting_group(
         ":is_v8_enable_pointer_compression_shared_cage",
     ],
 )
+
 # Enable isolated cage if v8_enable_pointer_compression and
 # NOT v8_enable_pointer_compression_shared_cage.
 selects.config_setting_group(
@@ -243,7 +253,10 @@ selects.config_setting_group(
 # Enable -rdynamic.
 selects.config_setting_group(
     name = "should_add_rdynamic",
-    match_all = [ ":is_linux", ":is_v8_enable_backtrace" ],
+    match_all = [
+        "@config//:is_linux",
+        ":is_v8_enable_backtrace",
+    ],
 )
 
 v8_custom_config(name = "default")
@@ -260,13 +273,11 @@ v8_config(
         "v8_enable_handle_zapping": "ENABLE_HANDLE_ZAPPING",
         "v8_enable_hugepage": "ENABLE_HUGEPAGE",
         "v8_enable_future": "V8_ENABLE_FUTURE",
-        "v8_enable_i18n_support": "V8_INTL_SUPPORT",
         "v8_enable_lazy_source_positions": "V8_ENABLE_LAZY_SOURCE_POSITIONS",
         "v8_enable_minor_mc": "ENABLE_MINOR_MC",
         "v8_enable_object_print": "OBJECT_PRINT",
         "v8_enable_slow_dchecks": "ENABLE_SLOW_DCHECKS",
-        "v8_enable_snapshot_native_code_counters":
-            "V8_SNAPSHOT_NATIVE_CODE_COUNTERS",
+        "v8_enable_snapshot_native_code_counters": "V8_SNAPSHOT_NATIVE_CODE_COUNTERS",
         "v8_enable_trace_maps": "V8_TRACE_MAPS",
         "v8_enable_v8_checks": "V8_ENABLE_CHECKS",
         "v8_enable_verify_csa": "ENABLE_VERIFY_CSA",
@@ -277,30 +288,37 @@ v8_config(
     defines = [
         "GOOGLE3",
         "CHROMIUM_ZLIB_NO_CHROMECONF",
+        "ENABLE_DEBUGGER_SUPPORT",
         "V8_ADVANCED_BIGINT_ALGORITHMS",
         "V8_CONCURRENT_MARKING",
     ] + select({
-        ":is_debug": [ "DEBUG" ],
-        "//conditions:default": [],
-    }) + select({
-        ":is_ia32": [ "V8_TARGET_ARCH_IA32" ],
-        ":is_x64": [ "V8_TARGET_ARCH_X64" ],
-        ":is_arm": [
-            "V8_TARGET_ARCH_ARM",
-            "CAN_USE_ARMV7_INSTRUCTIONS",
-            "CAN_USE_VFP3_INSTRUCTIONS",
+        ":is_debug": [
+            "DEBUG",
+            "V8_ENABLE_CHECKS",
         ],
-        ":is_arm64": [ "V8_TARGET_ARCH_ARM64" ],
-    }) + select({
-        ":is_android": [
+        "//conditions:default": [],
+    }) + select(
+        {
+            "@config//:v8_target_ia32": ["V8_TARGET_ARCH_IA32"],
+            "@config//:v8_target_x64": ["V8_TARGET_ARCH_X64"],
+            "@config//:v8_target_arm": [
+                "V8_TARGET_ARCH_ARM",
+                "CAN_USE_ARMV7_INSTRUCTIONS",
+                "CAN_USE_VFP3_INSTRUCTIONS",
+            ],
+            "@config//:v8_target_arm64": ["V8_TARGET_ARCH_ARM64"],
+        },
+        no_match_error = "Please specify a target cpu supported by v8",
+    ) + select({
+        "@config//:is_android": [
             "V8_HAVE_TARGET_OS",
             "V8_TARGET_OS_ANDROID",
         ],
-        ":is_linux": [
+        "@config//:is_linux": [
             "V8_HAVE_TARGET_OS",
             "V8_TARGET_OS_LINUX",
         ],
-        ":is_macos": [
+        "@config//:is_macos": [
             "V8_HAVE_TARGET_OS",
             "V8_TARGET_OS_MACOSX",
         ],
@@ -312,10 +330,10 @@ v8_config(
         "//conditions:default": [],
     }) + select({
         ":enable_pointer_compression_shared_cage": [
-            "V8_COMPRESS_POINTERS_IN_SHARED_CAGE"
+            "V8_COMPRESS_POINTERS_IN_SHARED_CAGE",
         ],
         ":enable_pointer_compression_isolated_cage": [
-            "V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE"
+            "V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE",
         ],
         "//conditions:default": [],
     }) + select({
@@ -326,7 +344,7 @@ v8_config(
         ],
         "//conditions:default": [],
     }),
-    deps = [ ":default" ],
+    deps = [":default"],
 )
 
 # =================================================
@@ -345,8 +363,8 @@ filegroup(
     name = "v8_version_files",
     srcs = [
         "include/v8-value-serializer-version.h",
-        "include/v8-version-string.h",
         "include/v8-version.h",
+        "include/v8-version-string.h",
     ],
 )
 
@@ -361,10 +379,10 @@ filegroup(
         "include/cppgc/ephemeron-pair.h",
         "include/cppgc/explicit-management.h",
         "include/cppgc/garbage-collected.h",
+        "include/cppgc/heap.h",
        "include/cppgc/heap-consistency.h",
         "include/cppgc/heap-state.h",
         "include/cppgc/heap-statistics.h",
-        "include/cppgc/heap.h",
         "include/cppgc/internal/api-constants.h",
         "include/cppgc/internal/atomic-entry-flag.h",
         "include/cppgc/internal/caged-heap-local-data.h",
@@ -391,14 +409,13 @@ filegroup(
         "include/cppgc/trace-trait.h",
         "include/cppgc/type-traits.h",
         "include/cppgc/visitor.h",
-    ]
+    ],
 )
 
 filegroup(
     name = "v8_headers_files",
     srcs = [
-        ":cppgc_headers_files",
-        ":v8_version_files",
+        "include/v8.h",
         "include/v8-array-buffer.h",
         "include/v8-callbacks.h",
         "include/v8-container.h",
@@ -424,12 +441,12 @@ filegroup(
         "include/v8-maybe.h",
         "include/v8-memory-span.h",
         "include/v8-message.h",
-        "include/v8-microtask-queue.h",
         "include/v8-microtask.h",
+        "include/v8-microtask-queue.h",
         "include/v8-object.h",
         "include/v8-persistent-handle.h",
-        "include/v8-primitive-object.h",
         "include/v8-primitive.h",
+        "include/v8-primitive-object.h",
         "include/v8-profiler.h",
         "include/v8-promise.h",
         "include/v8-proxy.h",
@@ -442,11 +459,12 @@ filegroup(
         "include/v8-typed-array.h",
         "include/v8-unwinder.h",
         "include/v8-util.h",
-        "include/v8-value-serializer.h",
         "include/v8-value.h",
+        "include/v8-value-serializer.h",
         "include/v8-wasm.h",
         "include/v8-weak-callback-info.h",
-        "include/v8.h",
+        ":cppgc_headers_files",
+        ":v8_version_files",
     ],
 )
 
@@ -456,7 +474,7 @@ filegroup(
         "src/flags/flag-definitions.h",
         "src/flags/flags.h",
     ] + select({
-        "is_v8_enable_webassembly": [ "src/wasm/wasm-feature-flags.h" ],
+        "is_v8_enable_webassembly": ["src/wasm/wasm-feature-flags.h"],
         "//conditions:default": [],
     }),
 )
@@ -577,7 +595,7 @@ filegroup(
         "src/base/vlq-base64.cc",
         "src/base/vlq-base64.h",
     ] + select({
-        ":is_posix": [
+        "@config//:is_posix": [
             "src/base/platform/platform-posix.cc",
             "src/base/platform/platform-posix.h",
             "src/base/platform/platform-posix-time.cc",
@@ -585,19 +603,20 @@ filegroup(
         ],
         "//conditions:default": [],
     }) + select({
-        ":is_linux": [
+        "@config//:is_linux": [
             "src/base/debug/stack_trace_posix.cc",
             "src/base/platform/platform-linux.cc",
         ],
-        "is_android": [
+        "@config//:is_android": [
             "src/base/debug/stack_trace_android.cc",
             "src/base/platform/platform-linux.cc",
         ],
-        "is_macos": [
+        "@config//:is_macos": [
             "src/base/debug/stack_trace_posix.cc",
             "src/base/platform/platform-macos.cc",
         ],
     }),
+    visibility = ["//visibility:public"],
 )
 
 filegroup(
@@ -642,11 +661,11 @@ filegroup(
 
 filegroup(
     name = "torque_runtime_support_files",
-    srcs = [ "src/torque/runtime-support.h" ],
+    srcs = ["src/torque/runtime-support.h"],
 )
 
 filegroup(
-    name = "torque_files",
+    name = "noicu/torque_files",
     srcs = [
         "src/builtins/aggregate-error.tq",
         "src/builtins/array-at.tq",
@@ -805,6 +824,7 @@ filegroup(
         "src/objects/js-proxy.tq",
         "src/objects/js-regexp-string-iterator.tq",
         "src/objects/js-regexp.tq",
+        "src/objects/js-temporal-objects.tq",
         "src/objects/js-weak-refs.tq",
         "src/objects/literal-objects.tq",
         "src/objects/map.tq",
@@ -834,6 +854,7 @@
"src/objects/template-objects.tq", "src/objects/templates.tq", "src/objects/torque-defined-classes.tq", + "src/objects/turbofan-types.tq", "test/torque/test-torque.tq", "third_party/v8/builtins/array-sort.tq", ] + select({ @@ -843,24 +864,27 @@ filegroup( "src/wasm/wasm-objects.tq", ], "//conditions:default": [], - }) + select({ - ":is_v8_enable_i18n_support": [ - "src/objects/intl-objects.tq", - "src/objects/js-break-iterator.tq", - "src/objects/js-collator.tq", - "src/objects/js-date-time-format.tq", - "src/objects/js-display-names.tq", - "src/objects/js-list-format.tq", - "src/objects/js-locale.tq", - "src/objects/js-number-format.tq", - "src/objects/js-plural-rules.tq", - "src/objects/js-relative-time-format.tq", - "src/objects/js-segment-iterator.tq", - "src/objects/js-segmenter.tq", - "src/objects/js-segments.tq", - ], - "//conditions:default": [], - }) + }), +) + +filegroup( + name = "icu/torque_files", + srcs = [ + "src/objects/intl-objects.tq", + "src/objects/js-break-iterator.tq", + "src/objects/js-collator.tq", + "src/objects/js-date-time-format.tq", + "src/objects/js-display-names.tq", + "src/objects/js-list-format.tq", + "src/objects/js-locale.tq", + "src/objects/js-number-format.tq", + "src/objects/js-plural-rules.tq", + "src/objects/js-relative-time-format.tq", + "src/objects/js-segment-iterator.tq", + "src/objects/js-segmenter.tq", + "src/objects/js-segments.tq", + ":noicu/torque_files", + ], ) filegroup( @@ -999,6 +1023,7 @@ filegroup( "src/builtins/builtins-sharedarraybuffer.cc", "src/builtins/builtins-string.cc", "src/builtins/builtins-symbol.cc", + "src/builtins/builtins-temporal.cc", "src/builtins/builtins-trace.cc", "src/builtins/builtins-typed-array.cc", "src/builtins/builtins-utils-inl.h", @@ -1085,8 +1110,6 @@ filegroup( "src/common/assert-scope.cc", "src/common/assert-scope.h", "src/common/checks.h", - "src/common/external-pointer-inl.h", - "src/common/external-pointer.h", "src/common/message-template.h", "src/common/ptr-compr-inl.h", "src/common/ptr-compr.h", @@ -1159,8 +1182,6 @@ filegroup( "src/execution/arguments.h", "src/execution/execution.cc", "src/execution/execution.h", - "src/execution/external-pointer-table.cc", - "src/execution/external-pointer-table.h", "src/execution/frame-constants.h", "src/execution/frames-inl.h", "src/execution/frames.cc", @@ -1404,8 +1425,6 @@ filegroup( "src/init/startup-data-util.h", "src/init/v8.cc", "src/init/v8.h", - "src/init/vm-cage.cc", - "src/init/vm-cage.h", "src/interpreter/block-coverage-builder.h", "src/interpreter/bytecode-array-builder.cc", "src/interpreter/bytecode-array-builder.h", @@ -1577,6 +1596,8 @@ filegroup( "src/objects/js-regexp-string-iterator.h", "src/objects/js-regexp.cc", "src/objects/js-regexp.h", + "src/objects/js-temporal-objects.h", + "src/objects/js-temporal-objects-inl.h", "src/objects/js-weak-refs.h", "src/objects/js-weak-refs-inl.h", "src/objects/keys.cc", @@ -1708,6 +1729,8 @@ filegroup( "src/objects/transitions-inl.h", "src/objects/transitions.cc", "src/objects/transitions.h", + "src/objects/turbofan-types-inl.h", + "src/objects/turbofan-types.h", "src/objects/type-hints.cc", "src/objects/type-hints.h", "src/objects/value-serializer.cc", @@ -1856,6 +1879,14 @@ filegroup( "src/runtime/runtime-weak-refs.cc", "src/runtime/runtime.cc", "src/runtime/runtime.h", + "src/security/external-pointer-table.cc", + "src/security/vm-cage.cc", + "src/security/caged-pointer-inl.h", + "src/security/caged-pointer.h", + "src/security/external-pointer-inl.h", + "src/security/external-pointer-table.h", + 
"src/security/external-pointer.h", + "src/security/vm-cage.h", "src/base/sanitizer/asan.h", "src/base/sanitizer/lsan-page-allocator.cc", "src/base/sanitizer/lsan-page-allocator.h", @@ -1884,9 +1915,16 @@ filegroup( "src/snapshot/serializer-deserializer.cc", "src/snapshot/serializer-deserializer.h", "src/snapshot/serializer.cc", + "src/snapshot/serializer-inl.h", "src/snapshot/serializer.h", + "src/snapshot/shared-heap-deserializer.h", + "src/snapshot/shared-heap-deserializer.cc", + "src/snapshot/shared-heap-serializer.h", + "src/snapshot/shared-heap-serializer.cc", "src/snapshot/snapshot-compression.cc", "src/snapshot/snapshot-compression.h", + "third_party/zlib/google/compression_utils_portable.h", + "third_party/zlib/google/compression_utils_portable.cc", "src/snapshot/snapshot-data.cc", "src/snapshot/snapshot-data.h", "src/snapshot/snapshot-source-sink.cc", @@ -1989,7 +2027,7 @@ filegroup( "src/heap/third-party/heap-api.h", "src/heap/third-party/heap-api-stub.cc", ] + select({ - ":is_ia32": [ + "@config//:v8_target_ia32": [ "src/baseline/ia32/baseline-assembler-ia32-inl.h", "src/baseline/ia32/baseline-compiler-ia32-inl.h", "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h", @@ -2015,10 +2053,9 @@ filegroup( "src/execution/ia32/frame-constants-ia32.h", "src/regexp/ia32/regexp-macro-assembler-ia32.cc", "src/regexp/ia32/regexp-macro-assembler-ia32.h", - "src/third_party/valgrind/valgrind.h", "src/wasm/baseline/ia32/liftoff-assembler-ia32.h", ], - ":is_x64": [ + "@config//:v8_target_x64": [ "src/baseline/x64/baseline-assembler-x64-inl.h", "src/baseline/x64/baseline-compiler-x64-inl.h", "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h", @@ -2048,10 +2085,9 @@ filegroup( "src/execution/x64/frame-constants-x64.h", "src/regexp/x64/regexp-macro-assembler-x64.cc", "src/regexp/x64/regexp-macro-assembler-x64.h", - "src/third_party/valgrind/valgrind.h", "src/wasm/baseline/x64/liftoff-assembler-x64.h", ], - "is_arm": [ + "@config//:v8_target_arm": [ "src/baseline/arm/baseline-assembler-arm-inl.h", "src/baseline/arm/baseline-compiler-arm-inl.h", "src/codegen/arm/assembler-arm-inl.h", @@ -2082,7 +2118,7 @@ filegroup( "src/regexp/arm/regexp-macro-assembler-arm.h", "src/wasm/baseline/arm/liftoff-assembler-arm.h", ], - ":is_arm64": [ + "@config//:v8_target_arm64": [ "src/baseline/arm64/baseline-assembler-arm64-inl.h", "src/baseline/arm64/baseline-compiler-arm64-inl.h", "src/codegen/arm64/assembler-arm64-inl.h", @@ -2126,11 +2162,18 @@ filegroup( "src/wasm/baseline/arm64/liftoff-assembler-arm64.h", ], }) + select({ - ":is_linux_x64_or_macos_x64": [ + # Only for x64 builds and for arm64 with x64 host simulator. 
+ "@config//:is_x64": [ "src/trap-handler/handler-inside-posix.cc", "src/trap-handler/handler-outside-posix.cc", ], "//conditions:default": [], + }) + select({ + "@config//:v8_arm64_simulator": [ + "src/trap-handler/trap-handler-simulator.h", + "src/trap-handler/handler-outside-simulator.cc", + ], + "//conditions:default": [], }) + select({ ":is_v8_enable_webassembly": [ "src/asmjs/asm-js.cc", @@ -2143,6 +2186,7 @@ filegroup( "src/asmjs/asm-types.h", "src/compiler/int64-lowering.h", "src/compiler/wasm-compiler.h", + "src/compiler/wasm-escape-analysis.h", "src/compiler/wasm-inlining.h", "src/debug/debug-wasm-objects.cc", "src/debug/debug-wasm-objects.h", @@ -2189,6 +2233,7 @@ filegroup( "src/wasm/signature-map.h", "src/wasm/simd-shuffle.cc", "src/wasm/simd-shuffle.h", + "src/wasm/stacks.h", "src/wasm/streaming-decoder.cc", "src/wasm/streaming-decoder.h", "src/wasm/struct-types.h", @@ -2235,55 +2280,57 @@ filegroup( "src/wasm/wasm-value.h", ], "//conditions:default": [], - }) + select({ - ":is_v8_enable_i18n_support": [ - "src/builtins/builtins-intl.cc", - "src/builtins/builtins-intl-gen.cc", - "src/objects/intl-objects.cc", - "src/objects/intl-objects.h", - "src/objects/js-break-iterator.cc", - "src/objects/js-break-iterator.h", - "src/objects/js-break-iterator-inl.h", - "src/objects/js-collator.cc", - "src/objects/js-collator.h", - "src/objects/js-collator-inl.h", - "src/objects/js-date-time-format.cc", - "src/objects/js-date-time-format.h", - "src/objects/js-date-time-format-inl.h", - "src/objects/js-display-names.cc", - "src/objects/js-display-names.h", - "src/objects/js-display-names-inl.h", - "src/objects/js-list-format.cc", - "src/objects/js-list-format.h", - "src/objects/js-list-format-inl.h", - "src/objects/js-locale.cc", - "src/objects/js-locale.h", - "src/objects/js-locale-inl.h", - "src/objects/js-number-format.cc", - "src/objects/js-number-format.h", - "src/objects/js-number-format-inl.h", - "src/objects/js-plural-rules.cc", - "src/objects/js-plural-rules.h", - "src/objects/js-plural-rules-inl.h", - "src/objects/js-relative-time-format.cc", - "src/objects/js-relative-time-format.h", - "src/objects/js-relative-time-format-inl.h", - "src/objects/js-segmenter.cc", - "src/objects/js-segmenter.h", - "src/objects/js-segmenter-inl.h", - "src/objects/js-segment-iterator.cc", - "src/objects/js-segment-iterator.h", - "src/objects/js-segment-iterator-inl.h", - "src/objects/js-segments.cc", - "src/objects/js-segments.h", - "src/objects/js-segments-inl.h", - "src/runtime/runtime-intl.cc", - "src/strings/char-predicates.cc", - ], - "//conditions:default": [], }), ) +filegroup( + name = "icu/v8_base_without_compiler_files", + srcs = [ + "src/builtins/builtins-intl.cc", + "src/builtins/builtins-intl-gen.cc", + "src/objects/intl-objects.cc", + "src/objects/intl-objects.h", + "src/objects/js-break-iterator.cc", + "src/objects/js-break-iterator.h", + "src/objects/js-break-iterator-inl.h", + "src/objects/js-collator.cc", + "src/objects/js-collator.h", + "src/objects/js-collator-inl.h", + "src/objects/js-date-time-format.cc", + "src/objects/js-date-time-format.h", + "src/objects/js-date-time-format-inl.h", + "src/objects/js-display-names.cc", + "src/objects/js-display-names.h", + "src/objects/js-display-names-inl.h", + "src/objects/js-list-format.cc", + "src/objects/js-list-format.h", + "src/objects/js-list-format-inl.h", + "src/objects/js-locale.cc", + "src/objects/js-locale.h", + "src/objects/js-locale-inl.h", + "src/objects/js-number-format.cc", + "src/objects/js-number-format.h", + 
"src/objects/js-number-format-inl.h", + "src/objects/js-plural-rules.cc", + "src/objects/js-plural-rules.h", + "src/objects/js-plural-rules-inl.h", + "src/objects/js-relative-time-format.cc", + "src/objects/js-relative-time-format.h", + "src/objects/js-relative-time-format-inl.h", + "src/objects/js-segment-iterator.cc", + "src/objects/js-segment-iterator.h", + "src/objects/js-segment-iterator-inl.h", + "src/objects/js-segmenter.cc", + "src/objects/js-segmenter.h", + "src/objects/js-segmenter-inl.h", + "src/objects/js-segments.cc", + "src/objects/js-segments.h", + "src/objects/js-segments-inl.h", + "src/runtime/runtime-intl.cc", + "src/strings/char-predicates.cc", + ], +) + filegroup( name = "v8_compiler_files", srcs = [ @@ -2526,14 +2573,13 @@ filegroup( ":is_v8_enable_webassembly": [ "src/compiler/int64-lowering.cc", "src/compiler/wasm-compiler.cc", + "src/compiler/wasm-escape-analysis.cc", "src/compiler/wasm-inlining.cc", ], "//conditions:default": [], }), ) - - filegroup( name = "v8_initializers_files", srcs = [ @@ -2605,10 +2651,10 @@ filegroup( "src/interpreter/interpreter-intrinsics-generator.cc", "src/interpreter/interpreter-intrinsics-generator.h", ] + select({ - ":is_ia32": ["src/builtins/ia32/builtins-ia32.cc"], - ":is_x64": ["src/builtins/x64/builtins-x64.cc"], - ":is_arm" : ["src/builtins/arm/builtins-arm.cc"], - ":is_arm64": ["src/builtins/arm64/builtins-arm64.cc"], + "@config//:v8_target_ia32": ["src/builtins/ia32/builtins-ia32.cc"], + "@config//:v8_target_x64": ["src/builtins/x64/builtins-x64.cc"], + "@config//:v8_target_arm": ["src/builtins/arm/builtins-arm.cc"], + "@config//:v8_target_arm64": ["src/builtins/arm64/builtins-arm64.cc"], }) + select({ ":is_v8_enable_webassembly": [ "src/builtins/builtins-wasm-gen.cc", @@ -2702,6 +2748,7 @@ filegroup( "src/heap/cppgc/task-handle.h", "src/heap/cppgc/trace-event.h", "src/heap/cppgc/trace-trait.cc", + "src/heap/cppgc/unmarker.h", "src/heap/cppgc/virtual-memory.cc", "src/heap/cppgc/virtual-memory.h", "src/heap/cppgc/visitor.cc", @@ -2720,19 +2767,22 @@ filegroup( "src/heap/base/worklist.h", "src/heap/cppgc/globals.h", ] + select({ - ":is_ia32": ["src/heap/base/asm/ia32/push_registers_asm.cc"], - ":is_x64": ["src/heap/base/asm/x64/push_registers_asm.cc"], - ":is_arm" : ["src/heap/base/asm/arm/push_registers_asm.cc"], - ":is_arm64": ["src/heap/base/asm/arm64/push_registers_asm.cc"], + # Note these cannot be v8_target_is_* selects because these contain + # inline assembly that runs inside the executable. Since these are + # linked directly into mksnapshot, they must use the actual target cpu. 
+ "@config//:is_ia32": ["src/heap/base/asm/ia32/push_registers_asm.cc"], + "@config//:is_x64": ["src/heap/base/asm/x64/push_registers_asm.cc"], + "@config//:is_arm": ["src/heap/base/asm/arm/push_registers_asm.cc"], + "@config//:is_arm64": ["src/heap/base/asm/arm64/push_registers_asm.cc"], }), ) filegroup( name = "v8_bigint", srcs = [ + "src/bigint/bigint.h", "src/bigint/bigint-internal.cc", "src/bigint/bigint-internal.h", - "src/bigint/bigint.h", "src/bigint/bitwise.cc", "src/bigint/digit-arithmetic.h", "src/bigint/div-barrett.cc", @@ -2755,6 +2805,7 @@ filegroup( filegroup( name = "mksnapshot_files", srcs = [ + "src/init/setup-isolate-full.cc", "src/snapshot/embedded/embedded-empty.cc", "src/snapshot/embedded/embedded-file-writer.cc", "src/snapshot/embedded/embedded-file-writer.h", @@ -2770,8 +2821,7 @@ filegroup( "src/snapshot/embedded/platform-embedded-file-writer-win.h", "src/snapshot/mksnapshot.cc", "src/snapshot/snapshot-empty.cc", - "src/init/setup-isolate-full.cc", - ] + ], ) filegroup( @@ -2803,6 +2853,8 @@ filegroup( "src/inspector/v8-debugger.h", "src/inspector/v8-debugger-agent-impl.cc", "src/inspector/v8-debugger-agent-impl.h", + "src/inspector/v8-debugger-id.cc", + "src/inspector/v8-debugger-id.h", "src/inspector/v8-debugger-script.cc", "src/inspector/v8-debugger-script.h", "src/inspector/v8-heap-profiler-agent-impl.cc", @@ -2825,8 +2877,6 @@ filegroup( "src/inspector/v8-string-conversions.h", "src/inspector/v8-value-utils.cc", "src/inspector/v8-value-utils.h", - "src/inspector/v8-debugger-id.h", - "src/inspector/v8-debugger-id.cc", "src/inspector/value-mirror.cc", "src/inspector/value-mirror.h", ":crdtp_platform_files", @@ -2848,7 +2898,6 @@ filegroup( srcs = [ "third_party/inspector_protocol/crdtp/cbor.cc", "third_party/inspector_protocol/crdtp/cbor.h", - "third_party/inspector_protocol/crdtp/maybe.h", "third_party/inspector_protocol/crdtp/dispatch.cc", "third_party/inspector_protocol/crdtp/dispatch.h", "third_party/inspector_protocol/crdtp/error_support.cc", @@ -2857,11 +2906,12 @@ filegroup( "third_party/inspector_protocol/crdtp/find_by_first.h", "third_party/inspector_protocol/crdtp/frontend_channel.h", "third_party/inspector_protocol/crdtp/glue.h", - "third_party/inspector_protocol/crdtp/protocol_core.h", - "third_party/inspector_protocol/crdtp/protocol_core.cc", "third_party/inspector_protocol/crdtp/json.cc", "third_party/inspector_protocol/crdtp/json.h", + "third_party/inspector_protocol/crdtp/maybe.h", "third_party/inspector_protocol/crdtp/parser_handler.h", + "third_party/inspector_protocol/crdtp/protocol_core.cc", + "third_party/inspector_protocol/crdtp/protocol_core.h", "third_party/inspector_protocol/crdtp/serializable.cc", "third_party/inspector_protocol/crdtp/serializable.h", "third_party/inspector_protocol/crdtp/serializer_traits.h", @@ -2873,12 +2923,39 @@ filegroup( ) filegroup( - name = "snapshot_files", + name = "noicu/snapshot_files", srcs = [ "src/init/setup-isolate-deserialize.cc", - # TODO(victorgomes): Create a flag to select pregenerated snapshots. 
- ":generated_snapshot_files", - ] + ] + select({ + "@config//:v8_target_arm": [ + "google3/snapshots/arm/noicu/embedded.S", + "google3/snapshots/arm/noicu/snapshot.cc", + ], + "@config//:v8_target_ia32": [ + "google3/snapshots/ia32/noicu/embedded.S", + "google3/snapshots/ia32/noicu/snapshot.cc", + ], + "@config//:v8_target_arm64": [":noicu/generated_snapshot_files"], + "@config//:v8_target_x64": [":noicu/generated_snapshot_files"], + }), +) + +filegroup( + name = "icu/snapshot_files", + srcs = [ + "src/init/setup-isolate-deserialize.cc", + ] + select({ + "@config//:v8_target_arm": [ + "google3/snapshots/arm/icu/embedded.S", + "google3/snapshots/arm/icu/snapshot.cc", + ], + "@config//:v8_target_ia32": [ + "google3/snapshots/ia32/icu/embedded.S", + "google3/snapshots/ia32/icu/snapshot.cc", + ], + "@config//:v8_target_arm64": [":icu/generated_snapshot_files"], + "@config//:v8_target_x64": [":icu/generated_snapshot_files"], + }), ) filegroup( @@ -2888,7 +2965,7 @@ filegroup( "src/wasm/c-api.h", "third_party/wasm-api/wasm.h", "third_party/wasm-api/wasm.hh", - ] + ], ) # ================================================= @@ -2899,7 +2976,13 @@ filegroup( # which needs class-debug-readers and debug-macros. v8_torque( name = "generated_torque_files", - srcs = [":torque_files"], + args = select({ + ":is_v8_annotate_torque_ir": ["-annotate-ir"], + "//conditions:default": [], + }) + select({ + "@config//:v8_target_is_32_bits": ["-m32"], + "//conditions:default": [], + }), extras = [ "bit-fields.h", "builtin-definitions.h", @@ -2922,18 +3005,13 @@ v8_torque( "objects-printer.cc", "visitor-lists.h", ], - args = select({ - ":is_v8_annotate_torque_ir": [ "-annotate-ir" ], - "//conditions:default": [], - }) + select({ - ":is_32bits": [ "-m32" ], - "//conditions:default": [], - }), + icu_srcs = [":icu/torque_files"], + noicu_srcs = [":noicu/torque_files"], ) genrule( name = "generated_inspector_files", - srcs = [ "include/js_protocol.pdl" ], + srcs = ["include/js_protocol.pdl"], outs = [ "include/inspector/Debugger.h", "include/inspector/Runtime.h", @@ -2954,9 +3032,38 @@ genrule( "src/inspector/protocol/Schema.cpp", "src/inspector/protocol/Schema.h", ], - message = "Generating inspector files", cmd = "bazel/generate-inspector-files.sh $(@D)", local = 1, + message = "Generating inspector files", +) + +filegroup( + name = "v8_common_libshared_files", + srcs = [ + ":torque_runtime_support_files", + ":v8_compiler_files", + ":v8_initializers_files", + ":v8_libplatform_files", + ":v8_libsampler_files", + ":v8_shared_internal_headers", + ], +) + +filegroup( + name = "d8_files", + srcs = [ + "src/d8/async-hooks-wrapper.cc", + "src/d8/async-hooks-wrapper.h", + "src/d8/d8.cc", + "src/d8/d8.h", + "src/d8/d8-console.cc", + "src/d8/d8-console.h", + "src/d8/d8-js.cc", + "src/d8/d8-platforms.cc", + "src/d8/d8-platforms.h", + "src/d8/d8-posix.cc", + "src/d8/d8-test.cc", + ], ) genrule( @@ -2978,75 +3085,126 @@ genrule( v8_mksnapshot( name = "generated_snapshot_files", args = select({ - ":is_v8_enable_verify_heap": [ "--verify-heap" ], + ":is_v8_enable_verify_heap": ["--verify-heap"], "//conditions:default": [], }) + select({ ":is_v8_enable_fast_mksnapshot": [ "--no-turbo-rewrite-far-jumps", "--no-turbo-verify-allocation", - ], + ], "//conditions:default": [], }) + select({ - ":is_v8_enable_snapshot_code_comments": [ "--code-comments" ], + ":is_v8_enable_snapshot_code_comments": ["--code-comments"], "//conditions:default": [], }) + select({ ":is_v8_enable_snapshot_native_code_counters": [ - "--native-code-counters" + 
"--native-code-counters", ], - "//conditions:default": [ "--no-native-code-counters" ], - }) + "//conditions:default": ["--no-native-code-counters"], + }), ) # ================================================= # Libraries rules # ================================================= +# NOTE: This allow headers to be accessed without the icu/noicu prefixes. +cc_library( + name = "icu/generated_torque_headers", + hdrs = [":icu/generated_torque_files"], + strip_include_prefix = "icu", +) + +cc_library( + name = "noicu/generated_torque_headers", + hdrs = [":noicu/generated_torque_files"], + strip_include_prefix = "noicu", +) + v8_library( name = "v8_libbase", srcs = [ - ":v8_shared_internal_headers", ":v8_libbase_files", + ":v8_shared_internal_headers", + ], +) + +cc_library( + name = "torque_base_headers", + hdrs = [ + "src/torque/kythe-data.h", + "src/torque/torque-compiler.h", + ], + include_prefix = "third_party/v8", + includes = ["."], + visibility = ["//visibility:public"], +) + +cc_library( + name = "torque_base", + srcs = [ + ":torque_base_files", + ], + copts = ["-fexceptions"], + features = ["-use_header_modules"], + visibility = ["//visibility:public"], + deps = [ + ":torque_base_headers", + ":v8_libbase", ], ) v8_library( name = "v8_libshared", srcs = [ - ":generated_torque_files", - ":torque_runtime_support_files", ":v8_base_without_compiler_files", - ":v8_compiler_files", - ":v8_initializers_files", - ":v8_libplatform_files", - ":v8_libsampler_files", - ":v8_shared_internal_headers", - ] + select({ - ":is_v8_enable_i18n_support": [ ":generated_regexp_special_case" ], - "//conditions:default": [], - }), + ":v8_common_libshared_files", + ], + icu_deps = [ + ":icu/generated_torque_headers", + "@icu", + ], + icu_srcs = [ + ":generated_regexp_special_case", + ":icu/generated_torque_files", + ":icu/v8_base_without_compiler_files", + ], + noicu_deps = [ + ":noicu/generated_torque_headers", + ], + noicu_srcs = [ + ":noicu/generated_torque_files", + ], deps = [ ":v8_libbase", "@zlib", - ] + select({ - ":is_v8_enable_i18n_support": [ "@icu" ], - "//conditions:default": [], - }), + ], ) v8_library( name = "v8", - srcs = [ - ":snapshot_files", - ":v8_inspector_files", - ], - deps = [ ":v8_libshared" ], + srcs = [":v8_inspector_files"], + icu_deps = [":icu/v8_libshared"], + icu_srcs = [":icu/snapshot_files"], + noicu_deps = [":noicu/v8_libshared"], + noicu_srcs = [":noicu/snapshot_files"], ) # TODO(victorgomes): Check if v8_enable_webassembly is true. v8_library( name = "wee8", - srcs = [ ":wee8_files" ], - deps = [ ":v8" ], + srcs = [":wee8_files"], + deps = [":noicu/v8"], +) + +alias( + name = "core_lib_noicu", + actual = "noicu/v8", +) + +alias( + name = "core_lib_icu", + actual = "icu/v8", ) # ================================================= @@ -3063,57 +3221,66 @@ v8_binary( "src/interpreter/bytecodes.cc", "src/interpreter/bytecodes.h", ], - deps = [ "v8_libbase" ], + deps = ["v8_libbase"], ) v8_binary( name = "regexp_special_case_generator", srcs = [ - ":v8_shared_internal_headers", - ":v8_libbase_files", - "src/regexp/special-case.h", "src/regexp/gen-regexp-special-case.cc", + "src/regexp/special-case.h", + ":v8_libbase_files", + ":v8_shared_internal_headers", + ], + defines = [ + "V8_INTL_SUPPORT", + "ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC", + # src/regexp/regexp-compiler-tonode.cc uses an unsafe ICU method and + # access a character implicitly. 
+ "UNISTR_FROM_CHAR_EXPLICIT=", + ], + deps = [ + "@icu", ], - deps = [ "@icu" ], ) v8_binary( name = "torque", srcs = [ - ":torque_base_files", "src/torque/torque.cc", + ":torque_base_files", ], - copts = [ "-fexceptions" ], - features = [ "-use_header_modules" ], - deps = [ "v8_libbase" ], + copts = ["-fexceptions"], + features = ["-use_header_modules"], + linkopts = select({ + "@config//:is_android": ["-llog"], + "//conditions:default": [], + }), + deps = ["v8_libbase"], ) v8_binary( name = "mksnapshot", - srcs = [ ":mksnapshot_files" ], - deps = [ ":v8_libshared" ], + srcs = [":mksnapshot_files"], + icu_deps = [":icu/v8_libshared"], linkopts = select({ - "is_android": [ "-llog" ], + "@config//:is_android": ["-llog"], "//conditions:default": [], }), + noicu_deps = [":noicu/v8_libshared"], ) v8_binary( name = "d8", - srcs = [ - "src/d8/async-hooks-wrapper.cc", - "src/d8/async-hooks-wrapper.h", - "src/d8/d8.cc", - "src/d8/d8.h", - "src/d8/d8-console.cc", - "src/d8/d8-console.h", - "src/d8/d8-js.cc", - "src/d8/d8-platforms.cc", - "src/d8/d8-platforms.h", - "src/d8/d8-posix.cc", - "src/d8/d8-test.cc", - ], - deps = [ ":v8" ], + srcs = [":d8_files"], + icu_deps = [":icu/v8"], + noicu_deps = [":noicu/v8"], +) + +# This target forces torque to be compiled without pointer compression. +v8_binary_non_pointer_compression( + name = "torque_non_pointer_compression", + binary = "torque", ) # ================================================= @@ -3124,9 +3291,39 @@ v8_build_config( name = "v8_build_config", ) -# Runs mjunit with d8. +# Runs mjsunit with d8. +py_test( + name = "noicu/mjsunit", + size = "medium", + srcs = [ + "test/mjsunit/testcfg.py", + "tools/predictable_wrapper.py", + "tools/run-tests.py", + ] + glob(["tools/testrunner/**/*.py"]), + args = [ + "--no-sorting", + "--nopresubmit", + "--variant=google3_noicu", + "--outdir noicu", + "--verbose", + "mjsunit", + ], + data = [ + "//testing/pybase", + ":noicu/v8_build_config", + ":noicu/d8", + "test", + ] + glob(["test/**"]) + glob(["tools/**/*.js"]) + glob(["tools/**/*.mjs"]), + main = "tools/run-tests.py", + python_version = "PY3", + tags = [ + # Disable sanitizers, as they don't work in general in V8. + "nosan", + ], +) + py_test( - name = "mjsunit", + name = "icu/mjsunit", size = "medium", srcs = [ "test/mjsunit/testcfg.py", @@ -3136,19 +3333,19 @@ py_test( args = [ "--no-sorting", "--nopresubmit", - # TODO(victorgomes): Create a flag to pass the variant in the cmdline. - "--variant=default", - "--outdir bazel-bin", + "--variant=google3_icu", + "--outdir icu", + "--verbose", "mjsunit", ], data = [ - ":v8_build_config", - ":d8", + "//testing/pybase", + ":icu/v8_build_config", + ":icu/d8", "test", ] + glob(["test/**"]) + glob(["tools/**/*.js"]) + glob(["tools/**/*.mjs"]), main = "tools/run-tests.py", - # TODO(victorgomes): Move this to PY3. - python_version = "PY2", + python_version = "PY3", tags = [ # Disable sanitizers, as they don't work in general in V8. "nosan", diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index f491f2a4e64c6b..bca5b5356b20bc 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -343,13 +343,21 @@ declare_args() { # Enable global allocation site tracking. v8_allocation_site_tracking = true + # TODO(cbruni, v8:12302): Remove once API is migrated + # Enable legacy mode for ScriptOrModule's lifetime. By default it's a + # temporary object, if enabled it will be kept alive by the parent Script. + # This is only used by nodejs. 
+  v8_scriptormodule_legacy_lifetime = false
+
   # If enabled, the receiver is always included in the actual and formal
   # parameter count of function with JS linkage.
   # TODO(v8:11112): Remove once all architectures support the flag and it is
   # enabled unconditionally.
   v8_include_receiver_in_argc =
       v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
-      v8_current_cpu == "arm" || v8_current_cpu == "arm64"
+      v8_current_cpu == "arm" || v8_current_cpu == "arm64" ||
+      v8_current_cpu == "mips64el" || v8_current_cpu == "mipsel" ||
+      v8_current_cpu == "loong64" || v8_current_cpu == "riscv64"
 }
 
 # Derived defaults.
@@ -456,9 +464,12 @@ if (v8_multi_arch_build &&
   v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression
 }
 if (v8_enable_short_builtin_calls &&
-    (!v8_enable_pointer_compression || v8_control_flow_integrity)) {
+    ((!v8_enable_pointer_compression && v8_current_cpu != "x64") ||
+     v8_control_flow_integrity)) {
   # Disable short calls when pointer compression is not enabled.
-  # Or when CFI is enabled (until the CFI-related issues are fixed).
+  # Or when CFI is enabled (until the CFI-related issues are fixed), except x64,
+  # where short builtin calls can still be enabled if the code range is
+  # guaranteed to be close enough to embedded builtins.
   v8_enable_short_builtin_calls = false
 }
 if (v8_enable_shared_ro_heap == "") {
@@ -473,10 +484,8 @@ if (build_with_chromium && v8_current_cpu == "arm64" &&
 }
 
 # Enable the virtual memory cage on 64-bit Chromium builds.
-if (build_with_chromium &&
-    (v8_current_cpu == "arm64" || v8_current_cpu == "x64")) {
-  # The cage is incompatible with lsan.
-  v8_enable_virtual_memory_cage = !is_lsan
+if (build_with_chromium && v8_enable_pointer_compression_shared_cage) {
+  v8_enable_virtual_memory_cage = true
 }
 
 assert(!v8_disable_write_barriers || v8_enable_single_generation,
@@ -498,6 +507,9 @@ assert(!v8_enable_map_packing || !v8_enable_pointer_compression,
 assert(!v8_enable_map_packing || v8_current_cpu == "x64",
        "Map packing is only supported on x64")
 
+assert(!v8_enable_external_code_space || v8_enable_pointer_compression,
+       "External code space feature requires pointer compression")
+
 assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression,
        "V8 Heap Sandbox requires pointer compression")
 
@@ -511,9 +523,6 @@ assert(
     !v8_enable_virtual_memory_cage || v8_enable_pointer_compression_shared_cage,
     "V8 VirtualMemoryCage requires the shared pointer compression cage")
 
-assert(!v8_enable_virtual_memory_cage || !is_lsan,
-       "V8 VirtualMemoryCage is currently incompatible with Leak Sanitizer")
-
 assert(
     !v8_enable_pointer_compression_shared_cage || v8_enable_pointer_compression,
     "Can't share a pointer compression cage if pointers aren't compressed")
@@ -958,6 +967,9 @@ config("features") {
   if (v8_allocation_site_tracking) {
     defines += [ "V8_ALLOCATION_SITE_TRACKING" ]
   }
+  if (v8_scriptormodule_legacy_lifetime) {
+    defines += [ "V8_SCRIPTORMODULE_LEGACY_LIFETIME" ]
+  }
   if (v8_advanced_bigint_algorithms) {
     defines += [ "V8_ADVANCED_BIGINT_ALGORITHMS" ]
   }
@@ -1742,6 +1754,7 @@ torque_files = [
   "src/objects/js-proxy.tq",
   "src/objects/js-regexp-string-iterator.tq",
   "src/objects/js-regexp.tq",
+  "src/objects/js-temporal-objects.tq",
   "src/objects/js-weak-refs.tq",
   "src/objects/literal-objects.tq",
   "src/objects/map.tq",
@@ -1771,6 +1784,7 @@ torque_files = [
   "src/objects/template-objects.tq",
   "src/objects/templates.tq",
   "src/objects/torque-defined-classes.tq",
+  "src/objects/turbofan-types.tq",
   "test/torque/test-torque.tq",
"third_party/v8/builtins/array-sort.tq", ] @@ -2200,6 +2214,7 @@ action("v8_dump_build_config") { "v8_enable_virtual_memory_cage=$v8_enable_virtual_memory_cage", "v8_enable_third_party_heap=$v8_enable_third_party_heap", "v8_enable_webassembly=$v8_enable_webassembly", + "v8_dict_property_const_tracking=$v8_dict_property_const_tracking", "v8_control_flow_integrity=$v8_control_flow_integrity", "v8_target_cpu=\"$v8_target_cpu\"", ] @@ -2693,8 +2708,6 @@ v8_header_set("v8_internal_headers") { "src/codegen/unoptimized-compilation-info.h", "src/common/assert-scope.h", "src/common/checks.h", - "src/common/external-pointer-inl.h", - "src/common/external-pointer.h", "src/common/message-template.h", "src/common/ptr-compr-inl.h", "src/common/ptr-compr.h", @@ -2860,7 +2873,6 @@ v8_header_set("v8_internal_headers") { "src/execution/arguments-inl.h", "src/execution/arguments.h", "src/execution/execution.h", - "src/execution/external-pointer-table.h", "src/execution/frame-constants.h", "src/execution/frames-inl.h", "src/execution/frames.h", @@ -3009,7 +3021,6 @@ v8_header_set("v8_internal_headers") { "src/init/setup-isolate.h", "src/init/startup-data-util.h", "src/init/v8.h", - "src/init/vm-cage.h", "src/interpreter/block-coverage-builder.h", "src/interpreter/bytecode-array-builder.h", "src/interpreter/bytecode-array-iterator.h", @@ -3139,6 +3150,8 @@ v8_header_set("v8_internal_headers") { "src/objects/js-regexp.h", "src/objects/js-segments-inl.h", "src/objects/js-segments.h", + "src/objects/js-temporal-objects-inl.h", + "src/objects/js-temporal-objects.h", "src/objects/js-weak-refs-inl.h", "src/objects/js-weak-refs.h", "src/objects/keys.h", @@ -3241,6 +3254,8 @@ v8_header_set("v8_internal_headers") { "src/objects/torque-defined-classes.h", "src/objects/transitions-inl.h", "src/objects/transitions.h", + "src/objects/turbofan-types-inl.h", + "src/objects/turbofan-types.h", "src/objects/type-hints.h", "src/objects/value-serializer.h", "src/objects/visitors-inl.h", @@ -3310,6 +3325,12 @@ v8_header_set("v8_internal_headers") { "src/roots/roots.h", "src/runtime/runtime-utils.h", "src/runtime/runtime.h", + "src/security/caged-pointer-inl.h", + "src/security/caged-pointer.h", + "src/security/external-pointer-inl.h", + "src/security/external-pointer-table.h", + "src/security/external-pointer.h", + "src/security/vm-cage.h", "src/snapshot/code-serializer.h", "src/snapshot/context-deserializer.h", "src/snapshot/context-serializer.h", @@ -3322,7 +3343,10 @@ v8_header_set("v8_internal_headers") { "src/snapshot/references.h", "src/snapshot/roots-serializer.h", "src/snapshot/serializer-deserializer.h", + "src/snapshot/serializer-inl.h", "src/snapshot/serializer.h", + "src/snapshot/shared-heap-deserializer.h", + "src/snapshot/shared-heap-serializer.h", "src/snapshot/snapshot-compression.h", "src/snapshot/snapshot-data.h", "src/snapshot/snapshot-source-sink.h", @@ -3396,6 +3420,7 @@ v8_header_set("v8_internal_headers") { "src/asmjs/asm-types.h", "src/compiler/int64-lowering.h", "src/compiler/wasm-compiler.h", + "src/compiler/wasm-escape-analysis.h", "src/compiler/wasm-inlining.h", "src/debug/debug-wasm-objects-inl.h", "src/debug/debug-wasm-objects.h", @@ -3424,6 +3449,7 @@ v8_header_set("v8_internal_headers") { "src/wasm/object-access.h", "src/wasm/signature-map.h", "src/wasm/simd-shuffle.h", + "src/wasm/stacks.h", "src/wasm/streaming-decoder.h", "src/wasm/struct-types.h", "src/wasm/value-type.h", @@ -3684,6 +3710,8 @@ v8_header_set("v8_internal_headers") { ] } else if (v8_current_cpu == "ppc64") { sources += [ ### 
+      "src/baseline/s390/baseline-assembler-s390-inl.h",
+      "src/baseline/s390/baseline-compiler-s390-inl.h",
       "src/codegen/ppc/assembler-ppc-inl.h",
       "src/codegen/ppc/assembler-ppc.h",
       "src/codegen/ppc/constants-ppc.h",
@@ -3864,6 +3892,7 @@ if (v8_enable_webassembly) {
   v8_compiler_sources += [
     "src/compiler/int64-lowering.cc",
     "src/compiler/wasm-compiler.cc",
+    "src/compiler/wasm-escape-analysis.cc",
     "src/compiler/wasm-inlining.cc",
   ]
 }
@@ -3990,6 +4019,7 @@ v8_source_set("v8_base_without_compiler") {
     "src/builtins/builtins-sharedarraybuffer.cc",
     "src/builtins/builtins-string.cc",
     "src/builtins/builtins-symbol.cc",
+    "src/builtins/builtins-temporal.cc",
     "src/builtins/builtins-trace.cc",
     "src/builtins/builtins-typed-array.cc",
     "src/builtins/builtins-weak-refs.cc",
@@ -4057,7 +4087,6 @@ v8_source_set("v8_base_without_compiler") {
     "src/diagnostics/unwinder.cc",
     "src/execution/arguments.cc",
     "src/execution/execution.cc",
-    "src/execution/external-pointer-table.cc",
     "src/execution/frames.cc",
     "src/execution/futex-emulation.cc",
     "src/execution/interrupts-scope.cc",
@@ -4149,7 +4178,6 @@ v8_source_set("v8_base_without_compiler") {
     "src/init/isolate-allocator.cc",
     "src/init/startup-data-util.cc",
     "src/init/v8.cc",
-    "src/init/vm-cage.cc",
     "src/interpreter/bytecode-array-builder.cc",
     "src/interpreter/bytecode-array-iterator.cc",
     "src/interpreter/bytecode-array-random-iterator.cc",
@@ -4318,6 +4346,8 @@ v8_source_set("v8_base_without_compiler") {
     "src/runtime/runtime-typedarray.cc",
     "src/runtime/runtime-weak-refs.cc",
     "src/runtime/runtime.cc",
+    "src/security/external-pointer-table.cc",
+    "src/security/vm-cage.cc",
     "src/snapshot/code-serializer.cc",
     "src/snapshot/context-deserializer.cc",
     "src/snapshot/context-serializer.cc",
@@ -4329,6 +4359,8 @@ v8_source_set("v8_base_without_compiler") {
     "src/snapshot/roots-serializer.cc",
     "src/snapshot/serializer-deserializer.cc",
     "src/snapshot/serializer.cc",
+    "src/snapshot/shared-heap-deserializer.cc",
+    "src/snapshot/shared-heap-serializer.cc",
     "src/snapshot/snapshot-compression.cc",
     "src/snapshot/snapshot-data.cc",
     "src/snapshot/snapshot-source-sink.cc",
@@ -5485,6 +5517,7 @@ v8_source_set("cppgc_base") {
     "src/heap/cppgc/sweeper.cc",
     "src/heap/cppgc/sweeper.h",
     "src/heap/cppgc/task-handle.h",
+    "src/heap/cppgc/unmarker.h",
 
     # TODO(v8:11952): Remove the testing header here once depending on both,
     # //v8:v8 and //v8:v8_for_testing does not result in ODR violations.
diff --git a/deps/v8/COMMON_OWNERS b/deps/v8/COMMON_OWNERS
index dc831c0e977fab..add6b07ed60d9d 100644
--- a/deps/v8/COMMON_OWNERS
+++ b/deps/v8/COMMON_OWNERS
@@ -24,7 +24,6 @@ marja@chromium.org
 mlippautz@chromium.org
 mslekova@chromium.org
 mvstanton@chromium.org
-neis@chromium.org
 nicohartmann@chromium.org
 omerkatz@chromium.org
 pthier@chromium.org
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 587b7e53759ce5..8d1be4a65833ae 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -6,9 +6,6 @@ use_relative_paths = True
 gclient_gn_args_file = 'build/config/gclient_args.gni'
 
 gclient_gn_args = [
-  # TODO(https://crbug.com/1137662, https://crbug.com/1080854)
-  # Remove when migration is complete.
-  'checkout_fuchsia_for_arm64_host',
 ]
 
 vars = {
@@ -27,12 +24,6 @@ vars = {
   # Wildcards are supported (e.g. "qemu.*").
   'checkout_fuchsia_boot_images': "qemu.x64,qemu.arm64",
 
-  # TODO(https://crbug.com/1137662, https://crbug.com/1080854)
-  # Remove when migration is complete.
-  # By default, do not check out files required to run fuchsia tests in
-  # qemu on linux-arm64 machines.
-  'checkout_fuchsia_for_arm64_host': False,
-
   'checkout_instrumented_libraries': False,
   'checkout_ittapi': False,
 
   # Fetch clang-tidy into the same bin/ directory as our clang binary.
@@ -49,10 +40,10 @@ vars = {
   'reclient_version': 're_client_version:0.40.0.40ff5a5',
 
   # GN CIPD package version.
-  'gn_version': 'git_revision:0153d369bbccc908f4da4993b1ba82728055926a',
+  'gn_version': 'git_revision:8926696a4186279489cc2b8d768533e61bba73d7',
 
   # luci-go CIPD package version.
-  'luci_go': 'git_revision:a373a19da0fbbbe81b2b684e3797260294393e40',
+  'luci_go': 'git_revision:68355732afb00a422ae0c70eed95c6a45f9868b1',
 
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -81,7 +72,7 @@ vars = {
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling android_sdk_sources_version
   # and whatever else without interference from each other.
-  'android_sdk_sources_version': 'n7svc8KYah-i4s8zwkVa85SI3_H0WFOniP0mpwNdFO0C',
+  'android_sdk_sources_version': 'Yw53980aNNn0n9l58lN7u0wSVmxlY0OM1zFnGDQeJs4C',
   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling android_sdk_tools-lint_version
   # and whatever else without interference from each other.
@@ -90,13 +81,13 @@ vars = {
 
 deps = {
   'base/trace_event/common':
-    Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '68d816952258c9d817bba656ee2664b35507f01b',
+    Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '7f36dbc19d31e2aad895c60261ca8f726442bfbb',
   'build':
-    Var('chromium_url') + '/chromium/src/build.git' + '@' + 'ebad8533842661f66b9b905e0ee9890a32f628d5',
+    Var('chromium_url') + '/chromium/src/build.git' + '@' + 'cf325916d58a194a935c26a56fcf6b525d1e2bf4',
   'buildtools':
-    Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'a9bc3e283182a586998338a665c7eae17406ec54',
+    Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '80e4f838faaf50e18629ae630df1d421f255a62a',
   'buildtools/clang_format/script':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '99803d74e35962f63a775f29477882afd4d57d94',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '99876cacf78329e5f99c244dbe42ccd1654517a0',
   'buildtools/linux64': {
     'packages': [
       {
@@ -120,9 +111,9 @@ deps = {
   'buildtools/third_party/libc++/trunk':
    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '79a2e924d96e2fc1e4b937c42efd08898fa472d7',
   'buildtools/third_party/libc++abi/trunk':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '9959b06ccd7291269796e85c7c8f7b432af414bd',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '4c6e0991b109638204f08c93600b008c21f01da5',
   'buildtools/third_party/libunwind/trunk':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'a002c725cf03e16d3bc47dd9b7962aa22f7ee1d9',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '99015718c37b30d44c3bcbcc92a03fb85fb85a99',
   'buildtools/win': {
     'packages': [
       {
@@ -148,14 +139,14 @@ deps = {
   'test/mozilla/data':
     Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
   'test/test262/data':
-    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '50dd431dffe5cf86e9064a652d6b01dbbe542cf0',
+    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'ba82d46238bd16c3e31b93d21d2846c81a9ccf7a',
   'test/test262/harness':
     Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '278bcfaed0dcaa13936831fb1769d15e7c1e3b2b',
   'third_party/aemu-linux-x64': {
     'packages': [
       {
         'package': 'fuchsia/third_party/aemu/linux-amd64',
-        'version': 'FAd7QuRV-mCjbKgg2SO4BBlRCvGIsI672THjo3tEIZAC'
+        'version': 'hys6gk1KOHMz9nURGWen255HiLIaVd3e4eZfa-w6l7oC'
       },
     ],
     'condition': 'host_os == "linux" and checkout_fuchsia',
@@ -176,7 +167,7 @@ deps = {
     'condition': 'checkout_android',
   },
   'third_party/android_platform': {
-    'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '7a11b799efba1cd679b4f5d14889465e9e1fb1f4',
+    'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '72e09e98a62744cd10b762bd438c702ed8b131fb',
     'condition': 'checkout_android',
   },
   'third_party/android_sdk/public': {
@@ -218,7 +209,7 @@ deps = {
     'dep_type': 'cipd',
   },
   'third_party/catapult': {
-    'url': Var('chromium_url') + '/catapult.git' + '@' + 'c0b9d253fbf9a729be51d3890fa78be4b5eb3352',
+    'url': Var('chromium_url') + '/catapult.git' + '@' + '75c4ea8c6eef1d5941ec3d5cfee174e8d0f73566',
     'condition': 'checkout_android',
   },
   'third_party/colorama/src': {
@@ -226,20 +217,20 @@ deps = {
     'condition': 'checkout_android',
   },
   'third_party/depot_tools':
-    Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '0e2fb336b2e7ddbbb9c5ab70eab25f82f55dff2b',
+    Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '57c928cd959aa46e9dbd6b0bc754888075b4a4c3',
   'third_party/fuchsia-sdk': {
     'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '18896843130c33372c455c153ad07d2217bd2085',
     'condition': 'checkout_fuchsia',
   },
   'third_party/google_benchmark/src': {
-    'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '0baacde3618ca617da95375e0af13ce1baadea47',
+    'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '4f31803ebbf283e24602260e39e63a296d44b0f8',
   },
   'third_party/googletest/src':
-    Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '3b49be074d5c1340eeb447e6a8e78427051e675a',
+    Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '16f637fbf4ffc3f7a01fa4eceb7906634565242f',
   'third_party/icu':
-    Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '3f443830bd52d3aa5fab3c1aa2b6d0848bb5039d',
+    Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'eedbaf76e49d28465d9119b10c30b82906e606ff',
   'third_party/instrumented_libraries':
-    Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '5df06a49fc485f3371e8ca2f4957dac4840ba3bb',
+    Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '3c149f5611237dc59a7ec229e8ea009d8be8f51d',
   'third_party/ittapi': {
     # Force checkout ittapi libraries to pass v8 header includes check on
     # bots that has check_v8_header_includes enabled.
@@ -247,7 +238,7 @@ deps = { 'condition': "checkout_ittapi or check_v8_header_includes", }, 'third_party/jinja2': - Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '6db8da1615a13fdfab925688bc4bf2eb394a73af', + Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'ee69aa00ee8536f61db6a451f3858745cf587de6', 'third_party/jsoncpp/source': Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '9059f5cad030ba11d37818847443a53918c327b1', 'third_party/logdog/logdog': @@ -283,9 +274,9 @@ deps = { 'condition': 'checkout_android', }, 'third_party/zlib': - Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'dfa96e81458fb3b39676e45f7e9e000dff789b05', + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '6da1d53b97c89b07e47714d88cab61f1ce003c68', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'c06edd1f455183fc89e9f8c2cf745db8f564d8ea', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '21baac0e13389b03d6f805701c75544ed0b1ebb0', 'tools/clang/dsymutil': { 'packages': [ { @@ -494,7 +485,7 @@ hooks = [ '--no_resume', '--no_auth', '--bucket', 'chromium-instrumented-libraries', - '-s', 'third_party/instrumented_libraries/binaries/msan-chained-origins-xenial.tgz.sha1', + '-s', 'third_party/instrumented_libraries/binaries/msan-chained-origins.tgz.sha1', ], }, { @@ -505,7 +496,7 @@ hooks = [ '--no_resume', '--no_auth', '--bucket', 'chromium-instrumented-libraries', - '-s', 'third_party/instrumented_libraries/binaries/msan-no-origins-xenial.tgz.sha1', + '-s', 'third_party/instrumented_libraries/binaries/msan-no-origins.tgz.sha1', ], }, { @@ -540,13 +531,13 @@ hooks = [ 'pattern': '.', # clang not supported on aix 'condition': 'host_os != "aix"', - 'action': ['python', 'tools/clang/scripts/update.py'], + 'action': ['python3', 'tools/clang/scripts/update.py'], }, { 'name': 'clang_tidy', 'pattern': '.', 'condition': 'checkout_clang_tidy', - 'action': ['python', 'tools/clang/scripts/update.py', + 'action': ['python3', 'tools/clang/scripts/update.py', '--package=clang-tidy'], }, { @@ -576,11 +567,11 @@ hooks = [ ], }, { - # Mac does not have llvm-objdump, download it for cross builds in Fuchsia. + # Mac does not have llvm-objdump, download it for cross builds in Fuchsia. 'name': 'llvm-objdump', 'pattern': '.', 'condition': 'host_os == "mac" and checkout_fuchsia', - 'action': ['python', 'tools/clang/scripts/update.py', + 'action': ['python3', 'tools/clang/scripts/update.py', '--package=objdump'], }, # Download and initialize "vpython" VirtualEnv environment packages. diff --git a/deps/v8/WORKSPACE b/deps/v8/WORKSPACE index 289902f68d857f..32fff02aab80b6 100644 --- a/deps/v8/WORKSPACE +++ b/deps/v8/WORKSPACE @@ -16,6 +16,12 @@ http_archive( load("@bazel_skylib//:workspace.bzl", "bazel_skylib_workspace") bazel_skylib_workspace() +new_local_repository( + name = "config", + path = "bazel/config", + build_file = "bazel/config/BUILD.bazel", +) + new_local_repository( name = "zlib", path = "third_party/zlib", diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h index 9384adeb69f65d..1fd2283decc212 100644 --- a/deps/v8/base/trace_event/common/trace_event_common.h +++ b/deps/v8/base/trace_event/common/trace_event_common.h @@ -224,12 +224,6 @@ // variable a unique name based on the line number to prevent name collisions. 
#define INTERNAL_TRACE_EVENT_UID(name_prefix) PERFETTO_UID(name_prefix) -// Special trace event macro to trace task execution with the location where it -// was posted from. -// TODO(skyostil): Convert this into a regular typed trace event. -#define TRACE_TASK_EXECUTION(run_function, task) \ - INTERNAL_TRACE_TASK_EXECUTION(run_function, task) - // Special trace event macro to trace log messages. // TODO(skyostil): Convert this into a regular typed trace event. #define TRACE_LOG_MESSAGE(file, message, line) \ @@ -985,11 +979,6 @@ struct BASE_EXPORT TraceTimestampTraits<::base::TimeTicks> { category_group, name, id, \ TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val) -// Special trace event macro to trace task execution with the location where it -// was posted from. -#define TRACE_TASK_EXECUTION(run_function, task) \ - INTERNAL_TRACE_TASK_EXECUTION(run_function, task) - // Special trace event macro to trace log messages. #define TRACE_LOG_MESSAGE(file, message, line) \ INTERNAL_TRACE_LOG_MESSAGE(file, message, line) diff --git a/deps/v8/bazel/BUILD.icu b/deps/v8/bazel/BUILD.icu index fd651d513ddf89..ea3860ac901b0a 100644 --- a/deps/v8/bazel/BUILD.icu +++ b/deps/v8/bazel/BUILD.icu @@ -26,7 +26,6 @@ cc_library( "U_ENABLE_RESOURCE_TRACING=0", "UNISTR_FROM_STRING_EXPLICIT=", "UNISTR_FROM_CHAR_EXPLICIT=", - "ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE", ], copts = [ "-Wno-unused-function", diff --git a/deps/v8/bazel/config/BUILD.bazel b/deps/v8/bazel/config/BUILD.bazel new file mode 100644 index 00000000000000..78dcdb14d5ec45 --- /dev/null +++ b/deps/v8/bazel/config/BUILD.bazel @@ -0,0 +1,109 @@ +# Copyright 2021 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +load("@bazel_skylib//lib:selects.bzl", "selects") +load( + ":v8-target-cpu.bzl", + "v8_configure_target_cpu", + "v8_target_cpu", +) + +package( + default_visibility = [ + "//visibility:public", + ], +) + +config_setting( + name = "platform_cpu_x64", + constraint_values = ["@platforms//cpu:x86_64"], +) + +config_setting( + name = "platform_cpu_ia32", + constraint_values = ["@platforms//cpu:x86_32"], +) + +config_setting( + name = "platform_cpu_arm64", + constraint_values = ["@platforms//cpu:arm"], +) + +config_setting( + name = "platform_cpu_arm", + constraint_values = ["@platforms//cpu:arm"], +) + +v8_target_cpu( + name = "v8_target_cpu", + build_setting_default = "none", +) + +config_setting( + name = "v8_host_target_is_none", + flag_values = { + ":v8_target_cpu": "none", + }, +) + +v8_configure_target_cpu( + name = "x64", + matching_configs = [":platform_cpu_x64"], +) + +v8_configure_target_cpu( + name = "ia32", + matching_configs = [":platform_cpu_ia32"], +) + +v8_configure_target_cpu( + name = "arm", + matching_configs = [":platform_cpu_arm64"], +) + +v8_configure_target_cpu( + name = "arm64", + matching_configs = [":platform_cpu_arm"], +) + +selects.config_setting_group( + name = "v8_target_is_32_bits", + match_any = [ + ":v8_target_ia32", + ":v8_target_arm", + ], +) + +# Running arm64 simulator on x64 host. 
+selects.config_setting_group( + name = "v8_arm64_simulator", + match_all = [ + ":v8_target_arm64", + ":is_x64", + ], +) + +config_setting( + name = "is_linux", + constraint_values = ["@platforms//os:linux"], +) + +config_setting( + name = "is_android", + constraint_values = ["@platforms//os:android"], +) + +config_setting( + name = "is_macos", + constraint_values = ["@platforms//os:macos"], +) + +selects.config_setting_group( + name = "is_posix", + match_any = [ + ":is_linux", + ":is_android", + ":is_macos", + ], +) diff --git a/deps/v8/bazel/config/v8-target-cpu.bzl b/deps/v8/bazel/config/v8-target-cpu.bzl new file mode 100644 index 00000000000000..2d5d241ebf45f4 --- /dev/null +++ b/deps/v8/bazel/config/v8-target-cpu.bzl @@ -0,0 +1,61 @@ +# Copyright 2021 the V8 project authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Build rules to choose the v8 target architecture.""" + +load("@bazel_skylib//lib:selects.bzl", "selects") + +V8CpuTypeInfo = provider( + doc = "A singleton provider that specifies the V8 target CPU type", + fields = { + "value": "The V8 Target CPU selected.", + }, +) + +def _host_target_cpu_impl(ctx): + allowed_values = ["arm", "arm64", "ia32", "x64", "none"] + cpu_type = ctx.build_setting_value + if cpu_type in allowed_values: + return V8CpuTypeInfo(value = cpu_type) + else: + fail("Error setting " + str(ctx.label) + ": invalid v8 target cpu '" + + cpu_type + "'. Allowed values are " + str(allowed_values)) + +v8_target_cpu = rule( + implementation = _host_target_cpu_impl, + build_setting = config.string(flag = True), + doc = "CPU that V8 will generate code for.", +) + +def v8_configure_target_cpu(name, matching_configs): + selects.config_setting_group( + name = "is_" + name, + match_any = matching_configs, + ) + + # If v8_target_cpu flag is set to 'name' + native.config_setting( + name = "v8_host_target_is_" + name, + flag_values = { + ":v8_target_cpu": name, + }, + ) + + # Default target if no v8_host_target flag is set. + selects.config_setting_group( + name = "v8_target_is_" + name, + match_all = [ + ":v8_host_target_is_none", + ":is_" + name, + ], + ) + + # Select either the default target or the flag. 
+ selects.config_setting_group( + name = "v8_target_" + name, + match_any = [ + ":v8_host_target_is_" + name, + ":v8_target_is_" + name, + ], + ) diff --git a/deps/v8/bazel/defs.bzl b/deps/v8/bazel/defs.bzl index 58fd53ed607e12..130e7be9eddd65 100644 --- a/deps/v8/bazel/defs.bzl +++ b/deps/v8/bazel/defs.bzl @@ -22,10 +22,6 @@ _create_option_int = rule( build_setting = config.int(flag = True), ) -def v8_raw_flag(name, default = False): - _create_option_flag(name = name, build_setting_default = default) - native.config_setting(name = "raw_" + name, flag_values = {name: "True"}) - def v8_flag(name, default = False): _create_option_flag(name = name, build_setting_default = default) native.config_setting(name = "is_" + name, flag_values = {name: "True"}) @@ -40,28 +36,30 @@ def v8_int(name, default = 0): def _custom_config_impl(ctx): defs = [] defs.append("V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP=" + - str(ctx.attr._v8_typed_array_max_size_in_heap[FlagInfo].value)) + str(ctx.attr._v8_typed_array_max_size_in_heap[FlagInfo].value)) context = cc_common.create_compilation_context(defines = depset(defs)) return [CcInfo(compilation_context = context)] v8_custom_config = rule( implementation = _custom_config_impl, attrs = { - "_v8_typed_array_max_size_in_heap": - attr.label(default = ":v8_typed_array_max_size_in_heap"), - } + "_v8_typed_array_max_size_in_heap": attr.label(default = ":v8_typed_array_max_size_in_heap"), + }, ) def _config_impl(ctx): hdrs = [] + # Add headers for h in ctx.attr.hdrs: hdrs += h[DefaultInfo].files.to_list() defs = [] + # Add conditional_defines for f, d in ctx.attr.conditional_defines.items(): if f[FlagInfo].value: defs.append(d) + # Add defines for d in ctx.attr.defines: defs.append(d) @@ -87,13 +85,14 @@ v8_config = rule( }, ) -def _default_args(configs): +def _default_args(): return struct( - deps = configs + [":define_flags"], + deps = [":define_flags"], copts = [ "-fPIC", "-Werror", "-Wextra", + "-Wno-bitwise-instead-of-logical", "-Wno-builtin-assume-aligned-alignment", "-Wno-unused-parameter", "-Wno-implicit-int-float-conversion", ], includes = ["include"], linkopts = [ - "-pthread" + "-pthread", ] + select({ - ":is_macos": [], - "//conditions:default": [ "-Wl,--no-as-needed -ldl" ], + "@config//:is_macos": [], + "//conditions:default": ["-Wl,--no-as-needed -ldl"], }) + select({ - ":should_add_rdynamic": [ "-rdynamic" ], + ":should_add_rdynamic": ["-rdynamic"], "//conditions:default": [], }), ) +ENABLE_I18N_SUPPORT_DEFINES = [ + "-DV8_INTL_SUPPORT", + "-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC", + # src/regexp/regexp-compiler-tonode.cc uses an unsafe ICU method and + # accesses a character implicitly. 
+ "-DUNISTR_FROM_CHAR_EXPLICIT=", +] + +def _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, icu_srcs, icu_deps): + return noicu_srcs != [] or noicu_deps != [] or icu_srcs != [] or icu_deps != [] + +# buildifier: disable=function-docstring def v8_binary( name, srcs, - configs = [], deps = [], includes = [], copts = [], linkopts = [], + noicu_srcs = [], + noicu_deps = [], + icu_srcs = [], + icu_deps = [], **kwargs): - default = _default_args(configs) - native.cc_binary( - name = name, - srcs = srcs, - deps = deps + default.deps, - includes = includes + default.includes, - copts = copts + default.copts, - linkopts = linkopts + default.linkopts, - **kwargs - ) + default = _default_args() + if _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, icu_srcs, icu_deps): + native.cc_binary( + name = "noicu/" + name, + srcs = srcs + noicu_srcs, + deps = deps + noicu_deps + default.deps, + includes = includes + default.includes, + copts = copts + default.copts, + linkopts = linkopts + default.linkopts, + **kwargs + ) + native.cc_binary( + name = "icu/" + name, + srcs = srcs + icu_srcs, + deps = deps + icu_deps + default.deps, + includes = includes + default.includes, + copts = copts + default.copts + ENABLE_I18N_SUPPORT_DEFINES, + linkopts = linkopts + default.linkopts, + **kwargs + ) + else: + native.cc_binary( + name = name, + srcs = srcs, + deps = deps + default.deps, + includes = includes + default.includes, + copts = copts + default.copts, + linkopts = linkopts + default.linkopts, + **kwargs + ) +# buildifier: disable=function-docstring def v8_library( name, srcs, - configs = [], deps = [], includes = [], copts = [], linkopts = [], + noicu_srcs = [], + noicu_deps = [], + icu_srcs = [], + icu_deps = [], **kwargs): - default = _default_args(configs) - native.cc_library( - name = name, - srcs = srcs, - deps = deps + default.deps, - includes = includes + default.includes, - copts = copts + default.copts, - linkopts = linkopts + default.linkopts, - alwayslink = 1, - **kwargs - ) + default = _default_args() + if _should_emit_noicu_and_icu(noicu_srcs, noicu_deps, icu_srcs, icu_deps): + native.cc_library( + name = "noicu/" + name, + srcs = srcs + noicu_srcs, + deps = deps + noicu_deps + default.deps, + includes = includes + default.includes, + copts = copts + default.copts, + linkopts = linkopts + default.linkopts, + alwayslink = 1, + **kwargs + ) + native.cc_library( + name = "icu/" + name, + srcs = srcs + icu_srcs, + deps = deps + icu_deps + default.deps, + includes = includes + default.includes, + copts = copts + default.copts + ENABLE_I18N_SUPPORT_DEFINES, + linkopts = linkopts + default.linkopts, + alwayslink = 1, + **kwargs + ) + else: + native.cc_library( + name = name, + srcs = srcs, + deps = deps + default.deps, + includes = includes + default.includes, + copts = copts + default.copts, + linkopts = linkopts + default.linkopts, + alwayslink = 1, + **kwargs + ) def _torque_impl(ctx): - v8root = ctx.attr.v8root[FlagInfo].value + v8root = "." 
+ prefix = ctx.attr.prefix + # Arguments args = [] args += ctx.attr.args args.append("-o") - args.append(ctx.bin_dir.path + "/torque-generated") + args.append(ctx.bin_dir.path + "/" + v8root + "/" + ctx.attr.prefix + "/torque-generated") args.append("-strip-v8-root") args.append("-v8-root") args.append(v8root) + # Sources args += [f.path for f in ctx.files.srcs] + # Generate/declare output files outs = [] for src in ctx.files.srcs: root, period, ext = src.path.rpartition(".") + # Strip v8root if root[:len(v8root)] == v8root: root = root[len(v8root):] - file = "torque-generated/" + root + file = ctx.attr.prefix + "/torque-generated/" + root outs.append(ctx.actions.declare_file(file + "-tq-csa.cc")) outs.append(ctx.actions.declare_file(file + "-tq-csa.h")) outs.append(ctx.actions.declare_file(file + "-tq-inl.inc")) outs.append(ctx.actions.declare_file(file + "-tq.inc")) outs.append(ctx.actions.declare_file(file + "-tq.cc")) - outs += [ctx.actions.declare_file("torque-generated/" + f) for f in ctx.attr.extras] + outs += [ctx.actions.declare_file(ctx.attr.prefix + "/torque-generated/" + f) for f in ctx.attr.extras] ctx.actions.run( outputs = outs, inputs = ctx.files.srcs, arguments = args, executable = ctx.executable.tool, + mnemonic = "GenTorque", progress_message = "Generating Torque files", ) return [DefaultInfo(files = depset(outs))] -v8_torque = rule( +_v8_torque = rule( implementation = _torque_impl, + # cfg = v8_target_cpu_transition, attrs = { + "prefix": attr.string(mandatory = True), "srcs": attr.label_list(allow_files = True, mandatory = True), "extras": attr.string_list(), "tool": attr.label( - default = ":torque", allow_files = True, executable = True, - cfg = "host", + cfg = "exec", ), "args": attr.string_list(), "v8root": attr.label(default = ":v8_root"), }, ) +def v8_torque(name, noicu_srcs, icu_srcs, args, extras): + _v8_torque( + name = "noicu/" + name, + prefix = "noicu", + srcs = noicu_srcs, + args = args, + extras = extras, + tool = select({ + "@config//:v8_target_is_32_bits": ":torque_non_pointer_compression", + "//conditions:default": ":torque", + }), + ) + _v8_torque( + name = "icu/" + name, + prefix = "icu", + srcs = icu_srcs, + args = args, + extras = extras, + tool = select({ + "@config//:v8_target_is_32_bits": ":torque_non_pointer_compression", + "//conditions:default": ":torque", + }), + ) + def _mksnapshot(ctx): outs = [ - ctx.actions.declare_file("snapshot.cc"), - ctx.actions.declare_file("embedded.S"), + ctx.actions.declare_file(ctx.attr.prefix + "/snapshot.cc"), + ctx.actions.declare_file(ctx.attr.prefix + "/embedded.S"), ] ctx.actions.run( outputs = outs, inputs = [], arguments = [ "--embedded_variant=Default", - "--startup_src", outs[0].path, - "--embedded_src", outs[1].path, + "--startup_src", + outs[0].path, + "--embedded_src", + outs[1].path, ] + ctx.attr.args, executable = ctx.executable.tool, - progress_message = "Running mksnapshot" + progress_message = "Running mksnapshot", ) return [DefaultInfo(files = depset(outs))] - -v8_mksnapshot = rule( +_v8_mksnapshot = rule( implementation = _mksnapshot, attrs = { "args": attr.string_list(), "tool": attr.label( - default = ":mksnapshot", + mandatory = True, allow_files = True, executable = True, - cfg = "host", + cfg = "exec", ), - } + "prefix": attr.string(mandatory = True), + }, ) +def v8_mksnapshot(name, args): + _v8_mksnapshot( + name = "noicu/" + name, + args = args, + prefix = "noicu", + tool = ":noicu/mksnapshot", + ) + _v8_mksnapshot( + name = "icu/" + name, + args = args, + prefix = "icu", + tool = 
":icu/mksnapshot", + ) + def _quote(val): if val[0] == '"' and val[-1] == '"': fail("String", val, "already quoted") @@ -255,11 +362,8 @@ def _json(kv_pairs): content += "}\n" return content -# TODO(victorgomes): Create a rule (instead of a macro), that can -# dynamically populate the build config. -def v8_build_config(name): - cpu = _quote("x64") - content = _json([ +def build_config_content(cpu, icu): + return _json([ ("current_cpu", cpu), ("dcheck_always_on", "false"), ("is_android", "false"), @@ -275,10 +379,11 @@ def v8_build_config(name): ("is_ubsan_vptr", "false"), ("target_cpu", cpu), ("v8_current_cpu", cpu), + ("v8_dict_property_const_tracking", "false"), ("v8_enable_atomic_marking_state", "false"), ("v8_enable_atomic_object_field_writes", "false"), ("v8_enable_concurrent_marking", "false"), - ("v8_enable_i18n_support", "true"), + ("v8_enable_i18n_support", icu), ("v8_enable_verify_predictable", "false"), ("v8_enable_verify_csa", "false"), ("v8_enable_lite_mode", "false"), @@ -289,10 +394,21 @@ def v8_build_config(name): ("v8_enable_webassembly", "false"), ("v8_control_flow_integrity", "false"), ("v8_enable_single_generation", "false"), + ("v8_enable_virtual_memory_cage", "false"), ("v8_target_cpu", cpu), ]) + +# TODO(victorgomes): Create a rule (instead of a macro), that can +# dynamically populate the build config. +def v8_build_config(name): + cpu = _quote("x64") + native.genrule( + name = "noicu/" + name, + outs = ["noicu/" + name + ".json"], + cmd = "echo '" + build_config_content(cpu, "false") + "' > \"$@\"", + ) native.genrule( - name = name, - outs = [name + ".json"], - cmd = "echo '" + content + "' > \"$@\"", + name = "icu/" + name, + outs = ["icu/" + name + ".json"], + cmd = "echo '" + build_config_content(cpu, "true") + "' > \"$@\"", ) diff --git a/deps/v8/bazel/v8-non-pointer-compression.bzl b/deps/v8/bazel/v8-non-pointer-compression.bzl new file mode 100644 index 00000000000000..4f1c6bc003372e --- /dev/null +++ b/deps/v8/bazel/v8-non-pointer-compression.bzl @@ -0,0 +1,59 @@ +def _v8_disable_pointer_compression(settings, attr): + return { + "//third_party/v8/HEAD:v8_enable_pointer_compression": "False", + } + +v8_disable_pointer_compression = transition( + implementation = _v8_disable_pointer_compression, + inputs = [], + outputs = ["//third_party/v8/HEAD:v8_enable_pointer_compression"], +) + +# The implementation of transition_rule: all this does is copy the +# cc_binary's output to its own output and propagate its runfiles +# and executable to use for "$ bazel run". +# +# This makes transition_rule as close to a pure wrapper of cc_binary +# as possible. +def _v8_binary_non_pointer_compression(ctx): + binary = ctx.attr.binary[0] + outfile = ctx.actions.declare_file(ctx.label.name) + cc_binary_outfile = binary[DefaultInfo].files.to_list()[0] + + ctx.actions.run_shell( + inputs = [cc_binary_outfile], + outputs = [outfile], + command = "cp %s %s" % (cc_binary_outfile.path, outfile.path), + ) + return [ + DefaultInfo( + executable = outfile, + data_runfiles = binary[DefaultInfo].data_runfiles, + ), + ] + +# The purpose of this rule is to transition to a config where v8_target_cpu is +# set to the appropriate architecture, which will remain in place through exec +# transitions, so mksnapshot can for instance build on x64 but for arm64. +v8_binary_non_pointer_compression = rule( + implementation = _v8_binary_non_pointer_compression, + attrs = { + # This is the cc_binary whose deps will select() on that feature. 
+ # Note specifically how it's configured with v8_target_cpu_transition, which + # ensures that setting propagates down the graph. + "binary": attr.label(cfg = v8_disable_pointer_compression), + # This is a stock Bazel requirement for any rule that uses Starlark + # transitions. It's okay to copy the below verbatim for all such rules. + # + # The purpose of this requirement is to give the ability to restrict + # which packages can invoke these rules, since Starlark transitions + # make much larger graphs possible that can have memory and performance + # consequences for your build. The whitelist defaults to "everything". + # But you can redefine it more strictly if you feel that's prudent. + "_allowlist_function_transition": attr.label( + default = "//tools/allowlists/function_transition_allowlist", + ), + }, + # Making this executable means it works with "$ bazel run". + executable = True, +) diff --git a/deps/v8/include/cppgc/allocation.h b/deps/v8/include/cppgc/allocation.h index a3112dd61fbbdd..69883fb34d1e46 100644 --- a/deps/v8/include/cppgc/allocation.h +++ b/deps/v8/include/cppgc/allocation.h @@ -18,6 +18,23 @@ #include "cppgc/type-traits.h" #include "v8config.h" // NOLINT(build/include_directory) +#if defined(__has_attribute) +#if __has_attribute(assume_aligned) +#define CPPGC_DEFAULT_ALIGNED \ + __attribute__((assume_aligned(api_constants::kDefaultAlignment))) +#define CPPGC_DOUBLE_WORD_ALIGNED \ + __attribute__((assume_aligned(2 * api_constants::kDefaultAlignment))) +#endif // __has_attribute(assume_aligned) +#endif // defined(__has_attribute) + +#if !defined(CPPGC_DEFAULT_ALIGNED) +#define CPPGC_DEFAULT_ALIGNED +#endif + +#if !defined(CPPGC_DOUBLE_WORD_ALIGNED) +#define CPPGC_DOUBLE_WORD_ALIGNED +#endif + namespace cppgc { /** @@ -27,6 +44,9 @@ class AllocationHandle; namespace internal { +// Similar to C++17 std::align_val_t; +enum class AlignVal : size_t {}; + class V8_EXPORT MakeGarbageCollectedTraitInternal { protected: static inline void MarkObjectAsFullyConstructed(const void* payload) { @@ -45,32 +65,72 @@ class V8_EXPORT MakeGarbageCollectedTraitInternal { atomic_mutable_bitfield->store(value, std::memory_order_release); } - template - struct SpacePolicy { - static void* Allocate(AllocationHandle& handle, size_t size) { - // Custom space. + // Dispatch based on compile-time information. + // + // Default implementation is for a custom space with >`kDefaultAlignment` byte + // alignment. + template + struct AllocationDispatcher final { + static void* Invoke(AllocationHandle& handle, size_t size) { static_assert(std::is_base_of::value, "Custom space must inherit from CustomSpaceBase."); + static_assert( + !CustomSpace::kSupportsCompaction, + "Custom spaces that support compaction do not support allocating " + "objects with non-default (i.e. word-sized) alignment."); return MakeGarbageCollectedTraitInternal::Allocate( - handle, size, internal::GCInfoTrait::Index(), - CustomSpace::kSpaceIndex); + handle, size, static_cast(alignment), + internal::GCInfoTrait::Index(), CustomSpace::kSpaceIndex); + } + }; + + // Fast path for regular allocations for the default space with + // `kDefaultAlignment` byte alignment. + template + struct AllocationDispatcher + final { + static void* Invoke(AllocationHandle& handle, size_t size) { + return MakeGarbageCollectedTraitInternal::Allocate( + handle, size, internal::GCInfoTrait::Index()); } }; - template - struct SpacePolicy { - static void* Allocate(AllocationHandle& handle, size_t size) { - // Default space. 
+ // Default space with >`kDefaultAlignment` byte alignment. + template + struct AllocationDispatcher final { + static void* Invoke(AllocationHandle& handle, size_t size) { return MakeGarbageCollectedTraitInternal::Allocate( - handle, size, internal::GCInfoTrait::Index()); + handle, size, static_cast(alignment), + internal::GCInfoTrait::Index()); + } + }; + + // Custom space with `kDefaultAlignment` byte alignment. + template + struct AllocationDispatcher + final { + static void* Invoke(AllocationHandle& handle, size_t size) { + static_assert(std::is_base_of::value, + "Custom space must inherit from CustomSpaceBase."); + return MakeGarbageCollectedTraitInternal::Allocate( + handle, size, internal::GCInfoTrait::Index(), + CustomSpace::kSpaceIndex); + } + }; private: - static void* Allocate(cppgc::AllocationHandle& handle, size_t size, - GCInfoIndex index); - static void* Allocate(cppgc::AllocationHandle& handle, size_t size, - GCInfoIndex index, CustomSpaceIndex space_index); + static void* CPPGC_DEFAULT_ALIGNED Allocate(cppgc::AllocationHandle&, size_t, + GCInfoIndex); + static void* CPPGC_DOUBLE_WORD_ALIGNED Allocate(cppgc::AllocationHandle&, + size_t, AlignVal, + GCInfoIndex); + static void* CPPGC_DEFAULT_ALIGNED Allocate(cppgc::AllocationHandle&, size_t, + GCInfoIndex, CustomSpaceIndex); + static void* CPPGC_DOUBLE_WORD_ALIGNED Allocate(cppgc::AllocationHandle&, + size_t, AlignVal, GCInfoIndex, + CustomSpaceIndex); friend class HeapObjectHeader; }; @@ -109,10 +169,18 @@ class MakeGarbageCollectedTraitBase std::is_base_of::value, "U of GarbageCollected must be a base of T. Check " "GarbageCollected base class inheritance."); - return SpacePolicy< + static constexpr size_t kWantedAlignment = + alignof(T) < internal::api_constants::kDefaultAlignment + ? internal::api_constants::kDefaultAlignment + : alignof(T); + static_assert( + kWantedAlignment <= internal::api_constants::kMaxSupportedAlignment, + "Requested alignment larger than alignof(std::max_align_t) bytes. " + "Please file a bug to possibly get this restriction lifted."); + return AllocationDispatcher< typename internal::GCInfoFolding< T, typename T::ParentMostGarbageCollectedType>::ResultType, - typename SpaceTrait::Space>::Allocate(handle, size); + typename SpaceTrait::Space, kWantedAlignment>::Invoke(handle, size); } /** @@ -236,4 +304,7 @@ V8_INLINE T* MakeGarbageCollected(AllocationHandle& handle, } // namespace cppgc +#undef CPPGC_DEFAULT_ALIGNED +#undef CPPGC_DOUBLE_WORD_ALIGNED + #endif // INCLUDE_CPPGC_ALLOCATION_H_ diff --git a/deps/v8/include/cppgc/internal/api-constants.h b/deps/v8/include/cppgc/internal/api-constants.h index 7253a47089352c..791039f1ee1326 100644 --- a/deps/v8/include/cppgc/internal/api-constants.h +++ b/deps/v8/include/cppgc/internal/api-constants.h @@ -39,6 +39,11 @@ constexpr size_t kCagedHeapReservationSize = static_cast(4) * kGB; constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize; #endif +static constexpr size_t kDefaultAlignment = sizeof(void*); + +// Maximum supported alignment for a type as in `alignof(T)`. 
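Together with kMaxSupportedAlignment, defined next, these constants bound what the new AllocationDispatcher machinery accepts. A minimal sketch of the embedder-visible effect, assuming a typical 64-bit platform (so kDefaultAlignment is 8 and the cap is 16); the type and function names are invented for illustration and are not part of the patch:

#include "cppgc/allocation.h"
#include "cppgc/garbage-collected.h"
#include "cppgc/visitor.h"

// alignof(AlignedNode) == 16 == 2 * kDefaultAlignment, so
// MakeGarbageCollectedTraitBase<T>::Allocate computes kWantedAlignment = 16
// and dispatches to the AllocationDispatcher specialization that forwards an
// AlignVal to the double-word-aligned Allocate() overload.
struct alignas(16) AlignedNode final : cppgc::GarbageCollected<AlignedNode> {
  void Trace(cppgc::Visitor*) const {}
  double payload[2];
};

AlignedNode* NewAlignedNode(cppgc::AllocationHandle& handle) {
  // A 32-byte-aligned type would instead hit the static_assert against
  // kMaxSupportedAlignment at compile time.
  return cppgc::MakeGarbageCollected<AlignedNode>(handle);
}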
+static constexpr size_t kMaxSupportedAlignment = 2 * kDefaultAlignment; + } // namespace api_constants } // namespace internal diff --git a/deps/v8/include/cppgc/internal/persistent-node.h b/deps/v8/include/cppgc/internal/persistent-node.h index 1fea667848b30d..68a8096cb66c6e 100644 --- a/deps/v8/include/cppgc/internal/persistent-node.h +++ b/deps/v8/include/cppgc/internal/persistent-node.h @@ -20,6 +20,7 @@ class Visitor; namespace internal { class CrossThreadPersistentRegion; +class FatalOutOfMemoryHandler; // PersistentNode represents a variant of two states: // 1) traceable node with a back pointer to the Persistent object; @@ -79,7 +80,7 @@ class V8_EXPORT PersistentRegionBase { using PersistentNodeSlots = std::array; public: - PersistentRegionBase() = default; + explicit PersistentRegionBase(const FatalOutOfMemoryHandler& oom_handler); // Clears Persistent fields to avoid stale pointers after heap teardown. ~PersistentRegionBase(); @@ -89,6 +90,7 @@ class V8_EXPORT PersistentRegionBase { PersistentNode* AllocateNode(void* owner, TraceCallback trace) { if (!free_list_head_) { EnsureNodeSlots(); + CPPGC_DCHECK(free_list_head_); } PersistentNode* node = free_list_head_; free_list_head_ = free_list_head_->FreeListNext(); @@ -122,6 +124,7 @@ class V8_EXPORT PersistentRegionBase { std::vector> nodes_; PersistentNode* free_list_head_ = nullptr; size_t nodes_in_use_ = 0; + const FatalOutOfMemoryHandler& oom_handler_; friend class CrossThreadPersistentRegion; }; @@ -130,7 +133,7 @@ class V8_EXPORT PersistentRegionBase { // freeing happens only on the thread that created the region. class V8_EXPORT PersistentRegion final : public PersistentRegionBase { public: - PersistentRegion(); + explicit PersistentRegion(const FatalOutOfMemoryHandler&); // Clears Persistent fields to avoid stale pointers after heap teardown. ~PersistentRegion() = default; @@ -138,21 +141,17 @@ class V8_EXPORT PersistentRegion final : public PersistentRegionBase { PersistentRegion& operator=(const PersistentRegion&) = delete; V8_INLINE PersistentNode* AllocateNode(void* owner, TraceCallback trace) { -#if V8_ENABLE_CHECKS - CheckIsCreationThread(); -#endif // V8_ENABLE_CHECKS + CPPGC_DCHECK(IsCreationThread()); return PersistentRegionBase::AllocateNode(owner, trace); } V8_INLINE void FreeNode(PersistentNode* node) { -#if V8_ENABLE_CHECKS - CheckIsCreationThread(); -#endif // V8_ENABLE_CHECKS + CPPGC_DCHECK(IsCreationThread()); PersistentRegionBase::FreeNode(node); } private: - void CheckIsCreationThread(); + bool IsCreationThread(); int creation_thread_id_; }; @@ -172,7 +171,7 @@ class V8_EXPORT PersistentRegionLock final { class V8_EXPORT CrossThreadPersistentRegion final : protected PersistentRegionBase { public: - CrossThreadPersistentRegion() = default; + explicit CrossThreadPersistentRegion(const FatalOutOfMemoryHandler&); // Clears Persistent fields to avoid stale pointers after heap teardown. ~CrossThreadPersistentRegion(); diff --git a/deps/v8/include/cppgc/internal/pointer-policies.h b/deps/v8/include/cppgc/internal/pointer-policies.h index 7c4f4a0862a67f..853d7031530979 100644 --- a/deps/v8/include/cppgc/internal/pointer-policies.h +++ b/deps/v8/include/cppgc/internal/pointer-policies.h @@ -92,19 +92,19 @@ class DisabledCheckingPolicy { void CheckPointer(const void*) {} }; -#if V8_ENABLE_CHECKS +#ifdef DEBUG // Off heap members are not connected to object graph and thus cannot resurrect // dead objects. 
using DefaultMemberCheckingPolicy = SameThreadEnabledCheckingPolicy; using DefaultPersistentCheckingPolicy = SameThreadEnabledCheckingPolicy; -#else +#else // !DEBUG using DefaultMemberCheckingPolicy = DisabledCheckingPolicy; using DefaultPersistentCheckingPolicy = DisabledCheckingPolicy; -#endif +#endif // !DEBUG // For CT(W)P neither marking information (for value), nor objectstart bitmap -// (for slot) are guaranteed to be present because there's no synchonization +// (for slot) are guaranteed to be present because there's no synchronization // between heaps after marking. using DefaultCrossThreadPersistentCheckingPolicy = DisabledCheckingPolicy; diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h index 00de81df887fc4..fb79bcfe40784c 100644 --- a/deps/v8/include/libplatform/libplatform.h +++ b/deps/v8/include/libplatform/libplatform.h @@ -95,7 +95,7 @@ V8_PLATFORM_EXPORT void RunIdleTasks(v8::Platform* platform, * The |platform| has to be created using |NewDefaultPlatform|. * */ -V8_DEPRECATE_SOON("Access the DefaultPlatform directly") +V8_DEPRECATED("Access the DefaultPlatform directly") V8_PLATFORM_EXPORT void SetTracingController( v8::Platform* platform, v8::platform::tracing::TracingController* tracing_controller); diff --git a/deps/v8/include/v8-context.h b/deps/v8/include/v8-context.h index bd28c6c9c935d1..d398ac4b21baad 100644 --- a/deps/v8/include/v8-context.h +++ b/deps/v8/include/v8-context.h @@ -318,7 +318,7 @@ class V8_EXPORT Context : public Data { * stack may be allocated separately from the native stack. See also * |TryCatch::JSStackComparableAddressPrivate| for details. */ - V8_DEPRECATE_SOON( + V8_DEPRECATED( "This is private V8 information that should not be exposed in the API.") uintptr_t JSStackComparableAddress() const { return JSStackComparableAddressPrivate(); diff --git a/deps/v8/include/v8-cppgc.h b/deps/v8/include/v8-cppgc.h index 813e0842fa7cf7..64b42c2b48b3af 100644 --- a/deps/v8/include/v8-cppgc.h +++ b/deps/v8/include/v8-cppgc.h @@ -195,9 +195,11 @@ class V8_EXPORT JSHeapConsistency final { * \returns whether a write barrier is needed and which barrier to invoke. */ template + V8_DEPRECATE_SOON("Write barriers automatically emitted by TracedReference.") static V8_INLINE WriteBarrierType - GetWriteBarrierType(const TracedReferenceBase& ref, - WriteBarrierParams& params, HeapHandleCallback callback) { + GetWriteBarrierType(const TracedReferenceBase& ref, + WriteBarrierParams& params, + HeapHandleCallback callback) { if (ref.IsEmpty()) return WriteBarrierType::kNone; if (V8_LIKELY(!cppgc::internal::WriteBarrier:: @@ -251,6 +253,7 @@ class V8_EXPORT JSHeapConsistency final { * \param params The parameters retrieved from `GetWriteBarrierType()`. * \param ref The reference being written to. */ + V8_DEPRECATE_SOON("Write barriers automatically emitted by TracedReference.") static V8_INLINE void DijkstraMarkingBarrier(const WriteBarrierParams& params, cppgc::HeapHandle& heap_handle, const TracedReferenceBase& ref) { @@ -280,6 +283,7 @@ class V8_EXPORT JSHeapConsistency final { * \param params The parameters retrieved from `GetWriteBarrierType()`. * \param ref The reference being written to. 
*/ + V8_DEPRECATE_SOON("Write barriers automatically emitted by TracedReference.") static V8_INLINE void GenerationalBarrier(const WriteBarrierParams& params, const TracedReferenceBase& ref) {} @@ -318,8 +322,13 @@ namespace cppgc { template struct TraceTrait> { - static void Trace(Visitor* visitor, const v8::TracedReference* self) { - static_cast(visitor)->Trace(*self); + static cppgc::TraceDescriptor GetTraceDescriptor(const void* self) { + return {nullptr, Trace}; + } + + static void Trace(Visitor* visitor, const void* self) { + static_cast(visitor)->Trace( + *static_cast*>(self)); } }; diff --git a/deps/v8/include/v8-embedder-heap.h b/deps/v8/include/v8-embedder-heap.h index 501a4fc523b78f..c3e5ddc16c7d5c 100644 --- a/deps/v8/include/v8-embedder-heap.h +++ b/deps/v8/include/v8-embedder-heap.h @@ -127,7 +127,7 @@ class V8_EXPORT EmbedderHeapTracer { /** * Called by the embedder to notify V8 of an empty execution stack. */ - V8_DEPRECATE_SOON( + V8_DEPRECATED( "This call only optimized internal caches which V8 is able to figure out " "on its own now.") void NotifyEmptyEmbedderStack(); diff --git a/deps/v8/include/v8-exception.h b/deps/v8/include/v8-exception.h index add882da4c47e6..faa46487f8fb31 100644 --- a/deps/v8/include/v8-exception.h +++ b/deps/v8/include/v8-exception.h @@ -169,7 +169,7 @@ class V8_EXPORT TryCatch { */ void SetCaptureMessage(bool value); - V8_DEPRECATE_SOON( + V8_DEPRECATED( "This is private information that should not be exposed by the API") static void* JSStackComparableAddress(TryCatch* handler) { if (handler == nullptr) return nullptr; diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h index cf90695785393b..a6c1b27353168b 100644 --- a/deps/v8/include/v8-fast-api-calls.h +++ b/deps/v8/include/v8-fast-api-calls.h @@ -460,12 +460,6 @@ class V8_EXPORT CFunction { return ArgUnwrap::Make(func); } - template - V8_DEPRECATED("Use CFunctionBuilder instead.") - static CFunction MakeWithFallbackSupport(F* func) { - return ArgUnwrap::Make(func); - } - CFunction(const void* address, const CFunctionInfo* type_info); private: @@ -684,17 +678,19 @@ struct TypeInfoHelper { #define STATIC_ASSERT_IMPLIES(COND, ASSERTION, MSG) \ static_assert(((COND) == 0) || (ASSERTION), MSG) +} // namespace internal + template -class CTypeInfoBuilder { +class V8_EXPORT CTypeInfoBuilder { public: using BaseType = T; static constexpr CTypeInfo Build() { constexpr CTypeInfo::Flags kFlags = - MergeFlags(TypeInfoHelper::Flags(), Flags...); - constexpr CTypeInfo::Type kType = TypeInfoHelper::Type(); + MergeFlags(internal::TypeInfoHelper::Flags(), Flags...); + constexpr CTypeInfo::Type kType = internal::TypeInfoHelper::Type(); constexpr CTypeInfo::SequenceType kSequenceType = - TypeInfoHelper::SequenceType(); + internal::TypeInfoHelper::SequenceType(); STATIC_ASSERT_IMPLIES( uint8_t(kFlags) & uint8_t(CTypeInfo::Flags::kAllowSharedBit), @@ -722,8 +718,8 @@ class CTypeInfoBuilder { "TypedArrays are only supported from primitive types or void."); // Return the same type with the merged flags. 
- return CTypeInfo(TypeInfoHelper::Type(), - TypeInfoHelper::SequenceType(), kFlags); + return CTypeInfo(internal::TypeInfoHelper::Type(), + internal::TypeInfoHelper::SequenceType(), kFlags); } private: @@ -735,6 +731,7 @@ class CTypeInfoBuilder { static constexpr CTypeInfo::Flags MergeFlags() { return CTypeInfo::Flags(0); } }; +namespace internal { template class CFunctionBuilderWithFunction { public: @@ -864,24 +861,28 @@ bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer( Local src, T* dst, uint32_t max_length); template <> -bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer< - internal::CTypeInfoBuilder::Build().GetId(), int32_t>( - Local src, int32_t* dst, uint32_t max_length); +bool V8_EXPORT V8_WARN_UNUSED_RESULT +TryToCopyAndConvertArrayToCppBuffer::Build().GetId(), + int32_t>(Local src, int32_t* dst, + uint32_t max_length); template <> -bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer< - internal::CTypeInfoBuilder::Build().GetId(), uint32_t>( - Local src, uint32_t* dst, uint32_t max_length); +bool V8_EXPORT V8_WARN_UNUSED_RESULT +TryToCopyAndConvertArrayToCppBuffer::Build().GetId(), + uint32_t>(Local src, uint32_t* dst, + uint32_t max_length); template <> -bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer< - internal::CTypeInfoBuilder::Build().GetId(), float>( - Local src, float* dst, uint32_t max_length); +bool V8_EXPORT V8_WARN_UNUSED_RESULT +TryToCopyAndConvertArrayToCppBuffer::Build().GetId(), + float>(Local src, float* dst, + uint32_t max_length); template <> -bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer< - internal::CTypeInfoBuilder::Build().GetId(), double>( - Local src, double* dst, uint32_t max_length); +bool V8_EXPORT V8_WARN_UNUSED_RESULT +TryToCopyAndConvertArrayToCppBuffer::Build().GetId(), + double>(Local src, double* dst, + uint32_t max_length); } // namespace v8 diff --git a/deps/v8/include/v8-function.h b/deps/v8/include/v8-function.h index 9424a86fdafc40..897e6ed6175931 100644 --- a/deps/v8/include/v8-function.h +++ b/deps/v8/include/v8-function.h @@ -18,6 +18,7 @@ namespace v8 { class Context; +class UnboundScript; /** * A JavaScript function object (ECMA-262, 15.3). @@ -58,6 +59,8 @@ class V8_EXPORT Function : public Object { void SetName(Local name); Local GetName() const; + MaybeLocal GetUnboundScript() const; + /** * Name inferred from variable or property assignment of this function. * Used to facilitate debugging and profiling of JavaScript code written diff --git a/deps/v8/include/v8-initialization.h b/deps/v8/include/v8-initialization.h index 7c9f26b89279d0..822d150371c698 100644 --- a/deps/v8/include/v8-initialization.h +++ b/deps/v8/include/v8-initialization.h @@ -227,6 +227,16 @@ class V8_EXPORT V8 { * this returns zero. */ static size_t GetVirtualMemoryCageSizeInBytes(); + + /** + * Returns whether the virtual memory cage is configured securely. + * + * If V8 cannot create a proper virtual memory cage, it will fall back to + * creating a cage that doesn't have the desired security properties but at + * least still allows V8 to function. This API can be used to determine if + * such an insecure cage is being used, in which case it will return false. 
+ */ + static bool IsUsingSecureVirtualMemoryCage(); #endif /** diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h index 74592fdf573e96..8ba21ffd84a560 100644 --- a/deps/v8/include/v8-inspector.h +++ b/deps/v8/include/v8-inspector.h @@ -114,7 +114,7 @@ class V8_EXPORT V8StackTrace { virtual int topLineNumber() const = 0; virtual int topColumnNumber() const = 0; virtual int topScriptId() const = 0; - V8_DEPRECATE_SOON("Use V8::StackTrace::topScriptId() instead.") + V8_DEPRECATED("Use V8::StackTrace::topScriptId() instead.") int topScriptIdAsInteger() const { return topScriptId(); } virtual StringView topFunctionName() const = 0; diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index e1aee508bbceac..f0531bcff6ebde 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -494,13 +494,14 @@ constexpr bool VirtualMemoryCageIsEnabled() { #endif } -#ifdef V8_VIRTUAL_MEMORY_CAGE -// Size of the virtual memory cage, excluding the guard regions surrounding it. -constexpr size_t kVirtualMemoryCageSize = size_t{1} << 40; // 1 TB +#ifdef V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE -static_assert(kVirtualMemoryCageSize > Internals::kPtrComprCageReservationSize, - "The virtual memory cage must be larger than the pointer " - "compression cage contained within it."); +#define GB (1ULL << 30) +#define TB (1ULL << 40) + +// Size of the virtual memory cage, excluding the guard regions surrounding it. +constexpr size_t kVirtualMemoryCageSizeLog2 = 40; // 1 TB +constexpr size_t kVirtualMemoryCageSize = 1ULL << kVirtualMemoryCageSizeLog2; // Required alignment of the virtual memory cage. For simplicity, we require the // size of the guard regions to be a multiple of this, so that this specifies @@ -510,10 +511,22 @@ static_assert(kVirtualMemoryCageSize > Internals::kPtrComprCageReservationSize, constexpr size_t kVirtualMemoryCageAlignment = Internals::kPtrComprCageBaseAlignment; +#ifdef V8_CAGED_POINTERS +// CagedPointers are guaranteed to point into the virtual memory cage. This is +// achieved by storing them as offset from the cage base rather than as raw +// pointers. +using CagedPointer_t = Address; + +// For efficiency, the offset is stored shifted to the left, so that +// it is guaranteed that the offset is smaller than the cage size after +// shifting it to the right again. This constant specifies the shift amount. +constexpr uint64_t kCagedPointerShift = 64 - kVirtualMemoryCageSizeLog2; +#endif + // Size of the guard regions surrounding the virtual memory cage. This assumes a // worst-case scenario of a 32-bit unsigned index being used to access an array // of 64-bit values. -constexpr size_t kVirtualMemoryCageGuardRegionSize = size_t{32} << 30; // 32 GB +constexpr size_t kVirtualMemoryCageGuardRegionSize = 32ULL * GB; static_assert((kVirtualMemoryCageGuardRegionSize % kVirtualMemoryCageAlignment) == 0, @@ -525,7 +538,33 @@ static_assert((kVirtualMemoryCageGuardRegionSize % // until either the reservation succeeds or the minimum size is reached. A // minimum of 32GB allows the 4GB pointer compression region as well as the // ArrayBuffer partition and two 10GB WASM memory cages to fit into the cage. -constexpr size_t kVirtualMemoryCageMinimumSize = size_t{32} << 30; // 32 GB +// 32GB should also be the minimum possible size of the userspace address space +// as there are some machine configurations with only 36 virtual address bits. 
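A quick illustration of the shifted-offset scheme behind kCagedPointerShift described above; this sketches only the arithmetic, and the helper names are invented rather than V8 API:

#include <cstdint>

// With a 1 TB cage (kVirtualMemoryCageSizeLog2 == 40), the shift is 64 - 40 == 24.
constexpr uint64_t kCageSizeLog2 = 40;
constexpr uint64_t kShift = 64 - kCageSizeLog2;

// Encode: store the offset from the cage base, shifted left.
constexpr uint64_t EncodeCagedPointer(uint64_t address, uint64_t cage_base) {
  return (address - cage_base) << kShift;
}

// Decode: shifting right first guarantees the offset is < 2^40, so the result
// always lands inside the cage no matter what bits the raw value contained.
constexpr uint64_t DecodeCagedPointer(uint64_t encoded, uint64_t cage_base) {
  return cage_base + (encoded >> kShift);
}

static_assert(DecodeCagedPointer(EncodeCagedPointer(0x10001234, 0x10000000),
                                 0x10000000) == 0x10001234,
              "round trip");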
+constexpr size_t kVirtualMemoryCageMinimumSize = 32ULL * GB; + +static_assert(kVirtualMemoryCageMinimumSize <= kVirtualMemoryCageSize, + "The minimal size of the virtual memory cage must be smaller or " + "equal to the regular size."); + +// On OSes where reservation virtual memory is too expensive to create a real +// cage, notably Windows pre 8.1, we create a fake cage that doesn't actually +// reserve most of the memory, and so doesn't have the desired security +// properties, but still ensures that objects that should be located inside the +// cage are allocated within kVirtualMemoryCageSize bytes from the start of the +// cage, and so appear to be inside the cage. The minimum size of the virtual +// memory range that is actually reserved for a fake cage is specified by this +// constant and should be big enough to contain the pointer compression region +// as well as the ArrayBuffer partition. +constexpr size_t kFakeVirtualMemoryCageMinReservationSize = 8ULL * GB; + +static_assert(kVirtualMemoryCageMinimumSize > + Internals::kPtrComprCageReservationSize, + "The virtual memory cage must be larger than the pointer " + "compression cage contained within it."); +static_assert(kFakeVirtualMemoryCageMinReservationSize > + Internals::kPtrComprCageReservationSize, + "The reservation for a fake virtual memory cage must be larger " + "than the pointer compression cage contained within it."); // For now, even if the virtual memory cage is enabled, we still allow backing // stores to be allocated outside of it as fallback. This will simplify the @@ -537,7 +576,10 @@ constexpr bool kAllowBackingStoresOutsideCage = false; constexpr bool kAllowBackingStoresOutsideCage = true; #endif // V8_HEAP_SANDBOX -#endif // V8_VIRTUAL_MEMORY_CAGE +#undef GB +#undef TB + +#endif // V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE // Only perform cast check for types derived from v8::Data since // other types do not implement the Cast method. diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h index 39276b34a9d5b2..32b53f1b423557 100644 --- a/deps/v8/include/v8-isolate.h +++ b/deps/v8/include/v8-isolate.h @@ -281,6 +281,12 @@ class V8_EXPORT Isolate { */ int embedder_wrapper_type_index = -1; int embedder_wrapper_object_index = -1; + + /** + * The following parameter is experimental and may change significantly. + * This is currently for internal testing. + */ + Isolate* experimental_attach_to_shared_isolate = nullptr; }; /** @@ -585,6 +591,11 @@ class V8_EXPORT Isolate { */ static Isolate* TryGetCurrent(); + /** + * Return true if this isolate is currently active. + **/ + bool IsCurrent() const; + /** * Clears the set of objects held strongly by the heap. 
This set of * objects are originally built when a WeakRef is created or diff --git a/deps/v8/include/v8-message.h b/deps/v8/include/v8-message.h index be427e79cf21e0..8f09619cba5be3 100644 --- a/deps/v8/include/v8-message.h +++ b/deps/v8/include/v8-message.h @@ -61,7 +61,7 @@ class ScriptOriginOptions { class V8_EXPORT ScriptOrigin { public: #if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */ - V8_DEPRECATE_SOON("Use constructor with primitive C++ types") + V8_DEPRECATED("Use constructor with primitive C++ types") #endif ScriptOrigin( Local resource_name, Local resource_line_offset, @@ -74,7 +74,7 @@ class V8_EXPORT ScriptOrigin { Local is_module = Local(), Local host_defined_options = Local()); #if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */ - V8_DEPRECATE_SOON("Use constructor that takes an isolate") + V8_DEPRECATED("Use constructor that takes an isolate") #endif explicit ScriptOrigin( Local resource_name, int resource_line_offset = 0, @@ -103,11 +103,11 @@ class V8_EXPORT ScriptOrigin { host_defined_options_(host_defined_options) {} V8_INLINE Local ResourceName() const; - V8_DEPRECATE_SOON("Use getter with primitive C++ types.") + V8_DEPRECATED("Use getter with primitive C++ types.") V8_INLINE Local ResourceLineOffset() const; - V8_DEPRECATE_SOON("Use getter with primitive C++ types.") + V8_DEPRECATED("Use getter with primitive C++ types.") V8_INLINE Local ResourceColumnOffset() const; - V8_DEPRECATE_SOON("Use getter with primitive C++ types.") + V8_DEPRECATED("Use getter with primitive C++ types.") V8_INLINE Local ScriptID() const; V8_INLINE int LineOffset() const; V8_INLINE int ColumnOffset() const; diff --git a/deps/v8/include/v8-metrics.h b/deps/v8/include/v8-metrics.h index 29e54401067d3a..62738442f7ce8c 100644 --- a/deps/v8/include/v8-metrics.h +++ b/deps/v8/include/v8-metrics.h @@ -46,12 +46,12 @@ struct GarbageCollectionFullCycle { GarbageCollectionSizes objects_cpp; GarbageCollectionSizes memory; GarbageCollectionSizes memory_cpp; - double collection_rate_in_percent; - double collection_rate_cpp_in_percent; - double efficiency_in_bytes_per_us; - double efficiency_cpp_in_bytes_per_us; - double main_thread_efficiency_in_bytes_per_us; - double main_thread_efficiency_cpp_in_bytes_per_us; + double collection_rate_in_percent = -1.0; + double collection_rate_cpp_in_percent = -1.0; + double efficiency_in_bytes_per_us = -1.0; + double efficiency_cpp_in_bytes_per_us = -1.0; + double main_thread_efficiency_in_bytes_per_us = -1.0; + double main_thread_efficiency_cpp_in_bytes_per_us = -1.0; }; struct GarbageCollectionFullMainThreadIncrementalMark { diff --git a/deps/v8/include/v8-object.h b/deps/v8/include/v8-object.h index 114e452a380083..6716162df10d75 100644 --- a/deps/v8/include/v8-object.h +++ b/deps/v8/include/v8-object.h @@ -598,6 +598,11 @@ class V8_EXPORT Object : public Value { Local CreationContext(); MaybeLocal GetCreationContext(); + /** + * Shortcut for GetCreationContext().ToLocalChecked(). + **/ + Local GetCreationContextChecked(); + /** Same as above, but works for Persistents */ V8_DEPRECATE_SOON( "Use MaybeLocal GetCreationContext(const " diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h index e60e1757b63941..234582f0f6aff2 100644 --- a/deps/v8/include/v8-platform.h +++ b/deps/v8/include/v8-platform.h @@ -444,13 +444,7 @@ class PageAllocator { * zero-initialized again. The memory must have been previously allocated by a * call to AllocatePages. Returns true on success, false otherwise. 
*/ -#ifdef V8_VIRTUAL_MEMORY_CAGE - // Implementing this API is required when the virtual memory cage is enabled. virtual bool DecommitPages(void* address, size_t size) = 0; -#else - // Otherwise, it is optional for now. - virtual bool DecommitPages(void* address, size_t size) { return false; } -#endif /** * INTERNAL ONLY: This interface has not been stabilised and may change diff --git a/deps/v8/include/v8-primitive.h b/deps/v8/include/v8-primitive.h index 59d959da0572e1..8a95c151bd1f62 100644 --- a/deps/v8/include/v8-primitive.h +++ b/deps/v8/include/v8-primitive.h @@ -575,7 +575,7 @@ class V8_EXPORT Symbol : public Name { /** * Returns the description string of the symbol, or undefined if none. */ - V8_DEPRECATE_SOON("Use Symbol::Description(isolate)") + V8_DEPRECATED("Use Symbol::Description(isolate)") Local Description() const; Local Description(Isolate* isolate) const; diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h index f2354cac38e237..ccf15bab2a0cdd 100644 --- a/deps/v8/include/v8-profiler.h +++ b/deps/v8/include/v8-profiler.h @@ -603,7 +603,7 @@ class V8_EXPORT ActivityControl { * Notify about current progress. The activity can be stopped by * returning kAbort as the callback result. */ - virtual ControlOption ReportProgressValue(int done, int total) = 0; + virtual ControlOption ReportProgressValue(uint32_t done, uint32_t total) = 0; }; /** diff --git a/deps/v8/include/v8-script.h b/deps/v8/include/v8-script.h index 370903b20a60e4..356b99358be339 100644 --- a/deps/v8/include/v8-script.h +++ b/deps/v8/include/v8-script.h @@ -173,14 +173,14 @@ class V8_EXPORT Module : public Data { /** * Returns the number of modules requested by this module. */ - V8_DEPRECATE_SOON("Use Module::GetModuleRequests() and FixedArray::Length().") + V8_DEPRECATED("Use Module::GetModuleRequests() and FixedArray::Length().") int GetModuleRequestsLength() const; /** * Returns the ith module specifier in this module. * i must be < GetModuleRequestsLength() and >= 0. */ - V8_DEPRECATE_SOON( + V8_DEPRECATED( "Use Module::GetModuleRequests() and ModuleRequest::GetSpecifier().") Local GetModuleRequest(int i) const; @@ -188,7 +188,7 @@ class V8_EXPORT Module : public Data { * Returns the source location (line number and column number) of the ith * module specifier's first occurrence in this module. */ - V8_DEPRECATE_SOON( + V8_DEPRECATED( "Use Module::GetModuleRequests(), ModuleRequest::GetSourceOffset(), and " "Module::SourceOffsetToLocation().") Location GetModuleRequestLocation(int i) const; @@ -223,7 +223,7 @@ class V8_EXPORT Module : public Data { * instantiation. (In the case where the callback throws an exception, that * exception is propagated.) */ - V8_DEPRECATE_SOON( + V8_DEPRECATED( "Use the version of InstantiateModule that takes a ResolveModuleCallback " "parameter") V8_WARN_UNUSED_RESULT Maybe InstantiateModule(Local context, @@ -345,6 +345,12 @@ class V8_EXPORT Script { * Returns the corresponding context-unbound script. */ Local GetUnboundScript(); + + /** + * The name that was passed by the embedder as ResourceName to the + * ScriptOrigin. This can be either a v8::String or v8::Undefined. + */ + Local GetResourceName(); }; enum class ScriptType { kClassic, kModule }; @@ -465,21 +471,16 @@ class V8_EXPORT ScriptCompiler { virtual size_t GetMoreData(const uint8_t** src) = 0; /** - * V8 calls this method to set a 'bookmark' at the current position in - * the source stream, for the purpose of (maybe) later calling - * ResetToBookmark. 
If ResetToBookmark is called later, then subsequent - * calls to GetMoreData should return the same data as they did when - * SetBookmark was called earlier. - * - * The embedder may return 'false' to indicate it cannot provide this - * functionality. + * [DEPRECATED]: No longer used, will be removed soon. */ - virtual bool SetBookmark(); + V8_DEPRECATED("Not used") + virtual bool SetBookmark() { return false; } /** - * V8 calls this to return to a previously set bookmark. + * [DEPRECATED]: No longer used, will be removed soon. */ - virtual void ResetToBookmark(); + V8_DEPRECATED("Not used") + virtual void ResetToBookmark() {} }; /** @@ -694,6 +695,12 @@ class V8_EXPORT ScriptCompiler { CompileOptions options = kNoCompileOptions, NoCacheReason no_cache_reason = kNoCacheNoReason, Local* script_or_module_out = nullptr); + static V8_WARN_UNUSED_RESULT MaybeLocal CompileFunction( + Local context, Source* source, size_t arguments_count = 0, + Local arguments[] = nullptr, size_t context_extension_count = 0, + Local context_extensions[] = nullptr, + CompileOptions options = kNoCompileOptions, + NoCacheReason no_cache_reason = kNoCacheNoReason); /** * Creates and returns code cache for the specified unbound_script. @@ -712,7 +719,7 @@ class V8_EXPORT ScriptCompiler { /** * Creates and returns code cache for the specified function that was - * previously produced by CompileFunctionInContext. + * previously produced by CompileFunction. * This will return nullptr if the script cannot be serialized. The * CachedData returned by this function should be owned by the caller. */ @@ -722,6 +729,13 @@ class V8_EXPORT ScriptCompiler { static V8_WARN_UNUSED_RESULT MaybeLocal CompileUnboundInternal( Isolate* isolate, Source* source, CompileOptions options, NoCacheReason no_cache_reason); + + static V8_WARN_UNUSED_RESULT MaybeLocal CompileFunctionInternal( + Local context, Source* source, size_t arguments_count, + Local arguments[], size_t context_extension_count, + Local context_extensions[], CompileOptions options, + NoCacheReason no_cache_reason, + Local* script_or_module_out); }; ScriptCompiler::Source::Source(Local string, const ScriptOrigin& origin, diff --git a/deps/v8/include/v8-traced-handle.h b/deps/v8/include/v8-traced-handle.h index 15c9693ecbb967..7db34a970c8d35 100644 --- a/deps/v8/include/v8-traced-handle.h +++ b/deps/v8/include/v8-traced-handle.h @@ -26,13 +26,20 @@ namespace v8 { class Value; namespace internal { + class BasicTracedReferenceExtractor; -} // namespace internal -namespace api_internal { +enum class GlobalHandleDestructionMode { kWithDestructor, kWithoutDestructor }; + +enum class GlobalHandleStoreMode { + kInitializingStore, + kAssigningStore, +}; + V8_EXPORT internal::Address* GlobalizeTracedReference( internal::Isolate* isolate, internal::Address* handle, - internal::Address* slot, bool has_destructor); + internal::Address* slot, GlobalHandleDestructionMode destruction_mode, + GlobalHandleStoreMode store_mode); V8_EXPORT void MoveTracedGlobalReference(internal::Address** from, internal::Address** to); V8_EXPORT void CopyTracedGlobalReference(const internal::Address* const* from, @@ -41,7 +48,8 @@ V8_EXPORT void DisposeTracedGlobal(internal::Address* global_handle); V8_EXPORT void SetFinalizationCallbackTraced( internal::Address* location, void* parameter, WeakCallbackInfo::Callback callback); -} // namespace api_internal + +} // namespace internal /** * Deprecated. Use |TracedReference| instead. 
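For orientation, a hedged sketch of what the new GlobalHandleDestructionMode and GlobalHandleStoreMode enums distinguish from the embedder's side; the wrapper function is invented for illustration, while TracedReference and TracedGlobal are the public API declared in this header:

#include "v8-isolate.h"
#include "v8-local-handle.h"
#include "v8-traced-handle.h"

// Construction globalizes with an initializing store; Reset() on a live
// handle is an assigning store. TracedReference uses kWithoutDestructor and
// TracedGlobal kWithDestructor, which is what the removed bool used to encode.
void TracedHandleModes(v8::Isolate* isolate, v8::Local<v8::Value> a,
                       v8::Local<v8::Value> b) {
  v8::TracedReference<v8::Value> ref(isolate, a);  // kWithoutDestructor,
                                                   // kInitializingStore
  ref.Reset(isolate, b);                           // kWithoutDestructor,
                                                   // kAssigningStore
  v8::TracedGlobal<v8::Value> global(isolate, a);  // kWithDestructor,
                                                   // kInitializingStore
}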
@@ -164,15 +172,15 @@ class BasicTracedReference : public TracedReferenceBase { } private: - enum DestructionMode { kWithDestructor, kWithoutDestructor }; - /** * An empty BasicTracedReference without storage cell. */ BasicTracedReference() = default; - V8_INLINE static internal::Address* New(Isolate* isolate, T* that, void* slot, - DestructionMode destruction_mode); + V8_INLINE static internal::Address* New( + Isolate* isolate, T* that, void* slot, + internal::GlobalHandleDestructionMode destruction_mode, + internal::GlobalHandleStoreMode store_mode); friend class EmbedderHeapTracer; template @@ -215,15 +223,17 @@ class TracedGlobal : public BasicTracedReference { */ template TracedGlobal(Isolate* isolate, Local that) : BasicTracedReference() { - this->val_ = this->New(isolate, that.val_, &this->val_, - BasicTracedReference::kWithDestructor); + this->val_ = + this->New(isolate, that.val_, &this->val_, + internal::GlobalHandleDestructionMode::kWithDestructor, + internal::GlobalHandleStoreMode::kInitializingStore); static_assert(std::is_base_of::value, "type check"); } /** * Move constructor initializing TracedGlobal from an existing one. */ - V8_INLINE TracedGlobal(TracedGlobal&& other) { + V8_INLINE TracedGlobal(TracedGlobal&& other) noexcept { // Forward to operator=. *this = std::move(other); } @@ -232,7 +242,7 @@ class TracedGlobal : public BasicTracedReference { * Move constructor initializing TracedGlobal from an existing one. */ template - V8_INLINE TracedGlobal(TracedGlobal&& other) { + V8_INLINE TracedGlobal(TracedGlobal&& other) noexcept { // Forward to operator=. *this = std::move(other); } @@ -257,13 +267,13 @@ class TracedGlobal : public BasicTracedReference { /** * Move assignment operator initializing TracedGlobal from an existing one. */ - V8_INLINE TracedGlobal& operator=(TracedGlobal&& rhs); + V8_INLINE TracedGlobal& operator=(TracedGlobal&& rhs) noexcept; /** * Move assignment operator initializing TracedGlobal from an existing one. */ template - V8_INLINE TracedGlobal& operator=(TracedGlobal&& rhs); + V8_INLINE TracedGlobal& operator=(TracedGlobal&& rhs) noexcept; /** * Copy assignment operator initializing TracedGlobal from an existing one. @@ -338,8 +348,10 @@ class TracedReference : public BasicTracedReference { */ template TracedReference(Isolate* isolate, Local that) : BasicTracedReference() { - this->val_ = this->New(isolate, that.val_, &this->val_, - BasicTracedReference::kWithoutDestructor); + this->val_ = + this->New(isolate, that.val_, &this->val_, + internal::GlobalHandleDestructionMode::kWithoutDestructor, + internal::GlobalHandleStoreMode::kInitializingStore); static_assert(std::is_base_of::value, "type check"); } @@ -347,7 +359,7 @@ class TracedReference : public BasicTracedReference { * Move constructor initializing TracedReference from an * existing one. */ - V8_INLINE TracedReference(TracedReference&& other) { + V8_INLINE TracedReference(TracedReference&& other) noexcept { // Forward to operator=. *this = std::move(other); } @@ -357,7 +369,7 @@ class TracedReference : public BasicTracedReference { * existing one. */ template - V8_INLINE TracedReference(TracedReference&& other) { + V8_INLINE TracedReference(TracedReference&& other) noexcept { // Forward to operator=. *this = std::move(other); } @@ -384,13 +396,13 @@ class TracedReference : public BasicTracedReference { /** * Move assignment operator initializing TracedGlobal from an existing one. 
*/ - V8_INLINE TracedReference& operator=(TracedReference&& rhs); + V8_INLINE TracedReference& operator=(TracedReference&& rhs) noexcept; /** * Move assignment operator initializing TracedGlobal from an existing one. */ template - V8_INLINE TracedReference& operator=(TracedReference&& rhs); + V8_INLINE TracedReference& operator=(TracedReference&& rhs) noexcept; /** * Copy assignment operator initializing TracedGlobal from an existing one. @@ -420,18 +432,19 @@ class TracedReference : public BasicTracedReference { // --- Implementation --- template internal::Address* BasicTracedReference::New( - Isolate* isolate, T* that, void* slot, DestructionMode destruction_mode) { + Isolate* isolate, T* that, void* slot, + internal::GlobalHandleDestructionMode destruction_mode, + internal::GlobalHandleStoreMode store_mode) { if (that == nullptr) return nullptr; internal::Address* p = reinterpret_cast(that); - return api_internal::GlobalizeTracedReference( + return internal::GlobalizeTracedReference( reinterpret_cast(isolate), p, - reinterpret_cast(slot), - destruction_mode == kWithDestructor); + reinterpret_cast(slot), destruction_mode, store_mode); } void TracedReferenceBase::Reset() { if (IsEmpty()) return; - api_internal::DisposeTracedGlobal(reinterpret_cast(val_)); + internal::DisposeTracedGlobal(reinterpret_cast(val_)); SetSlotThreadSafe(nullptr); } @@ -484,12 +497,13 @@ void TracedGlobal::Reset(Isolate* isolate, const Local& other) { Reset(); if (other.IsEmpty()) return; this->val_ = this->New(isolate, other.val_, &this->val_, - BasicTracedReference::kWithDestructor); + internal::GlobalHandleDestructionMode::kWithDestructor, + internal::GlobalHandleStoreMode::kAssigningStore); } template template -TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) { +TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) noexcept { static_assert(std::is_base_of::value, "type check"); *this = std::move(rhs.template As()); return *this; @@ -504,9 +518,9 @@ TracedGlobal& TracedGlobal::operator=(const TracedGlobal& rhs) { } template -TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) { +TracedGlobal& TracedGlobal::operator=(TracedGlobal&& rhs) noexcept { if (this != &rhs) { - api_internal::MoveTracedGlobalReference( + internal::MoveTracedGlobalReference( reinterpret_cast(&rhs.val_), reinterpret_cast(&this->val_)); } @@ -518,7 +532,7 @@ TracedGlobal& TracedGlobal::operator=(const TracedGlobal& rhs) { if (this != &rhs) { this->Reset(); if (rhs.val_ != nullptr) { - api_internal::CopyTracedGlobalReference( + internal::CopyTracedGlobalReference( reinterpret_cast(&rhs.val_), reinterpret_cast(&this->val_)); } @@ -534,12 +548,14 @@ void TracedReference::Reset(Isolate* isolate, const Local& other) { if (other.IsEmpty()) return; this->SetSlotThreadSafe( this->New(isolate, other.val_, &this->val_, - BasicTracedReference::kWithoutDestructor)); + internal::GlobalHandleDestructionMode::kWithoutDestructor, + internal::GlobalHandleStoreMode::kAssigningStore)); } template template -TracedReference& TracedReference::operator=(TracedReference&& rhs) { +TracedReference& TracedReference::operator=( + TracedReference&& rhs) noexcept { static_assert(std::is_base_of::value, "type check"); *this = std::move(rhs.template As()); return *this; @@ -555,9 +571,10 @@ TracedReference& TracedReference::operator=( } template -TracedReference& TracedReference::operator=(TracedReference&& rhs) { +TracedReference& TracedReference::operator=( + TracedReference&& rhs) noexcept { if (this != &rhs) { - 
api_internal::MoveTracedGlobalReference( + internal::MoveTracedGlobalReference( reinterpret_cast(&rhs.val_), reinterpret_cast(&this->val_)); } @@ -569,7 +586,7 @@ TracedReference& TracedReference::operator=(const TracedReference& rhs) { if (this != &rhs) { this->Reset(); if (rhs.val_ != nullptr) { - api_internal::CopyTracedGlobalReference( + internal::CopyTracedGlobalReference( reinterpret_cast(&rhs.val_), reinterpret_cast(&this->val_)); } @@ -596,7 +613,7 @@ uint16_t TracedReferenceBase::WrapperClassId() const { template void TracedGlobal::SetFinalizationCallback( void* parameter, typename WeakCallbackInfo::Callback callback) { - api_internal::SetFinalizationCallbackTraced( + internal::SetFinalizationCallbackTraced( reinterpret_cast(this->val_), parameter, callback); } diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index 207f81723bfd14..24da2489f7f3f3 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 9 -#define V8_MINOR_VERSION 6 -#define V8_BUILD_NUMBER 180 -#define V8_PATCH_LEVEL 15 +#define V8_MINOR_VERSION 7 +#define V8_BUILD_NUMBER 106 +#define V8_PATCH_LEVEL 18 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/include/v8-wasm.h b/deps/v8/include/v8-wasm.h index af47a3eab37167..612ed2fae40c89 100644 --- a/deps/v8/include/v8-wasm.h +++ b/deps/v8/include/v8-wasm.h @@ -151,8 +151,12 @@ class V8_EXPORT WasmStreaming final { * {Finish} should be called after all received bytes where passed to * {OnBytesReceived} to tell V8 that there will be no more bytes. {Finish} * does not have to be called after {Abort} has been called already. + * If {can_use_compiled_module} is true and {SetCompiledModuleBytes} was + * previously called, the compiled module bytes can be used. + * If {can_use_compiled_module} is false, the compiled module bytes previously + * set by {SetCompiledModuleBytes} should not be used. */ - void Finish(); + void Finish(bool can_use_compiled_module = true); /** * Abort streaming compilation. If {exception} has a value, then the promise @@ -167,6 +171,8 @@ class V8_EXPORT WasmStreaming final { * can be used, false otherwise. The buffer passed via {bytes} and {size} * is owned by the caller. If {SetCompiledModuleBytes} returns true, the * buffer must remain valid until either {Finish} or {Abort} completes. + * The compiled module bytes should not be used until {Finish(true)} is + * called, because they can be invalidated later by {Finish(false)}. */ bool SetCompiledModuleBytes(const uint8_t* bytes, size_t size); diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h index b010b65dfd648b..ecb992822cff3e 100644 --- a/deps/v8/include/v8config.h +++ b/deps/v8/include/v8config.h @@ -553,6 +553,19 @@ V8 shared library set USING_V8_SHARED. #endif // V8_OS_WIN +// The virtual memory cage is available (i.e. defined) when pointer compression +// is enabled, but it is only used when V8_VIRTUAL_MEMORY_CAGE is enabled as +// well. This allows better test coverage of the cage. +#if defined(V8_COMPRESS_POINTERS) +#define V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE +#endif + +// CagedPointers are currently only used if the heap sandbox is enabled. +// In the future, they will be enabled when the virtual memory cage is enabled. 
+#if defined(V8_HEAP_SANDBOX) +#define V8_CAGED_POINTERS +#endif + // clang-format on #undef V8_HAS_CPP_ATTRIBUTE diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl index e3afd9787b9e93..049a2e2786aba3 100644 --- a/deps/v8/infra/mb/mb_config.pyl +++ b/deps/v8/infra/mb/mb_config.pyl @@ -18,6 +18,7 @@ 'arm64.debug': 'default_debug_arm64', 'arm64.optdebug': 'default_optdebug_arm64', 'arm64.release': 'default_release_arm64', + 'arm64.release.sample': 'release_arm64_sample', 'ia32.debug': 'default_debug_x86', 'ia32.optdebug': 'default_optdebug_x86', 'ia32.release': 'default_release_x86', @@ -181,8 +182,8 @@ 'V8 Android Arm - builder': 'release_android_arm', 'V8 Linux - arm - sim': 'release_simulate_arm', 'V8 Linux - arm - sim - debug': 'debug_simulate_arm', - 'V8 Linux - arm - sim - lite': 'release_simulate_arm_lite', - 'V8 Linux - arm - sim - lite - debug': 'debug_simulate_arm_lite', + 'V8 Linux - arm - sim - lite - builder': 'release_simulate_arm_lite', + 'V8 Linux - arm - sim - lite - debug builder': 'debug_simulate_arm_lite', # Arm64. 'V8 Android Arm64 - builder': 'release_android_arm64', 'V8 Android Arm64 - debug builder': 'debug_android_arm64', @@ -281,6 +282,7 @@ 'v8_mac64_compile_full_dbg_ng': 'full_debug_x64', 'v8_mac64_asan_rel_ng': 'release_x64_asan_no_lsan', 'v8_linux_arm_rel_ng': 'release_simulate_arm_trybot', + 'v8_linux_arm_lite_compile_dbg': 'debug_simulate_arm_lite', 'v8_linux_arm_lite_rel_ng': 'release_simulate_arm_lite_trybot', 'v8_linux_arm_dbg_ng': 'debug_simulate_arm', 'v8_linux_arm_armv8a_rel': 'release_simulate_arm_trybot', @@ -320,6 +322,8 @@ 'debug', 'simulate_arm64', 'v8_enable_slow_dchecks'], 'default_release_arm64': [ 'release', 'simulate_arm64'], + 'release_arm64_sample': [ + 'release', 'arm64', 'sample'], 'default_debug_mipsel': [ 'debug', 'simulate_mipsel', 'v8_enable_slow_dchecks', 'v8_full_debug'], 'default_optdebug_mipsel': [ diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index f17f651212954b..56e238f4d0b533 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -282,7 +282,7 @@ 'tests': [ {'name': 'mjsunit_sp_frame_access'}, {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'shards': 7}, {'name': 'v8testing', 'variant': 'extra', 'shards': 7}, ], @@ -1743,7 +1743,7 @@ 'tests': [ {'name': 'mjsunit_sp_frame_access'}, {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'shards': 6}, {'name': 'v8testing', 'variant': 'extra', 'shards': 3}, # Armv8-a. @@ -1791,7 +1791,7 @@ 'tests': [ {'name': 'mjsunit_sp_frame_access', 'shards': 6}, {'name': 'mozilla', 'shards': 6}, - {'name': 'test262', 'variant': 'default'}, + {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'shards': 10}, {'name': 'v8testing', 'variant': 'extra', 'shards': 10}, # Armv8-a. 
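A quick way to see how the two new v8config.h defines compose: V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE is derived from pointer compression, while V8_CAGED_POINTERS currently follows the heap sandbox. A minimal compile-time probe, assuming v8config.h is on the include path (illustrative only):

#include <cstdio>
#include "v8config.h"

int main() {
#if defined(V8_VIRTUAL_MEMORY_CAGE_IS_AVAILABLE)
  // Defined whenever V8_COMPRESS_POINTERS is defined.
  std::puts("virtual memory cage available (pointer compression on)");
#else
  std::puts("virtual memory cage not available");
#endif
#if defined(V8_CAGED_POINTERS)
  // For now this tracks V8_HEAP_SANDBOX, not V8_VIRTUAL_MEMORY_CAGE.
  std::puts("caged pointers enabled (heap sandbox)");
#endif
  return 0;
}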
diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h index c033c3d2e8f947..20a3d910cea486 100644 --- a/deps/v8/src/api/api-inl.h +++ b/deps/v8/src/api/api-inl.h @@ -111,7 +111,7 @@ MAKE_TO_LOCAL(CallableToLocal, JSReceiver, Function) MAKE_TO_LOCAL(ToLocalPrimitive, Object, Primitive) MAKE_TO_LOCAL(FixedArrayToLocal, FixedArray, FixedArray) MAKE_TO_LOCAL(PrimitiveArrayToLocal, FixedArray, PrimitiveArray) -MAKE_TO_LOCAL(ScriptOrModuleToLocal, Script, ScriptOrModule) +MAKE_TO_LOCAL(ToLocal, ScriptOrModule, ScriptOrModule) #undef MAKE_TO_LOCAL_TYPED_ARRAY #undef MAKE_TO_LOCAL diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index f79d0482ed3f1b..3cc4f2b61e0692 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -40,7 +40,6 @@ #include "src/codegen/cpu-features.h" #include "src/codegen/script-details.h" #include "src/common/assert-scope.h" -#include "src/common/external-pointer.h" #include "src/common/globals.h" #include "src/compiler-dispatcher/lazy-compile-dispatcher.h" #include "src/date/date.h" @@ -64,7 +63,6 @@ #include "src/init/icu_util.h" #include "src/init/startup-data-util.h" #include "src/init/v8.h" -#include "src/init/vm-cage.h" #include "src/json/json-parser.h" #include "src/json/json-stringifier.h" #include "src/logging/counters-scopes.h" @@ -110,6 +108,8 @@ #include "src/profiler/tick-sample.h" #include "src/regexp/regexp-utils.h" #include "src/runtime/runtime.h" +#include "src/security/external-pointer.h" +#include "src/security/vm-cage.h" #include "src/snapshot/code-serializer.h" #include "src/snapshot/embedded/embedded-data.h" #include "src/snapshot/snapshot.h" @@ -120,6 +120,7 @@ #include "src/tracing/trace-event.h" #include "src/utils/detachable-vector.h" #include "src/utils/version.h" +#include "src/web-snapshot/web-snapshot.h" #if V8_ENABLE_WEBASSEMBLY #include "src/trap-handler/trap-handler.h" @@ -382,11 +383,11 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) { namespace { -#ifdef V8_VIRTUAL_MEMORY_CAGE -// ArrayBufferAllocator to use when the virtual memory cage is enabled, in which -// case all ArrayBuffer backing stores need to be allocated inside the data -// cage. Note, the current implementation is extremely inefficient as it uses -// the BoundedPageAllocator. In the future, we'll need a proper allocator +#ifdef V8_HEAP_SANDBOX +// ArrayBufferAllocator to use when the heap sandbox is enabled, in which case +// all ArrayBuffer backing stores need to be allocated inside the virtual +// memory cage. Note, the current implementation is extremely inefficient as it +// uses the BoundedPageAllocator. In the future, we'll need a proper allocator // implementation. 
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator { public: @@ -454,7 +455,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator { return new_data; } }; -#endif // V8_VIRTUAL_MEMORY_CAGE +#endif // V8_HEAP_SANDBOX struct SnapshotCreatorData { explicit SnapshotCreatorData(Isolate* isolate) @@ -829,17 +830,19 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory, } } -namespace api_internal { -i::Address* GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj, - internal::Address* slot, - bool has_destructor) { +namespace internal { + +i::Address* GlobalizeTracedReference( + i::Isolate* isolate, i::Address* obj, internal::Address* slot, + GlobalHandleDestructionMode destruction_mode, + GlobalHandleStoreMode store_mode) { LOG_API(isolate, TracedGlobal, New); #ifdef DEBUG Utils::ApiCheck((slot != nullptr), "v8::GlobalizeTracedReference", "the address slot must be not null"); #endif - i::Handle result = - isolate->global_handles()->CreateTraced(*obj, slot, has_destructor); + i::Handle result = isolate->global_handles()->CreateTraced( + *obj, slot, destruction_mode, store_mode); #ifdef VERIFY_HEAP if (i::FLAG_verify_heap) { i::Object(*obj).ObjectVerify(isolate); @@ -848,6 +851,30 @@ i::Address* GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj, return result.location(); } +void MoveTracedGlobalReference(internal::Address** from, + internal::Address** to) { + GlobalHandles::MoveTracedGlobal(from, to); +} + +void CopyTracedGlobalReference(const internal::Address* const* from, + internal::Address** to) { + GlobalHandles::CopyTracedGlobal(from, to); +} + +void DisposeTracedGlobal(internal::Address* location) { + GlobalHandles::DestroyTraced(location); +} + +void SetFinalizationCallbackTraced(internal::Address* location, void* parameter, + WeakCallbackInfo::Callback callback) { + GlobalHandles::SetFinalizationCallbackForTraced(location, parameter, + callback); +} + +} // namespace internal + +namespace api_internal { + i::Address* GlobalizeReference(i::Isolate* isolate, i::Address* obj) { LOG_API(isolate, Persistent, New); i::Handle result = isolate->global_handles()->Create(*obj); @@ -899,26 +926,6 @@ Value* Eternalize(Isolate* v8_isolate, Value* value) { isolate->eternal_handles()->Get(index).location()); } -void MoveTracedGlobalReference(internal::Address** from, - internal::Address** to) { - i::GlobalHandles::MoveTracedGlobal(from, to); -} - -void CopyTracedGlobalReference(const internal::Address* const* from, - internal::Address** to) { - i::GlobalHandles::CopyTracedGlobal(from, to); -} - -void DisposeTracedGlobal(internal::Address* location) { - i::GlobalHandles::DestroyTraced(location); -} - -void SetFinalizationCallbackTraced(internal::Address* location, void* parameter, - WeakCallbackInfo::Callback callback) { - i::GlobalHandles::SetFinalizationCallbackForTraced(location, parameter, - callback); -} - void FromJustIsNothing() { Utils::ApiCheck(false, "v8::FromJust", "Maybe value is Nothing."); } @@ -1962,10 +1969,6 @@ ScriptCompiler::CachedData::~CachedData() { } } -bool ScriptCompiler::ExternalSourceStream::SetBookmark() { return false; } - -void ScriptCompiler::ExternalSourceStream::ResetToBookmark() { UNREACHABLE(); } - ScriptCompiler::StreamedSource::StreamedSource( std::unique_ptr stream, Encoding encoding) : impl_(new i::ScriptStreamingData(std::move(stream), encoding)) {} @@ -2048,7 +2051,8 @@ Local UnboundScript::GetSourceMappingURL() { } MaybeLocal Script::Run(Local context) { - auto isolate = 
reinterpret_cast(context->GetIsolate()); + auto v8_isolate = context->GetIsolate(); + auto isolate = reinterpret_cast(v8_isolate); TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute"); ENTER_V8(isolate, context, Script, Run, MaybeLocal(), InternalEscapableScope); @@ -2078,10 +2082,26 @@ MaybeLocal Script::Run(Local context) { } } + if (V8_UNLIKELY(i::FLAG_experimental_web_snapshots)) { + i::Handle maybe_script = + handle(fun->shared().script(), isolate); + if (maybe_script->IsScript() && + i::Script::cast(*maybe_script).type() == i::Script::TYPE_WEB_SNAPSHOT) { + i::WebSnapshotDeserializer deserializer(v8_isolate); + deserializer.UseWebSnapshot(i::Handle::cast(maybe_script)); + RETURN_ON_FAILED_EXECUTION(Value); + Local result = v8::Undefined(v8_isolate); + RETURN_ESCAPED(result); + } + } + i::Handle receiver = isolate->global_proxy(); + i::Handle host_defined_options( + i::Script::cast(fun->shared().script()).host_defined_options(), isolate); Local result; has_pending_exception = !ToLocal( - i::Execution::Call(isolate, fun, receiver, 0, nullptr), &result); + i::Execution::CallScript(isolate, fun, receiver, host_defined_options), + &result); if (i::FLAG_script_delay_fraction > 0.0) { delta = v8::base::TimeDelta::FromMillisecondsD( @@ -2097,28 +2117,39 @@ MaybeLocal Script::Run(Local context) { } Local ScriptOrModule::GetResourceName() { - i::Handle obj = Utils::OpenHandle(this); - i::Isolate* isolate = obj->GetIsolate(); + i::Handle obj = Utils::OpenHandle(this); + i::Isolate* isolate = i::GetIsolateFromWritableObject(*obj); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); - i::Handle val(obj->name(), isolate); + i::Handle val(obj->resource_name(), isolate); return ToApiHandle(val); } Local ScriptOrModule::GetHostDefinedOptions() { - i::Handle obj = Utils::OpenHandle(this); - i::Isolate* isolate = obj->GetIsolate(); + i::Handle obj = Utils::OpenHandle(this); + i::Isolate* isolate = i::GetIsolateFromWritableObject(*obj); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); i::Handle val(obj->host_defined_options(), isolate); return ToApiHandle(val); } Local Script::GetUnboundScript() { - i::Handle obj = Utils::OpenHandle(this); - i::SharedFunctionInfo sfi = i::JSFunction::cast(*obj).shared(); + i::DisallowGarbageCollection no_gc; + i::Handle obj = Utils::OpenHandle(this); + i::SharedFunctionInfo sfi = (*obj).shared(); i::Isolate* isolate = sfi.GetIsolate(); return ToApiHandle(i::handle(sfi, isolate)); } +Local Script::GetResourceName() { + i::DisallowGarbageCollection no_gc; + i::Handle func = Utils::OpenHandle(this); + i::SharedFunctionInfo sfi = (*func).shared(); + i::Isolate* isolate = func->GetIsolate(); + CHECK(sfi.script().IsScript()); + return ToApiHandle( + i::handle(i::Script::cast(sfi.script()).name(), isolate)); +} + // static Local PrimitiveArray::New(Isolate* v8_isolate, int length) { i::Isolate* isolate = reinterpret_cast(v8_isolate); @@ -2580,9 +2611,32 @@ bool IsIdentifier(i::Isolate* isolate, i::Handle string) { } return true; } -} // anonymous namespace +} // namespace + +// static +V8_WARN_UNUSED_RESULT MaybeLocal ScriptCompiler::CompileFunction( + Local context, Source* source, size_t arguments_count, + Local arguments[], size_t context_extension_count, + Local context_extensions[], CompileOptions options, + NoCacheReason no_cache_reason) { + return CompileFunctionInternal(context, source, arguments_count, arguments, + context_extension_count, context_extensions, + options, no_cache_reason, nullptr); +} +// static MaybeLocal ScriptCompiler::CompileFunctionInContext( + Local context, 
Source* source, size_t arguments_count, + Local arguments[], size_t context_extension_count, + Local context_extensions[], CompileOptions options, + NoCacheReason no_cache_reason, + Local* script_or_module_out) { + return CompileFunctionInternal( + context, source, arguments_count, arguments, context_extension_count, + context_extensions, options, no_cache_reason, script_or_module_out); +} + +MaybeLocal ScriptCompiler::CompileFunctionInternal( Local v8_context, Source* source, size_t arguments_count, Local arguments[], size_t context_extension_count, Local context_extensions[], CompileOptions options, @@ -2591,7 +2645,7 @@ MaybeLocal ScriptCompiler::CompileFunctionInContext( Local result; { - PREPARE_FOR_EXECUTION(v8_context, ScriptCompiler, CompileFunctionInContext, + PREPARE_FOR_EXECUTION(v8_context, ScriptCompiler, CompileFunction, Function); TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.ScriptCompiler"); @@ -2650,16 +2704,26 @@ MaybeLocal ScriptCompiler::CompileFunctionInContext( RETURN_ON_FAILED_EXECUTION(Function); result = handle_scope.Escape(Utils::CallableToLocal(scoped_result)); } - + // TODO(cbruni): remove script_or_module_out parameter if (script_or_module_out != nullptr) { i::Handle function = i::Handle::cast(Utils::OpenHandle(*result)); i::Isolate* isolate = function->GetIsolate(); i::Handle shared(function->shared(), isolate); i::Handle script(i::Script::cast(shared->script()), isolate); - *script_or_module_out = v8::Utils::ScriptOrModuleToLocal(script); + // TODO(cbruni, v8:12302): Avoid creating temporary ScriptOrModule objects. + auto script_or_module = i::Handle::cast( + isolate->factory()->NewStruct(i::SCRIPT_OR_MODULE_TYPE)); + script_or_module->set_resource_name(script->name()); + script_or_module->set_host_defined_options(script->host_defined_options()); +#ifdef V8_SCRIPTORMODULE_LEGACY_LIFETIME + i::Handle list = + i::handle(script->script_or_modules(), isolate); + list = i::ArrayList::Add(isolate, list, script_or_module); + script->set_script_or_modules(*list); +#endif // V8_SCRIPTORMODULE_LEGACY_LIFETIME + *script_or_module_out = v8::Utils::ToLocal(script_or_module); } - return result; } @@ -5000,10 +5064,17 @@ MaybeLocal v8::Object::GetCreationContext() { if (self->GetCreationContext().ToHandle(&context)) { return Utils::ToLocal(context); } - return MaybeLocal(); } +Local v8::Object::GetCreationContextChecked() { + Local context; + Utils::ApiCheck(GetCreationContext().ToLocal(&context), + "v8::Object::GetCreationContextChecked", + "No creation context available"); + return context; +} + int v8::Object::GetIdentityHash() { i::DisallowGarbageCollection no_gc; auto self = Utils::OpenHandle(this); @@ -5257,6 +5328,14 @@ int Function::GetScriptColumnNumber() const { return kLineOffsetNotFound; } +MaybeLocal Function::GetUnboundScript() const { + i::Handle self = Utils::OpenHandle(this); + if (!self->IsJSFunction()) return MaybeLocal(); + i::SharedFunctionInfo sfi = i::JSFunction::cast(*self).shared(); + i::Isolate* isolate = sfi.GetIsolate(); + return ToApiHandle(i::handle(sfi, isolate)); +} + int Function::ScriptId() const { i::JSReceiver self = *Utils::OpenHandle(this); if (!self.IsJSFunction()) return v8::UnboundScript::kNoScriptId; @@ -6089,7 +6168,9 @@ const char* v8::V8::GetVersion() { return i::Version::GetVersion(); } #ifdef V8_VIRTUAL_MEMORY_CAGE PageAllocator* v8::V8::GetVirtualMemoryCagePageAllocator() { - CHECK(i::GetProcessWideVirtualMemoryCage()->is_initialized()); + Utils::ApiCheck(i::GetProcessWideVirtualMemoryCage()->is_initialized(),
"v8::V8::GetVirtualMemoryCagePageAllocator", + "The virtual memory cage must be initialized first."); return i::GetProcessWideVirtualMemoryCage()->page_allocator(); } @@ -6100,6 +6181,17 @@ size_t v8::V8::GetVirtualMemoryCageSizeInBytes() { return i::GetProcessWideVirtualMemoryCage()->size(); } } + +bool v8::V8::IsUsingSecureVirtualMemoryCage() { + Utils::ApiCheck(i::GetProcessWideVirtualMemoryCage()->is_initialized(), + "v8::V8::IsUsingSecureVirtualMemoryCage", + "The virtual memory cage must be initialized first."); + // TODO(saelo) For now, we only treat a fake cage as insecure. Once we use + // caged pointers that assume that the cage has a constant size, we'll also + // treat cages smaller than the default size as insecure because caged + // pointers can then access memory outside of them. + return !i::GetProcessWideVirtualMemoryCage()->is_fake_cage(); +} #endif void V8::GetSharedMemoryStatistics(SharedMemoryStatistics* statistics) { @@ -8395,6 +8487,10 @@ Isolate* Isolate::TryGetCurrent() { return reinterpret_cast(isolate); } +bool Isolate::IsCurrent() const { + return reinterpret_cast(this)->IsCurrent(); +} + // static Isolate* Isolate::Allocate() { return reinterpret_cast(i::Isolate::New()); @@ -8445,6 +8541,12 @@ void Isolate::Initialize(Isolate* isolate, reinterpret_cast(params.constraints.stack_limit()); i_isolate->stack_guard()->SetStackLimit(limit); } + + if (params.experimental_attach_to_shared_isolate != nullptr) { + i_isolate->set_shared_isolate(reinterpret_cast( + params.experimental_attach_to_shared_isolate)); + } + // TODO(jochen): Once we got rid of Isolate::Current(), we can remove this. Isolate::Scope isolate_scope(isolate); if (i_isolate->snapshot_blob() == nullptr) { @@ -9117,7 +9219,7 @@ JSEntryStubs Isolate::GetJSEntryStubs() { {i::Builtin::kJSRunMicrotasksEntry, &entry_stubs.js_run_microtasks_entry_stub}}}; for (auto& pair : stubs) { - i::Code js_entry = isolate->heap()->builtin(pair.first); + i::Code js_entry = isolate->builtins()->code(pair.first); pair.second->code.start = reinterpret_cast(js_entry.InstructionStart()); pair.second->code.length_in_bytes = js_entry.InstructionSize(); @@ -10192,7 +10294,7 @@ void WasmStreaming::OnBytesReceived(const uint8_t* bytes, size_t size) { UNREACHABLE(); } -void WasmStreaming::Finish() { UNREACHABLE(); } +void WasmStreaming::Finish(bool can_use_compiled_module) { UNREACHABLE(); } void WasmStreaming::Abort(MaybeLocal exception) { UNREACHABLE(); } @@ -10439,9 +10541,10 @@ bool ConvertDouble(double d) { } // namespace internal template <> -bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer< - internal::CTypeInfoBuilder::Build().GetId(), int32_t>( - Local src, int32_t* dst, uint32_t max_length) { +bool V8_EXPORT V8_WARN_UNUSED_RESULT +TryToCopyAndConvertArrayToCppBuffer::Build().GetId(), + int32_t>(Local src, int32_t* dst, + uint32_t max_length) { return CopyAndConvertArrayToCppBuffer< CTypeInfo(CTypeInfo::Type::kInt32, CTypeInfo::SequenceType::kIsSequence) .GetId(), @@ -10449,9 +10552,10 @@ bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer< } template <> -bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer< - internal::CTypeInfoBuilder::Build().GetId(), uint32_t>( - Local src, uint32_t* dst, uint32_t max_length) { +bool V8_EXPORT V8_WARN_UNUSED_RESULT +TryToCopyAndConvertArrayToCppBuffer::Build().GetId(), + uint32_t>(Local src, uint32_t* dst, + uint32_t max_length) { return CopyAndConvertArrayToCppBuffer< CTypeInfo(CTypeInfo::Type::kUint32, 
CTypeInfo::SequenceType::kIsSequence) .GetId(), @@ -10459,9 +10563,10 @@ bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer< } template <> -bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer< - internal::CTypeInfoBuilder::Build().GetId(), float>( - Local src, float* dst, uint32_t max_length) { +bool V8_EXPORT V8_WARN_UNUSED_RESULT +TryToCopyAndConvertArrayToCppBuffer::Build().GetId(), + float>(Local src, float* dst, + uint32_t max_length) { return CopyAndConvertArrayToCppBuffer< CTypeInfo(CTypeInfo::Type::kFloat32, CTypeInfo::SequenceType::kIsSequence) .GetId(), @@ -10469,9 +10574,10 @@ bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer< } template <> -bool V8_EXPORT V8_WARN_UNUSED_RESULT TryToCopyAndConvertArrayToCppBuffer< - internal::CTypeInfoBuilder::Build().GetId(), double>( - Local src, double* dst, uint32_t max_length) { +bool V8_EXPORT V8_WARN_UNUSED_RESULT +TryToCopyAndConvertArrayToCppBuffer::Build().GetId(), + double>(Local src, double* dst, + uint32_t max_length) { return CopyAndConvertArrayToCppBuffer< CTypeInfo(CTypeInfo::Type::kFloat64, CTypeInfo::SequenceType::kIsSequence) .GetId(), diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h index c255dad1e64131..48f549bbb0d513 100644 --- a/deps/v8/src/api/api.h +++ b/deps/v8/src/api/api.h @@ -141,7 +141,7 @@ class RegisteredExtension { V(Primitive, Object) \ V(PrimitiveArray, FixedArray) \ V(BigInt, BigInt) \ - V(ScriptOrModule, Script) \ + V(ScriptOrModule, ScriptOrModule) \ V(FixedArray, FixedArray) \ V(ModuleRequest, ModuleRequest) \ IF_WASM(V, WasmMemoryObject, WasmMemoryObject) @@ -254,8 +254,8 @@ class Utils { v8::internal::Handle obj); static inline Local PrimitiveArrayToLocal( v8::internal::Handle obj); - static inline Local ScriptOrModuleToLocal( - v8::internal::Handle obj); + static inline Local ToLocal( + v8::internal::Handle obj); #define DECLARE_OPEN_HANDLE(From, To) \ static inline v8::internal::Handle OpenHandle( \ diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc index b6743117fe2b94..c782bbaae7237c 100644 --- a/deps/v8/src/asmjs/asm-parser.cc +++ b/deps/v8/src/asmjs/asm-parser.cc @@ -398,12 +398,18 @@ void AsmJsParser::ValidateModuleParameters() { FAIL("Expected foreign parameter"); } foreign_name_ = Consume(); + if (stdlib_name_ == foreign_name_) { + FAIL("Duplicate parameter name"); + } if (!Peek(')')) { EXPECT_TOKEN(','); if (!scanner_.IsGlobal()) { FAIL("Expected heap parameter"); } heap_name_ = Consume(); + if (heap_name_ == stdlib_name_ || heap_name_ == foreign_name_) { + FAIL("Duplicate parameter name"); + } } } } diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS index a0077986c61b1c..13586e139c7710 100644 --- a/deps/v8/src/ast/OWNERS +++ b/deps/v8/src/ast/OWNERS @@ -1,5 +1,4 @@ gsathya@chromium.org leszeks@chromium.org marja@chromium.org -neis@chromium.org verwaest@chromium.org diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc index bf490a42bb97d6..c179776571c43b 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -679,6 +679,7 @@ void DeclarationScope::DeclareThis(AstValueFactory* ast_value_factory) { THIS_VARIABLE, derived_constructor ? kNeedsInitialization : kCreatedInitialized, kNotAssigned); + locals_.Add(receiver_); } void DeclarationScope::DeclareArguments(AstValueFactory* ast_value_factory) { @@ -2487,10 +2488,10 @@ void Scope::AllocateVariablesRecursively() { // Allocate variables for this scope. // Parameters must be allocated first, if any. 
if (scope->is_declaration_scope()) { + scope->AsDeclarationScope()->AllocateReceiver(); if (scope->is_function_scope()) { scope->AsDeclarationScope()->AllocateParameterLocals(); } - scope->AsDeclarationScope()->AllocateReceiver(); } scope->AllocateNonParameterLocalsAndDeclaredGlobals(); diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc index e5f090682f47dd..d33857845a5c34 100644 --- a/deps/v8/src/base/bounded-page-allocator.cc +++ b/deps/v8/src/base/bounded-page-allocator.cc @@ -33,16 +33,25 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size, DCHECK(IsAligned(alignment, region_allocator_.page_size())); DCHECK(IsAligned(alignment, allocate_page_size_)); - Address address; - if (alignment <= allocate_page_size_) { - // TODO(ishell): Consider using randomized version here. - address = region_allocator_.AllocateRegion(size); - } else { - // Currently, this should only be necessary when V8_VIRTUAL_MEMORY_CAGE is - // enabled, in which case a bounded page allocator is used to allocate WASM - // memory buffers, which have a larger alignment. - address = region_allocator_.AllocateAlignedRegion(size, alignment); + Address address = RegionAllocator::kAllocationFailure; + + Address hint_address = reinterpret_cast
<Address>(hint); + if (hint_address && IsAligned(hint_address, alignment) && + region_allocator_.contains(hint_address, size)) { + if (region_allocator_.AllocateRegionAt(hint_address, size)) { + address = hint_address; + } + } + + if (address == RegionAllocator::kAllocationFailure) { + if (alignment <= allocate_page_size_) { + // TODO(ishell): Consider using randomized version here. + address = region_allocator_.AllocateRegion(size); + } else { + address = region_allocator_.AllocateAlignedRegion(size, alignment); + } } + if (address == RegionAllocator::kAllocationFailure) { return nullptr; } diff --git a/deps/v8/src/base/bounded-page-allocator.h b/deps/v8/src/base/bounded-page-allocator.h index a98a2299f8442a..07c5cda3070ed4 100644 --- a/deps/v8/src/base/bounded-page-allocator.h +++ b/deps/v8/src/base/bounded-page-allocator.h @@ -15,8 +15,7 @@ namespace base { // Defines the page initialization mode of a BoundedPageAllocator. enum class PageInitializationMode { // The contents of allocated pages must be zero initialized. This causes any - // committed pages to be decommitted during FreePages and ReleasePages. This - // requires the embedder to provide the PageAllocator::DecommitPages API. + // committed pages to be decommitted during FreePages and ReleasePages. kAllocatedPagesMustBeZeroInitialized, // Allocated pages do not have to be zero initialized and can contain old // data. This is slightly faster as committed pages are not decommitted diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc index 9bfc2a55afdf50..ab263c7e7781d9 100644 --- a/deps/v8/src/base/cpu.cc +++ b/deps/v8/src/base/cpu.cc @@ -414,6 +414,7 @@ CPU::CPU() part_(0), icache_line_size_(kUnknownCacheLineSize), dcache_line_size_(kUnknownCacheLineSize), + num_virtual_address_bits_(kUnknownNumVirtualAddressBits), has_fpu_(false), has_cmov_(false), has_sahf_(false), @@ -547,6 +548,12 @@ CPU::CPU() has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0; } + const unsigned virtual_physical_address_bits = 0x80000008; + if (num_ext_ids >= virtual_physical_address_bits) { + __cpuid(cpu_info, virtual_physical_address_bits); + num_virtual_address_bits_ = (cpu_info[0] >> 8) & 0xff; + } + // This logic is replicated from cpu.cc present in chromium.src if (!has_non_stop_time_stamp_counter_ && is_running_in_vm_) { int cpu_info_hv[4] = {}; diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h index 11615aca061824..9fcf90b3bcd845 100644 --- a/deps/v8/src/base/cpu.h +++ b/deps/v8/src/base/cpu.h @@ -105,6 +105,14 @@ class V8_BASE_EXPORT CPU final { return has_non_stop_time_stamp_counter_; } bool is_running_in_vm() const { return is_running_in_vm_; } + bool exposes_num_virtual_address_bits() const { + return num_virtual_address_bits_ != kUnknownNumVirtualAddressBits; + } + int num_virtual_address_bits() const { + DCHECK(exposes_num_virtual_address_bits()); + return num_virtual_address_bits_; + } + static const int kUnknownNumVirtualAddressBits = 0; // arm features bool has_idiva() const { return has_idiva_; } @@ -136,6 +144,7 @@ class V8_BASE_EXPORT CPU final { int part_; int icache_line_size_; int dcache_line_size_; + int num_virtual_address_bits_; bool has_fpu_; bool has_cmov_; bool has_sahf_; diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc index e5a5305d483ee3..b27bfbc8bcf8d2 100644 --- a/deps/v8/src/base/platform/platform-aix.cc +++ b/deps/v8/src/base/platform/platform-aix.cc @@ -129,6 +129,12 @@ void OS::SignalCodeMovingGC() {} void OS::AdjustSchedulingParams() {}
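The cpu.cc change above reads CPUID extended leaf 0x80000008 and extracts the virtual address width from bits 15:8 of EAX. A standalone sketch of the same probe, assuming a GCC/Clang x86-64 toolchain with <cpuid.h> (V8 itself goes through its own __cpuid wrapper):

#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
  // Highest supported extended leaf; 0 if extended leaves are unsupported.
  unsigned max_ext = __get_cpuid_max(0x80000000, nullptr);
  if (max_ext >= 0x80000008 &&
      __get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
    // EAX bits 7:0 = physical address bits, bits 15:8 = virtual address bits,
    // matching the (cpu_info[0] >> 8) & 0xff extraction in the patch.
    std::printf("virtual address bits: %u\n", (eax >> 8) & 0xff);
  } else {
    std::puts("leaf 0x80000008 not available");
  }
  return 0;
}

The cage code needs this value to know how much of the address space a reservation hint can legally target, hence the new GetFreeMemoryRangesWithin platform hook below.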
+std::vector OS::GetFreeMemoryRangesWithin( + OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size, + size_t alignment) { + return {}; +} + // static Stack::StackSlot Stack::GetStackStart() { // pthread_getthrds_np creates 3 values: diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc index b9da2f1cd592db..5aae01c9c41d81 100644 --- a/deps/v8/src/base/platform/platform-cygwin.cc +++ b/deps/v8/src/base/platform/platform-cygwin.cc @@ -271,5 +271,11 @@ void OS::SignalCodeMovingGC() { void OS::AdjustSchedulingParams() {} +std::vector OS::GetFreeMemoryRangesWithin( + OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size, + size_t alignment) { + return {}; +} + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc index ac36b0527e7c3b..55d600283f6029 100644 --- a/deps/v8/src/base/platform/platform-freebsd.cc +++ b/deps/v8/src/base/platform/platform-freebsd.cc @@ -97,6 +97,12 @@ void OS::SignalCodeMovingGC() {} void OS::AdjustSchedulingParams() {} +std::vector OS::GetFreeMemoryRangesWithin( + OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size, + size_t alignment) { + return {}; +} + // static Stack::StackSlot Stack::GetStackStart() { pthread_attr_t attr; diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc index 11dba08d79f310..a0fd83e93974fc 100644 --- a/deps/v8/src/base/platform/platform-fuchsia.cc +++ b/deps/v8/src/base/platform/platform-fuchsia.cc @@ -17,7 +17,7 @@ namespace base { namespace { -uint32_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) { +zx_vm_option_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) { switch (access) { case OS::MemoryPermission::kNoAccess: case OS::MemoryPermission::kNoAccessWillJitLater: @@ -66,10 +66,24 @@ void* OS::Allocate(void* address, size_t size, size_t alignment, return nullptr; } + zx_vm_option_t options = GetProtectionFromMemoryPermission(access); + + uint64_t vmar_offset = 0; + if (address) { + vmar_offset = reinterpret_cast(address); + options |= ZX_VM_SPECIFIC; + } + zx_vaddr_t reservation; - uint32_t prot = GetProtectionFromMemoryPermission(access); - if (zx::vmar::root_self()->map(prot, 0, vmo, 0, request_size, &reservation) != - ZX_OK) { + zx_status_t status = zx::vmar::root_self()->map(options, vmar_offset, vmo, 0, + request_size, &reservation); + if (status != ZX_OK && address != nullptr) { + // Retry without the hint, if we supplied one. + options &= ~(ZX_VM_SPECIFIC); + status = zx::vmar::root_self()->map(options, 0, vmo, 0, request_size, + &reservation); + } + if (status != ZX_OK) { return nullptr; } @@ -142,10 +156,7 @@ bool OS::DecommitPages(void* address, size_t size) { } // static -bool OS::HasLazyCommits() { - // TODO(scottmg): Port, https://crbug.com/731217. - return false; -} +bool OS::HasLazyCommits() { return true; } std::vector OS::GetSharedLibraryAddresses() { UNREACHABLE(); // TODO(scottmg): Port, https://crbug.com/731217. 
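The platform-fuchsia.cc Allocate() change above implements a hint-then-fallback placement policy: map at the requested address with ZX_VM_SPECIFIC, and on failure retry without the constraint. A self-contained sketch of the same control flow, with hypothetical MapAt/MapAnywhere primitives standing in for the zx::vmar mapping calls (stubbed here so the sketch compiles; not a real allocator):

#include <cstdint>
#include <cstdlib>

// Hypothetical stand-ins for mapping with and without ZX_VM_SPECIFIC.
void* MapAt(uintptr_t /*address*/, size_t /*size*/) { return nullptr; }
void* MapAnywhere(size_t size) { return std::malloc(size); }

void* AllocateWithHint(void* hint, size_t size) {
  if (hint != nullptr) {
    // First attempt: honor the placement hint exactly.
    if (void* result = MapAt(reinterpret_cast<uintptr_t>(hint), size)) {
      return result;
    }
  }
  // Retry without the placement constraint, mirroring the patch's
  // `options &= ~(ZX_VM_SPECIFIC)` fallback.
  return MapAnywhere(size);
}

int main() {
  void* p = AllocateWithHint(nullptr, 64);
  std::free(p);
  return 0;
}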
@@ -177,5 +188,11 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) { void OS::AdjustSchedulingParams() {} +std::vector OS::GetFreeMemoryRangesWithin( + OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size, + size_t alignment) { + return {}; +} + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc index 5b619fb007380d..3ab88060f55e6e 100644 --- a/deps/v8/src/base/platform/platform-linux.cc +++ b/deps/v8/src/base/platform/platform-linux.cc @@ -155,5 +155,56 @@ void* OS::RemapShared(void* old_address, void* new_address, size_t size) { return result; } +std::vector OS::GetFreeMemoryRangesWithin( + OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size, + size_t alignment) { + std::vector result = {}; + // This function assumes that the layout of the file is as follows: + // hex_start_addr-hex_end_addr rwxp [binary_file_name] + // and the lines are arranged in increasing order of address. + // If we encounter an unexpected situation we abort scanning further entries. + FILE* fp = fopen("/proc/self/maps", "r"); + if (fp == nullptr) return {}; + + // Search for the gaps between existing virtual memory (vm) areas. If the gap + // contains enough space for the requested-size range that is within the + // boundary, push the overlapped memory range to the vector. + uintptr_t gap_start = 0, gap_end = 0; + // This loop will terminate once the scanning hits an EOF or reaches the gap + // at the higher address to the end of boundary. + uintptr_t vm_start; + uintptr_t vm_end; + while (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &vm_start, &vm_end) == 2 && + gap_start < boundary_end) { + // Visit the gap at the lower address to this vm. + gap_end = vm_start; + // Skip the gaps at the lower address to the start of boundary. + if (gap_end > boundary_start) { + // The available area is the overlap of the gap and boundary. Push + // the overlapped memory range to the vector if there is enough space. + const uintptr_t overlap_start = + RoundUp(std::max(gap_start, boundary_start), alignment); + const uintptr_t overlap_end = + RoundDown(std::min(gap_end, boundary_end), alignment); + if (overlap_start < overlap_end && + overlap_end - overlap_start >= minimum_size) { + result.push_back({overlap_start, overlap_end}); + } + } + // Continue to visit the next gap. + gap_start = vm_end; + + int c; + // Skip characters until we reach the end of the line or EOF. 
+ do { + c = getc(fp); + } while ((c != EOF) && (c != '\n')); + if (c == EOF) break; + } + + fclose(fp); + return result; +} + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc index 1b1f4c4ce8a2c3..d1675bdc44d9c8 100644 --- a/deps/v8/src/base/platform/platform-macos.cc +++ b/deps/v8/src/base/platform/platform-macos.cc @@ -93,6 +93,12 @@ void OS::AdjustSchedulingParams() { #endif } +std::vector OS::GetFreeMemoryRangesWithin( + OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size, + size_t alignment) { + return {}; +} + // static Stack::StackSlot Stack::GetStackStart() { return pthread_get_stackaddr_np(pthread_self()); diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc index e4a3cb6f35f0ae..f15800aa878419 100644 --- a/deps/v8/src/base/platform/platform-openbsd.cc +++ b/deps/v8/src/base/platform/platform-openbsd.cc @@ -122,5 +122,11 @@ void OS::SignalCodeMovingGC() { void OS::AdjustSchedulingParams() {} +std::vector OS::GetFreeMemoryRangesWithin( + OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size, + size_t alignment) { + return {}; +} + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/platform/platform-qnx.cc b/deps/v8/src/base/platform/platform-qnx.cc index f1ba3c6d45e3ca..ccd270cd7fdf8a 100644 --- a/deps/v8/src/base/platform/platform-qnx.cc +++ b/deps/v8/src/base/platform/platform-qnx.cc @@ -148,5 +148,11 @@ void OS::SignalCodeMovingGC() {} void OS::AdjustSchedulingParams() {} +std::vector OS::GetFreeMemoryRangesWithin( + OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size, + size_t alignment) { + return {}; +} + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/platform/platform-solaris.cc b/deps/v8/src/base/platform/platform-solaris.cc index 90f1617dde1576..8f5dd0c9f12284 100644 --- a/deps/v8/src/base/platform/platform-solaris.cc +++ b/deps/v8/src/base/platform/platform-solaris.cc @@ -65,6 +65,12 @@ void OS::SignalCodeMovingGC() {} void OS::AdjustSchedulingParams() {} +std::vector OS::GetFreeMemoryRangesWithin( + OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size, + size_t alignment) { + return {}; +} + // static Stack::StackSlot Stack::GetStackStart() { pthread_attr_t attr; diff --git a/deps/v8/src/base/platform/platform-starboard.cc b/deps/v8/src/base/platform/platform-starboard.cc index f631d800de8d67..a688c70692a706 100644 --- a/deps/v8/src/base/platform/platform-starboard.cc +++ b/deps/v8/src/base/platform/platform-starboard.cc @@ -474,6 +474,12 @@ void OS::SignalCodeMovingGC() { SB_NOTIMPLEMENTED(); } void OS::AdjustSchedulingParams() {} +std::vector OS::GetFreeMemoryRangesWithin( + OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size, + size_t alignment) { + return {}; +} + bool OS::DiscardSystemPages(void* address, size_t size) { // Starboard API does not support this function yet. 
return true; diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc index 6b5c5df4963e60..919c3ef4df8956 100644 --- a/deps/v8/src/base/platform/platform-win32.cc +++ b/deps/v8/src/base/platform/platform-win32.cc @@ -1440,6 +1440,44 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) { void OS::AdjustSchedulingParams() {} +std::vector OS::GetFreeMemoryRangesWithin( + OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size, + size_t alignment) { + std::vector result = {}; + + // Search for the virtual memory (vm) ranges within the boundary. + // If a range is free and larger than {minimum_size}, then push it to the + // returned vector. + uintptr_t vm_start = RoundUp(boundary_start, alignment); + uintptr_t vm_end = 0; + MEMORY_BASIC_INFORMATION mi; + // This loop will terminate once the scanning reaches the higher address + // to the end of boundary or the function VirtualQuery fails. + while (vm_start < boundary_end && + VirtualQuery(reinterpret_cast(vm_start), &mi, sizeof(mi)) != + 0) { + vm_start = reinterpret_cast(mi.BaseAddress); + vm_end = vm_start + mi.RegionSize; + if (mi.State == MEM_FREE) { + // The available area is the overlap of the virtual memory range and + // boundary. Push the overlapped memory range to the vector if there is + // enough space. + const uintptr_t overlap_start = + RoundUp(std::max(vm_start, boundary_start), alignment); + const uintptr_t overlap_end = + RoundDown(std::min(vm_end, boundary_end), alignment); + if (overlap_start < overlap_end && + overlap_end - overlap_start >= minimum_size) { + result.push_back({overlap_start, overlap_end}); + } + } + // Continue to visit the next virtual memory range. + vm_start = vm_end; + } + + return result; +} + // static Stack::StackSlot Stack::GetStackStart() { #if defined(V8_TARGET_ARCH_X64) diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h index 2e7ad32974feb4..bc1edc9c03e1ec 100644 --- a/deps/v8/src/base/platform/platform.h +++ b/deps/v8/src/base/platform/platform.h @@ -274,6 +274,19 @@ class V8_BASE_EXPORT OS { static void AdjustSchedulingParams(); + using Address = uintptr_t; + + struct MemoryRange { + uintptr_t start = 0; + uintptr_t end = 0; + }; + + // Find gaps between existing virtual memory ranges that have enough space + // to place a region with minimum_size within (boundary_start, boundary_end) + static std::vector GetFreeMemoryRangesWithin( + Address boundary_start, Address boundary_end, size_t minimum_size, + size_t alignment); + [[noreturn]] static void ExitProcess(int exit_code); private: diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc index 9979f33fcecdd1..af214f0a6da308 100644 --- a/deps/v8/src/base/platform/time.cc +++ b/deps/v8/src/base/platform/time.cc @@ -24,6 +24,8 @@ // This has to come after windows.h. #include // For timeGetTime(). +#include + #include "src/base/lazy-instance.h" #include "src/base/win32-headers.h" #endif @@ -619,15 +621,10 @@ using TimeTicksNowFunction = decltype(&TimeTicks::Now); TimeTicksNowFunction g_time_ticks_now_function = &InitialTimeTicksNowFunction; int64_t g_qpc_ticks_per_second = 0; -// As of January 2015, use of is forbidden in Chromium code. 
This is -// what std::atomic_thread_fence does on Windows on all Intel architectures when -// the memory_order argument is anything but std::memory_order_seq_cst: -#define ATOMIC_THREAD_FENCE(memory_order) _ReadWriteBarrier(); - TimeDelta QPCValueToTimeDelta(LONGLONG qpc_value) { // Ensure that the assignment to |g_qpc_ticks_per_second|, made in // InitializeNowFunctionPointer(), has happened by this point. - ATOMIC_THREAD_FENCE(memory_order_acquire); + std::atomic_thread_fence(std::memory_order_acquire); DCHECK_GT(g_qpc_ticks_per_second, 0); @@ -682,7 +679,7 @@ void InitializeTimeTicksNowFunctionPointer() { // assignment to |g_qpc_ticks_per_second| happens before the function pointers // are changed. g_qpc_ticks_per_second = ticks_per_sec.QuadPart; - ATOMIC_THREAD_FENCE(memory_order_release); + std::atomic_thread_fence(std::memory_order_release); g_time_ticks_now_function = now_function; } @@ -691,8 +688,6 @@ TimeTicks InitialTimeTicksNowFunction() { return g_time_ticks_now_function(); } -#undef ATOMIC_THREAD_FENCE - } // namespace // static diff --git a/deps/v8/src/base/sanitizer/lsan-page-allocator.h b/deps/v8/src/base/sanitizer/lsan-page-allocator.h index 4c8a1f04a0dc93..6a3b28c8ea7dd5 100644 --- a/deps/v8/src/base/sanitizer/lsan-page-allocator.h +++ b/deps/v8/src/base/sanitizer/lsan-page-allocator.h @@ -49,6 +49,10 @@ class V8_BASE_EXPORT LsanPageAllocator : public v8::PageAllocator { return page_allocator_->SetPermissions(address, size, access); } + bool DecommitPages(void* address, size_t size) override { + return page_allocator_->DecommitPages(address, size); + } + private: v8::PageAllocator* const page_allocator_; const size_t allocate_page_size_; diff --git a/deps/v8/src/base/utils/random-number-generator.cc b/deps/v8/src/base/utils/random-number-generator.cc index bae2da1bf9e9c8..f6f9dcfef2a43a 100644 --- a/deps/v8/src/base/utils/random-number-generator.cc +++ b/deps/v8/src/base/utils/random-number-generator.cc @@ -54,6 +54,7 @@ RandomNumberGenerator::RandomNumberGenerator() { DCHECK_EQ(0, result); result = rand_s(&second_half); DCHECK_EQ(0, result); + USE(result); SetSeed((static_cast(first_half) << 32) + second_half); #elif V8_OS_MACOSX || V8_OS_FREEBSD || V8_OS_OPENBSD // Despite its prefix suggests it is not RC4 algorithm anymore. 
diff --git a/deps/v8/src/baseline/baseline-assembler-inl.h b/deps/v8/src/baseline/baseline-assembler-inl.h index 583db7e6798bf7..5a81dd55b0d2d3 100644 --- a/deps/v8/src/baseline/baseline-assembler-inl.h +++ b/deps/v8/src/baseline/baseline-assembler-inl.h @@ -28,6 +28,10 @@ #include "src/baseline/ia32/baseline-assembler-ia32-inl.h" #elif V8_TARGET_ARCH_ARM #include "src/baseline/arm/baseline-assembler-arm-inl.h" +#elif V8_TARGET_ARCH_PPC64 +#include "src/baseline/ppc/baseline-assembler-ppc-inl.h" +#elif V8_TARGET_ARCH_S390X +#include "src/baseline/s390/baseline-assembler-s390-inl.h" #elif V8_TARGET_ARCH_RISCV64 #include "src/baseline/riscv64/baseline-assembler-riscv64-inl.h" #elif V8_TARGET_ARCH_MIPS64 diff --git a/deps/v8/src/baseline/baseline-batch-compiler.cc b/deps/v8/src/baseline/baseline-batch-compiler.cc index 249702bd623e80..a34764744bc16e 100644 --- a/deps/v8/src/baseline/baseline-batch-compiler.cc +++ b/deps/v8/src/baseline/baseline-batch-compiler.cc @@ -15,19 +15,222 @@ #include "src/handles/global-handles-inl.h" #include "src/heap/factory-inl.h" #include "src/heap/heap-inl.h" +#include "src/heap/local-heap-inl.h" +#include "src/heap/parked-scope.h" #include "src/objects/fixed-array-inl.h" #include "src/objects/js-function-inl.h" +#include "src/utils/locked-queue-inl.h" namespace v8 { namespace internal { namespace baseline { +class BaselineCompilerTask { + public: + BaselineCompilerTask(Isolate* isolate, PersistentHandles* handles, + SharedFunctionInfo sfi) + : shared_function_info_(handles->NewHandle(sfi)), + bytecode_(handles->NewHandle(sfi.GetBytecodeArray(isolate))) { + DCHECK(sfi.is_compiled()); + } + + BaselineCompilerTask(const BaselineCompilerTask&) V8_NOEXCEPT = delete; + BaselineCompilerTask(BaselineCompilerTask&&) V8_NOEXCEPT = default; + + // Executed in the background thread. + void Compile(LocalIsolate* local_isolate) { + BaselineCompiler compiler(local_isolate, shared_function_info_, bytecode_); + compiler.GenerateCode(); + maybe_code_ = local_isolate->heap()->NewPersistentMaybeHandle( + compiler.Build(local_isolate)); + Handle code; + if (maybe_code_.ToHandle(&code)) { + local_isolate->heap()->RegisterCodeObject(code); + } + } + + // Executed in the main thread. + void Install(Isolate* isolate) { + Handle code; + if (!maybe_code_.ToHandle(&code)) return; + if (FLAG_print_code) { + code->Print(); + } + shared_function_info_->set_baseline_code(*code, kReleaseStore); + if (V8_LIKELY(FLAG_use_osr)) { + // Arm back edges for OSR + shared_function_info_->GetBytecodeArray(isolate) + .set_osr_loop_nesting_level(AbstractCode::kMaxLoopNestingMarker); + } + if (FLAG_trace_baseline_concurrent_compilation) { + CodeTracer::Scope scope(isolate->GetCodeTracer()); + std::stringstream ss; + ss << "[Concurrent Sparkplug Off Thread] Function "; + shared_function_info_->ShortPrint(ss); + ss << " installed\n"; + OFStream os(scope.file()); + os << ss.str(); + } + } + + private: + Handle shared_function_info_; + Handle bytecode_; + MaybeHandle maybe_code_; +}; + +class BaselineBatchCompilerJob { + public: + BaselineBatchCompilerJob(Isolate* isolate, Handle task_queue, + int batch_size) + : isolate_for_local_isolate_(isolate) { + handles_ = isolate->NewPersistentHandles(); + tasks_.reserve(batch_size); + for (int i = 0; i < batch_size; i++) { + MaybeObject maybe_sfi = task_queue->Get(i); + // TODO(victorgomes): Do I need to clear the value? + task_queue->Set(i, HeapObjectReference::ClearedValue(isolate)); + HeapObject obj; + // Skip functions where weak reference is no longer valid. 
+ if (!maybe_sfi.GetHeapObjectIfWeak(&obj)) continue; + // Skip functions where the bytecode has been flushed. + SharedFunctionInfo shared = SharedFunctionInfo::cast(obj); + if (ShouldSkipFunction(shared)) continue; + tasks_.emplace_back(isolate, handles_.get(), shared); + } + if (FLAG_trace_baseline_concurrent_compilation) { + CodeTracer::Scope scope(isolate->GetCodeTracer()); + PrintF(scope.file(), "[Concurrent Sparkplug] compiling %zu functions\n", + tasks_.size()); + } + } + + bool ShouldSkipFunction(SharedFunctionInfo shared) { + return !shared.is_compiled() || shared.HasBaselineCode() || + !CanCompileWithBaseline(isolate_for_local_isolate_, shared); + } + + // Executed in the background thread. + void Compile() { +#ifdef V8_RUNTIME_CALL_STATS + WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope( + isolate_for_local_isolate_->counters() + ->worker_thread_runtime_call_stats()); + LocalIsolate local_isolate(isolate_for_local_isolate_, + ThreadKind::kBackground, + runtime_call_stats_scope.Get()); +#else + LocalIsolate local_isolate(isolate_for_local_isolate_, + ThreadKind::kBackground); +#endif + local_isolate.heap()->AttachPersistentHandles(std::move(handles_)); + UnparkedScope unparked_scope(&local_isolate); + LocalHandleScope handle_scope(&local_isolate); + + for (auto& task : tasks_) { + task.Compile(&local_isolate); + } + + // Get the handle back since we'd need them to install the code later. + handles_ = local_isolate.heap()->DetachPersistentHandles(); + } + + // Executed in the main thread. + void Install(Isolate* isolate) { + for (auto& task : tasks_) { + task.Install(isolate); + } + } + + private: + Isolate* isolate_for_local_isolate_; + std::vector tasks_; + std::unique_ptr handles_; +}; + +class ConcurrentBaselineCompiler { + public: + class JobDispatcher : public v8::JobTask { + public: + JobDispatcher( + Isolate* isolate, + LockedQueue>* incoming_queue, + LockedQueue>* outcoming_queue) + : isolate_(isolate), + incoming_queue_(incoming_queue), + outgoing_queue_(outcoming_queue) {} + + void Run(JobDelegate* delegate) override { + while (!incoming_queue_->IsEmpty() && !delegate->ShouldYield()) { + std::unique_ptr job; + incoming_queue_->Dequeue(&job); + job->Compile(); + outgoing_queue_->Enqueue(std::move(job)); + } + isolate_->stack_guard()->RequestInstallBaselineCode(); + } + + size_t GetMaxConcurrency(size_t worker_count) const override { + return incoming_queue_->size(); + } + + private: + Isolate* isolate_; + LockedQueue>* incoming_queue_; + LockedQueue>* outgoing_queue_; + }; + + explicit ConcurrentBaselineCompiler(Isolate* isolate) : isolate_(isolate) { + if (FLAG_concurrent_sparkplug) { + job_handle_ = V8::GetCurrentPlatform()->PostJob( + TaskPriority::kUserVisible, + std::make_unique(isolate_, &incoming_queue_, + &outgoing_queue_)); + } + } + + ~ConcurrentBaselineCompiler() { + if (job_handle_ && job_handle_->IsValid()) { + // Wait for the job handle to complete, so that we know the queue + // pointers are safe. 
+      job_handle_->Cancel();
+    }
+  }
+
+  void CompileBatch(Handle<WeakFixedArray> task_queue, int batch_size) {
+    DCHECK(FLAG_concurrent_sparkplug);
+    RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileBaseline);
+    incoming_queue_.Enqueue(std::make_unique<BaselineBatchCompilerJob>(
+        isolate_, task_queue, batch_size));
+    job_handle_->NotifyConcurrencyIncrease();
+  }
+
+  void InstallBatch() {
+    while (!outgoing_queue_.IsEmpty()) {
+      std::unique_ptr<BaselineBatchCompilerJob> job;
+      outgoing_queue_.Dequeue(&job);
+      job->Install(isolate_);
+    }
+  }
+
+ private:
+  Isolate* isolate_;
+  std::unique_ptr<JobHandle> job_handle_ = nullptr;
+  LockedQueue<std::unique_ptr<BaselineBatchCompilerJob>> incoming_queue_;
+  LockedQueue<std::unique_ptr<BaselineBatchCompilerJob>> outgoing_queue_;
+};
+
 BaselineBatchCompiler::BaselineBatchCompiler(Isolate* isolate)
     : isolate_(isolate),
       compilation_queue_(Handle<WeakFixedArray>::null()),
       last_index_(0),
       estimated_instruction_size_(0),
-      enabled_(true) {}
+      enabled_(true) {
+  if (FLAG_concurrent_sparkplug) {
+    concurrent_compiler_ =
+        std::make_unique<ConcurrentBaselineCompiler>(isolate_);
+  }
+}

 BaselineBatchCompiler::~BaselineBatchCompiler() {
   if (!compilation_queue_.is_null()) {
@@ -36,19 +239,20 @@ BaselineBatchCompiler::~BaselineBatchCompiler() {
   }
 }

-bool BaselineBatchCompiler::EnqueueFunction(Handle<JSFunction> function) {
+void BaselineBatchCompiler::EnqueueFunction(Handle<JSFunction> function) {
   Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
   // Early return if the function is compiled with baseline already or it is
   // not suitable for baseline compilation.
-  if (shared->HasBaselineCode()) return true;
-  if (!CanCompileWithBaseline(isolate_, *shared)) return false;
+  if (shared->HasBaselineCode()) return;
+  if (!CanCompileWithBaseline(isolate_, *shared)) return;

   // Immediately compile the function if batch compilation is disabled.
   if (!is_enabled()) {
     IsCompiledScope is_compiled_scope(
         function->shared().is_compiled_scope(isolate_));
-    return Compiler::CompileBaseline(
-        isolate_, function, Compiler::CLEAR_EXCEPTION, &is_compiled_scope);
+    Compiler::CompileBaseline(isolate_, function, Compiler::CLEAR_EXCEPTION,
+                              &is_compiled_scope);
+    return;
   }

   int estimated_size;
@@ -76,12 +280,26 @@ bool BaselineBatchCompiler::EnqueueFunction(Handle<JSFunction> function) {
                "functions\n",
                (last_index_ + 1));
     }
-    CompileBatch(function);
-    return true;
+    if (FLAG_concurrent_sparkplug) {
+      Enqueue(shared);
+      concurrent_compiler_->CompileBatch(compilation_queue_, last_index_);
+      ClearBatch();
+    } else {
+      CompileBatch(function);
+    }
+  } else {
+    Enqueue(shared);
   }
+}
+
+void BaselineBatchCompiler::Enqueue(Handle<SharedFunctionInfo> shared) {
   EnsureQueueCapacity();
   compilation_queue_->Set(last_index_++, HeapObjectReference::Weak(*shared));
-  return false;
+}
+
+void BaselineBatchCompiler::InstallBatch() {
+  DCHECK(FLAG_concurrent_sparkplug);
+  concurrent_compiler_->InstallBatch();
 }

 void BaselineBatchCompiler::EnsureQueueCapacity() {
@@ -150,6 +368,8 @@ namespace v8 {
 namespace internal {
 namespace baseline {

+class ConcurrentBaselineCompiler {};
+
 BaselineBatchCompiler::BaselineBatchCompiler(Isolate* isolate)
     : isolate_(isolate),
       compilation_queue_(Handle<WeakFixedArray>::null()),
@@ -164,6 +384,8 @@ BaselineBatchCompiler::~BaselineBatchCompiler() {
   }
 }

+void BaselineBatchCompiler::InstallBatch() { UNREACHABLE(); }
+
 }  // namespace baseline
 }  // namespace internal
 }  // namespace v8
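The classes added above form a small hand-off pipeline: the main thread feeds incoming_queue_ via CompileBatch(), JobDispatcher::Run() drains it on a worker thread, and finished jobs come back through outgoing_queue_ to be published by InstallBatch() on the main thread (triggered by the stack-guard interrupt). Below is an illustrative, self-contained sketch of the same two-queue shape in plain C++ -- it is not part of the patch, and std::mutex/std::thread stand in for V8's LockedQueue and v8::JobTask:

// Illustrative only: the two-queue hand-off used by concurrent Sparkplug.
#include <memory>
#include <mutex>
#include <queue>
#include <thread>

template <typename T>
class SimpleLockedQueue {  // stands in for V8's LockedQueue
 public:
  void Enqueue(T value) {
    std::lock_guard<std::mutex> guard(mutex_);
    queue_.push(std::move(value));
  }
  bool Dequeue(T* out) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (queue_.empty()) return false;
    *out = std::move(queue_.front());
    queue_.pop();
    return true;
  }

 private:
  std::mutex mutex_;
  std::queue<T> queue_;
};

struct Job {         // stands in for BaselineBatchCompilerJob
  void Compile() {}  // background-thread half
  void Install() {}  // main-thread half
};

int main() {
  SimpleLockedQueue<std::unique_ptr<Job>> incoming, outgoing;
  incoming.Enqueue(std::make_unique<Job>());  // CompileBatch()
  std::thread worker([&] {                    // JobDispatcher::Run()
    std::unique_ptr<Job> job;
    while (incoming.Dequeue(&job)) {
      job->Compile();
      outgoing.Enqueue(std::move(job));
    }
  });
  worker.join();  // V8 instead raises a stack-guard interrupt and returns.
  std::unique_ptr<Job> job;
  while (outgoing.Dequeue(&job)) job->Install();  // InstallBatch()
}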
#include "src/handles/global-handles.h" #include "src/handles/handles.h" @@ -12,6 +14,9 @@ namespace v8 { namespace internal { namespace baseline { +class BaselineCompiler; +class ConcurrentBaselineCompiler; + class BaselineBatchCompiler { public: static const int kInitialQueueSize = 32; @@ -19,23 +24,26 @@ class BaselineBatchCompiler { explicit BaselineBatchCompiler(Isolate* isolate); ~BaselineBatchCompiler(); // Enqueues SharedFunctionInfo of |function| for compilation. - // Returns true if the function is compiled (either it was compiled already, - // or the current batch including the function was just compiled). - bool EnqueueFunction(Handle function); + void EnqueueFunction(Handle function); void set_enabled(bool enabled) { enabled_ = enabled; } bool is_enabled() { return enabled_; } + void InstallBatch(); + private: // Ensure there is enough space in the compilation queue to enqueue another // function, growing the queue if necessary. void EnsureQueueCapacity(); + // Enqueues SharedFunctionInfo. + void Enqueue(Handle shared); + // Returns true if the current batch exceeds the threshold and should be // compiled. bool ShouldCompileBatch() const; - // Compiles the current batch and returns the number of functions compiled. + // Compiles the current batch. void CompileBatch(Handle function); // Resets the current batch. @@ -60,6 +68,9 @@ class BaselineBatchCompiler { // Flag indicating whether batch compilation is enabled. // Batch compilation can be dynamically disabled e.g. when creating snapshots. bool enabled_; + + // Handle to the background compilation jobs. + std::unique_ptr concurrent_compiler_; }; } // namespace baseline diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc index 63d684e733e4d4..071e46268efb8c 100644 --- a/deps/v8/src/baseline/baseline-compiler.cc +++ b/deps/v8/src/baseline/baseline-compiler.cc @@ -24,6 +24,7 @@ #include "src/codegen/macro-assembler-inl.h" #include "src/common/globals.h" #include "src/execution/frame-constants.h" +#include "src/heap/local-factory-inl.h" #include "src/interpreter/bytecode-array-iterator.h" #include "src/interpreter/bytecode-flags.h" #include "src/logging/runtime-call-stats-scope.h" @@ -42,6 +43,10 @@ #include "src/baseline/ia32/baseline-compiler-ia32-inl.h" #elif V8_TARGET_ARCH_ARM #include "src/baseline/arm/baseline-compiler-arm-inl.h" +#elif V8_TARGET_ARCH_PPC64 +#include "src/baseline/ppc/baseline-compiler-ppc-inl.h" +#elif V8_TARGET_ARCH_S390X +#include "src/baseline/s390/baseline-compiler-s390-inl.h" #elif V8_TARGET_ARCH_RISCV64 #include "src/baseline/riscv64/baseline-compiler-riscv64-inl.h" #elif V8_TARGET_ARCH_MIPS64 @@ -243,43 +248,33 @@ namespace { // than pre-allocating a large enough buffer. #ifdef V8_TARGET_ARCH_IA32 const int kAverageBytecodeToInstructionRatio = 5; -const int kMinimumEstimatedInstructionSize = 200; #else const int kAverageBytecodeToInstructionRatio = 7; -const int kMinimumEstimatedInstructionSize = 300; #endif std::unique_ptr AllocateBuffer( - Isolate* isolate, Handle bytecodes, - BaselineCompiler::CodeLocation code_location) { + Handle bytecodes) { int estimated_size; { DisallowHeapAllocation no_gc; estimated_size = BaselineCompiler::EstimateInstructionSize(*bytecodes); } - Heap* heap = isolate->heap(); - // TODO(victorgomes): When compiling on heap, we allocate whatever is left - // over on the page with a minimum of the estimated_size. 
-  if (code_location == BaselineCompiler::kOnHeap &&
-      Code::SizeFor(estimated_size) <
-          heap->MaxRegularHeapObjectSize(AllocationType::kCode)) {
-    return NewOnHeapAssemblerBuffer(isolate, estimated_size);
-  }
   return NewAssemblerBuffer(RoundUp(estimated_size, 4 * KB));
 }
 }  // namespace

 BaselineCompiler::BaselineCompiler(
-    Isolate* isolate, Handle<SharedFunctionInfo> shared_function_info,
-    Handle<BytecodeArray> bytecode, CodeLocation code_location)
-    : local_isolate_(isolate->AsLocalIsolate()),
-      stats_(isolate->counters()->runtime_call_stats()),
+    LocalIsolate* local_isolate,
+    Handle<SharedFunctionInfo> shared_function_info,
+    Handle<BytecodeArray> bytecode)
+    : local_isolate_(local_isolate),
+      stats_(local_isolate->runtime_call_stats()),
       shared_function_info_(shared_function_info),
       bytecode_(bytecode),
-      masm_(isolate, CodeObjectRequired::kNo,
-            AllocateBuffer(isolate, bytecode, code_location)),
+      masm_(local_isolate->GetMainThreadIsolateUnsafe(),
+            CodeObjectRequired::kNo, AllocateBuffer(bytecode)),
       basm_(&masm_),
       iterator_(bytecode_),
-      zone_(isolate->allocator(), ZONE_NAME),
+      zone_(local_isolate->allocator(), ZONE_NAME),
       labels_(zone_.NewArray<BaselineLabels*>(bytecode_->length())) {
   MemsetPointer(labels_, nullptr, bytecode_->length());
@@ -293,9 +288,15 @@ BaselineCompiler::BaselineCompiler(

 #define __ basm_.

+#define RCS_BASELINE_SCOPE(rcs)                               \
+  RCS_SCOPE(stats_,                                           \
+            local_isolate_->is_main_thread()                  \
+                ? RuntimeCallCounterId::kCompileBaseline##rcs \
+                : RuntimeCallCounterId::kCompileBackgroundBaseline##rcs)
+
 void BaselineCompiler::GenerateCode() {
   {
-    RCS_SCOPE(stats_, RuntimeCallCounterId::kCompileBaselinePreVisit);
+    RCS_BASELINE_SCOPE(PreVisit);
     for (; !iterator_.done(); iterator_.Advance()) {
       PreVisitSingleBytecode();
     }
@@ -307,7 +308,7 @@ void BaselineCompiler::GenerateCode() {
   __ CodeEntry();
   {
-    RCS_SCOPE(stats_, RuntimeCallCounterId::kCompileBaselineVisit);
+    RCS_BASELINE_SCOPE(Visit);
     Prologue();
     AddPosition();
     for (; !iterator_.done(); iterator_.Advance()) {
@@ -317,18 +318,19 @@ void BaselineCompiler::GenerateCode() {
   }
 }

-MaybeHandle<Code> BaselineCompiler::Build(Isolate* isolate) {
+MaybeHandle<Code> BaselineCompiler::Build(LocalIsolate* local_isolate) {
   CodeDesc desc;
-  __ GetCode(isolate, &desc);
+  __ GetCode(local_isolate->GetMainThreadIsolateUnsafe(), &desc);
+
+  // Allocate the bytecode offset table.
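+  // (It maps positions in the generated baseline code back to bytecode
+  // offsets, which the runtime needs when it walks baseline frames.)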
   Handle<ByteArray> bytecode_offset_table =
-      bytecode_offset_table_builder_.ToBytecodeOffsetTable(isolate);
+      bytecode_offset_table_builder_.ToBytecodeOffsetTable(local_isolate);

-  Factory::CodeBuilder code_builder(isolate, desc, CodeKind::BASELINE);
+  Factory::CodeBuilder code_builder(local_isolate, desc, CodeKind::BASELINE);
   code_builder.set_bytecode_offset_table(bytecode_offset_table);
   if (shared_function_info_->HasInterpreterData()) {
     code_builder.set_interpreter_data(
-        handle(shared_function_info_->interpreter_data(), isolate));
+        handle(shared_function_info_->interpreter_data(), local_isolate));
   } else {
     code_builder.set_interpreter_data(bytecode_);
   }
@@ -336,8 +338,7 @@ MaybeHandle<Code> BaselineCompiler::Build(Isolate* isolate) {
 }

 int BaselineCompiler::EstimateInstructionSize(BytecodeArray bytecode) {
-  return bytecode.length() * kAverageBytecodeToInstructionRatio +
-         kMinimumEstimatedInstructionSize;
+  return bytecode.length() * kAverageBytecodeToInstructionRatio;
 }

 interpreter::Register BaselineCompiler::RegisterOperand(int operand_index) {
@@ -929,10 +930,11 @@ void BaselineCompiler::VisitStaNamedProperty() {
 }

 void BaselineCompiler::VisitStaNamedOwnProperty() {
-  // TODO(v8:11429,ishell): Currently we use StoreOwnIC only for storing
-  // properties that already exist in the boilerplate therefore we can use
-  // StoreIC.
-  VisitStaNamedProperty();
+  CallBuiltin<Builtin::kStoreOwnICBaseline>(
+      RegisterOperand(0),               // object
+      Constant<Name>(1),                // name
+      kInterpreterAccumulatorRegister,  // value
+      IndexAsTagged(2));                // slot
 }

 void BaselineCompiler::VisitStaKeyedProperty() {
   CallBuiltin<Builtin::kKeyedStoreICBaseline>(
       RegisterOperand(0),               // object
       RegisterOperand(1),               // key
       kInterpreterAccumulatorRegister,  // value
       IndexAsTagged(2));                // slot
 }

@@ -943,6 +945,14 @@
+void BaselineCompiler::VisitStaKeyedPropertyAsDefine() {
+  CallBuiltin<Builtin::kKeyedDefineOwnICBaseline>(
+      RegisterOperand(0),               // object
+      RegisterOperand(1),               // key
+      kInterpreterAccumulatorRegister,  // value
+      IndexAsTagged(2));                // slot
+}
+
 void BaselineCompiler::VisitStaInArrayLiteral() {
   CallBuiltin<Builtin::kStoreInArrayLiteralICBaseline>(
       RegisterOperand(0),  // object
diff --git a/deps/v8/src/baseline/baseline-compiler.h b/deps/v8/src/baseline/baseline-compiler.h
index 341e7c0822f6fb..22073922535f02 100644
--- a/deps/v8/src/baseline/baseline-compiler.h
+++ b/deps/v8/src/baseline/baseline-compiler.h
@@ -14,6 +14,7 @@
 #include "src/base/threaded-list.h"
 #include "src/base/vlq.h"
 #include "src/baseline/baseline-assembler.h"
+#include "src/execution/local-isolate.h"
 #include "src/handles/handles.h"
 #include "src/interpreter/bytecode-array-iterator.h"
 #include "src/interpreter/bytecode-register.h"
@@ -51,14 +52,12 @@ class BytecodeOffsetTableBuilder {

 class BaselineCompiler {
  public:
-  enum CodeLocation { kOffHeap, kOnHeap };
-  explicit BaselineCompiler(
-      Isolate* isolate, Handle<SharedFunctionInfo> shared_function_info,
-      Handle<BytecodeArray> bytecode,
-      CodeLocation code_location = CodeLocation::kOffHeap);
+  explicit BaselineCompiler(LocalIsolate* local_isolate,
+                            Handle<SharedFunctionInfo> shared_function_info,
+                            Handle<BytecodeArray> bytecode);
   void GenerateCode();
-  MaybeHandle<Code> Build(Isolate* isolate);
+  MaybeHandle<Code> Build(LocalIsolate* local_isolate);
   static int EstimateInstructionSize(BytecodeArray bytecode);

  private:
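One note on the RCS_BASELINE_SCOPE macro added in baseline-compiler.cc above: the ## token-pasting picks between a main-thread and a background runtime-call-stats counter per compilation phase, so RCS_BASELINE_SCOPE(PreVisit) expands (modulo whitespace) to:

RCS_SCOPE(stats_, local_isolate_->is_main_thread()
                      ? RuntimeCallCounterId::kCompileBaselinePreVisit
                      : RuntimeCallCounterId::kCompileBackgroundBaselinePreVisit);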
diff --git a/deps/v8/src/baseline/baseline.cc b/deps/v8/src/baseline/baseline.cc
index 764d2db645a2c0..0a6ada029bab1b 100644
--- a/deps/v8/src/baseline/baseline.cc
+++ b/deps/v8/src/baseline/baseline.cc
@@ -56,34 +56,14 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
   return true;
 }

-namespace {
-MaybeHandle<Code> GenerateOnHeapCode(Isolate* isolate,
-                                     Handle<SharedFunctionInfo> shared,
-                                     Handle<BytecodeArray> bytecode) {
-  CodePageCollectionMemoryModificationScope code_allocation(isolate->heap());
-  baseline::BaselineCompiler compiler(isolate, shared, bytecode,
-                                      baseline::BaselineCompiler::kOnHeap);
-  compiler.GenerateCode();
-  return compiler.Build(isolate);
-}
-
-MaybeHandle<Code> GenerateOffHeapCode(Isolate* isolate,
-                                      Handle<SharedFunctionInfo> shared,
-                                      Handle<BytecodeArray> bytecode) {
-  baseline::BaselineCompiler compiler(isolate, shared, bytecode);
-  compiler.GenerateCode();
-  return compiler.Build(isolate);
-}
-
-}  // namespace
-
 MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
                                        Handle<SharedFunctionInfo> shared) {
   RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileBaseline);
   Handle<BytecodeArray> bytecode(shared->GetBytecodeArray(isolate), isolate);
-  MaybeHandle<Code> code = FLAG_sparkplug_on_heap
-                               ? GenerateOnHeapCode(isolate, shared, bytecode)
-                               : GenerateOffHeapCode(isolate, shared, bytecode);
+  LocalIsolate* local_isolate = isolate->main_thread_local_isolate();
+  baseline::BaselineCompiler compiler(local_isolate, shared, bytecode);
+  compiler.GenerateCode();
+  MaybeHandle<Code> code = compiler.Build(local_isolate);
   if (FLAG_print_code && !code.is_null()) {
     code.ToHandleChecked()->Print();
   }
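The EmitReturn changes below (and the matching riscv64 one further down) are a single refactor applied across ports: instead of hard-coding "add 1 for the receiver, then pop", each port delegates to DropArguments and lets the build-time kJSArgcIncludesReceiver constant say whether params_size already counts the receiver slot. The receiver arithmetic reduces to the following illustrative helper (not the TurboAssembler API):

// Illustrative only: how many stack slots a baseline return must drop.
int SlotsToDrop(int params_size, bool argc_includes_receiver) {
  // When the argument count already includes the receiver, params_size is
  // the full number of slots; otherwise one extra slot must be added.
  return argc_includes_receiver ? params_size : params_size + 1;
}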
diff --git a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
index 059d932ef9a3f8..33f792fce83e35 100644
--- a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
+++ b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
@@ -483,8 +483,11 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);

   // Drop receiver + arguments.
-  __ masm()->Add_d(params_size, params_size, 1);  // Include the receiver.
-  __ masm()->Alsl_d(sp, params_size, sp, kPointerSizeLog2);
+  __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
+                           kJSArgcIncludesReceiver
+                               ? TurboAssembler::kCountIncludesReceiver
+                               : TurboAssembler::kCountExcludesReceiver);
+
   __ masm()->Ret();
 }
diff --git a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
index 989d5c4ae5cba3..996b4ba831f93a 100644
--- a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
+++ b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
@@ -499,8 +499,11 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);

   // Drop receiver + arguments.
-  __ masm()->Addu(params_size, params_size, 1);  // Include the receiver.
-  __ masm()->Lsa(sp, sp, params_size, kPointerSizeLog2);
+  __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
+                           kJSArgcIncludesReceiver
+                               ? TurboAssembler::kCountIncludesReceiver
+                               : TurboAssembler::kCountExcludesReceiver);
+
   __ masm()->Ret();
 }
diff --git a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
index 561e45249ed847..18e0c3445dd312 100644
--- a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
+++ b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
@@ -497,8 +497,11 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);

   // Drop receiver + arguments.
-  __ masm()->Daddu(params_size, params_size, 1);  // Include the receiver.
-  __ masm()->Dlsa(sp, sp, params_size, kPointerSizeLog2);
+  __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
+                           kJSArgcIncludesReceiver
+                               ? TurboAssembler::kCountIncludesReceiver
+                               : TurboAssembler::kCountExcludesReceiver);
+
   __ masm()->Ret();
 }
diff --git a/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h
new file mode 100644
index 00000000000000..110f7b74659c3f
--- /dev/null
+++ b/deps/v8/src/baseline/ppc/baseline-assembler-ppc-inl.h
@@ -0,0 +1,374 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_PPC_BASELINE_ASSEMBLER_PPC_INL_H_
+#define V8_BASELINE_PPC_BASELINE_ASSEMBLER_PPC_INL_H_
+
+#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/ppc/assembler-ppc-inl.h"
+#include "src/codegen/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
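+
+// (This header lands as a skeleton of the PPC port: a few pieces -- the
+// scratch-register scope, label binding, and the push/pop plumbing -- are
+// real, while everything else aborts with UNIMPLEMENTED().)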
+
+class BaselineAssembler::ScratchRegisterScope {
+ public:
+  explicit ScratchRegisterScope(BaselineAssembler* assembler)
+      : assembler_(assembler),
+        prev_scope_(assembler->scratch_register_scope_),
+        wrapped_scope_(assembler->masm()) {
+    if (!assembler_->scratch_register_scope_) {
+      // If we haven't opened a scratch scope yet, for the first one add a
+      // couple of extra registers.
+      DCHECK(wrapped_scope_.CanAcquire());
+      wrapped_scope_.Include(r8, r9);
+      wrapped_scope_.Include(kInterpreterBytecodeOffsetRegister);
+    }
+    assembler_->scratch_register_scope_ = this;
+  }
+  ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
+
+  Register AcquireScratch() { return wrapped_scope_.Acquire(); }
+
+ private:
+  BaselineAssembler* assembler_;
+  ScratchRegisterScope* prev_scope_;
+  UseScratchRegisterScope wrapped_scope_;
+};
+
+// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
+enum class Condition : uint32_t {
+  kEqual = static_cast<uint32_t>(eq),
+  kNotEqual = static_cast<uint32_t>(ne),
+
+  kLessThan = static_cast<uint32_t>(lt),
+  kGreaterThan = static_cast<uint32_t>(gt),
+  kLessThanEqual = static_cast<uint32_t>(le),
+  kGreaterThanEqual = static_cast<uint32_t>(ge),
+
+  kUnsignedLessThan = static_cast<uint32_t>(lo),
+  kUnsignedGreaterThan = static_cast<uint32_t>(hi),
+  kUnsignedLessThanEqual = static_cast<uint32_t>(ls),
+  kUnsignedGreaterThanEqual = static_cast<uint32_t>(hs),
+
+  kOverflow = static_cast<uint32_t>(vs),
+  kNoOverflow = static_cast<uint32_t>(vc),
+
+  kZero = static_cast<uint32_t>(eq),
+  kNotZero = static_cast<uint32_t>(ne),
+};
+
+inline internal::Condition AsMasmCondition(Condition cond) {
+  UNIMPLEMENTED();
+  return static_cast<internal::Condition>(cond);
+}
+
+namespace detail {
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+  UNIMPLEMENTED();
+  return false;
+}
+#endif
+
+}  // namespace detail
+
+#define __ masm_->
+
+MemOperand BaselineAssembler::RegisterFrameOperand(
+    interpreter::Register interpreter_register) {
+  UNIMPLEMENTED();
+  return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
+}
+MemOperand BaselineAssembler::FeedbackVectorOperand() {
+  UNIMPLEMENTED();
+  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
+}
+
+void BaselineAssembler::Bind(Label* label) { __ bind(label); }
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
+
+void BaselineAssembler::JumpTarget() {
+  // NOP on arm.
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
+                                   Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
+                                      Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfSmi(Register value, Label* target,
+                                  Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
+                                     Label::Distance) {
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::CallBuiltin(Builtin builtin) { UNIMPLEMENTED(); }
+
+void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
+  ASM_CODE_COMMENT_STRING(masm_,
+                          __ CommentForOffHeapTrampoline("tail call", builtin));
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
+                                      Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
+                               Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
+                                         InstanceType instance_type,
+                                         Register map, Label* target,
+                                         Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
+                                           InstanceType instance_type,
+                                           Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
+                                      MemOperand operand, Label* target,
+                                      Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
+                                  Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
+                                  Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
+                                     MemOperand operand, Label* target,
+                                     Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
+                                     Register value, Label* target,
+                                     Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
+                                   Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::Move(interpreter::Register output, Register source) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::Move(Register output, TaggedIndex value) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::Move(MemOperand output, Register source) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::Move(Register output, ExternalReference reference) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::Move(Register output, int32_t value) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::MoveSmi(Register output, Register source) {
+  UNIMPLEMENTED();
+}
+
+namespace detail {
+
+template <typename Arg>
+inline Register ToRegister(BaselineAssembler* basm,
+                           BaselineAssembler::ScratchRegisterScope* scope,
+                           Arg arg) {
+  UNIMPLEMENTED();
+}
+inline Register ToRegister(BaselineAssembler* basm,
+                           BaselineAssembler::ScratchRegisterScope* scope,
+                           Register reg) {
+  return reg;
+}
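+
+// (Push/PushReverse recurse over the argument pack one element at a time:
+// the <> specialization terminates the recursion, <Arg> pushes a single
+// value, and <Arg, Args...> peels off the head and recurses on the tail.)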
+template <typename... T>
+struct PushAllHelper;
+template <>
+struct PushAllHelper<> {
+  static int Push(BaselineAssembler* basm) { return 0; }
+  static int PushReverse(BaselineAssembler* basm) { return 0; }
+};
+// TODO(ishell): try to pack sequence of pushes into one instruction by
+// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
+// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
+template <typename Arg>
+struct PushAllHelper<Arg> {
+  static int Push(BaselineAssembler* basm, Arg arg) {
+    BaselineAssembler::ScratchRegisterScope scope(basm);
+    basm->masm()->Push(ToRegister(basm, &scope, arg));
+    return 1;
+  }
+  static int PushReverse(BaselineAssembler* basm, Arg arg) {
+    return Push(basm, arg);
+  }
+};
+// TODO(ishell): try to pack sequence of pushes into one instruction by
+// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
+// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+  static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
+    PushAllHelper<Arg>::Push(basm, arg);
+    return 1 + PushAllHelper<Args...>::Push(basm, args...);
+  }
+  static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
+    int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
+    PushAllHelper<Arg>::Push(basm, arg);
+    return nargs + 1;
+  }
+};
+template <>
+struct PushAllHelper<interpreter::RegisterList> {
+  static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+    for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
+      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
+    }
+    return list.register_count();
+  }
+  static int PushReverse(BaselineAssembler* basm,
+                         interpreter::RegisterList list) {
+    for (int reg_index = list.register_count() - 1; reg_index >= 0;
+         --reg_index) {
+      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
+    }
+    return list.register_count();
+  }
+};
+
+template <typename... T>
+struct PopAllHelper;
+template <>
+struct PopAllHelper<> {
+  static void Pop(BaselineAssembler* basm) {}
+};
+// TODO(ishell): try to pack sequence of pops into one instruction by
+// looking at register codes. For example, Pop(r1, r2, r5, r0, r3, r4)
+// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
+template <>
+struct PopAllHelper<Register> {
+  static void Pop(BaselineAssembler* basm, Register reg) {
+    basm->masm()->Pop(reg);
+  }
+};
+template <typename... T>
+struct PopAllHelper<Register, T...> {
+  static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
+    PopAllHelper<Register>::Pop(basm, reg);
+    PopAllHelper<T...>::Pop(basm, tail...);
+  }
+};
+
+}  // namespace detail
+
+template <typename... T>
+int BaselineAssembler::Push(T... vals) {
+  return detail::PushAllHelper<T...>::Push(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::PushReverse(T... vals) {
+  detail::PushAllHelper<T...>::PushReverse(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::Pop(T... registers) {
+  detail::PopAllHelper<T...>::Pop(this, registers...);
+}
+
+void BaselineAssembler::LoadTaggedPointerField(Register output,
+                                               Register source, int offset) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
+                                              int offset) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+                                           int offset) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::LoadByteField(Register output, Register source,
+                                      int offset) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
+                                               Smi value) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
+                                                         int offset,
+                                                         Register value) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
+                                                       int offset,
+                                                       Register value) {
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
+    int32_t weight, Label* skip_interrupt_label) {
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
+    Register weight, Label* skip_interrupt_label) {
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::AddSmi(Register lhs, Smi rhs) { UNIMPLEMENTED(); }
+
+void BaselineAssembler::Switch(Register reg, int case_value_base,
+                               Label** labels, int num_labels) {
+  UNIMPLEMENTED();
+}
+
+#undef __
+
+#define __ basm.
+
+void BaselineAssembler::EmitReturn(MacroAssembler* masm) { UNIMPLEMENTED(); }
+
+#undef __
+
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+    Register reg) {
+  UNIMPLEMENTED();
+}
+
+}  // namespace baseline
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_BASELINE_PPC_BASELINE_ASSEMBLER_PPC_INL_H_
diff --git a/deps/v8/src/baseline/ppc/baseline-compiler-ppc-inl.h b/deps/v8/src/baseline/ppc/baseline-compiler-ppc-inl.h
new file mode 100644
index 00000000000000..3d395bce7d4223
--- /dev/null
+++ b/deps/v8/src/baseline/ppc/baseline-compiler-ppc-inl.h
@@ -0,0 +1,27 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_PPC_BASELINE_COMPILER_PPC_INL_H_
+#define V8_BASELINE_PPC_BASELINE_COMPILER_PPC_INL_H_
+
+#include "src/base/logging.h"
+#include "src/baseline/baseline-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+#define __ basm_.
+
+void BaselineCompiler::Prologue() { UNIMPLEMENTED(); }
+
+void BaselineCompiler::PrologueFillFrame() { UNIMPLEMENTED(); }
+
+void BaselineCompiler::VerifyFrameSize() { UNIMPLEMENTED(); }
+
+}  // namespace baseline
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_BASELINE_PPC_BASELINE_COMPILER_PPC_INL_H_
diff --git a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
index 7bf6bd2f4ec8ee..85ada600f18d28 100644
--- a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
+++ b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
@@ -503,9 +503,10 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);

   // Drop receiver + arguments.
-  __ masm()->Add64(params_size, params_size, 1);  // Include the receiver.
-  __ masm()->slli(params_size, params_size, kSystemPointerSizeLog2);
-  __ masm()->Add64(sp, sp, params_size);
+  __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+                           kJSArgcIncludesReceiver
+                               ? MacroAssembler::kCountIncludesReceiver
+                               : MacroAssembler::kCountExcludesReceiver);
   __ masm()->Ret();
 }
diff --git a/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h
new file mode 100644
index 00000000000000..c73f080ecb8db0
--- /dev/null
+++ b/deps/v8/src/baseline/s390/baseline-assembler-s390-inl.h
@@ -0,0 +1,374 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_S390_BASELINE_ASSEMBLER_S390_INL_H_
+#define V8_BASELINE_S390_BASELINE_ASSEMBLER_S390_INL_H_
+
+#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/s390/assembler-s390-inl.h"
+#include "src/codegen/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+class BaselineAssembler::ScratchRegisterScope {
+ public:
+  explicit ScratchRegisterScope(BaselineAssembler* assembler)
+      : assembler_(assembler),
+        prev_scope_(assembler->scratch_register_scope_),
+        wrapped_scope_(assembler->masm()) {
+    if (!assembler_->scratch_register_scope_) {
+      // If we haven't opened a scratch scope yet, for the first one add a
+      // couple of extra registers.
+      DCHECK(wrapped_scope_.CanAcquire());
+      wrapped_scope_.Include(r8, r9);
+      wrapped_scope_.Include(kInterpreterBytecodeOffsetRegister);
+    }
+    assembler_->scratch_register_scope_ = this;
+  }
+  ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
+
+  Register AcquireScratch() { return wrapped_scope_.Acquire(); }
+
+ private:
+  BaselineAssembler* assembler_;
+  ScratchRegisterScope* prev_scope_;
+  UseScratchRegisterScope wrapped_scope_;
+};
+
+// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
+enum class Condition : uint32_t {
+  kEqual = static_cast<uint32_t>(eq),
+  kNotEqual = static_cast<uint32_t>(ne),
+
+  kLessThan = static_cast<uint32_t>(lt),
+  kGreaterThan = static_cast<uint32_t>(gt),
+  kLessThanEqual = static_cast<uint32_t>(le),
+  kGreaterThanEqual = static_cast<uint32_t>(ge),
+
+  kUnsignedLessThan = static_cast<uint32_t>(lo),
+  kUnsignedGreaterThan = static_cast<uint32_t>(hi),
+  kUnsignedLessThanEqual = static_cast<uint32_t>(ls),
+  kUnsignedGreaterThanEqual = static_cast<uint32_t>(hs),
+
+  kOverflow = static_cast<uint32_t>(vs),
+  kNoOverflow = static_cast<uint32_t>(vc),
+
+  kZero = static_cast<uint32_t>(eq),
+  kNotZero = static_cast<uint32_t>(ne),
+};
+
+inline internal::Condition AsMasmCondition(Condition cond) {
+  UNIMPLEMENTED();
+  return static_cast<internal::Condition>(cond);
+}
+
+namespace detail {
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+  UNIMPLEMENTED();
+  return false;
+}
+#endif
+
+}  // namespace detail
+
+#define __ masm_->
+
+MemOperand BaselineAssembler::RegisterFrameOperand(
+    interpreter::Register interpreter_register) {
+  UNIMPLEMENTED();
+  return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
+}
+MemOperand BaselineAssembler::FeedbackVectorOperand() {
+  UNIMPLEMENTED();
+  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
+}
+
+void BaselineAssembler::Bind(Label* label) { __ bind(label); }
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
+
+void BaselineAssembler::JumpTarget() {
+  // NOP on arm.
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
+                                   Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
+                                      Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfSmi(Register value, Label* target,
+                                  Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
+                                     Label::Distance) {
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::CallBuiltin(Builtin builtin) { UNIMPLEMENTED(); }
+
+void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
+  ASM_CODE_COMMENT_STRING(masm_,
+                          __ CommentForOffHeapTrampoline("tail call", builtin));
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
+                                      Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
+                               Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
+                                         InstanceType instance_type,
+                                         Register map, Label* target,
+                                         Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
+                                           InstanceType instance_type,
+                                           Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
+                                      MemOperand operand, Label* target,
+                                      Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
+                                  Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
+                                  Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
+                                     MemOperand operand, Label* target,
+                                     Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
+                                     Register value, Label* target,
+                                     Label::Distance) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
+                                   Label* target, Label::Distance) {
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::Move(interpreter::Register output, Register source) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::Move(Register output, TaggedIndex value) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::Move(MemOperand output, Register source) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::Move(Register output, ExternalReference reference) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::Move(Register output, int32_t value) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::MoveSmi(Register output, Register source) {
+  UNIMPLEMENTED();
+}
+
+namespace detail {
+
+template <typename Arg>
+inline Register ToRegister(BaselineAssembler* basm,
+                           BaselineAssembler::ScratchRegisterScope* scope,
+                           Arg arg) {
+  UNIMPLEMENTED();
+}
+inline Register ToRegister(BaselineAssembler* basm,
+                           BaselineAssembler::ScratchRegisterScope* scope,
+                           Register reg) {
+  return reg;
+}
+
+template <typename... T>
+struct PushAllHelper;
+template <>
+struct PushAllHelper<> {
+  static int Push(BaselineAssembler* basm) { return 0; }
+  static int PushReverse(BaselineAssembler* basm) { return 0; }
+};
+// TODO(ishell): try to pack sequence of pushes into one instruction by
+// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
+// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
+template <typename Arg>
+struct PushAllHelper<Arg> {
+  static int Push(BaselineAssembler* basm, Arg arg) {
+    BaselineAssembler::ScratchRegisterScope scope(basm);
+    basm->masm()->Push(ToRegister(basm, &scope, arg));
+    return 1;
+  }
+  static int PushReverse(BaselineAssembler* basm, Arg arg) {
+    return Push(basm, arg);
+  }
+};
+// TODO(ishell): try to pack sequence of pushes into one instruction by
+// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
+// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+  static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
+    PushAllHelper<Arg>::Push(basm, arg);
+    return 1 + PushAllHelper<Args...>::Push(basm, args...);
+  }
+  static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
+    int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
+    PushAllHelper<Arg>::Push(basm, arg);
+    return nargs + 1;
+  }
+};
+template <>
+struct PushAllHelper<interpreter::RegisterList> {
+  static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+    for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
+      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
+    }
+    return list.register_count();
+  }
+  static int PushReverse(BaselineAssembler* basm,
+                         interpreter::RegisterList list) {
+    for (int reg_index = list.register_count() - 1; reg_index >= 0;
+         --reg_index) {
+      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
+    }
+    return list.register_count();
+  }
+};
+
+template <typename... T>
+struct PopAllHelper;
+template <>
+struct PopAllHelper<> {
+  static void Pop(BaselineAssembler* basm) {}
+};
+// TODO(ishell): try to pack sequence of pops into one instruction by
+// looking at register codes. For example, Pop(r1, r2, r5, r0, r3, r4)
+// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
+template <>
+struct PopAllHelper<Register> {
+  static void Pop(BaselineAssembler* basm, Register reg) {
+    basm->masm()->Pop(reg);
+  }
+};
+template <typename... T>
+struct PopAllHelper<Register, T...> {
+  static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
+    PopAllHelper<Register>::Pop(basm, reg);
+    PopAllHelper<T...>::Pop(basm, tail...);
+  }
+};
+
+}  // namespace detail
+
+template <typename... T>
+int BaselineAssembler::Push(T... vals) {
+  return detail::PushAllHelper<T...>::Push(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::PushReverse(T... vals) {
+  detail::PushAllHelper<T...>::PushReverse(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::Pop(T... registers) {
+  detail::PopAllHelper<T...>::Pop(this, registers...);
+}
+
+void BaselineAssembler::LoadTaggedPointerField(Register output,
+                                               Register source, int offset) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
+                                              int offset) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+                                           int offset) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::LoadByteField(Register output, Register source,
+                                      int offset) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
+                                               Smi value) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
+                                                         int offset,
+                                                         Register value) {
+  UNIMPLEMENTED();
+}
+void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
+                                                       int offset,
+                                                       Register value) {
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
+    int32_t weight, Label* skip_interrupt_label) {
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
+    Register weight, Label* skip_interrupt_label) {
+  UNIMPLEMENTED();
+}
+
+void BaselineAssembler::AddSmi(Register lhs, Smi rhs) { UNIMPLEMENTED(); }
+
+void BaselineAssembler::Switch(Register reg, int case_value_base,
+                               Label** labels, int num_labels) {
+  UNIMPLEMENTED();
+}
+
+#undef __
+
+#define __ basm.
+
+void BaselineAssembler::EmitReturn(MacroAssembler* masm) { UNIMPLEMENTED(); }
+
+#undef __
+
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+    Register reg) {
+  UNIMPLEMENTED();
+}
+
+}  // namespace baseline
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_BASELINE_S390_BASELINE_ASSEMBLER_S390_INL_H_
diff --git a/deps/v8/src/baseline/s390/baseline-compiler-s390-inl.h b/deps/v8/src/baseline/s390/baseline-compiler-s390-inl.h
new file mode 100644
index 00000000000000..c481c549401a30
--- /dev/null
+++ b/deps/v8/src/baseline/s390/baseline-compiler-s390-inl.h
@@ -0,0 +1,27 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_S390_BASELINE_COMPILER_S390_INL_H_
+#define V8_BASELINE_S390_BASELINE_COMPILER_S390_INL_H_
+
+#include "src/base/logging.h"
+#include "src/baseline/baseline-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+#define __ basm_.
+
+void BaselineCompiler::Prologue() { UNIMPLEMENTED(); }
+
+void BaselineCompiler::PrologueFillFrame() { UNIMPLEMENTED(); }
+
+void BaselineCompiler::VerifyFrameSize() { UNIMPLEMENTED(); }
+
+}  // namespace baseline
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_BASELINE_S390_BASELINE_COMPILER_S390_INL_H_
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 00f57bcbff8ff0..a3a2209f9fefdd 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -2283,7 +2283,11 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   // -----------------------------------
   __ AssertFunction(r1);

+  Label class_constructor;
   __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kFlagsOffset));
+  __ tst(r3, Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
+  __ b(ne, &class_constructor);

   // Enter the context of the function; ToObject has to run in the function
   // context, and we also need to take the global proxy from the function
@@ -2358,6 +2362,14 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   __ ldrh(r2,
           FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
   __ InvokeFunctionCode(r1, no_reg, r2, r0, InvokeType::kJump);
+
+  // The function is a "classConstructor", need to raise an exception.
+  __ bind(&class_constructor);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    __ push(r1);
+    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+  }
 }

 namespace {
@@ -2747,6 +2759,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
   __ Trap();
 }

+void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
+  // TODO(v8:12191): Implement for this platform.
+  __ Trap();
+}
+
 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
   // Only needed on x64.
   __ Trap();
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 27d13ecb46f1f2..0cb79c1f04d5a0 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -2648,8 +2648,12 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   // -----------------------------------
   __ AssertFunction(x1);

+  Label class_constructor;
   __ LoadTaggedPointerField(
       x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
+  __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::IsClassConstructorBit::kMask,
+                           &class_constructor);

   // Enter the context of the function; ToObject has to run in the function
   // context, and we also need to take the global proxy from the function
@@ -2725,6 +2729,15 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   __ Ldrh(x2,
           FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
   __ InvokeFunctionCode(x1, no_reg, x2, x0, InvokeType::kJump);
+
+  // The function is a "classConstructor", need to raise an exception.
+  __ Bind(&class_constructor);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    __ PushArgument(x1);
+    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+    __ Unreachable();
+  }
 }

 namespace {
@@ -3175,6 +3188,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
   __ Trap();
 }

+void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
+  // TODO(v8:12191): Implement for this platform.
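+  // (The builtin is declared for every architecture in
+  // builtins-definitions.h, so unported platforms emit a trapping stub.)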
+  __ Trap();
+}
+
 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
   // Only needed on x64.
   __ Trap();
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index 7716d94288a9b8..3726207e1d6268 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -1168,7 +1168,7 @@ extern macro Int32Constant(constexpr int31): int31;
 extern macro Int32Constant(constexpr int32): int32;
 extern macro Int64Constant(constexpr int64): int64;
 extern macro Uint64Constant(constexpr uint64): uint64;
-extern macro Float64Constant(constexpr int31): float64;
+extern macro Float64Constant(constexpr int32): float64;
 extern macro Float64Constant(constexpr float64): float64;
 extern macro SmiConstant(constexpr int31): Smi;
 extern macro SmiConstant(constexpr Smi): Smi;
@@ -1799,7 +1798,6 @@ macro Float64IsSomeInfinity(value: float64): bool {
   return value == (Convert<float64>(0) - V8_INFINITY);
 }

-@export
 macro IsIntegerOrSomeInfinity(o: Object): bool {
   typeswitch (o) {
     case (Smi): {
@@ -1817,20 +1816,6 @@ macro IsIntegerOrSomeInfinity(o: Object): bool {
   }
 }

-builtin CheckNumberInRange(implicit context: Context)(
-    value: Number, min: Number, max: Number, nodeId: Smi): Undefined {
-  if (IsIntegerOrSomeInfinity(value) && min <= value && value <= max) {
-    return Undefined;
-  } else {
-    Print('Range type assertion failed! (value/min/max/nodeId)');
-    Print(value);
-    Print(min);
-    Print(max);
-    Print(nodeId);
-    unreachable;
-  }
-}
-
 // Assert that the objects satisfy SameValue or are both the hole.
 builtin CheckSameObject(implicit context: Context)(
     lhs: Object, rhs: Object): Undefined {
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index f7b94c4059393d..db4fc381890ce5 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -273,6 +273,7 @@ namespace internal {
   /* Handlers */                                               \
   TFH(KeyedLoadIC_PolymorphicName, LoadWithVector)             \
   TFH(KeyedStoreIC_Megamorphic, Store)                         \
+  TFH(KeyedDefineOwnIC_Megamorphic, Store)                     \
   TFH(LoadGlobalIC_NoFeedback, LoadGlobalNoFeedback)           \
   TFH(LoadIC_FunctionPrototype, LoadWithVector)                \
   TFH(LoadIC_StringLength, LoadWithVector)                     \
@@ -280,6 +281,7 @@
   TFH(LoadIC_NoFeedback, LoadNoFeedback)                       \
   TFH(StoreGlobalIC_Slow, StoreWithVector)                     \
   TFH(StoreIC_NoFeedback, Store)                               \
+  TFH(StoreOwnIC_NoFeedback, Store)                            \
   TFH(KeyedLoadIC_SloppyArguments, LoadWithVector)             \
   TFH(LoadIndexedInterceptorIC, LoadWithVector)                \
   TFH(KeyedStoreIC_SloppyArguments_Standard, StoreWithVector)  \
@@ -630,9 +632,15 @@
   TFH(StoreIC, StoreWithVector)                                \
   TFH(StoreICTrampoline, Store)                                \
   TFH(StoreICBaseline, StoreBaseline)                          \
+  TFH(StoreOwnIC, StoreWithVector)                             \
+  TFH(StoreOwnICTrampoline, Store)                             \
+  TFH(StoreOwnICBaseline, StoreBaseline)                       \
   TFH(KeyedStoreIC, StoreWithVector)                           \
   TFH(KeyedStoreICTrampoline, Store)                           \
   TFH(KeyedStoreICBaseline, StoreBaseline)                     \
+  TFH(KeyedDefineOwnIC, StoreWithVector)                       \
+  TFH(KeyedDefineOwnICTrampoline, Store)                       \
+  TFH(KeyedDefineOwnICBaseline, StoreBaseline)                 \
   TFH(StoreInArrayLiteralIC, StoreWithVector)                  \
   TFH(StoreInArrayLiteralICBaseline, StoreBaseline)            \
   TFH(LookupContextBaseline, LookupBaseline)                   \
@@ -927,6 +935,7 @@
                                                                \
   /* Wasm */                                                   \
   IF_WASM(ASM, GenericJSToWasmWrapper, Dummy)                  \
+  IF_WASM(ASM, WasmReturnPromiseOnSuspend, Dummy)              \
   IF_WASM(ASM, WasmCompileLazy, Dummy)                         \
   IF_WASM(ASM, WasmDebugBreak, Dummy)                          \
   IF_WASM(ASM, WasmOnStackReplace, Dummy)                      \
@@ -1044,7 +1053,579 @@
                                                                \
   /*
CallAsyncModule* are spec anonymyous functions */ \ CPP(CallAsyncModuleFulfilled) \ - CPP(CallAsyncModuleRejected) + CPP(CallAsyncModuleRejected) \ + \ + /* Temporal */ \ + /* Temporal #sec-temporal.now.timezone */ \ + CPP(TemporalNowTimeZone) \ + /* Temporal #sec-temporal.now.instant */ \ + CPP(TemporalNowInstant) \ + /* Temporal #sec-temporal.now.plaindatetime */ \ + CPP(TemporalNowPlainDateTime) \ + /* Temporal #sec-temporal.now.plaindatetimeiso */ \ + CPP(TemporalNowPlainDateTimeISO) \ + /* Temporal #sec-temporal.now.zoneddatetime */ \ + CPP(TemporalNowZonedDateTime) \ + /* Temporal #sec-temporal.now.zoneddatetimeiso */ \ + CPP(TemporalNowZonedDateTimeISO) \ + /* Temporal #sec-temporal.now.plaindate */ \ + CPP(TemporalNowPlainDate) \ + /* Temporal #sec-temporal.now.plaindateiso */ \ + CPP(TemporalNowPlainDateISO) \ + /* There are no Temporal.now.plainTime */ \ + /* See https://github.com/tc39/proposal-temporal/issues/1540 */ \ + /* Temporal #sec-temporal.now.plaintimeiso */ \ + CPP(TemporalNowPlainTimeISO) \ + \ + /* Temporal.PlaneDate */ \ + /* Temporal #sec-temporal.plaindate */ \ + CPP(TemporalPlainDateConstructor) \ + /* Temporal #sec-temporal.plaindate.from */ \ + CPP(TemporalPlainDateFrom) \ + /* Temporal #sec-temporal.plaindate.compare */ \ + CPP(TemporalPlainDateCompare) \ + /* Temporal #sec-get-temporal.plaindate.prototype.calendar */ \ + CPP(TemporalPlainDatePrototypeCalendar) \ + /* Temporal #sec-get-temporal.plaindate.prototype.year */ \ + CPP(TemporalPlainDatePrototypeYear) \ + /* Temporal #sec-get-temporal.plaindate.prototype.month */ \ + CPP(TemporalPlainDatePrototypeMonth) \ + /* Temporal #sec-get-temporal.plaindate.prototype.monthcode */ \ + CPP(TemporalPlainDatePrototypeMonthCode) \ + /* Temporal #sec-get-temporal.plaindate.prototype.day */ \ + CPP(TemporalPlainDatePrototypeDay) \ + /* Temporal #sec-get-temporal.plaindate.prototype.dayofweek */ \ + CPP(TemporalPlainDatePrototypeDayOfWeek) \ + /* Temporal #sec-get-temporal.plaindate.prototype.dayofyear */ \ + CPP(TemporalPlainDatePrototypeDayOfYear) \ + /* Temporal #sec-get-temporal.plaindate.prototype.weekofyear */ \ + CPP(TemporalPlainDatePrototypeWeekOfYear) \ + /* Temporal #sec-get-temporal.plaindate.prototype.daysinweek */ \ + CPP(TemporalPlainDatePrototypeDaysInWeek) \ + /* Temporal #sec-get-temporal.plaindate.prototype.daysinmonth */ \ + CPP(TemporalPlainDatePrototypeDaysInMonth) \ + /* Temporal #sec-get-temporal.plaindate.prototype.daysinyear */ \ + CPP(TemporalPlainDatePrototypeDaysInYear) \ + /* Temporal #sec-get-temporal.plaindate.prototype.monthsinyear */ \ + CPP(TemporalPlainDatePrototypeMonthsInYear) \ + /* Temporal #sec-get-temporal.plaindate.prototype.inleapyear */ \ + CPP(TemporalPlainDatePrototypeInLeapYear) \ + /* Temporal #sec-temporal.plaindate.prototype.toplainyearmonth */ \ + CPP(TemporalPlainDatePrototypeToPlainYearMonth) \ + /* Temporal #sec-temporal.plaindate.prototype.toplainmonthday */ \ + CPP(TemporalPlainDatePrototypeToPlainMonthDay) \ + /* Temporal #sec-temporal.plaindate.prototype.getisofields */ \ + CPP(TemporalPlainDatePrototypeGetISOFields) \ + /* Temporal #sec-temporal.plaindate.prototype.add */ \ + CPP(TemporalPlainDatePrototypeAdd) \ + /* Temporal #sec-temporal.plaindate.prototype.substract */ \ + CPP(TemporalPlainDatePrototypeSubtract) \ + /* Temporal #sec-temporal.plaindate.prototype.with */ \ + CPP(TemporalPlainDatePrototypeWith) \ + /* Temporal #sec-temporal.plaindate.prototype.withcalendar */ \ + CPP(TemporalPlainDatePrototypeWithCalendar) \ + /* Temporal 
#sec-temporal.plaindate.prototype.until */ \ + CPP(TemporalPlainDatePrototypeUntil) \ + /* Temporal #sec-temporal.plaindate.prototype.since */ \ + CPP(TemporalPlainDatePrototypeSince) \ + /* Temporal #sec-temporal.plaindate.prototype.equals */ \ + CPP(TemporalPlainDatePrototypeEquals) \ + /* Temporal #sec-temporal.plaindate.prototype.toplaindatetime */ \ + CPP(TemporalPlainDatePrototypeToPlainDateTime) \ + /* Temporal #sec-temporal.plaindate.prototype.tozoneddatetime */ \ + CPP(TemporalPlainDatePrototypeToZonedDateTime) \ + /* Temporal #sec-temporal.plaindate.prototype.tostring */ \ + CPP(TemporalPlainDatePrototypeToString) \ + /* Temporal #sec-temporal.plaindate.prototype.tojson */ \ + CPP(TemporalPlainDatePrototypeToJSON) \ + /* Temporal #sec-temporal.plaindate.prototype.valueof */ \ + CPP(TemporalPlainDatePrototypeValueOf) \ + \ + /* Temporal.PlaneTime */ \ + /* Temporal #sec-temporal.plaintime */ \ + CPP(TemporalPlainTimeConstructor) \ + /* Temporal #sec-temporal.plaintime.from */ \ + CPP(TemporalPlainTimeFrom) \ + /* Temporal #sec-temporal.plaintime.compare */ \ + CPP(TemporalPlainTimeCompare) \ + /* Temporal #sec-get-temporal.plaintime.prototype.calendar */ \ + CPP(TemporalPlainTimePrototypeCalendar) \ + /* Temporal #sec-get-temporal.plaintime.prototype.hour */ \ + CPP(TemporalPlainTimePrototypeHour) \ + /* Temporal #sec-get-temporal.plaintime.prototype.minute */ \ + CPP(TemporalPlainTimePrototypeMinute) \ + /* Temporal #sec-get-temporal.plaintime.prototype.second */ \ + CPP(TemporalPlainTimePrototypeSecond) \ + /* Temporal #sec-get-temporal.plaintime.prototype.millisecond */ \ + CPP(TemporalPlainTimePrototypeMillisecond) \ + /* Temporal #sec-get-temporal.plaintime.prototype.microsecond */ \ + CPP(TemporalPlainTimePrototypeMicrosecond) \ + /* Temporal #sec-get-temporal.plaintime.prototype.nanoseond */ \ + CPP(TemporalPlainTimePrototypeNanosecond) \ + /* Temporal #sec-temporal.plaintime.prototype.add */ \ + CPP(TemporalPlainTimePrototypeAdd) \ + /* Temporal #sec-temporal.plaintime.prototype.subtract */ \ + CPP(TemporalPlainTimePrototypeSubtract) \ + /* Temporal #sec-temporal.plaintime.prototype.with */ \ + CPP(TemporalPlainTimePrototypeWith) \ + /* Temporal #sec-temporal.plaintime.prototype.until */ \ + CPP(TemporalPlainTimePrototypeUntil) \ + /* Temporal #sec-temporal.plaintime.prototype.since */ \ + CPP(TemporalPlainTimePrototypeSince) \ + /* Temporal #sec-temporal.plaintime.prototype.round */ \ + CPP(TemporalPlainTimePrototypeRound) \ + /* Temporal #sec-temporal.plaintime.prototype.equals */ \ + CPP(TemporalPlainTimePrototypeEquals) \ + /* Temporal #sec-temporal.plaintime.prototype.toplaindatetime */ \ + CPP(TemporalPlainTimePrototypeToPlainDateTime) \ + /* Temporal #sec-temporal.plaintime.prototype.tozoneddatetime */ \ + CPP(TemporalPlainTimePrototypeToZonedDateTime) \ + /* Temporal #sec-temporal.plaintime.prototype.getisofields */ \ + CPP(TemporalPlainTimePrototypeGetISOFields) \ + /* Temporal #sec-temporal.plaintime.prototype.tostring */ \ + CPP(TemporalPlainTimePrototypeToString) \ + /* Temporal #sec-temporal.plaindtimeprototype.tojson */ \ + CPP(TemporalPlainTimePrototypeToJSON) \ + /* Temporal #sec-temporal.plaintime.prototype.valueof */ \ + CPP(TemporalPlainTimePrototypeValueOf) \ + \ + /* Temporal.PlaneDateTime */ \ + /* Temporal #sec-temporal.plaindatetime */ \ + CPP(TemporalPlainDateTimeConstructor) \ + /* Temporal #sec-temporal.plaindatetime.from */ \ + CPP(TemporalPlainDateTimeFrom) \ + /* Temporal #sec-temporal.plaindatetime.compare */ \ + 
CPP(TemporalPlainDateTimeCompare) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.calendar */ \ + CPP(TemporalPlainDateTimePrototypeCalendar) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.year */ \ + CPP(TemporalPlainDateTimePrototypeYear) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.month */ \ + CPP(TemporalPlainDateTimePrototypeMonth) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.monthcode */ \ + CPP(TemporalPlainDateTimePrototypeMonthCode) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.day */ \ + CPP(TemporalPlainDateTimePrototypeDay) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.hour */ \ + CPP(TemporalPlainDateTimePrototypeHour) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.minute */ \ + CPP(TemporalPlainDateTimePrototypeMinute) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.second */ \ + CPP(TemporalPlainDateTimePrototypeSecond) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.millisecond */ \ + CPP(TemporalPlainDateTimePrototypeMillisecond) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.microsecond */ \ + CPP(TemporalPlainDateTimePrototypeMicrosecond) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.nanosecond */ \ + CPP(TemporalPlainDateTimePrototypeNanosecond) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.dayofweek */ \ + CPP(TemporalPlainDateTimePrototypeDayOfWeek) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.dayofyear */ \ + CPP(TemporalPlainDateTimePrototypeDayOfYear) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.weekofyear */ \ + CPP(TemporalPlainDateTimePrototypeWeekOfYear) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.daysinweek */ \ + CPP(TemporalPlainDateTimePrototypeDaysInWeek) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.daysinmonth */ \ + CPP(TemporalPlainDateTimePrototypeDaysInMonth) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.daysinyear */ \ + CPP(TemporalPlainDateTimePrototypeDaysInYear) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.monthsinyear */ \ + CPP(TemporalPlainDateTimePrototypeMonthsInYear) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.inleapyear */ \ + CPP(TemporalPlainDateTimePrototypeInLeapYear) \ + /* Temporal #sec-temporal.plaindatetime.prototype.with */ \ + CPP(TemporalPlainDateTimePrototypeWith) \ + /* Temporal #sec-temporal.plaindatetime.prototype.withplainTime */ \ + CPP(TemporalPlainDateTimePrototypeWithPlainTime) \ + /* Temporal #sec-temporal.plaindatetime.prototype.withplainDate */ \ + CPP(TemporalPlainDateTimePrototypeWithPlainDate) \ + /* Temporal #sec-temporal.plaindatetime.prototype.withcalendar */ \ + CPP(TemporalPlainDateTimePrototypeWithCalendar) \ + /* Temporal #sec-temporal.plaindatetime.prototype.add */ \ + CPP(TemporalPlainDateTimePrototypeAdd) \ + /* Temporal #sec-temporal.plaindatetime.prototype.subtract */ \ + CPP(TemporalPlainDateTimePrototypeSubtract) \ + /* Temporal #sec-temporal.plaindatetime.prototype.until */ \ + CPP(TemporalPlainDateTimePrototypeUntil) \ + /* Temporal #sec-temporal.plaindatetime.prototype.since */ \ + CPP(TemporalPlainDateTimePrototypeSince) \ + /* Temporal #sec-temporal.plaindatetime.prototype.round */ \ + CPP(TemporalPlainDateTimePrototypeRound) \ + /* Temporal #sec-temporal.plaindatetime.prototype.equals */ \ + CPP(TemporalPlainDateTimePrototypeEquals) \ + /* Temporal #sec-temporal.plaindatetime.prototype.tostring */ \ + CPP(TemporalPlainDateTimePrototypeToString) \ + /* Temporal 
#sec-temporal.plaindatetime.prototype.tojson */ \ + CPP(TemporalPlainDateTimePrototypeToJSON) \ + /* Temporal #sec-temporal.plaindatetime.prototype.valueof */ \ + CPP(TemporalPlainDateTimePrototypeValueOf) \ + /* Temporal #sec-temporal.plaindatetime.prototype.tozoneddatetime */ \ + CPP(TemporalPlainDateTimePrototypeToZonedDateTime) \ + /* Temporal #sec-temporal.plaindatetime.prototype.toplaindate */ \ + CPP(TemporalPlainDateTimePrototypeToPlainDate) \ + /* Temporal #sec-temporal.plaindatetime.prototype.toplainyearmonth */ \ + CPP(TemporalPlainDateTimePrototypeToPlainYearMonth) \ + /* Temporal #sec-temporal.plaindatetime.prototype.toplainmonthday */ \ + CPP(TemporalPlainDateTimePrototypeToPlainMonthDay) \ + /* Temporal #sec-temporal.plaindatetime.prototype.toplaintime */ \ + CPP(TemporalPlainDateTimePrototypeToPlainTime) \ + /* Temporal #sec-temporal.plaindatetime.prototype.getisofields */ \ + CPP(TemporalPlainDateTimePrototypeGetISOFields) \ + \ + /* Temporal.ZonedDateTime */ \ + /* Temporal #sec-temporal.zoneddatetime */ \ + CPP(TemporalZonedDateTimeConstructor) \ + /* Temporal #sec-temporal.zoneddatetime.from */ \ + CPP(TemporalZonedDateTimeFrom) \ + /* Temporal #sec-temporal.zoneddatetime.compare */ \ + CPP(TemporalZonedDateTimeCompare) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.calendar */ \ + CPP(TemporalZonedDateTimePrototypeCalendar) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.timezone */ \ + CPP(TemporalZonedDateTimePrototypeTimeZone) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.year */ \ + CPP(TemporalZonedDateTimePrototypeYear) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.month */ \ + CPP(TemporalZonedDateTimePrototypeMonth) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.monthcode */ \ + CPP(TemporalZonedDateTimePrototypeMonthCode) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.day */ \ + CPP(TemporalZonedDateTimePrototypeDay) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.hour */ \ + CPP(TemporalZonedDateTimePrototypeHour) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.minute */ \ + CPP(TemporalZonedDateTimePrototypeMinute) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.second */ \ + CPP(TemporalZonedDateTimePrototypeSecond) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.millisecond */ \ + CPP(TemporalZonedDateTimePrototypeMillisecond) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.microsecond */ \ + CPP(TemporalZonedDateTimePrototypeMicrosecond) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.nanosecond */ \ + CPP(TemporalZonedDateTimePrototypeNanosecond) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.epochseconds */ \ + CPP(TemporalZonedDateTimePrototypeEpochSeconds) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.epochmilliseconds */ \ + CPP(TemporalZonedDateTimePrototypeEpochMilliseconds) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.epochmicroseconds */ \ + CPP(TemporalZonedDateTimePrototypeEpochMicroseconds) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.epochnanoseconds */ \ + CPP(TemporalZonedDateTimePrototypeEpochNanoseconds) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.dayofweek */ \ + CPP(TemporalZonedDateTimePrototypeDayOfWeek) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.dayofyear */ \ + CPP(TemporalZonedDateTimePrototypeDayOfYear) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.weekofyear */ \ + CPP(TemporalZonedDateTimePrototypeWeekOfYear) \ + /*
Temporal #sec-get-temporal.zoneddatetime.prototype.hoursinday */ \ + CPP(TemporalZonedDateTimePrototypeHoursInDay) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.daysinweek */ \ + CPP(TemporalZonedDateTimePrototypeDaysInWeek) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.daysinmonth */ \ + CPP(TemporalZonedDateTimePrototypeDaysInMonth) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.daysinyear */ \ + CPP(TemporalZonedDateTimePrototypeDaysInYear) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.monthsinyear */ \ + CPP(TemporalZonedDateTimePrototypeMonthsInYear) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.inleapyear */ \ + CPP(TemporalZonedDateTimePrototypeInLeapYear) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.offsetnanoseconds */ \ + CPP(TemporalZonedDateTimePrototypeOffsetNanoseconds) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.offset */ \ + CPP(TemporalZonedDateTimePrototypeOffset) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.with */ \ + CPP(TemporalZonedDateTimePrototypeWith) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.withplaintime */ \ + CPP(TemporalZonedDateTimePrototypeWithPlainTime) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.withplaindate */ \ + CPP(TemporalZonedDateTimePrototypeWithPlainDate) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.withtimezone */ \ + CPP(TemporalZonedDateTimePrototypeWithTimeZone) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.withcalendar */ \ + CPP(TemporalZonedDateTimePrototypeWithCalendar) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.add */ \ + CPP(TemporalZonedDateTimePrototypeAdd) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.subtract */ \ + CPP(TemporalZonedDateTimePrototypeSubtract) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.until */ \ + CPP(TemporalZonedDateTimePrototypeUntil) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.since */ \ + CPP(TemporalZonedDateTimePrototypeSince) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.round */ \ + CPP(TemporalZonedDateTimePrototypeRound) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.equals */ \ + CPP(TemporalZonedDateTimePrototypeEquals) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.tostring */ \ + CPP(TemporalZonedDateTimePrototypeToString) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.tojson */ \ + CPP(TemporalZonedDateTimePrototypeToJSON) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.valueof */ \ + CPP(TemporalZonedDateTimePrototypeValueOf) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.startofday */ \ + CPP(TemporalZonedDateTimePrototypeStartOfDay) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.toinstant */ \ + CPP(TemporalZonedDateTimePrototypeToInstant) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.toplaindate */ \ + CPP(TemporalZonedDateTimePrototypeToPlainDate) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.toplaintime */ \ + CPP(TemporalZonedDateTimePrototypeToPlainTime) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.toplaindatetime */ \ + CPP(TemporalZonedDateTimePrototypeToPlainDateTime) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.toplainyearmonth */ \ + CPP(TemporalZonedDateTimePrototypeToPlainYearMonth) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.toplainmonthday */ \ + CPP(TemporalZonedDateTimePrototypeToPlainMonthDay) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.getisofields */ \ + CPP(TemporalZonedDateTimePrototypeGetISOFields) \ + \
+ /* Temporal.Duration */ \ + /* Temporal #sec-temporal.duration */ \ + CPP(TemporalDurationConstructor) \ + /* Temporal #sec-temporal.duration.from */ \ + CPP(TemporalDurationFrom) \ + /* Temporal #sec-temporal.duration.compare */ \ + CPP(TemporalDurationCompare) \ + /* Temporal #sec-get-temporal.duration.prototype.years */ \ + CPP(TemporalDurationPrototypeYears) \ + /* Temporal #sec-get-temporal.duration.prototype.months */ \ + CPP(TemporalDurationPrototypeMonths) \ + /* Temporal #sec-get-temporal.duration.prototype.weeks */ \ + CPP(TemporalDurationPrototypeWeeks) \ + /* Temporal #sec-get-temporal.duration.prototype.days */ \ + CPP(TemporalDurationPrototypeDays) \ + /* Temporal #sec-get-temporal.duration.prototype.hours */ \ + CPP(TemporalDurationPrototypeHours) \ + /* Temporal #sec-get-temporal.duration.prototype.minutes */ \ + CPP(TemporalDurationPrototypeMinutes) \ + /* Temporal #sec-get-temporal.duration.prototype.seconds */ \ + CPP(TemporalDurationPrototypeSeconds) \ + /* Temporal #sec-get-temporal.duration.prototype.milliseconds */ \ + CPP(TemporalDurationPrototypeMilliseconds) \ + /* Temporal #sec-get-temporal.duration.prototype.microseconds */ \ + CPP(TemporalDurationPrototypeMicroseconds) \ + /* Temporal #sec-get-temporal.duration.prototype.nanoseconds */ \ + CPP(TemporalDurationPrototypeNanoseconds) \ + /* Temporal #sec-get-temporal.duration.prototype.sign */ \ + CPP(TemporalDurationPrototypeSign) \ + /* Temporal #sec-get-temporal.duration.prototype.blank */ \ + CPP(TemporalDurationPrototypeBlank) \ + /* Temporal #sec-temporal.duration.prototype.with */ \ + CPP(TemporalDurationPrototypeWith) \ + /* Temporal #sec-temporal.duration.prototype.negated */ \ + CPP(TemporalDurationPrototypeNegated) \ + /* Temporal #sec-temporal.duration.prototype.abs */ \ + CPP(TemporalDurationPrototypeAbs) \ + /* Temporal #sec-temporal.duration.prototype.add */ \ + CPP(TemporalDurationPrototypeAdd) \ + /* Temporal #sec-temporal.duration.prototype.subtract */ \ + CPP(TemporalDurationPrototypeSubtract) \ + /* Temporal #sec-temporal.duration.prototype.round */ \ + CPP(TemporalDurationPrototypeRound) \ + /* Temporal #sec-temporal.duration.prototype.total */ \ + CPP(TemporalDurationPrototypeTotal) \ + /* Temporal #sec-temporal.duration.prototype.tostring */ \ + CPP(TemporalDurationPrototypeToString) \ + /* Temporal #sec-temporal.duration.prototype.tojson */ \ + CPP(TemporalDurationPrototypeToJSON) \ + /* Temporal #sec-temporal.duration.prototype.valueof */ \ + CPP(TemporalDurationPrototypeValueOf) \ + \ + /* Temporal.Instant */ \ + /* Temporal #sec-temporal.instant */ \ + CPP(TemporalInstantConstructor) \ + /* Temporal #sec-temporal.instant.from */ \ + CPP(TemporalInstantFrom) \ + /* Temporal #sec-temporal.instant.fromepochseconds */ \ + CPP(TemporalInstantFromEpochSeconds) \ + /* Temporal #sec-temporal.instant.fromepochmilliseconds */ \ + CPP(TemporalInstantFromEpochMilliseconds) \ + /* Temporal #sec-temporal.instant.fromepochmicroseconds */ \ + CPP(TemporalInstantFromEpochMicroseconds) \ + /* Temporal #sec-temporal.instant.fromepochnanoseconds */ \ + CPP(TemporalInstantFromEpochNanoseconds) \ + /* Temporal #sec-temporal.instant.compare */ \ + CPP(TemporalInstantCompare) \ + /* Temporal #sec-get-temporal.instant.prototype.epochseconds */ \ + CPP(TemporalInstantPrototypeEpochSeconds) \ + /* Temporal #sec-get-temporal.instant.prototype.epochmilliseconds */ \ + CPP(TemporalInstantPrototypeEpochMilliseconds) \ + /* Temporal #sec-get-temporal.instant.prototype.epochmicroseconds */ \ +
CPP(TemporalInstantPrototypeEpochMicroseconds) \ + /* Temporal #sec-get-temporal.instant.prototype.epochnanoseconds */ \ + CPP(TemporalInstantPrototypeEpochNanoseconds) \ + /* Temporal #sec-temporal.instant.prototype.add */ \ + CPP(TemporalInstantPrototypeAdd) \ + /* Temporal #sec-temporal.instant.prototype.subtract */ \ + CPP(TemporalInstantPrototypeSubtract) \ + /* Temporal #sec-temporal.instant.prototype.until */ \ + CPP(TemporalInstantPrototypeUntil) \ + /* Temporal #sec-temporal.instant.prototype.since */ \ + CPP(TemporalInstantPrototypeSince) \ + /* Temporal #sec-temporal.instant.prototype.round */ \ + CPP(TemporalInstantPrototypeRound) \ + /* Temporal #sec-temporal.instant.prototype.equals */ \ + CPP(TemporalInstantPrototypeEquals) \ + /* Temporal #sec-temporal.instant.prototype.tostring */ \ + CPP(TemporalInstantPrototypeToString) \ + /* Temporal #sec-temporal.instant.prototype.tojson */ \ + CPP(TemporalInstantPrototypeToJSON) \ + /* Temporal #sec-temporal.instant.prototype.valueof */ \ + CPP(TemporalInstantPrototypeValueOf) \ + /* Temporal #sec-temporal.instant.prototype.tozoneddatetime */ \ + CPP(TemporalInstantPrototypeToZonedDateTime) \ + /* Temporal #sec-temporal.instant.prototype.tozoneddatetimeiso */ \ + CPP(TemporalInstantPrototypeToZonedDateTimeISO) \ + \ + /* Temporal.PlainYearMonth */ \ + /* Temporal #sec-temporal.plainyearmonth */ \ + CPP(TemporalPlainYearMonthConstructor) \ + /* Temporal #sec-temporal.plainyearmonth.from */ \ + CPP(TemporalPlainYearMonthFrom) \ + /* Temporal #sec-temporal.plainyearmonth.compare */ \ + CPP(TemporalPlainYearMonthCompare) \ + /* Temporal #sec-get-temporal.plainyearmonth.prototype.calendar */ \ + CPP(TemporalPlainYearMonthPrototypeCalendar) \ + /* Temporal #sec-get-temporal.plainyearmonth.prototype.year */ \ + CPP(TemporalPlainYearMonthPrototypeYear) \ + /* Temporal #sec-get-temporal.plainyearmonth.prototype.month */ \ + CPP(TemporalPlainYearMonthPrototypeMonth) \ + /* Temporal #sec-get-temporal.plainyearmonth.prototype.monthcode */ \ + CPP(TemporalPlainYearMonthPrototypeMonthCode) \ + /* Temporal #sec-get-temporal.plainyearmonth.prototype.daysinyear */ \ + CPP(TemporalPlainYearMonthPrototypeDaysInYear) \ + /* Temporal #sec-get-temporal.plainyearmonth.prototype.daysinmonth */ \ + CPP(TemporalPlainYearMonthPrototypeDaysInMonth) \ + /* Temporal #sec-get-temporal.plainyearmonth.prototype.monthsinyear */ \ + CPP(TemporalPlainYearMonthPrototypeMonthsInYear) \ + /* Temporal #sec-get-temporal.plainyearmonth.prototype.inleapyear */ \ + CPP(TemporalPlainYearMonthPrototypeInLeapYear) \ + /* Temporal #sec-temporal.plainyearmonth.prototype.with */ \ + CPP(TemporalPlainYearMonthPrototypeWith) \ + /* Temporal #sec-temporal.plainyearmonth.prototype.add */ \ + CPP(TemporalPlainYearMonthPrototypeAdd) \ + /* Temporal #sec-temporal.plainyearmonth.prototype.subtract */ \ + CPP(TemporalPlainYearMonthPrototypeSubtract) \ + /* Temporal #sec-temporal.plainyearmonth.prototype.until */ \ + CPP(TemporalPlainYearMonthPrototypeUntil) \ + /* Temporal #sec-temporal.plainyearmonth.prototype.since */ \ + CPP(TemporalPlainYearMonthPrototypeSince) \ + /* Temporal #sec-temporal.plainyearmonth.prototype.equals */ \ + CPP(TemporalPlainYearMonthPrototypeEquals) \ + /* Temporal #sec-temporal.plainyearmonth.prototype.tostring */ \ + CPP(TemporalPlainYearMonthPrototypeToString) \ + /* Temporal #sec-temporal.plainyearmonth.prototype.tojson */ \ + CPP(TemporalPlainYearMonthPrototypeToJSON) \ + /* Temporal #sec-temporal.plainyearmonth.prototype.valueof */ \ + CPP(TemporalPlainYearMonthPrototypeValueOf) \ + /*
Temporal #sec-temporal.plainyearmonth.prototype.toplaindate */ \ + CPP(TemporalPlainYearMonthPrototypeToPlainDate) \ + /* Temporal #sec-temporal.plainyearmonth.prototype.getisofields */ \ + CPP(TemporalPlainYearMonthPrototypeGetISOFields) \ + \ + /* Temporal.PlainMonthDay */ \ + /* Temporal #sec-temporal.plainmonthday */ \ + CPP(TemporalPlainMonthDayConstructor) \ + /* Temporal #sec-temporal.plainmonthday.from */ \ + CPP(TemporalPlainMonthDayFrom) \ + /* There is no compare for PlainMonthDay */ \ + /* See https://github.com/tc39/proposal-temporal/issues/1547 */ \ + /* Temporal #sec-get-temporal.plainmonthday.prototype.calendar */ \ + CPP(TemporalPlainMonthDayPrototypeCalendar) \ + /* Temporal #sec-get-temporal.plainmonthday.prototype.monthcode */ \ + CPP(TemporalPlainMonthDayPrototypeMonthCode) \ + /* Temporal #sec-get-temporal.plainmonthday.prototype.day */ \ + CPP(TemporalPlainMonthDayPrototypeDay) \ + /* Temporal #sec-temporal.plainmonthday.prototype.with */ \ + CPP(TemporalPlainMonthDayPrototypeWith) \ + /* Temporal #sec-temporal.plainmonthday.prototype.equals */ \ + CPP(TemporalPlainMonthDayPrototypeEquals) \ + /* Temporal #sec-temporal.plainmonthday.prototype.tostring */ \ + CPP(TemporalPlainMonthDayPrototypeToString) \ + /* Temporal #sec-temporal.plainmonthday.prototype.tojson */ \ + CPP(TemporalPlainMonthDayPrototypeToJSON) \ + /* Temporal #sec-temporal.plainmonthday.prototype.valueof */ \ + CPP(TemporalPlainMonthDayPrototypeValueOf) \ + /* Temporal #sec-temporal.plainmonthday.prototype.toplaindate */ \ + CPP(TemporalPlainMonthDayPrototypeToPlainDate) \ + /* Temporal #sec-temporal.plainmonthday.prototype.getisofields */ \ + CPP(TemporalPlainMonthDayPrototypeGetISOFields) \ + \ + /* Temporal.TimeZone */ \ + /* Temporal #sec-temporal.timezone */ \ + CPP(TemporalTimeZoneConstructor) \ + /* Temporal #sec-temporal.timezone.from */ \ + CPP(TemporalTimeZoneFrom) \ + /* Temporal #sec-get-temporal.timezone.prototype.id */ \ + CPP(TemporalTimeZonePrototypeId) \ + /* Temporal #sec-temporal.timezone.prototype.getoffsetnanosecondsfor */ \ + CPP(TemporalTimeZonePrototypeGetOffsetNanosecondsFor) \ + /* Temporal #sec-temporal.timezone.prototype.getoffsetstringfor */ \ + CPP(TemporalTimeZonePrototypeGetOffsetStringFor) \ + /* Temporal #sec-temporal.timezone.prototype.getplaindatetimefor */ \ + CPP(TemporalTimeZonePrototypeGetPlainDateTimeFor) \ + /* Temporal #sec-temporal.timezone.prototype.getinstantfor */ \ + CPP(TemporalTimeZonePrototypeGetInstantFor) \ + /* Temporal #sec-temporal.timezone.prototype.getpossibleinstantsfor */ \ + CPP(TemporalTimeZonePrototypeGetPossibleInstantsFor) \ + /* Temporal #sec-temporal.timezone.prototype.getnexttransition */ \ + CPP(TemporalTimeZonePrototypeGetNextTransition) \ + /* Temporal #sec-temporal.timezone.prototype.getprevioustransition */ \ + CPP(TemporalTimeZonePrototypeGetPreviousTransition) \ + /* Temporal #sec-temporal.timezone.prototype.tostring */ \ + CPP(TemporalTimeZonePrototypeToString) \ + /* Temporal #sec-temporal.timezone.prototype.tojson */ \ + CPP(TemporalTimeZonePrototypeToJSON) \ + \ + /* Temporal.Calendar */ \ + /* Temporal #sec-temporal.calendar */ \ + CPP(TemporalCalendarConstructor) \ + /* Temporal #sec-temporal.calendar.from */ \ + CPP(TemporalCalendarFrom) \ + /* Temporal #sec-get-temporal.calendar.prototype.id */ \ + CPP(TemporalCalendarPrototypeId) \ + /* Temporal #sec-temporal.calendar.prototype.datefromfields */ \ + CPP(TemporalCalendarPrototypeDateFromFields) \ + /* Temporal #sec-temporal.calendar.prototype.yearmonthfromfields */ \ +
CPP(TemporalCalendarPrototypeYearMonthFromFields) \ + /* Temporal #sec-temporal.calendar.prototype.monthdayfromfields */ \ + CPP(TemporalCalendarPrototypeMonthDayFromFields) \ + /* Temporal #sec-temporal.calendar.prototype.dateadd */ \ + CPP(TemporalCalendarPrototypeDateAdd) \ + /* Temporal #sec-temporal.calendar.prototype.dateuntil */ \ + CPP(TemporalCalendarPrototypeDateUntil) \ + /* Temporal #sec-temporal.calendar.prototype.year */ \ + CPP(TemporalCalendarPrototypeYear) \ + /* Temporal #sec-temporal.calendar.prototype.month */ \ + CPP(TemporalCalendarPrototypeMonth) \ + /* Temporal #sec-temporal.calendar.prototype.monthcode */ \ + CPP(TemporalCalendarPrototypeMonthCode) \ + /* Temporal #sec-temporal.calendar.prototype.day */ \ + CPP(TemporalCalendarPrototypeDay) \ + /* Temporal #sec-temporal.calendar.prototype.dayofweek */ \ + CPP(TemporalCalendarPrototypeDayOfWeek) \ + /* Temporal #sec-temporal.calendar.prototype.dayofyear */ \ + CPP(TemporalCalendarPrototypeDayOfYear) \ + /* Temporal #sec-temporal.calendar.prototype.weekofyear */ \ + CPP(TemporalCalendarPrototypeWeekOfYear) \ + /* Temporal #sec-temporal.calendar.prototype.daysinweek */ \ + CPP(TemporalCalendarPrototypeDaysInWeek) \ + /* Temporal #sec-temporal.calendar.prototype.daysinmonth */ \ + CPP(TemporalCalendarPrototypeDaysInMonth) \ + /* Temporal #sec-temporal.calendar.prototype.daysinyear */ \ + CPP(TemporalCalendarPrototypeDaysInYear) \ + /* Temporal #sec-temporal.calendar.prototype.monthsinyear */ \ + CPP(TemporalCalendarPrototypeMonthsInYear) \ + /* Temporal #sec-temporal.calendar.prototype.inleapyear */ \ + CPP(TemporalCalendarPrototypeInLeapYear) \ + /* Temporal #sec-temporal.calendar.prototype.fields */ \ + CPP(TemporalCalendarPrototypeFields) \ + /* Temporal #sec-temporal.calendar.prototype.mergefields */ \ + CPP(TemporalCalendarPrototypeMergeFields) \ + /* Temporal #sec-temporal.calendar.prototype.tostring */ \ + CPP(TemporalCalendarPrototypeToString) \ + /* Temporal #sec-temporal.calendar.prototype.tojson */ \ + CPP(TemporalCalendarPrototypeToJSON) #define BUILTIN_LIST_BASE(CPP, TFJ, TFC, TFS, TFH, ASM) \ BUILTIN_LIST_BASE_TIER0(CPP, TFJ, TFC, TFS, TFH, ASM) \ @@ -1203,6 +1784,45 @@ namespace internal { /* ES #sec-string.prototype.touppercase */ \ CPP(StringPrototypeToUpperCaseIntl) \ TFS(StringToLowerCaseIntl, kString) \ + \ + /* Temporal */ \ + /* Temporal #sec-temporal.calendar.prototype.era */ \ + CPP(TemporalCalendarPrototypeEra) \ + /* Temporal #sec-temporal.calendar.prototype.erayear */ \ + CPP(TemporalCalendarPrototypeEraYear) \ + /* Temporal #sec-temporal.duration.prototype.tolocalestring */ \ + CPP(TemporalDurationPrototypeToLocaleString) \ + /* Temporal #sec-temporal.instant.prototype.tolocalestring */ \ + CPP(TemporalInstantPrototypeToLocaleString) \ + /* Temporal #sec-get-temporal.plaindate.prototype.era */ \ + CPP(TemporalPlainDatePrototypeEra) \ + /* Temporal #sec-get-temporal.plaindate.prototype.erayear */ \ + CPP(TemporalPlainDatePrototypeEraYear) \ + /* Temporal #sec-temporal.plaindate.prototype.tolocalestring */ \ + CPP(TemporalPlainDatePrototypeToLocaleString) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.era */ \ + CPP(TemporalPlainDateTimePrototypeEra) \ + /* Temporal #sec-get-temporal.plaindatetime.prototype.erayear */ \ + CPP(TemporalPlainDateTimePrototypeEraYear) \ + /* Temporal #sec-temporal.plaindatetime.prototype.tolocalestring */ \ + CPP(TemporalPlainDateTimePrototypeToLocaleString) \ + /* Temporal #sec-temporal.plainmonthday.prototype.tolocalestring */ \ + 
CPP(TemporalPlainMonthDayPrototypeToLocaleString) \ + /* Temporal #sec-temporal.plaintime.prototype.tolocalestring */ \ + CPP(TemporalPlainTimePrototypeToLocaleString) \ + /* Temporal #sec-get-temporal.plainyearmonth.prototype.era */ \ + CPP(TemporalPlainYearMonthPrototypeEra) \ + /* Temporal #sec-get-temporal.plainyearmonth.prototype.erayear */ \ + CPP(TemporalPlainYearMonthPrototypeEraYear) \ + /* Temporal #sec-temporal.plainyearmonth.prototype.tolocalestring */ \ + CPP(TemporalPlainYearMonthPrototypeToLocaleString) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.era */ \ + CPP(TemporalZonedDateTimePrototypeEra) \ + /* Temporal #sec-get-temporal.zoneddatetime.prototype.erayear */ \ + CPP(TemporalZonedDateTimePrototypeEraYear) \ + /* Temporal #sec-temporal.zoneddatetime.prototype.tolocalestring */ \ + CPP(TemporalZonedDateTimePrototypeToLocaleString) \ + \ CPP(V8BreakIteratorConstructor) \ CPP(V8BreakIteratorInternalAdoptText) \ CPP(V8BreakIteratorInternalBreakType) \ diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc index f3dd16f1d056cb..ed039cc680a18d 100644 --- a/deps/v8/src/builtins/builtins-handler-gen.cc +++ b/deps/v8/src/builtins/builtins-handler-gen.cc @@ -58,11 +58,21 @@ void Builtins::Generate_KeyedStoreIC_Megamorphic( KeyedStoreGenericGenerator::Generate(state); } +void Builtins::Generate_KeyedDefineOwnIC_Megamorphic( + compiler::CodeAssemblerState* state) { + KeyedDefineOwnGenericGenerator::Generate(state); +} + void Builtins::Generate_StoreIC_NoFeedback( compiler::CodeAssemblerState* state) { StoreICNoFeedbackGenerator::Generate(state); } +void Builtins::Generate_StoreOwnIC_NoFeedback( + compiler::CodeAssemblerState* state) { + StoreOwnICNoFeedbackGenerator::Generate(state); +} + // All possible fast-to-fast transitions. Transitions to dictionary mode are not // handled by ElementsTransitionAndStore builtins. 
#define ELEMENTS_KIND_TRANSITIONS(V) \ diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc index 8a525ef45a4b9a..19bf83cabb48f5 100644 --- a/deps/v8/src/builtins/builtins-ic-gen.cc +++ b/deps/v8/src/builtins/builtins-ic-gen.cc @@ -109,6 +109,20 @@ void Builtins::Generate_StoreICBaseline(compiler::CodeAssemblerState* state) { AccessorAssembler assembler(state); assembler.GenerateStoreICBaseline(); } +void Builtins::Generate_StoreOwnIC(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateStoreOwnIC(); +} +void Builtins::Generate_StoreOwnICTrampoline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateStoreOwnICTrampoline(); +} +void Builtins::Generate_StoreOwnICBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateStoreOwnICBaseline(); +} void Builtins::Generate_KeyedStoreIC(compiler::CodeAssemblerState* state) { AccessorAssembler assembler(state); assembler.GenerateKeyedStoreIC(); @@ -123,6 +137,20 @@ void Builtins::Generate_KeyedStoreICBaseline( AccessorAssembler assembler(state); assembler.GenerateKeyedStoreICBaseline(); } +void Builtins::Generate_KeyedDefineOwnIC(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedDefineOwnIC(); +} +void Builtins::Generate_KeyedDefineOwnICTrampoline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedDefineOwnICTrampoline(); +} +void Builtins::Generate_KeyedDefineOwnICBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedDefineOwnICBaseline(); +} void Builtins::Generate_StoreInArrayLiteralIC( compiler::CodeAssemblerState* state) { AccessorAssembler assembler(state); diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc index d16474feaebcb5..11c11b00b03915 100644 --- a/deps/v8/src/builtins/builtins-iterator-gen.cc +++ b/deps/v8/src/builtins/builtins-iterator-gen.cc @@ -316,6 +316,10 @@ void IteratorBuiltinsAssembler::FastIterableToList( TVariable<Object>* var_result, Label* slow) { Label done(this), check_string(this), check_map(this), check_set(this); + // Always call the `next()` builtins when the debugger is + // active, to ensure we capture side-effects correctly. + GotoIf(IsDebugActive(), slow); + GotoIfNot( Word32Or(IsFastJSArrayWithNoCustomIteration(context, iterable), IsFastJSArrayForReadWithNoCustomIteration(context, iterable)),
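// The IsDebugActive() bail-out above exists because this fast path builds the
// result list straight from the array's backing store, without any observable
// iterator.next() calls; the debugger's side-effect checker has to see those
// calls, so it is sent down the generic path instead. A rough sketch of the
// guarded fast path, using only the names visible in this hunk (the elided
// copy loop is assumed):
//
//   GotoIf(IsDebugActive(), slow);  // debugger attached: spec-observable path
//   GotoIfNot(Word32Or(IsFastJSArrayWithNoCustomIteration(context, iterable),
//                      IsFastJSArrayForReadWithNoCustomIteration(context, iterable)),
//             slow);
//   // ... copy elements directly; no user-visible next() calls ...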
diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc index 6c677e922d9b2f..ab7dcf832ff10a 100644 --- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc +++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc @@ -331,7 +331,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask( BIND(&if_exception); { // Report unhandled exceptions from microtasks. - CallRuntime(Runtime::kReportMessageFromMicrotask, current_context, + CallRuntime(Runtime::kReportMessageFromMicrotask, GetCurrentContext(), var_exception.value()); RewindEnteredContext(saved_entered_context_count); SetCurrentContext(current_context); diff --git a/deps/v8/src/builtins/builtins-temporal.cc b/deps/v8/src/builtins/builtins-temporal.cc new file mode 100644 index 00000000000000..bbffa68a1df031 --- /dev/null +++ b/deps/v8/src/builtins/builtins-temporal.cc @@ -0,0 +1,631 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/builtins/builtins-utils-inl.h" +#include "src/builtins/builtins.h" +#include "src/objects/bigint.h" +#include "src/objects/js-temporal-objects-inl.h" + +namespace v8 { +namespace internal { + +#define TO_BE_IMPLEMENTED(id) \ + BUILTIN(id) { \ + HandleScope scope(isolate); \ + UNIMPLEMENTED(); \ + } + +/* Temporal #sec-temporal.now.timezone */ +TO_BE_IMPLEMENTED(TemporalNowTimeZone) +/* Temporal #sec-temporal.now.instant */ +TO_BE_IMPLEMENTED(TemporalNowInstant) +/* Temporal #sec-temporal.now.plaindatetime */ +TO_BE_IMPLEMENTED(TemporalNowPlainDateTime) +/* Temporal #sec-temporal.now.plaindatetimeiso */ +TO_BE_IMPLEMENTED(TemporalNowPlainDateTimeISO) +/* Temporal #sec-temporal.now.zoneddatetime */ +TO_BE_IMPLEMENTED(TemporalNowZonedDateTime) +/* Temporal #sec-temporal.now.zoneddatetimeiso */ +TO_BE_IMPLEMENTED(TemporalNowZonedDateTimeISO) +/* Temporal #sec-temporal.now.plaindate */ +TO_BE_IMPLEMENTED(TemporalNowPlainDate) +/* Temporal #sec-temporal.now.plaindateiso */ +TO_BE_IMPLEMENTED(TemporalNowPlainDateISO) +/* There is no Temporal.now.plainTime */ +/* See https://github.com/tc39/proposal-temporal/issues/1540 */ +/* Temporal #sec-temporal.now.plaintimeiso */ +TO_BE_IMPLEMENTED(TemporalNowPlainTimeISO) + +/* Temporal.PlainDate */ +/* Temporal #sec-temporal.plaindate */ +TO_BE_IMPLEMENTED(TemporalPlainDateConstructor) +/* Temporal #sec-temporal.plaindate.from */ +TO_BE_IMPLEMENTED(TemporalPlainDateFrom) +/* Temporal #sec-temporal.plaindate.compare */ +TO_BE_IMPLEMENTED(TemporalPlainDateCompare) +/* Temporal #sec-get-temporal.plaindate.prototype.calendar */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeCalendar) +/* Temporal #sec-get-temporal.plaindate.prototype.year */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeYear) +/* Temporal #sec-get-temporal.plaindate.prototype.month */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeMonth) +/* Temporal #sec-get-temporal.plaindate.prototype.monthcode */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeMonthCode) +/* Temporal #sec-get-temporal.plaindate.prototype.day */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeDay) +/* Temporal #sec-get-temporal.plaindate.prototype.dayofweek */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeDayOfWeek) +/* Temporal #sec-get-temporal.plaindate.prototype.dayofyear */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeDayOfYear) +/* Temporal #sec-get-temporal.plaindate.prototype.weekofyear */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeWeekOfYear) +/* Temporal #sec-get-temporal.plaindate.prototype.daysinweek */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeDaysInWeek) +/* Temporal #sec-get-temporal.plaindate.prototype.daysinmonth */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeDaysInMonth) +/* Temporal #sec-get-temporal.plaindate.prototype.daysinyear */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeDaysInYear)
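+// Aside: per the TO_BE_IMPLEMENTED macro defined near the top of this file,
+// each stub in this file expands to a builtin that aborts if it is ever
+// reached; e.g. TO_BE_IMPLEMENTED(TemporalNowInstant) becomes roughly:
+//
+//   BUILTIN(TemporalNowInstant) {
+//     HandleScope scope(isolate);
+//     UNIMPLEMENTED();
+//   }
+//
+// so the full Temporal surface is declared before any semantics land.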
+/* Temporal #sec-get-temporal.plaindate.prototype.monthsinyear */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeMonthsInYear) +/* Temporal #sec-get-temporal.plaindate.prototype.inleapyear */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeInLeapYear) +/* Temporal #sec-temporal.plaindate.prototype.toplainyearmonth */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeToPlainYearMonth) +/* Temporal #sec-temporal.plaindate.prototype.toplainmonthday */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeToPlainMonthDay) +/* Temporal #sec-temporal.plaindate.prototype.getisofields */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeGetISOFields) +/* Temporal #sec-temporal.plaindate.prototype.add */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeAdd) +/* Temporal #sec-temporal.plaindate.prototype.subtract */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeSubtract) +/* Temporal #sec-temporal.plaindate.prototype.with */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeWith) +/* Temporal #sec-temporal.plaindate.prototype.withcalendar */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeWithCalendar) +/* Temporal #sec-temporal.plaindate.prototype.until */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeUntil) +/* Temporal #sec-temporal.plaindate.prototype.since */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeSince) +/* Temporal #sec-temporal.plaindate.prototype.equals */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeEquals) +/* Temporal #sec-temporal.plaindate.prototype.toplaindatetime */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeToPlainDateTime) +/* Temporal #sec-temporal.plaindate.prototype.tozoneddatetime */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeToZonedDateTime) +/* Temporal #sec-temporal.plaindate.prototype.tostring */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeToString) +/* Temporal #sec-temporal.plaindate.prototype.tojson */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeToJSON) +/* Temporal #sec-temporal.plaindate.prototype.valueof */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeValueOf) + +/* Temporal.PlainTime */ +/* Temporal #sec-temporal.plaintime */ +TO_BE_IMPLEMENTED(TemporalPlainTimeConstructor) +/* Temporal #sec-temporal.plaintime.from */ +TO_BE_IMPLEMENTED(TemporalPlainTimeFrom) +/* Temporal #sec-temporal.plaintime.compare */ +TO_BE_IMPLEMENTED(TemporalPlainTimeCompare) +/* Temporal #sec-get-temporal.plaintime.prototype.calendar */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeCalendar) +/* Temporal #sec-get-temporal.plaintime.prototype.hour */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeHour) +/* Temporal #sec-get-temporal.plaintime.prototype.minute */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeMinute) +/* Temporal #sec-get-temporal.plaintime.prototype.second */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeSecond) +/* Temporal #sec-get-temporal.plaintime.prototype.millisecond */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeMillisecond) +/* Temporal #sec-get-temporal.plaintime.prototype.microsecond */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeMicrosecond) +/* Temporal #sec-get-temporal.plaintime.prototype.nanosecond */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeNanosecond) +/* Temporal #sec-temporal.plaintime.prototype.add */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeAdd) +/* Temporal #sec-temporal.plaintime.prototype.subtract */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeSubtract) +/* Temporal #sec-temporal.plaintime.prototype.with */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeWith) +/* Temporal #sec-temporal.plaintime.prototype.until */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeUntil) +/* Temporal
#sec-temporal.plaintime.prototype.since */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeSince) +/* Temporal #sec-temporal.plaintime.prototype.round */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeRound) +/* Temporal #sec-temporal.plaintime.prototype.equals */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeEquals) +/* Temporal #sec-temporal.plaintime.prototype.toplaindatetime */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeToPlainDateTime) +/* Temporal #sec-temporal.plaintime.prototype.tozoneddatetime */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeToZonedDateTime) +/* Temporal #sec-temporal.plaintime.prototype.getisofields */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeGetISOFields) +/* Temporal #sec-temporal.plaintime.prototype.tostring */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeToString) +/* Temporal #sec-temporal.plaintime.prototype.tojson */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeToJSON) +/* Temporal #sec-temporal.plaintime.prototype.valueof */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeValueOf) + +/* Temporal.PlainDateTime */ +/* Temporal #sec-temporal.plaindatetime */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimeConstructor) +/* Temporal #sec-temporal.plaindatetime.from */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimeFrom) +/* Temporal #sec-temporal.plaindatetime.compare */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimeCompare) +/* Temporal #sec-get-temporal.plaindatetime.prototype.calendar */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeCalendar) +/* Temporal #sec-get-temporal.plaindatetime.prototype.year */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeYear) +/* Temporal #sec-get-temporal.plaindatetime.prototype.month */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeMonth) +/* Temporal #sec-get-temporal.plaindatetime.prototype.monthcode */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeMonthCode) +/* Temporal #sec-get-temporal.plaindatetime.prototype.day */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeDay) +/* Temporal #sec-get-temporal.plaindatetime.prototype.hour */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeHour) +/* Temporal #sec-get-temporal.plaindatetime.prototype.minute */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeMinute) +/* Temporal #sec-get-temporal.plaindatetime.prototype.second */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeSecond) +/* Temporal #sec-get-temporal.plaindatetime.prototype.millisecond */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeMillisecond) +/* Temporal #sec-get-temporal.plaindatetime.prototype.microsecond */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeMicrosecond) +/* Temporal #sec-get-temporal.plaindatetime.prototype.nanosecond */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeNanosecond) +/* Temporal #sec-get-temporal.plaindatetime.prototype.dayofweek */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeDayOfWeek) +/* Temporal #sec-get-temporal.plaindatetime.prototype.dayofyear */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeDayOfYear) +/* Temporal #sec-get-temporal.plaindatetime.prototype.weekofyear */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeWeekOfYear) +/* Temporal #sec-get-temporal.plaindatetime.prototype.daysinweek */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeDaysInWeek) +/* Temporal #sec-get-temporal.plaindatetime.prototype.daysinmonth */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeDaysInMonth) +/* Temporal #sec-get-temporal.plaindatetime.prototype.daysinyear */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeDaysInYear) +/* Temporal
#sec-get-temporal.plaindatetime.prototype.monthsinyear */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeMonthsInYear) +/* Temporal #sec-get-temporal.plaindatetime.prototype.inleapyear */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeInLeapYear) +/* Temporal #sec-temporal.plaindatetime.prototype.with */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeWith) +/* Temporal #sec-temporal.plaindatetime.prototype.withplaintime */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeWithPlainTime) +/* Temporal #sec-temporal.plaindatetime.prototype.withplaindate */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeWithPlainDate) +/* Temporal #sec-temporal.plaindatetime.prototype.withcalendar */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeWithCalendar) +/* Temporal #sec-temporal.plaindatetime.prototype.add */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeAdd) +/* Temporal #sec-temporal.plaindatetime.prototype.subtract */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeSubtract) +/* Temporal #sec-temporal.plaindatetime.prototype.until */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeUntil) +/* Temporal #sec-temporal.plaindatetime.prototype.since */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeSince) +/* Temporal #sec-temporal.plaindatetime.prototype.round */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeRound) +/* Temporal #sec-temporal.plaindatetime.prototype.equals */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeEquals) +/* Temporal #sec-temporal.plaindatetime.prototype.tostring */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeToString) +/* Temporal #sec-temporal.plaindatetime.prototype.tojson */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeToJSON) +/* Temporal #sec-temporal.plaindatetime.prototype.valueof */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeValueOf) +/* Temporal #sec-temporal.plaindatetime.prototype.tozoneddatetime */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeToZonedDateTime) +/* Temporal #sec-temporal.plaindatetime.prototype.toplaindate */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeToPlainDate) +/* Temporal #sec-temporal.plaindatetime.prototype.toplainyearmonth */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeToPlainYearMonth) +/* Temporal #sec-temporal.plaindatetime.prototype.toplainmonthday */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeToPlainMonthDay) +/* Temporal #sec-temporal.plaindatetime.prototype.toplaintime */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeToPlainTime) +/* Temporal #sec-temporal.plaindatetime.prototype.getisofields */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeGetISOFields) + +/* Temporal.ZonedDateTime */ +/* Temporal #sec-temporal.zoneddatetime */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimeConstructor) +/* Temporal #sec-temporal.zoneddatetime.from */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimeFrom) +/* Temporal #sec-temporal.zoneddatetime.compare */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimeCompare) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.calendar */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeCalendar) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.timezone */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeTimeZone) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.year */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeYear) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.month */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeMonth) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.monthcode */
+TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeMonthCode) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.day */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeDay) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.hour */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeHour) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.minute */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeMinute) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.second */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeSecond) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.millisecond */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeMillisecond) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.microsecond */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeMicrosecond) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.nanosecond */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeNanosecond) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.epochseconds */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeEpochSeconds) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.epochmilliseconds */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeEpochMilliseconds) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.epochmicroseconds */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeEpochMicroseconds) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.epochnanoseconds */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeEpochNanoseconds) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.dayofweek */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeDayOfWeek) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.dayofyear */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeDayOfYear) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.weekofyear */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeWeekOfYear) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.hoursinday */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeHoursInDay) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.daysinweek */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeDaysInWeek) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.daysinmonth */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeDaysInMonth) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.daysinyear */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeDaysInYear) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.monthsinyear */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeMonthsInYear) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.inleapyear */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeInLeapYear) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.offsetnanoseconds */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeOffsetNanoseconds) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.offset */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeOffset) +/* Temporal #sec-temporal.zoneddatetime.prototype.with */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeWith) +/* Temporal #sec-temporal.zoneddatetime.prototype.withplaintime */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeWithPlainTime) +/* Temporal #sec-temporal.zoneddatetime.prototype.withplaindate */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeWithPlainDate) +/* Temporal #sec-temporal.zoneddatetime.prototype.withtimezone */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeWithTimeZone) +/* Temporal #sec-temporal.zoneddatetime.prototype.withcalendar */
+TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeWithCalendar) +/* Temporal #sec-temporal.zoneddatetime.prototype.add */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeAdd) +/* Temporal #sec-temporal.zoneddatetime.prototype.subtract */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeSubtract) +/* Temporal #sec-temporal.zoneddatetime.prototype.until */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeUntil) +/* Temporal #sec-temporal.zoneddatetime.prototype.since */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeSince) +/* Temporal #sec-temporal.zoneddatetime.prototype.round */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeRound) +/* Temporal #sec-temporal.zoneddatetime.prototype.equals */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeEquals) +/* Temporal #sec-temporal.zoneddatetime.prototype.tostring */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToString) +/* Temporal #sec-temporal.zoneddatetime.prototype.tojson */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToJSON) +/* Temporal #sec-temporal.zoneddatetime.prototype.valueof */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeValueOf) +/* Temporal #sec-temporal.zoneddatetime.prototype.startofday */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeStartOfDay) +/* Temporal #sec-temporal.zoneddatetime.prototype.toinstant */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToInstant) +/* Temporal #sec-temporal.zoneddatetime.prototype.toplaindate */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToPlainDate) +/* Temporal #sec-temporal.zoneddatetime.prototype.toplaintime */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToPlainTime) +/* Temporal #sec-temporal.zoneddatetime.prototype.toplaindatetime */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToPlainDateTime) +/* Temporal #sec-temporal.zoneddatetime.prototype.toplainyearmonth */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToPlainYearMonth) +/* Temporal #sec-temporal.zoneddatetime.prototype.toplainmonthday */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToPlainMonthDay) +/* Temporal #sec-temporal.zoneddatetime.prototype.getisofields */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeGetISOFields) + +/* Temporal.Duration */ +/* Temporal #sec-temporal.duration */ +TO_BE_IMPLEMENTED(TemporalDurationConstructor) +/* Temporal #sec-temporal.duration.from */ +TO_BE_IMPLEMENTED(TemporalDurationFrom) +/* Temporal #sec-temporal.duration.compare */ +TO_BE_IMPLEMENTED(TemporalDurationCompare) +/* Temporal #sec-get-temporal.duration.prototype.years */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeYears) +/* Temporal #sec-get-temporal.duration.prototype.months */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeMonths) +/* Temporal #sec-get-temporal.duration.prototype.weeks */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeWeeks) +/* Temporal #sec-get-temporal.duration.prototype.days */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeDays) +/* Temporal #sec-get-temporal.duration.prototype.hours */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeHours) +/* Temporal #sec-get-temporal.duration.prototype.minutes */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeMinutes) +/* Temporal #sec-get-temporal.duration.prototype.seconds */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeSeconds) +/* Temporal #sec-get-temporal.duration.prototype.milliseconds */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeMilliseconds) +/* Temporal #sec-get-temporal.duration.prototype.microseconds */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeMicroseconds) +/* Temporal #sec-get-temporal.duration.prototype.nanoseconds */
+TO_BE_IMPLEMENTED(TemporalDurationPrototypeNanoseconds) +/* Temporal #sec-get-temporal.duration.prototype.sign */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeSign) +/* Temporal #sec-get-temporal.duration.prototype.blank */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeBlank) +/* Temporal #sec-temporal.duration.prototype.with */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeWith) +/* Temporal #sec-temporal.duration.prototype.negated */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeNegated) +/* Temporal #sec-temporal.duration.prototype.abs */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeAbs) +/* Temporal #sec-temporal.duration.prototype.add */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeAdd) +/* Temporal #sec-temporal.duration.prototype.subtract */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeSubtract) +/* Temporal #sec-temporal.duration.prototype.round */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeRound) +/* Temporal #sec-temporal.duration.prototype.total */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeTotal) +/* Temporal #sec-temporal.duration.prototype.tostring */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeToString) +/* Temporal #sec-temporal.duration.prototype.tojson */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeToJSON) +/* Temporal #sec-temporal.duration.prototype.valueof */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeValueOf) + +/* Temporal.Instant */ +/* Temporal #sec-temporal.instant */ +TO_BE_IMPLEMENTED(TemporalInstantConstructor) +/* Temporal #sec-temporal.instant.from */ +TO_BE_IMPLEMENTED(TemporalInstantFrom) +/* Temporal #sec-temporal.instant.fromepochseconds */ +TO_BE_IMPLEMENTED(TemporalInstantFromEpochSeconds) +/* Temporal #sec-temporal.instant.fromepochmilliseconds */ +TO_BE_IMPLEMENTED(TemporalInstantFromEpochMilliseconds) +/* Temporal #sec-temporal.instant.fromepochmicroseconds */ +TO_BE_IMPLEMENTED(TemporalInstantFromEpochMicroseconds) +/* Temporal #sec-temporal.instant.fromepochnanoseconds */ +TO_BE_IMPLEMENTED(TemporalInstantFromEpochNanoseconds) +/* Temporal #sec-temporal.instant.compare */ +TO_BE_IMPLEMENTED(TemporalInstantCompare) +/* Temporal #sec-get-temporal.instant.prototype.epochseconds */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeEpochSeconds) +/* Temporal #sec-get-temporal.instant.prototype.epochmilliseconds */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeEpochMilliseconds) +/* Temporal #sec-get-temporal.instant.prototype.epochmicroseconds */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeEpochMicroseconds) +/* Temporal #sec-get-temporal.instant.prototype.epochnanoseconds */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeEpochNanoseconds) +/* Temporal #sec-temporal.instant.prototype.add */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeAdd) +/* Temporal #sec-temporal.instant.prototype.subtract */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeSubtract) +/* Temporal #sec-temporal.instant.prototype.until */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeUntil) +/* Temporal #sec-temporal.instant.prototype.since */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeSince) +/* Temporal #sec-temporal.instant.prototype.round */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeRound) +/* Temporal #sec-temporal.instant.prototype.equals */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeEquals) +/* Temporal #sec-temporal.instant.prototype.tostring */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeToString) +/* Temporal #sec-temporal.instant.prototype.tojson */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeToJSON) +/* Temporal #sec-temporal.instant.prototype.valueof */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeValueOf) +/* Temporal
#sec-temporal.instant.prototype.tozoneddatetime */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeToZonedDateTime) +/* Temporal #sec-temporal.instant.prototype.tozoneddatetimeiso */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeToZonedDateTimeISO) + +/* Temporal.PlainYearMonth */ +/* Temporal #sec-temporal.plainyearmonth */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthConstructor) +/* Temporal #sec-temporal.plainyearmonth.from */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthFrom) +/* Temporal #sec-temporal.plainyearmonth.compare */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthCompare) +/* Temporal #sec-get-temporal.plainyearmonth.prototype.calendar */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeCalendar) +/* Temporal #sec-get-temporal.plainyearmonth.prototype.year */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeYear) +/* Temporal #sec-get-temporal.plainyearmonth.prototype.month */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeMonth) +/* Temporal #sec-get-temporal.plainyearmonth.prototype.monthcode */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeMonthCode) +/* Temporal #sec-get-temporal.plainyearmonth.prototype.daysinyear */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeDaysInYear) +/* Temporal #sec-get-temporal.plainyearmonth.prototype.daysinmonth */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeDaysInMonth) +/* Temporal #sec-get-temporal.plainyearmonth.prototype.monthsinyear */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeMonthsInYear) +/* Temporal #sec-get-temporal.plainyearmonth.prototype.inleapyear */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeInLeapYear) +/* Temporal #sec-temporal.plainyearmonth.prototype.with */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeWith) +/* Temporal #sec-temporal.plainyearmonth.prototype.add */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeAdd) +/* Temporal #sec-temporal.plainyearmonth.prototype.subtract */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeSubtract) +/* Temporal #sec-temporal.plainyearmonth.prototype.until */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeUntil) +/* Temporal #sec-temporal.plainyearmonth.prototype.since */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeSince) +/* Temporal #sec-temporal.plainyearmonth.prototype.equals */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeEquals) +/* Temporal #sec-temporal.plainyearmonth.prototype.tostring */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeToString) +/* Temporal #sec-temporal.plainyearmonth.prototype.tojson */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeToJSON) +/* Temporal #sec-temporal.plainyearmonth.prototype.valueof */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeValueOf) +/* Temporal #sec-temporal.plainyearmonth.prototype.toplaindate */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeToPlainDate) +/* Temporal #sec-temporal.plainyearmonth.prototype.getisofields */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeGetISOFields) + +/* Temporal.PlainMonthDay */ +/* Temporal #sec-temporal.plainmonthday */ +TO_BE_IMPLEMENTED(TemporalPlainMonthDayConstructor) +/* Temporal #sec-temporal.plainmonthday.from */ +TO_BE_IMPLEMENTED(TemporalPlainMonthDayFrom) +/* There is no compare for PlainMonthDay */ +/* See https://github.com/tc39/proposal-temporal/issues/1547 */ +/* Temporal #sec-get-temporal.plainmonthday.prototype.calendar */ +TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeCalendar) +/* Temporal #sec-get-temporal.plainmonthday.prototype.monthcode */ +TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeMonthCode) +/* Temporal
#sec-get-temporal.plainmonthday.prototype.day */ +TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeDay) +/* Temporal #sec-temporal.plainmonthday.prototype.with */ +TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeWith) +/* Temporal #sec-temporal.plainmonthday.prototype.equals */ +TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeEquals) +/* Temporal #sec-temporal.plainmonthday.prototype.tostring */ +TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeToString) +/* Temporal #sec-temporal.plainmonthday.prototype.tojson */ +TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeToJSON) +/* Temporal #sec-temporal.plainmonthday.prototype.valueof */ +TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeValueOf) +/* Temporal #sec-temporal.plainmonthday.prototype.toplaindate */ +TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeToPlainDate) +/* Temporal #sec-temporal.plainmonthday.prototype.getisofields */ +TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeGetISOFields) + +/* Temporal.TimeZone */ +/* Temporal #sec-temporal.timezone */ +TO_BE_IMPLEMENTED(TemporalTimeZoneConstructor) +/* Temporal #sec-temporal.timezone.from */ +TO_BE_IMPLEMENTED(TemporalTimeZoneFrom) +/* Temporal #sec-get-temporal.timezone.prototype.id */ +TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeId) +/* Temporal #sec-temporal.timezone.prototype.getoffsetnanosecondsfor */ +TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeGetOffsetNanosecondsFor) +/* Temporal #sec-temporal.timezone.prototype.getoffsetstringfor */ +TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeGetOffsetStringFor) +/* Temporal #sec-temporal.timezone.prototype.getplaindatetimefor */ +TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeGetPlainDateTimeFor) +/* Temporal #sec-temporal.timezone.prototype.getinstantfor */ +TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeGetInstantFor) +/* Temporal #sec-temporal.timezone.prototype.getpossibleinstantsfor */ +TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeGetPossibleInstantsFor) +/* Temporal #sec-temporal.timezone.prototype.getnexttransition */ +TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeGetNextTransition) +/* Temporal #sec-temporal.timezone.prototype.getprevioustransition */ +TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeGetPreviousTransition) +/* Temporal #sec-temporal.timezone.prototype.tostring */ +TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeToString) +/* Temporal #sec-temporal.timezone.prototype.tojson */ +TO_BE_IMPLEMENTED(TemporalTimeZonePrototypeToJSON) + +/* Temporal.Calendar */ +/* Temporal #sec-temporal.calendar */ +TO_BE_IMPLEMENTED(TemporalCalendarConstructor) +/* Temporal #sec-temporal.calendar.from */ +TO_BE_IMPLEMENTED(TemporalCalendarFrom) +/* Temporal #sec-get-temporal.calendar.prototype.id */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeId) +/* Temporal #sec-temporal.calendar.prototype.datefromfields */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDateFromFields) +/* Temporal #sec-temporal.calendar.prototype.yearmonthfromfields */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeYearMonthFromFields) +/* Temporal #sec-temporal.calendar.prototype.monthdayfromfields */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeMonthDayFromFields) +/* Temporal #sec-temporal.calendar.prototype.dateadd */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDateAdd) +/* Temporal #sec-temporal.calendar.prototype.dateuntil */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDateUntil) +/* Temporal #sec-temporal.calendar.prototype.year */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeYear) +/* Temporal #sec-temporal.calendar.prototype.month */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeMonth) +/* Temporal
#sec-temporal.calendar.prototype.monthcode */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeMonthCode) +/* Temporal #sec-temporal.calendar.prototype.day */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDay) +/* Temporal #sec-temporal.calendar.prototype.dayofweek */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDayOfWeek) +/* Temporal #sec-temporal.calendar.prototype.dayofyear */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDayOfYear) +/* Temporal #sec-temporal.calendar.prototype.weekofyear */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeWeekOfYear) +/* Temporal #sec-temporal.calendar.prototype.daysinweek */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDaysInWeek) +/* Temporal #sec-temporal.calendar.prototype.daysinmonth */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDaysInMonth) +/* Temporal #sec-temporal.calendar.prototype.daysinyear */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeDaysInYear) +/* Temporal #sec-temporal.calendar.prototype.monthsinyear */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeMonthsInYear) +/* Temporal #sec-temporal.calendar.prototype.inleapyear */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeInLeapYear) +/* Temporal #sec-temporal.calendar.prototype.fields */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeFields) +/* Temporal #sec-temporal.calendar.prototype.mergefields */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeMergeFields) +/* Temporal #sec-temporal.calendar.prototype.tostring */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeToString) +/* Temporal #sec-temporal.calendar.prototype.tojson */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeToJSON) + +#ifdef V8_INTL_SUPPORT +/* Temporal */ +/* Temporal #sec-temporal.calendar.prototype.era */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeEra) +/* Temporal #sec-temporal.calendar.prototype.erayear */ +TO_BE_IMPLEMENTED(TemporalCalendarPrototypeEraYear) +/* Temporal #sec-temporal.duration.prototype.tolocalestring */ +TO_BE_IMPLEMENTED(TemporalDurationPrototypeToLocaleString) +/* Temporal #sec-temporal.instant.prototype.tolocalestring */ +TO_BE_IMPLEMENTED(TemporalInstantPrototypeToLocaleString) +/* Temporal #sec-get-temporal.plaindate.prototype.era */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeEra) +/* Temporal #sec-get-temporal.plaindate.prototype.erayear */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeEraYear) +/* Temporal #sec-temporal.plaindate.prototype.tolocalestring */ +TO_BE_IMPLEMENTED(TemporalPlainDatePrototypeToLocaleString) +/* Temporal #sec-get-temporal.plaindatetime.prototype.era */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeEra) +/* Temporal #sec-get-temporal.plaindatetime.prototype.erayear */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeEraYear) +/* Temporal #sec-temporal.plaindatetime.prototype.tolocalestring */ +TO_BE_IMPLEMENTED(TemporalPlainDateTimePrototypeToLocaleString) +/* Temporal #sec-temporal.plainmonthday.prototype.tolocalestring */ +TO_BE_IMPLEMENTED(TemporalPlainMonthDayPrototypeToLocaleString) +/* Temporal #sec-temporal.plaintime.prototype.tolocalestring */ +TO_BE_IMPLEMENTED(TemporalPlainTimePrototypeToLocaleString) +/* Temporal #sec-get-temporal.plainyearmonth.prototype.era */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeEra) +/* Temporal #sec-get-temporal.plainyearmonth.prototype.erayear */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeEraYear) +/* Temporal #sec-temporal.plainyearmonth.prototype.tolocalestring */ +TO_BE_IMPLEMENTED(TemporalPlainYearMonthPrototypeToLocaleString) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.era */ 
+TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeEra) +/* Temporal #sec-get-temporal.zoneddatetime.prototype.erayear */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeEraYear) +/* Temporal #sec-temporal.zoneddatetime.prototype.tolocalestring */ +TO_BE_IMPLEMENTED(TemporalZonedDateTimePrototypeToLocaleString) +#endif // V8_INTL_SUPPORT + +} // namespace internal +} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-typed-array.cc b/deps/v8/src/builtins/builtins-typed-array.cc index a7827e7d9f068b..f64d2aeab64eca 100644 --- a/deps/v8/src/builtins/builtins-typed-array.cc +++ b/deps/v8/src/builtins/builtins-typed-array.cc
@@ -218,7 +218,7 @@ BUILTIN(TypedArrayPrototypeIncludes) { if (args.length() < 2) return ReadOnlyRoots(isolate).false_value(); - int64_t len = array->length(); + int64_t len = array->GetLength(); if (len == 0) return ReadOnlyRoots(isolate).false_value(); int64_t index = 0;
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc index d0045b43d5d328..af1e7490b0eb48 100644 --- a/deps/v8/src/builtins/builtins.cc +++ b/deps/v8/src/builtins/builtins.cc
@@ -165,18 +165,33 @@ Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) { UNREACHABLE(); } +FullObjectSlot Builtins::builtin_slot(Builtin builtin) { + Address* location = &isolate_->builtin_table()[Builtins::ToInt(builtin)]; + return FullObjectSlot(location); +} + +FullObjectSlot Builtins::builtin_tier0_slot(Builtin builtin) { + DCHECK(IsTier0(builtin)); + Address* location = + &isolate_->builtin_tier0_table()[Builtins::ToInt(builtin)]; + return FullObjectSlot(location); +} + void Builtins::set_code(Builtin builtin, Code code) { DCHECK_EQ(builtin, code.builtin_id()); - isolate_->heap()->set_builtin(builtin, code); + DCHECK(Internals::HasHeapObjectTag(code.ptr())); + // The given builtin may be uninitialized, thus we cannot check its type here. + isolate_->builtin_table()[Builtins::ToInt(builtin)] = code.ptr(); } -Code Builtins::code(Builtin builtin_enum) { - return isolate_->heap()->builtin(builtin_enum); +Code Builtins::code(Builtin builtin) { + Address ptr = isolate_->builtin_table()[Builtins::ToInt(builtin)]; + return Code::cast(Object(ptr)); } Handle<Code> Builtins::code_handle(Builtin builtin) { - return Handle<Code>( - reinterpret_cast<Address*>(isolate_->heap()->builtin_address(builtin))); + Address* location = &isolate_->builtin_table()[Builtins::ToInt(builtin)]; + return Handle<Code>(location); } // static
@@ -272,15 +287,12 @@ bool Builtins::IsBuiltin(const Code code) { bool Builtins::IsBuiltinHandle(Handle<HeapObject> maybe_code, Builtin* builtin) const { - Heap* heap = isolate_->heap(); - Address handle_location = maybe_code.address(); - Address end = - heap->builtin_address(static_cast<Builtin>(Builtins::kBuiltinCount)); - if (handle_location >= end) return false; - Address start = heap->builtin_address(static_cast<Builtin>(0)); - if (handle_location < start) return false; - *builtin = FromInt(static_cast<int>(handle_location - start) >> - kSystemPointerSizeLog2); + Address* handle_location = maybe_code.location(); + Address* builtins_table = isolate_->builtin_table(); + if (handle_location < builtins_table) return false; + Address* builtins_table_end = &builtins_table[Builtins::kBuiltinCount]; + if (handle_location >= builtins_table_end) return false; + *builtin = FromInt(static_cast<int>(handle_location - builtins_table)); return true; }
@@ -298,8 +310,8 @@ void Builtins::InitializeIsolateDataTables(Isolate* isolate) { // The entry table. for (Builtin i = Builtins::kFirst; i <= Builtins::kLast; ++i) { - DCHECK(Builtins::IsBuiltinId(isolate->heap()->builtin(i).builtin_id())); - DCHECK(isolate->heap()->builtin(i).is_off_heap_trampoline()); + DCHECK(Builtins::IsBuiltinId(isolate->builtins()->code(i).builtin_id())); + DCHECK(isolate->builtins()->code(i).is_off_heap_trampoline()); isolate_data->builtin_entry_table()[ToInt(i)] = embedded_data.InstructionStartOfBuiltin(i); }
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h index e606a3881e1402..79e4da840cd09b 100644 --- a/deps/v8/src/builtins/builtins.h +++ b/deps/v8/src/builtins/builtins.h
@@ -276,6 +276,11 @@ class Builtins { js_entry_handler_offset_ = offset; } + // Returns the given builtin's slot in the main builtin table. + FullObjectSlot builtin_slot(Builtin builtin); + // Returns the given builtin's slot in the tier0 builtin table. + FullObjectSlot builtin_tier0_slot(Builtin builtin); + private: static void Generate_CallFunction(MacroAssembler* masm, ConvertReceiverMode mode);
diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq index 6a3c157db8e256..2a36badfb74c87 100644 --- a/deps/v8/src/builtins/convert.tq +++ b/deps/v8/src/builtins/convert.tq
@@ -88,6 +88,9 @@ FromConstexpr<uintptr, constexpr int31>(i: constexpr int31): uintptr { FromConstexpr<float64, constexpr int31>(i: constexpr int31): float64 { return Float64Constant(i); } +FromConstexpr<float64, constexpr int32>(i: constexpr int32): float64 { + return Float64Constant(i); +} FromConstexpr<float64, constexpr float64>(i: constexpr float64): float64 { return Float64Constant(i); }
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc index c140a2c812fec5..aed3333c71ecbf 100644 --- a/deps/v8/src/builtins/ia32/builtins-ia32.cc +++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -2447,7 +2447,11 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, StackArgumentsAccessor args(eax); __ AssertFunction(edi, edx); + Label class_constructor; __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ test(FieldOperand(edx, SharedFunctionInfo::kFlagsOffset), + Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask)); + __ j(not_zero, &class_constructor); // Enter the context of the function; ToObject has to run in the function // context, and we also need to take the global proxy from the function
@@ -2528,6 +2532,14 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ movzx_w( ecx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); __ InvokeFunctionCode(edi, no_reg, ecx, eax, InvokeType::kJump); + + // The function is a "classConstructor", need to raise an exception. + __ bind(&class_constructor); + { + FrameScope frame(masm, StackFrame::INTERNAL); + __ push(edi); + __ CallRuntime(Runtime::kThrowConstructorNonCallableError); + } } namespace {
@@ -3007,6 +3019,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { __ Trap(); } +void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { + // TODO(v8:12191): Implement for this platform. + __ Trap(); +} + void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { // Only needed on x64.
__ Trap(); diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc index 714353fc96dc20..30632232270ffd 100644 --- a/deps/v8/src/builtins/loong64/builtins-loong64.cc +++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc @@ -71,6 +71,34 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm, namespace { +enum class ArgumentsElementType { + kRaw, // Push arguments as they are. + kHandle // Dereference arguments before pushing. +}; + +void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc, + Register scratch, Register scratch2, + ArgumentsElementType element_type) { + DCHECK(!AreAliased(array, argc, scratch)); + Label loop, entry; + if (kJSArgcIncludesReceiver) { + __ Sub_d(scratch, argc, Operand(kJSArgcReceiverSlots)); + } else { + __ mov(scratch, argc); + } + __ Branch(&entry); + __ bind(&loop); + __ Alsl_d(scratch2, scratch, array, kPointerSizeLog2, t7); + __ Ld_d(scratch2, MemOperand(scratch2, 0)); + if (element_type == ArgumentsElementType::kHandle) { + __ Ld_d(scratch2, MemOperand(scratch2, 0)); + } + __ Push(scratch2); + __ bind(&entry); + __ Add_d(scratch, scratch, Operand(-1)); + __ Branch(&loop, greater_equal, scratch, Operand(zero_reg)); +} + void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- a0 : number of arguments @@ -90,12 +118,14 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { __ Push(cp, a0); __ SmiUntag(a0); - // Set up pointer to last argument (skip receiver). + // Set up pointer to first argument (skip receiver). __ Add_d( t2, fp, Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize)); // Copy arguments and receiver to the expression stack. - __ PushArray(t2, a0, t3, t0); + // t2: Pointer to start of arguments. + // a0: Number of arguments. + Generate_PushArguments(masm, t2, a0, t3, t0, ArgumentsElementType::kRaw); // The receiver for the builtin/api call. __ PushRoot(RootIndex::kTheHoleValue); @@ -113,9 +143,11 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - __ SmiScale(t3, t3, kPointerSizeLog2); - __ Add_d(sp, sp, t3); - __ Add_d(sp, sp, kPointerSize); + __ DropArguments(t3, TurboAssembler::kCountIsSmi, + kJSArgcIncludesReceiver + ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver, + t3); __ Ret(); } @@ -221,7 +253,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // InvokeFunction. // Copy arguments and receiver to the expression stack. - __ PushArray(t2, a0, t0, t1); + // t2: Pointer to start of argument. + // a0: Number of arguments. + Generate_PushArguments(masm, t2, a0, t0, t1, ArgumentsElementType::kRaw); // We need two copies because we may have to return the original one // and the calling conventions dictate that the called function pops the // receiver. The second copy is pushed after the arguments, @@ -267,9 +301,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ SmiScale(a4, a1, kPointerSizeLog2); - __ Add_d(sp, sp, a4); - __ Add_d(sp, sp, kPointerSize); + __ DropArguments(a1, TurboAssembler::kCountIsSmi, + kJSArgcIncludesReceiver + ? 
TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver, + a4); __ Ret(); __ bind(&check_receiver);
@@ -391,6 +427,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); __ Ld_hu( a3, FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); + if (kJSArgcIncludesReceiver) { + __ Sub_d(a3, a3, Operand(kJSArgcReceiverSlots)); + } __ Ld_d(t1, FieldMemOperand( a1, JSGeneratorObject::kParametersAndRegistersOffset)); {
@@ -723,23 +762,17 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ Push(a2); // Check if we have enough stack space to push all arguments. - __ addi_d(a6, a4, 1); + if (kJSArgcIncludesReceiver) { + __ mov(a6, a4); + } else { + __ addi_d(a6, a4, 1); + } Generate_CheckStackOverflow(masm, a6, a0, s2); - // Copy arguments to the stack in a loop. + // Copy arguments to the stack. // a4: argc // a5: argv, i.e. points to first arg - Label loop, entry; - __ Alsl_d(s1, a4, a5, kPointerSizeLog2, t7); - __ b(&entry); - // s1 points past last arg. - __ bind(&loop); - __ addi_d(s1, s1, -kPointerSize); - __ Ld_d(s2, MemOperand(s1, 0)); // Read next parameter. - __ Ld_d(s2, MemOperand(s2, 0)); // Dereference handle. - __ Push(s2); // Push parameter. - __ bind(&entry); - __ Branch(&loop, ne, a5, Operand(s1)); + Generate_PushArguments(masm, a5, a4, s1, s2, ArgumentsElementType::kHandle); // Push the receiver. __ Push(a3);
@@ -814,7 +847,10 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ Ld_d(actual_params_size, MemOperand(fp, StandardFrameConstants::kArgCOffset)); __ slli_d(actual_params_size, actual_params_size, kPointerSizeLog2); - __ Add_d(actual_params_size, actual_params_size, Operand(kSystemPointerSize)); + if (!kJSArgcIncludesReceiver) { + __ Add_d(actual_params_size, actual_params_size, + Operand(kSystemPointerSize)); + } // If actual is bigger than formal, then we should use it to free up the stack // arguments.
@@ -825,7 +861,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ LeaveFrame(StackFrame::INTERPRETED); // Drop receiver + arguments. - __ Add_d(sp, sp, params_size); + __ DropArguments(params_size, TurboAssembler::kCountIsBytes, + TurboAssembler::kCountIncludesReceiver); } // Tail-call |function_id| if |actual_marker| == |expected_marker|
@@ -1192,7 +1229,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // stack left to right. // // The live registers are: -// o a0 : actual argument count (not including the receiver) +// o a0 : actual argument count // o a1: the JS function object being called. // o a3: the incoming new target or generator object // o cp: our context
@@ -1458,7 +1495,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( InterpreterPushArgsMode mode) { DCHECK(mode != InterpreterPushArgsMode::kArrayFunction); // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a2 : the address of the first argument to be pushed. Subsequent // arguments should be consecutive above this, in the same order as // they are to be pushed onto the stack.
@@ -1470,15 +1507,18 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( __ Sub_d(a0, a0, Operand(1)); } - __ Add_d(a3, a0, Operand(1)); // Add one for receiver. - - __ StackOverflowCheck(a3, a4, t0, &stack_overflow); - - if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - // Don't copy receiver. + const bool skip_receiver = + receiver_mode == ConvertReceiverMode::kNullOrUndefined; + if (kJSArgcIncludesReceiver && skip_receiver) { + __ Sub_d(a3, a0, Operand(kJSArgcReceiverSlots)); + } else if (!kJSArgcIncludesReceiver && !skip_receiver) { + __ Add_d(a3, a0, Operand(1)); + } else { __ mov(a3, a0); } + __ StackOverflowCheck(a3, a4, t0, &stack_overflow); + // This function modifies a2, t0 and a4. GenerateInterpreterPushArgs(masm, a3, a2, a4, t0);
@@ -1514,23 +1554,28 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( void Builtins::Generate_InterpreterPushArgsThenConstructImpl( MacroAssembler* masm, InterpreterPushArgsMode mode) { // ----------- S t a t e ------------- - // -- a0 : argument count (not including receiver) + // -- a0 : argument count // -- a3 : new target // -- a1 : constructor to call // -- a2 : allocation site feedback if available, undefined otherwise. // -- a4 : address of the first argument // ----------------------------------- Label stack_overflow; - __ addi_d(a6, a0, 1); - __ StackOverflowCheck(a6, a5, t0, &stack_overflow); + __ StackOverflowCheck(a0, a5, t0, &stack_overflow); if (mode == InterpreterPushArgsMode::kWithFinalSpread) { // The spread argument should not be pushed. __ Sub_d(a0, a0, Operand(1)); } + Register argc_without_receiver = a0; + if (kJSArgcIncludesReceiver) { + argc_without_receiver = a6; + __ Sub_d(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots)); + } + // Push the arguments. This function modifies t0, a4 and a5. - GenerateInterpreterPushArgs(masm, a0, a4, a5, t0); + GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5, t0); // Push a slot for the receiver. __ Push(zero_reg);
@@ -1729,13 +1774,14 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, // Overwrite the hole inserted by the deoptimizer with the return value from // the LAZY deopt point. t0 contains the arguments count, the return value // from LAZY is always the last argument. - __ Add_d(a0, a0, - Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); + constexpr int return_value_offset = + BuiltinContinuationFrameConstants::kFixedSlotCount - + kJSArgcReceiverSlots; + __ Add_d(a0, a0, Operand(return_value_offset)); __ Alsl_d(t0, a0, sp, kSystemPointerSizeLog2, t7); __ St_d(scratch, MemOperand(t0, 0)); // Recover arguments count. - __ Sub_d(a0, a0, - Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); + __ Sub_d(a0, a0, Operand(return_value_offset)); } __ Ld_d(
@@ -1856,10 +1902,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // arguments from the stack (including the receiver), and push thisArg (if // present) instead. { - // Claim (2 - argc) dummy arguments from the stack, to put the stack in a - // consistent state for a simple pop operation. - - __ mov(scratch, argc); + __ Sub_d(scratch, argc, JSParameterCount(0)); __ Ld_d(this_arg, MemOperand(sp, kPointerSize)); __ Ld_d(arg_array, MemOperand(sp, 2 * kPointerSize)); __ Movz(arg_array, undefined_value, scratch); // if argc == 0
@@ -1867,8 +1910,10 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ Sub_d(scratch, scratch, Operand(1)); __ Movz(arg_array, undefined_value, scratch); // if argc == 1 __ Ld_d(receiver, MemOperand(sp, 0)); - __ Alsl_d(sp, argc, sp, kSystemPointerSizeLog2, t7); - __ St_d(this_arg, MemOperand(sp, 0)); + __ DropArgumentsAndPushNewReceiver( + argc, this_arg, TurboAssembler::kCountIsInteger, + kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e -------------
@@ -1895,7 +1940,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // arguments to the receiver. __ bind(&no_arguments); { - __ mov(a0, zero_reg); + __ li(a0, JSParameterCount(0)); DCHECK(receiver == a1); __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); }
@@ -1910,7 +1955,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { // a0: actual number of arguments { Label done; - __ Branch(&done, ne, a0, Operand(zero_reg)); + __ Branch(&done, ne, a0, Operand(JSParameterCount(0))); __ PushRoot(RootIndex::kUndefinedValue); __ Add_d(a0, a0, Operand(1)); __ bind(&done);
@@ -1948,7 +1993,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { // Claim (3 - argc) dummy arguments from the stack, to put the stack in a // consistent state for a simple pop operation. - __ mov(scratch, argc); + __ Sub_d(scratch, argc, Operand(JSParameterCount(0))); __ Ld_d(target, MemOperand(sp, kPointerSize)); __ Ld_d(this_argument, MemOperand(sp, 2 * kPointerSize)); __ Ld_d(arguments_list, MemOperand(sp, 3 * kPointerSize));
@@ -1961,8 +2006,10 @@ __ Sub_d(scratch, scratch, Operand(1)); __ Movz(arguments_list, undefined_value, scratch); // if argc == 2 - __ Alsl_d(sp, argc, sp, kSystemPointerSizeLog2, t7); - __ St_d(this_argument, MemOperand(sp, 0)); // Overwrite receiver + __ DropArgumentsAndPushNewReceiver( + argc, this_argument, TurboAssembler::kCountIsInteger, + kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e -------------
@@ -2007,7 +2054,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { // Claim (3 - argc) dummy arguments from the stack, to put the stack in a // consistent state for a simple pop operation. - __ mov(scratch, argc); + __ Sub_d(scratch, argc, Operand(JSParameterCount(0))); __ Ld_d(target, MemOperand(sp, kPointerSize)); __ Ld_d(arguments_list, MemOperand(sp, 2 * kPointerSize)); __ Ld_d(new_target, MemOperand(sp, 3 * kPointerSize));
@@ -2020,8 +2067,10 @@ __ Sub_d(scratch, scratch, Operand(1)); __ Movz(new_target, target, scratch); // if argc == 2 - __ Alsl_d(sp, argc, sp, kSystemPointerSizeLog2, t7); - __ St_d(undefined_value, MemOperand(sp, 0)); // Overwrite receiver + __ DropArgumentsAndPushNewReceiver( + argc, undefined_value, TurboAssembler::kCountIsInteger, + kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e -------------
@@ -2044,12 +2093,59 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { RelocInfo::CODE_TARGET); } +namespace { + +// Allocate new stack space for |count| arguments and shift all existing +// arguments already on the stack. |pointer_to_new_space_out| points to the +// first free slot on the stack to copy additional arguments to and +// |argc_in_out| is updated to include |count|. +void Generate_AllocateSpaceAndShiftExistingArguments( + MacroAssembler* masm, Register count, Register argc_in_out, + Register pointer_to_new_space_out, Register scratch1, Register scratch2, + Register scratch3) { + DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1, + scratch2)); + Register old_sp = scratch1; + Register new_space = scratch2; + __ mov(old_sp, sp); + __ slli_d(new_space, count, kPointerSizeLog2); + __ Sub_d(sp, sp, Operand(new_space)); + + Register end = scratch2; + Register value = scratch3; + Register dest = pointer_to_new_space_out; + __ mov(dest, sp); + __ Alsl_d(end, argc_in_out, old_sp, kSystemPointerSizeLog2); + Label loop, done; + if (kJSArgcIncludesReceiver) { + __ Branch(&done, ge, old_sp, Operand(end)); + } else { + __ Branch(&done, gt, old_sp, Operand(end)); + } + __ bind(&loop); + __ Ld_d(value, MemOperand(old_sp, 0)); + __ St_d(value, MemOperand(dest, 0)); + __ Add_d(old_sp, old_sp, Operand(kSystemPointerSize)); + __ Add_d(dest, dest, Operand(kSystemPointerSize)); + if (kJSArgcIncludesReceiver) { + __ Branch(&loop, lt, old_sp, Operand(end)); + } else { + __ Branch(&loop, le, old_sp, Operand(end)); + } + __ bind(&done); + + // Update total number of arguments. + __ Add_d(argc_in_out, argc_in_out, count); +} + +} // namespace + // static void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Handle<Code> code) { // ----------- S t a t e ------------- // -- a1 : target - // -- a0 : number of parameters on the stack (not including the receiver) + // -- a0 : number of parameters on the stack // -- a2 : arguments list (a FixedArray) // -- a4 : len (number of elements to push from args) // -- a3 : new.target (for [[Construct]])
@@ -2078,24 +2174,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Move the arguments already in the stack, // including the receiver and the return address. - { - Label copy; - Register src = a6, dest = a7; - __ mov(src, sp); - __ slli_d(t0, a4, kSystemPointerSizeLog2); - __ Sub_d(sp, sp, Operand(t0)); - // Update stack pointer. - __ mov(dest, sp); - __ Add_d(t0, a0, Operand(zero_reg)); - - __ bind(&copy); - __ Ld_d(t1, MemOperand(src, 0)); - __ St_d(t1, MemOperand(dest, 0)); - __ Sub_d(t0, t0, Operand(1)); - __ Add_d(src, src, Operand(kSystemPointerSize)); - __ Add_d(dest, dest, Operand(kSystemPointerSize)); - __ Branch(&copy, ge, t0, Operand(zero_reg)); - } + // a4: Number of arguments to make room for. + // a0: Number of arguments already on the stack. + // a7: Points to first free slot on the stack after arguments were shifted. + Generate_AllocateSpaceAndShiftExistingArguments(masm, a4, a0, a7, a6, t0, t1); // Push arguments onto the stack (thisArgument is already on the stack). {
@@ -2104,7 +2186,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Register scratch = len; __ addi_d(src, args, FixedArray::kHeaderSize - kHeapObjectTag); - __ Add_d(a0, a0, len); // The 'len' argument for Call() or Construct(). __ Branch(&done, eq, len, Operand(zero_reg)); __ slli_d(scratch, len, kPointerSizeLog2); __ Sub_d(scratch, sp, Operand(scratch));
@@ -2134,7 +2215,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, CallOrConstructMode mode, Handle<Code> code) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a3 : the new.target (for [[Construct]] calls) // -- a1 : the target to call (can be any Object) // -- a2 : start index (to support rest parameters)
@@ -2160,7 +2241,10 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, Label stack_done, stack_overflow; __ Ld_d(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset)); - __ Sub_w(a7, a7, a2); + if (kJSArgcIncludesReceiver) { + __ Sub_d(a7, a7, Operand(kJSArgcReceiverSlots)); + } + __ Sub_d(a7, a7, a2); __ Branch(&stack_done, le, a7, Operand(zero_reg)); { // Check for stack overflow.
@@ -2176,31 +2260,17 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, // Move the arguments already in the stack, // including the receiver and the return address. - { - Label copy; - Register src = t0, dest = a2; - __ mov(src, sp); - // Update stack pointer. - __ slli_d(t1, a7, kSystemPointerSizeLog2); - __ Sub_d(sp, sp, Operand(t1)); - __ mov(dest, sp); - __ Add_d(t2, a0, Operand(zero_reg)); - - __ bind(&copy); - __ Ld_d(t1, MemOperand(src, 0)); - __ St_d(t1, MemOperand(dest, 0)); - __ Sub_d(t2, t2, Operand(1)); - __ Add_d(src, src, Operand(kSystemPointerSize)); - __ Add_d(dest, dest, Operand(kSystemPointerSize)); - __ Branch(&copy, ge, t2, Operand(zero_reg)); - } + // a7: Number of arguments to make room for. + // a0: Number of arguments already on the stack. + // a2: Points to first free slot on the stack after arguments were shifted. + Generate_AllocateSpaceAndShiftExistingArguments(masm, a7, a0, a2, t0, t1, + t2); // Copy arguments from the caller frame. // TODO(victorgomes): Consider using forward order as potentially more cache // friendly. { Label loop; - __ Add_d(a0, a0, a7); __ bind(&loop); { __ Sub_w(a7, a7, Operand(1));
@@ -2225,13 +2295,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, void Builtins::Generate_CallFunction(MacroAssembler* masm, ConvertReceiverMode mode) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSFunction) // ----------------------------------- __ AssertFunction(a1); - // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) - // Check that function is not a "classConstructor". Label class_constructor; __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
@@ -2252,7 +2320,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg)); { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSFunction) // -- a2 : the shared function info. // -- cp : the function context.
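Both Generate_CallOrConstructVarargs and Generate_CallOrConstructForwardVarargs above now funnel their stack manipulation through the new Generate_AllocateSpaceAndShiftExistingArguments helper. As a reading aid, a minimal C++ model of what the emitted code does; this is a sketch only (the real helper emits per-platform assembly), assuming the JS stack grows downward, sp points at the receiver/first argument, and argc counts every slot currently on the stack:

    #include <cstdint>
    #include <cstring>

    // Returns a pointer to the first of the |count| freed slots and updates
    // argc to include them, mirroring |pointer_to_new_space_out| and
    // |argc_in_out| in the assembly helper.
    uintptr_t* AllocateSpaceAndShiftExistingArguments(uintptr_t*& sp, int& argc,
                                                      int count) {
      uintptr_t* old_sp = sp;
      sp -= count;  // Allocate |count| fresh slots below the current arguments.
      std::memmove(sp, old_sp, argc * sizeof(uintptr_t));  // Slide args down.
      uintptr_t* first_free = sp + argc;  // First slot freed by the shift.
      argc += count;                      // Total now includes the new slots.
      return first_free;
    }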
@@ -2304,7 +2372,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ bind(&done_convert); // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSFunction) // -- a2 : the shared function info. // -- cp : the function context. @@ -2326,7 +2394,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // static void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSBoundFunction) // ----------------------------------- __ AssertBoundFunction(a1); @@ -2342,7 +2410,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSBoundFunction) // -- a2 : the [[BoundArguments]] (implemented as FixedArray) // -- a4 : the number of [[BoundArguments]] @@ -2397,35 +2465,53 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { // static void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the target to call (can be any Object). // ----------------------------------- - Label non_callable, non_smi; - __ JumpIfSmi(a1, &non_callable); - __ bind(&non_smi); - __ LoadMap(t1, a1); - __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8); + Register argc = a0; + Register target = a1; + Register map = t1; + Register instance_type = t2; + Register scratch = t8; + DCHECK(!AreAliased(argc, target, map, instance_type, scratch)); + + Label non_callable, class_constructor; + __ JumpIfSmi(target, &non_callable); + __ LoadMap(map, target); + __ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE, + scratch); __ Jump(masm->isolate()->builtins()->CallFunction(mode), - RelocInfo::CODE_TARGET, ls, t8, - Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE)); + RelocInfo::CODE_TARGET, ls, scratch, + Operand(LAST_CALLABLE_JS_FUNCTION_TYPE - + FIRST_CALLABLE_JS_FUNCTION_TYPE)); __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction), - RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); + RelocInfo::CODE_TARGET, eq, instance_type, + Operand(JS_BOUND_FUNCTION_TYPE)); // Check if target has a [[Call]] internal method. - __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset)); - __ And(t1, t1, Operand(Map::Bits1::IsCallableBit::kMask)); - __ Branch(&non_callable, eq, t1, Operand(zero_reg)); + { + Register flags = t1; + __ Ld_bu(flags, FieldMemOperand(map, Map::kBitFieldOffset)); + map = no_reg; + __ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask)); + __ Branch(&non_callable, eq, flags, Operand(zero_reg)); + } __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq, - t2, Operand(JS_PROXY_TYPE)); + instance_type, Operand(JS_PROXY_TYPE)); + + // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) + // Check that the function is not a "classConstructor". 
+ __ Branch(&class_constructor, eq, instance_type, + Operand(JS_CLASS_CONSTRUCTOR_TYPE)); // 2. Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). // Overwrite the original receiver with the (original) target. - __ StoreReceiver(a1, a0, kScratchReg); + __ StoreReceiver(target, argc, kScratchReg); // Let the "call_as_function_delegate" take care of the rest. - __ LoadNativeContextSlot(a1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX); + __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX); __ Jump(masm->isolate()->builtins()->CallFunction( ConvertReceiverMode::kNotNullOrUndefined), RelocInfo::CODE_TARGET); @@ -2434,14 +2520,22 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { __ bind(&non_callable); { FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(a1); + __ Push(target); __ CallRuntime(Runtime::kThrowCalledNonCallable); } + + // 4. The function is a "classConstructor", need to raise an exception. + __ bind(&class_constructor); + { + FrameScope frame(masm, StackFrame::INTERNAL); + __ Push(target); + __ CallRuntime(Runtime::kThrowConstructorNonCallableError); + } } void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the constructor to call (checked to be a JSFunction) // -- a3 : the new target (checked to be a constructor) // ----------------------------------- @@ -2471,7 +2565,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // static void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSBoundFunction) // -- a3 : the new target (checked to be a constructor) // ----------------------------------- @@ -2483,7 +2577,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSBoundFunction) // -- a2 : the [[BoundArguments]] (implemented as FixedArray) // -- a3 : the new target (checked to be a constructor) @@ -2547,35 +2641,46 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { // static void Builtins::Generate_Construct(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the constructor to call (can be any Object) // -- a3 : the new target (either the same as the constructor or // the JSFunction on which new was invoked initially) // ----------------------------------- + Register argc = a0; + Register target = a1; + Register map = t1; + Register instance_type = t2; + Register scratch = t8; + DCHECK(!AreAliased(argc, target, map, instance_type, scratch)); + // Check if target is a Smi. Label non_constructor, non_proxy; - __ JumpIfSmi(a1, &non_constructor); + __ JumpIfSmi(target, &non_constructor); // Check if target has a [[Construct]] internal method. 
- __ Ld_d(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); - __ Ld_bu(t3, FieldMemOperand(t1, Map::kBitFieldOffset)); - __ And(t3, t3, Operand(Map::Bits1::IsConstructorBit::kMask)); - __ Branch(&non_constructor, eq, t3, Operand(zero_reg)); + __ Ld_d(map, FieldMemOperand(target, HeapObject::kMapOffset)); + { + Register flags = t3; + __ Ld_bu(flags, FieldMemOperand(map, Map::kBitFieldOffset)); + __ And(flags, flags, Operand(Map::Bits1::IsConstructorBit::kMask)); + __ Branch(&non_constructor, eq, flags, Operand(zero_reg)); + } // Dispatch based on instance type. - __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8); + __ GetInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE, scratch); __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction), - RelocInfo::CODE_TARGET, ls, t8, + RelocInfo::CODE_TARGET, ls, scratch, Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE)); // Only dispatch to bound functions after checking whether they are // constructors. __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction), - RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); + RelocInfo::CODE_TARGET, eq, instance_type, + Operand(JS_BOUND_FUNCTION_TYPE)); // Only dispatch to proxies after checking whether they are constructors. - __ Branch(&non_proxy, ne, t2, Operand(JS_PROXY_TYPE)); + __ Branch(&non_proxy, ne, instance_type, Operand(JS_PROXY_TYPE)); __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy), RelocInfo::CODE_TARGET); @@ -2583,9 +2688,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { __ bind(&non_proxy); { // Overwrite the original receiver with the (original) target. - __ StoreReceiver(a1, a0, kScratchReg); + __ StoreReceiver(target, argc, kScratchReg); // Let the "call_as_constructor_delegate" take care of the rest. - __ LoadNativeContextSlot(a1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX); + __ LoadNativeContextSlot(target, + Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX); __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET); } @@ -2678,6 +2784,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { __ Trap(); } +void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { + // TODO(v8:12191): Implement for this platform. + __ Trap(); +} + void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { // Only needed on x64. __ Trap(); @@ -3072,7 +3183,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- cp : context // -- a1 : api function address - // -- a2 : arguments count (not including the receiver) + // -- a2 : arguments count // -- a3 : call data // -- a0 : holder // -- sp[0] : receiver diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc index 9a97f0fa4e6a32..74493abad3228b 100644 --- a/deps/v8/src/builtins/mips/builtins-mips.cc +++ b/deps/v8/src/builtins/mips/builtins-mips.cc @@ -72,6 +72,34 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm, namespace { +enum class ArgumentsElementType { + kRaw, // Push arguments as they are. + kHandle // Dereference arguments before pushing. 
+}; + +void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc, + Register scratch, Register scratch2, + ArgumentsElementType element_type) { + DCHECK(!AreAliased(array, argc, scratch)); + Label loop, entry; + if (kJSArgcIncludesReceiver) { + __ Subu(scratch, argc, Operand(kJSArgcReceiverSlots)); + } else { + __ mov(scratch, argc); + } + __ Branch(&entry); + __ bind(&loop); + __ Lsa(scratch2, array, scratch, kSystemPointerSizeLog2); + __ lw(scratch2, MemOperand(scratch2)); + if (element_type == ArgumentsElementType::kHandle) { + __ lw(scratch2, MemOperand(scratch2)); + } + __ push(scratch2); + __ bind(&entry); + __ Addu(scratch, scratch, Operand(-1)); + __ Branch(&loop, greater_equal, scratch, Operand(zero_reg)); +} + void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- a0 : number of arguments
@@ -90,12 +118,14 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { __ SmiTag(a0); __ Push(cp, a0); __ SmiUntag(a0); - // Set up pointer to last argument (skip receiver). + // Set up pointer to first argument (skip receiver). __ Addu( t2, fp, Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize)); // Copy arguments and receiver to the expression stack. - __ PushArray(t2, a0, t3, t0); + // t2: Pointer to start of arguments. + // a0: Number of arguments. + Generate_PushArguments(masm, t2, a0, t3, t0, ArgumentsElementType::kRaw); // The receiver for the builtin/api call. __ PushRoot(RootIndex::kTheHoleValue);
@@ -113,8 +143,10 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - __ Lsa(sp, sp, t3, kPointerSizeLog2 - 1); - __ Addu(sp, sp, kPointerSize); + __ DropArguments(t3, TurboAssembler::kCountIsSmi, + kJSArgcIncludesReceiver + ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); __ Ret(); }
@@ -219,7 +251,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // InvokeFunction. // Copy arguments and receiver to the expression stack. - __ PushArray(t2, a0, t0, t1); + // t2: Pointer to start of argument. + // a0: Number of arguments. + Generate_PushArguments(masm, t2, a0, t0, t1, ArgumentsElementType::kRaw); // We need two copies because we may have to return the original one // and the calling conventions dictate that the called function pops the
@@ -266,8 +300,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize); - __ Addu(sp, sp, kPointerSize); + __ DropArguments(a1, TurboAssembler::kCountIsSmi, + kJSArgcIncludesReceiver + ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); __ Ret(); __ bind(&check_receiver);
@@ -465,6 +501,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, Handle<Code> trampoline_code = masm->isolate()->builtins()->code_handle(entry_trampoline); DCHECK_EQ(kPushedStackSpace, pushed_stack_space); + USE(pushed_stack_space); __ Call(trampoline_code, RelocInfo::CODE_TARGET); // Unlink this frame from the handler chain.
@@ -546,24 +583,17 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Check if we have enough stack space to push all arguments. // Clobbers a2 and t0.
- __ addiu(t1, a0, 1); + if (kJSArgcIncludesReceiver) { + __ mov(t1, a0); + } else { + __ addiu(t1, a0, 1); + } Generate_CheckStackOverflow(masm, t1, t0, t2); - // Copy arguments to the stack in a loop. + // Copy arguments to the stack. // a0: argc // s0: argv, i.e. points to first arg - Label loop, entry; - __ Lsa(t2, s0, a0, kPointerSizeLog2); - __ b(&entry); - __ nop(); // Branch delay slot nop. - // t2 points past last arg. - __ bind(&loop); - __ addiu(t2, t2, -kPointerSize); - __ lw(t0, MemOperand(t2)); // Read next parameter. - __ lw(t0, MemOperand(t0)); // Dereference handle. - __ push(t0); // Push parameter. - __ bind(&entry); - __ Branch(&loop, ne, s0, Operand(t2)); + Generate_PushArguments(masm, s0, a0, t2, t0, ArgumentsElementType::kHandle); // Push the receiver. __ Push(a3); @@ -702,6 +732,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); __ lhu(a3, FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); + if (kJSArgcIncludesReceiver) { + __ Subu(a3, a3, Operand(kJSArgcReceiverSlots)); + } __ lw(t1, FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset)); { @@ -807,7 +840,10 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ Lw(actual_params_size, MemOperand(fp, StandardFrameConstants::kArgCOffset)); __ sll(actual_params_size, actual_params_size, kPointerSizeLog2); - __ Addu(actual_params_size, actual_params_size, Operand(kSystemPointerSize)); + if (!kJSArgcIncludesReceiver) { + __ Addu(actual_params_size, actual_params_size, + Operand(kSystemPointerSize)); + } // If actual is bigger than formal, then we should use it to free up the stack // arguments. @@ -818,7 +854,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ LeaveFrame(StackFrame::INTERPRETED); // Drop receiver + arguments. - __ Addu(sp, sp, params_size); + __ DropArguments(params_size, TurboAssembler::kCountIsBytes, + TurboAssembler::kCountIncludesReceiver); } // Tail-call |function_id| if |actual_marker| == |expected_marker| @@ -1185,7 +1222,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // stack left to right. // // The live registers are: -// o a0 : actual argument count (not including the receiver) +// o a0 : actual argument count // o a1: the JS function object being called. // o a3: the incoming new target or generator object // o cp: our context @@ -1447,7 +1484,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( InterpreterPushArgsMode mode) { DCHECK(mode != InterpreterPushArgsMode::kArrayFunction); // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a2 : the address of the first argument to be pushed. Subsequent // arguments should be consecutive above this, in the same order as // they are to be pushed onto the stack. @@ -1459,15 +1496,18 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( __ Subu(a0, a0, Operand(1)); } - __ Addu(t0, a0, Operand(1)); // Add one for receiver. - - __ StackOverflowCheck(t0, t4, t1, &stack_overflow); - - if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - // Don't copy receiver. 
+ const bool skip_receiver = + receiver_mode == ConvertReceiverMode::kNullOrUndefined; + if (kJSArgcIncludesReceiver && skip_receiver) { + __ Subu(t0, a0, Operand(kJSArgcReceiverSlots)); + } else if (!kJSArgcIncludesReceiver && !skip_receiver) { + __ Addu(t0, a0, Operand(1)); + } else { __ mov(t0, a0); } + __ StackOverflowCheck(t0, t4, t1, &stack_overflow); + // This function modifies a2, t4 and t1. GenerateInterpreterPushArgs(masm, t0, a2, t4, t1);
@@ -1503,22 +1543,27 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( void Builtins::Generate_InterpreterPushArgsThenConstructImpl( MacroAssembler* masm, InterpreterPushArgsMode mode) { // ----------- S t a t e ------------- - // -- a0 : argument count (not including receiver) + // -- a0 : argument count // -- a3 : new target // -- a1 : constructor to call // -- a2 : allocation site feedback if available, undefined otherwise. // -- t4 : address of the first argument // ----------------------------------- Label stack_overflow; - __ addiu(t2, a0, 1); - __ StackOverflowCheck(t2, t1, t0, &stack_overflow); + __ StackOverflowCheck(a0, t1, t0, &stack_overflow); if (mode == InterpreterPushArgsMode::kWithFinalSpread) { // The spread argument should not be pushed. __ Subu(a0, a0, Operand(1)); } - GenerateInterpreterPushArgs(masm, a0, t4, t1, t0); + Register argc_without_receiver = a0; + if (kJSArgcIncludesReceiver) { + argc_without_receiver = t2; + __ Subu(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots)); + } + + GenerateInterpreterPushArgs(masm, argc_without_receiver, t4, t1, t0); // Push a slot for the receiver. __ push(zero_reg);
@@ -1718,13 +1763,14 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, // Overwrite the hole inserted by the deoptimizer with the return value from // the LAZY deopt point. t0 contains the arguments count, the return value // from LAZY is always the last argument. - __ Addu(a0, a0, - Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); + constexpr int return_value_offset = + BuiltinContinuationFrameConstants::kFixedSlotCount - + kJSArgcReceiverSlots; + __ Addu(a0, a0, Operand(return_value_offset)); __ Lsa(t0, sp, a0, kSystemPointerSizeLog2); __ Sw(scratch, MemOperand(t0)); // Recover arguments count. - __ Subu(a0, a0, - Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); + __ Subu(a0, a0, Operand(return_value_offset)); } __ lw(fp, MemOperand(
@@ -1841,13 +1887,15 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ mov(a3, a2); // Lsa() cannot be used here as the scratch value is used later. __ lw(a1, MemOperand(sp)); // receiver - __ Branch(&no_arg, eq, a0, Operand(zero_reg)); + __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(0))); __ lw(a3, MemOperand(sp, kSystemPointerSize)); // thisArg - __ Branch(&no_arg, eq, a0, Operand(1)); + __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(1))); __ lw(a2, MemOperand(sp, 2 * kSystemPointerSize)); // argArray __ bind(&no_arg); - __ Lsa(sp, sp, a0, kPointerSizeLog2); - __ sw(a3, MemOperand(sp)); + __ DropArgumentsAndPushNewReceiver( + a0, a3, TurboAssembler::kCountIsInteger, + kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e -------------
@@ -1873,7 +1921,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // arguments to the receiver.
__ bind(&no_arguments); { - __ mov(a0, zero_reg); + __ li(a0, JSParameterCount(0)); __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); } } @@ -1887,7 +1935,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { // a0: actual number of arguments { Label done; - __ Branch(&done, ne, a0, Operand(zero_reg)); + __ Branch(&done, ne, a0, Operand(JSParameterCount(0))); __ PushRoot(RootIndex::kUndefinedValue); __ Addu(a0, a0, Operand(1)); __ bind(&done); @@ -1917,15 +1965,17 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ LoadRoot(a1, RootIndex::kUndefinedValue); __ mov(a2, a1); __ mov(a3, a1); - __ Branch(&no_arg, eq, a0, Operand(zero_reg)); + __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(0))); __ lw(a1, MemOperand(sp, kSystemPointerSize)); // target - __ Branch(&no_arg, eq, a0, Operand(1)); + __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(1))); __ lw(a3, MemOperand(sp, 2 * kSystemPointerSize)); // thisArgument - __ Branch(&no_arg, eq, a0, Operand(2)); + __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(2))); __ lw(a2, MemOperand(sp, 3 * kSystemPointerSize)); // argumentsList __ bind(&no_arg); - __ Lsa(sp, sp, a0, kPointerSizeLog2); - __ sw(a3, MemOperand(sp)); + __ DropArgumentsAndPushNewReceiver( + a0, a3, TurboAssembler::kCountIsInteger, + kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1961,16 +2011,18 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ LoadRoot(a1, RootIndex::kUndefinedValue); __ mov(a2, a1); __ mov(t0, a1); - __ Branch(&no_arg, eq, a0, Operand(zero_reg)); + __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(0))); __ lw(a1, MemOperand(sp, kSystemPointerSize)); // target __ mov(a3, a1); // new.target defaults to target - __ Branch(&no_arg, eq, a0, Operand(1)); + __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(1))); __ lw(a2, MemOperand(sp, 2 * kSystemPointerSize)); // argumentsList - __ Branch(&no_arg, eq, a0, Operand(2)); + __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(2))); __ lw(a3, MemOperand(sp, 3 * kSystemPointerSize)); // new.target __ bind(&no_arg); - __ Lsa(sp, sp, a0, kPointerSizeLog2); - __ sw(t0, MemOperand(sp)); // set undefined to the receiver + __ DropArgumentsAndPushNewReceiver( + a0, t0, TurboAssembler::kCountIsInteger, + kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1993,12 +2045,59 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { RelocInfo::CODE_TARGET); } +namespace { + +// Allocate new stack space for |count| arguments and shift all existing +// arguments already on the stack. |pointer_to_new_space_out| points to the +// first free slot on the stack to copy additional arguments to and +// |argc_in_out| is updated to include |count|. 
+void Generate_AllocateSpaceAndShiftExistingArguments( + MacroAssembler* masm, Register count, Register argc_in_out, + Register pointer_to_new_space_out, Register scratch1, Register scratch2, + Register scratch3) { + DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1, + scratch2)); + Register old_sp = scratch1; + Register new_space = scratch2; + __ mov(old_sp, sp); + __ sll(new_space, count, kPointerSizeLog2); + __ Subu(sp, sp, Operand(new_space)); + + Register end = scratch2; + Register value = scratch3; + Register dest = pointer_to_new_space_out; + __ mov(dest, sp); + __ Lsa(end, old_sp, argc_in_out, kSystemPointerSizeLog2); + Label loop, done; + if (kJSArgcIncludesReceiver) { + __ Branch(&done, ge, old_sp, Operand(end)); + } else { + __ Branch(&done, gt, old_sp, Operand(end)); + } + __ bind(&loop); + __ lw(value, MemOperand(old_sp, 0)); + __ sw(value, MemOperand(dest, 0)); + __ Addu(old_sp, old_sp, Operand(kSystemPointerSize)); + __ Addu(dest, dest, Operand(kSystemPointerSize)); + if (kJSArgcIncludesReceiver) { + __ Branch(&loop, lt, old_sp, Operand(end)); + } else { + __ Branch(&loop, le, old_sp, Operand(end)); + } + __ bind(&done); + + // Update total number of arguments. + __ Addu(argc_in_out, argc_in_out, count); +} + +} // namespace + // static void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Handle<Code> code) { // ----------- S t a t e ------------- // -- a1 : target - // -- a0 : number of parameters on the stack (not including the receiver) + // -- a0 : number of parameters on the stack // -- a2 : arguments list (a FixedArray) // -- t0 : len (number of elements to push from args) // -- a3 : new.target (for [[Construct]])
@@ -2024,24 +2123,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Move the arguments already in the stack, // including the receiver and the return address. - { - Label copy; - Register src = t3, dest = t4; - __ mov(src, sp); - __ sll(t1, t0, kSystemPointerSizeLog2); - __ Subu(sp, sp, Operand(t1)); - // Update stack pointer. - __ mov(dest, sp); - __ Addu(t1, a0, Operand(zero_reg)); - - __ bind(&copy); - __ Lw(t2, MemOperand(src, 0)); - __ Sw(t2, MemOperand(dest, 0)); - __ Subu(t1, t1, Operand(1)); - __ Addu(src, src, Operand(kSystemPointerSize)); - __ Addu(dest, dest, Operand(kSystemPointerSize)); - __ Branch(&copy, ge, t1, Operand(zero_reg)); - } + // t0: Number of arguments to make room for. + // a0: Number of arguments already on the stack. + // t4: Points to first free slot on the stack after arguments were shifted. + Generate_AllocateSpaceAndShiftExistingArguments(masm, t0, a0, t4, t3, t1, t2); // Push arguments onto the stack (thisArgument is already on the stack). {
@@ -2060,7 +2145,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ Addu(t4, t4, Operand(kSystemPointerSize)); __ Branch(&loop); __ bind(&done); - __ Addu(a0, a0, t2); } // Tail-call to the actual Call or Construct builtin.
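The kJSArgcIncludesReceiver branches that recur through the loong64 and mips ports above all perform the same normalization between the two argc conventions. A small sketch of both directions of the conversion (the constant names and JSParameterCount mirror V8's definitions; ArgumentCountWithoutReceiver and the flag value are illustrative assumptions):

    // Build-time convention: does the argc register already count the receiver?
    constexpr bool kJSArgcIncludesReceiver = true;  // assumed value
    constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

    // What to load into argc for a call with |params| real JS arguments,
    // as in the JSParameterCount(0) / JSParameterCount(1) uses above.
    constexpr int JSParameterCount(int params) {
      return params + kJSArgcReceiverSlots;
    }

    // How many non-receiver arguments a push/copy loop must process, given
    // the raw argc register value (the Subu/Sub_d by kJSArgcReceiverSlots).
    constexpr int ArgumentCountWithoutReceiver(int argc) {
      return argc - kJSArgcReceiverSlots;
    }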
@@ -2075,7 +2159,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, CallOrConstructMode mode, Handle<Code> code) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a3 : the new.target (for [[Construct]] calls) // -- a1 : the target to call (can be any Object) // -- a2 : start index (to support rest parameters)
@@ -2101,6 +2185,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, Label stack_done, stack_overflow; __ Lw(t2, MemOperand(fp, StandardFrameConstants::kArgCOffset)); + if (kJSArgcIncludesReceiver) { + __ Subu(t2, t2, Operand(kJSArgcReceiverSlots)); + } __ Subu(t2, t2, a2); __ Branch(&stack_done, le, t2, Operand(zero_reg)); {
@@ -2116,31 +2203,17 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, // Move the arguments already in the stack, // including the receiver and the return address. - { - Label copy; - Register src = t5, dest = a2; - __ mov(src, sp); - // Update stack pointer. - __ sll(t6, t2, kSystemPointerSizeLog2); - __ Subu(sp, sp, Operand(t6)); - __ mov(dest, sp); - __ Addu(t7, a0, Operand(zero_reg)); - - __ bind(&copy); - __ Lw(t6, MemOperand(src, 0)); - __ Sw(t6, MemOperand(dest, 0)); - __ Subu(t7, t7, Operand(1)); - __ Addu(src, src, Operand(kSystemPointerSize)); - __ Addu(dest, dest, Operand(kSystemPointerSize)); - __ Branch(&copy, ge, t7, Operand(zero_reg)); - } + // t2: Number of arguments to make room for. + // a0: Number of arguments already on the stack. + // a2: Points to first free slot on the stack after arguments were shifted. + Generate_AllocateSpaceAndShiftExistingArguments(masm, t2, a0, a2, t5, t6, + t7); // Copy arguments from the caller frame. // TODO(victorgomes): Consider using forward order as potentially more cache // friendly. { Label loop; - __ Addu(a0, a0, t2); __ bind(&loop); { __ Subu(t2, t2, Operand(1));
@@ -2165,13 +2238,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, void Builtins::Generate_CallFunction(MacroAssembler* masm, ConvertReceiverMode mode) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSFunction) // ----------------------------------- __ AssertFunction(a1); - // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) - // Check that the function is not a "classConstructor". Label class_constructor; __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
@@ -2192,7 +2263,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg)); { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSFunction) // -- a2 : the shared function info. // -- cp : the function context.
@@ -2244,7 +2315,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ bind(&done_convert); // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSFunction) // -- a2 : the shared function info. // -- cp : the function context.
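DropArguments and DropArgumentsAndPushNewReceiver, used throughout this file and the loong64 port, reduce each call site to two questions: how is the count encoded, and does it already include the receiver slot? A hedged sketch of the slot arithmetic (the enumerator names mirror the TurboAssembler ones; the function itself and the smi-shift constant are illustrative assumptions for a 64-bit build):

    #include <cstdint>

    enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
    enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };

    constexpr int kSystemPointerSize = 8;  // assumes a 64-bit target
    constexpr int kSmiShift = 32;          // assumes 31-bit smis in the upper word

    // Bytes to pop from sp so that the receiver slot is dropped exactly once.
    int64_t BytesToDrop(int64_t count, ArgumentsCountType type,
                        ArgumentsCountMode mode) {
      int64_t bytes = 0;
      switch (type) {
        case kCountIsInteger:
          bytes = count * kSystemPointerSize;
          break;
        case kCountIsSmi:
          bytes = (count >> kSmiShift) * kSystemPointerSize;  // untag, scale
          break;
        case kCountIsBytes:
          bytes = count;  // caller already scaled the count
          break;
      }
      if (mode == kCountExcludesReceiver) bytes += kSystemPointerSize;
      return bytes;
    }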
@@ -2266,7 +2337,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // static void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSBoundFunction) // ----------------------------------- __ AssertBoundFunction(a1); @@ -2283,7 +2354,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { __ SmiUntag(t0); // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSBoundFunction) // -- a2 : the [[BoundArguments]] (implemented as FixedArray) // -- t0 : the number of [[BoundArguments]] @@ -2337,36 +2408,54 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { // static void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the target to call (can be any Object). // ----------------------------------- - Label non_callable, non_smi; - __ JumpIfSmi(a1, &non_callable); - __ bind(&non_smi); - __ LoadMap(t1, a1); - __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8); + Register argc = a0; + Register target = a1; + Register map = t1; + Register instance_type = t2; + Register scratch = t8; + DCHECK(!AreAliased(argc, target, map, instance_type, scratch)); + + Label non_callable, class_constructor; + __ JumpIfSmi(target, &non_callable); + __ LoadMap(map, target); + __ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE, + scratch); __ Jump(masm->isolate()->builtins()->CallFunction(mode), - RelocInfo::CODE_TARGET, ls, t8, - Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE)); + RelocInfo::CODE_TARGET, ls, scratch, + Operand(LAST_CALLABLE_JS_FUNCTION_TYPE - + FIRST_CALLABLE_JS_FUNCTION_TYPE)); __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction), - RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); + RelocInfo::CODE_TARGET, eq, instance_type, + Operand(JS_BOUND_FUNCTION_TYPE)); // Check if target has a [[Call]] internal method. - __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset)); - __ And(t1, t1, Operand(Map::Bits1::IsCallableBit::kMask)); - __ Branch(&non_callable, eq, t1, Operand(zero_reg)); + { + Register flags = t1; + __ lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset)); + map = no_reg; + __ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask)); + __ Branch(&non_callable, eq, flags, Operand(zero_reg)); + } // Check if target is a proxy and call CallProxy external builtin - __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), - RelocInfo::CODE_TARGET, eq, t2, Operand(JS_PROXY_TYPE)); + __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq, + instance_type, Operand(JS_PROXY_TYPE)); + + // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) + // Check that the function is not a "classConstructor". + __ Branch(&class_constructor, eq, instance_type, + Operand(JS_CLASS_CONSTRUCTOR_TYPE)); // 2. Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). // Overwrite the original receiver with the (original) target. 
- __ StoreReceiver(a1, a0, kScratchReg); + __ StoreReceiver(target, argc, kScratchReg); // Let the "call_as_function_delegate" take care of the rest. - __ LoadNativeContextSlot(a1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX); + __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX); __ Jump(masm->isolate()->builtins()->CallFunction( ConvertReceiverMode::kNotNullOrUndefined), RelocInfo::CODE_TARGET); @@ -2375,15 +2464,23 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { __ bind(&non_callable); { FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(a1); + __ Push(target); __ CallRuntime(Runtime::kThrowCalledNonCallable); } + + // 4. The function is a "classConstructor", need to raise an exception. + __ bind(&class_constructor); + { + FrameScope frame(masm, StackFrame::INTERNAL); + __ Push(target); + __ CallRuntime(Runtime::kThrowConstructorNonCallableError); + } } // static void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the constructor to call (checked to be a JSFunction) // -- a3 : the new target (checked to be a constructor) // ----------------------------------- @@ -2413,7 +2510,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // static void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSBoundFunction) // -- a3 : the new target (checked to be a constructor) // ----------------------------------- @@ -2426,7 +2523,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { __ SmiUntag(t0); // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSBoundFunction) // -- a2 : the [[BoundArguments]] (implemented as FixedArray) // -- a3 : the new target (checked to be a constructor) @@ -2488,35 +2585,46 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { // static void Builtins::Generate_Construct(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the constructor to call (can be any Object) // -- a3 : the new target (either the same as the constructor or // the JSFunction on which new was invoked initially) // ----------------------------------- + Register argc = a0; + Register target = a1; + Register map = t1; + Register instance_type = t2; + Register scratch = t8; + DCHECK(!AreAliased(argc, target, map, instance_type, scratch)); + // Check if target is a Smi. Label non_constructor, non_proxy; - __ JumpIfSmi(a1, &non_constructor); + __ JumpIfSmi(target, &non_constructor); // Check if target has a [[Construct]] internal method. 
- __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); - __ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset)); - __ And(t3, t3, Operand(Map::Bits1::IsConstructorBit::kMask)); - __ Branch(&non_constructor, eq, t3, Operand(zero_reg)); + __ lw(map, FieldMemOperand(target, HeapObject::kMapOffset)); + { + Register flags = t3; + __ lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset)); + __ And(flags, flags, Operand(Map::Bits1::IsConstructorBit::kMask)); + __ Branch(&non_constructor, eq, flags, Operand(zero_reg)); + } // Dispatch based on instance type. - __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8); + __ GetInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE, scratch); __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction), - RelocInfo::CODE_TARGET, ls, t8, + RelocInfo::CODE_TARGET, ls, scratch, Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE)); // Only dispatch to bound functions after checking whether they are // constructors. __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction), - RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); + RelocInfo::CODE_TARGET, eq, instance_type, + Operand(JS_BOUND_FUNCTION_TYPE)); // Only dispatch to proxies after checking whether they are constructors. - __ Branch(&non_proxy, ne, t2, Operand(JS_PROXY_TYPE)); + __ Branch(&non_proxy, ne, instance_type, Operand(JS_PROXY_TYPE)); __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy), RelocInfo::CODE_TARGET); @@ -2524,9 +2632,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { __ bind(&non_proxy); { // Overwrite the original receiver with the (original) target. - __ StoreReceiver(a1, a0, kScratchReg); + __ StoreReceiver(target, argc, kScratchReg); // Let the "call_as_constructor_delegate" take care of the rest. - __ LoadNativeContextSlot(a1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX); + __ LoadNativeContextSlot(target, + Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX); __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET); } @@ -2612,6 +2721,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { __ Trap(); } +void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { + // TODO(v8:12191): Implement for this platform. + __ Trap(); +} + void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { // Only needed on x64. __ Trap(); @@ -3006,7 +3120,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- cp : context // -- a1 : api function address - // -- a2 : arguments count (not including the receiver) + // -- a2 : arguments count // -- a3 : call data // -- a0 : holder // -- sp[0] : receiver diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index 3f8824d97d3f71..a357877acf7060 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -71,6 +71,34 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm, namespace { +enum class ArgumentsElementType { + kRaw, // Push arguments as they are. + kHandle // Dereference arguments before pushing. 
+}; + +void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc, + Register scratch, Register scratch2, + ArgumentsElementType element_type) { + DCHECK(!AreAliased(array, argc, scratch)); + Label loop, entry; + if (kJSArgcIncludesReceiver) { + __ Dsubu(scratch, argc, Operand(kJSArgcReceiverSlots)); + } else { + __ mov(scratch, argc); + } + __ Branch(&entry); + __ bind(&loop); + __ Dlsa(scratch2, array, scratch, kSystemPointerSizeLog2); + __ Ld(scratch2, MemOperand(scratch2)); + if (element_type == ArgumentsElementType::kHandle) { + __ Ld(scratch2, MemOperand(scratch2)); + } + __ push(scratch2); + __ bind(&entry); + __ Daddu(scratch, scratch, Operand(-1)); + __ Branch(&loop, greater_equal, scratch, Operand(zero_reg)); +} + void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- a0 : number of arguments @@ -90,12 +118,14 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { __ Push(cp, a0); __ SmiUntag(a0); - // Set up pointer to last argument (skip receiver). + // Set up pointer to first argument (skip receiver). __ Daddu( t2, fp, Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize)); // Copy arguments and receiver to the expression stack. - __ PushArray(t2, a0, t3, t0); + // t2: Pointer to start of arguments. + // a0: Number of arguments. + Generate_PushArguments(masm, t2, a0, t3, t0, ArgumentsElementType::kRaw); // The receiver for the builtin/api call. __ PushRoot(RootIndex::kTheHoleValue); @@ -113,9 +143,11 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - __ SmiScale(t3, t3, kPointerSizeLog2); - __ Daddu(sp, sp, t3); - __ Daddu(sp, sp, kPointerSize); + __ DropArguments(t3, TurboAssembler::kCountIsSmi, + kJSArgcIncludesReceiver + ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver, + t3); __ Ret(); } @@ -198,7 +230,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { kSystemPointerSize)); // ----------- S t a t e ------------- - // -- r3: new target + // -- a3: new target // -- sp[0*kPointerSize]: implicit receiver // -- sp[1*kPointerSize]: implicit receiver // -- sp[2*kPointerSize]: padding @@ -221,7 +253,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // InvokeFunction. // Copy arguments and receiver to the expression stack. - __ PushArray(t2, a0, t0, t1); + // t2: Pointer to start of argument. + // a0: Number of arguments. + Generate_PushArguments(masm, t2, a0, t0, t1, ArgumentsElementType::kRaw); // We need two copies because we may have to return the original one // and the calling conventions dictate that the called function pops the // receiver. The second copy is pushed after the arguments, @@ -267,9 +301,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ SmiScale(a4, a1, kPointerSizeLog2); - __ Daddu(sp, sp, a4); - __ Daddu(sp, sp, kPointerSize); + __ DropArguments(a1, TurboAssembler::kCountIsSmi, + kJSArgcIncludesReceiver + ? 
TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver, + a4); __ Ret(); __ bind(&check_receiver); @@ -390,6 +426,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); __ Lhu(a3, FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); + if (kJSArgcIncludesReceiver) { + __ Dsubu(a3, a3, Operand(kJSArgcReceiverSlots)); + } __ Ld(t1, FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset)); { @@ -725,24 +764,17 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ Push(a2); // Check if we have enough stack space to push all arguments. - __ daddiu(a6, a4, 1); + if (kJSArgcIncludesReceiver) { + __ mov(a6, a4); + } else { + __ daddiu(a6, a4, 1); + } Generate_CheckStackOverflow(masm, a6, a0, s2); - // Copy arguments to the stack in a loop. + // Copy arguments to the stack. // a4: argc // a5: argv, i.e. points to first arg - Label loop, entry; - __ Dlsa(s1, a5, a4, kPointerSizeLog2); - __ b(&entry); - __ nop(); // Branch delay slot nop. - // s1 points past last arg. - __ bind(&loop); - __ daddiu(s1, s1, -kPointerSize); - __ Ld(s2, MemOperand(s1)); // Read next parameter. - __ Ld(s2, MemOperand(s2)); // Dereference handle. - __ push(s2); // Push parameter. - __ bind(&entry); - __ Branch(&loop, ne, a5, Operand(s1)); + Generate_PushArguments(masm, a5, a4, s1, s2, ArgumentsElementType::kHandle); // Push the receive. __ Push(a3); @@ -820,7 +852,10 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ Ld(actual_params_size, MemOperand(fp, StandardFrameConstants::kArgCOffset)); __ dsll(actual_params_size, actual_params_size, kPointerSizeLog2); - __ Daddu(actual_params_size, actual_params_size, Operand(kSystemPointerSize)); + if (!kJSArgcIncludesReceiver) { + __ Daddu(actual_params_size, actual_params_size, + Operand(kSystemPointerSize)); + } // If actual is bigger than formal, then we should use it to free up the stack // arguments. @@ -831,7 +866,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ LeaveFrame(StackFrame::INTERPRETED); // Drop receiver + arguments. - __ Daddu(sp, sp, params_size); + __ DropArguments(params_size, TurboAssembler::kCountIsBytes, + TurboAssembler::kCountIncludesReceiver); } // Tail-call |function_id| if |actual_marker| == |expected_marker| @@ -1196,7 +1232,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // stack left to right. // // The live registers are: -// o a0 : actual argument count (not including the receiver) +// o a0 : actual argument count // o a1: the JS function object being called. // o a3: the incoming new target or generator object // o cp: our context @@ -1457,7 +1493,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( InterpreterPushArgsMode mode) { DCHECK(mode != InterpreterPushArgsMode::kArrayFunction); // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a2 : the address of the first argument to be pushed. Subsequent // arguments should be consecutive above this, in the same order as // they are to be pushed onto the stack. @@ -1469,15 +1505,18 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( __ Dsubu(a0, a0, Operand(1)); } - __ Daddu(a3, a0, Operand(1)); // Add one for receiver. 
- - __ StackOverflowCheck(a3, a4, t0, &stack_overflow); - - if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - // Don't copy receiver. + const bool skip_receiver = + receiver_mode == ConvertReceiverMode::kNullOrUndefined; + if (kJSArgcIncludesReceiver && skip_receiver) { + __ Dsubu(a3, a0, Operand(kJSArgcReceiverSlots)); + } else if (!kJSArgcIncludesReceiver && !skip_receiver) { + __ Daddu(a3, a0, Operand(1)); + } else { __ mov(a3, a0); } + __ StackOverflowCheck(a3, a4, t0, &stack_overflow); + // This function modifies a2, t0 and a4. GenerateInterpreterPushArgs(masm, a3, a2, a4, t0); @@ -1513,23 +1552,27 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( void Builtins::Generate_InterpreterPushArgsThenConstructImpl( MacroAssembler* masm, InterpreterPushArgsMode mode) { // ----------- S t a t e ------------- - // -- a0 : argument count (not including receiver) + // -- a0 : argument count // -- a3 : new target // -- a1 : constructor to call // -- a2 : allocation site feedback if available, undefined otherwise. // -- a4 : address of the first argument // ----------------------------------- Label stack_overflow; - __ daddiu(a6, a0, 1); - __ StackOverflowCheck(a6, a5, t0, &stack_overflow); + __ StackOverflowCheck(a0, a5, t0, &stack_overflow); if (mode == InterpreterPushArgsMode::kWithFinalSpread) { // The spread argument should not be pushed. __ Dsubu(a0, a0, Operand(1)); } + Register argc_without_receiver = a0; + if (kJSArgcIncludesReceiver) { + argc_without_receiver = a6; + __ Dsubu(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots)); + } // Push the arguments, This function modifies t0, a4 and a5. - GenerateInterpreterPushArgs(masm, a0, a4, a5, t0); + GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5, t0); // Push a slot for the receiver. __ push(zero_reg); @@ -1727,13 +1770,14 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, // Overwrite the hole inserted by the deoptimizer with the return value from // the LAZY deopt point. t0 contains the arguments count, the return value // from LAZY is always the last argument. - __ Daddu(a0, a0, - Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); + constexpr int return_value_offset = + BuiltinContinuationFrameConstants::kFixedSlotCount - + kJSArgcReceiverSlots; + __ Daddu(a0, a0, Operand(return_value_offset)); __ Dlsa(t0, sp, a0, kSystemPointerSizeLog2); __ Sd(scratch, MemOperand(t0)); // Recover arguments count. - __ Dsubu(a0, a0, - Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); + __ Dsubu(a0, a0, Operand(return_value_offset)); } __ Ld(fp, MemOperand( @@ -1852,10 +1896,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // arguments from the stack (including the receiver), and push thisArg (if // present) instead. { - // Claim (2 - argc) dummy arguments form the stack, to put the stack in a - // consistent state for a simple pop operation. 
- - __ mov(scratch, argc); + __ Dsubu(scratch, argc, JSParameterCount(0)); __ Ld(this_arg, MemOperand(sp, kPointerSize)); __ Ld(arg_array, MemOperand(sp, 2 * kPointerSize)); __ Movz(arg_array, undefined_value, scratch); // if argc == 0 @@ -1863,8 +1904,10 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ Dsubu(scratch, scratch, Operand(1)); __ Movz(arg_array, undefined_value, scratch); // if argc == 1 __ Ld(receiver, MemOperand(sp)); - __ Dlsa(sp, sp, argc, kSystemPointerSizeLog2); - __ Sd(this_arg, MemOperand(sp)); + __ DropArgumentsAndPushNewReceiver( + argc, this_arg, TurboAssembler::kCountIsInteger, + kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -1891,7 +1934,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // arguments to the receiver. __ bind(&no_arguments); { - __ mov(a0, zero_reg); + __ li(a0, JSParameterCount(0)); DCHECK(receiver == a1); __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); } @@ -1908,7 +1951,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { // a0: actual number of arguments { Label done; - __ Branch(&done, ne, a0, Operand(zero_reg)); + __ Branch(&done, ne, a0, Operand(JSParameterCount(0))); __ PushRoot(RootIndex::kUndefinedValue); __ Daddu(a0, a0, Operand(1)); __ bind(&done); @@ -1946,7 +1989,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { // Claim (3 - argc) dummy arguments form the stack, to put the stack in a // consistent state for a simple pop operation. - __ mov(scratch, argc); + __ Dsubu(scratch, argc, Operand(JSParameterCount(0))); __ Ld(target, MemOperand(sp, kPointerSize)); __ Ld(this_argument, MemOperand(sp, 2 * kPointerSize)); __ Ld(arguments_list, MemOperand(sp, 3 * kPointerSize)); @@ -1959,8 +2002,10 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ Dsubu(scratch, scratch, Operand(1)); __ Movz(arguments_list, undefined_value, scratch); // if argc == 2 - __ Dlsa(sp, sp, argc, kSystemPointerSizeLog2); - __ Sd(this_argument, MemOperand(sp, 0)); // Overwrite receiver + __ DropArgumentsAndPushNewReceiver( + argc, this_argument, TurboAssembler::kCountIsInteger, + kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -2005,7 +2050,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { // Claim (3 - argc) dummy arguments form the stack, to put the stack in a // consistent state for a simple pop operation. - __ mov(scratch, argc); + __ Dsubu(scratch, argc, Operand(JSParameterCount(0))); __ Ld(target, MemOperand(sp, kPointerSize)); __ Ld(arguments_list, MemOperand(sp, 2 * kPointerSize)); __ Ld(new_target, MemOperand(sp, 3 * kPointerSize)); @@ -2018,8 +2063,10 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ Dsubu(scratch, scratch, Operand(1)); __ Movz(new_target, target, scratch); // if argc == 2 - __ Dlsa(sp, sp, argc, kSystemPointerSizeLog2); - __ Sd(undefined_value, MemOperand(sp, 0)); // Overwrite receiver + __ DropArgumentsAndPushNewReceiver( + argc, undefined_value, TurboAssembler::kCountIsInteger, + kJSArgcIncludesReceiver ? 
TurboAssembler::kCountIncludesReceiver + : TurboAssembler::kCountExcludesReceiver); } // ----------- S t a t e ------------- @@ -2042,12 +2089,59 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { RelocInfo::CODE_TARGET); } +namespace { + +// Allocate new stack space for |count| arguments and shift all existing +// arguments already on the stack. |pointer_to_new_space_out| points to the +// first free slot on the stack to copy additional arguments to and +// |argc_in_out| is updated to include |count|. +void Generate_AllocateSpaceAndShiftExistingArguments( + MacroAssembler* masm, Register count, Register argc_in_out, + Register pointer_to_new_space_out, Register scratch1, Register scratch2, + Register scratch3) { + DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1, + scratch2)); + Register old_sp = scratch1; + Register new_space = scratch2; + __ mov(old_sp, sp); + __ dsll(new_space, count, kPointerSizeLog2); + __ Dsubu(sp, sp, Operand(new_space)); + + Register end = scratch2; + Register value = scratch3; + Register dest = pointer_to_new_space_out; + __ mov(dest, sp); + __ Dlsa(end, old_sp, argc_in_out, kSystemPointerSizeLog2); + Label loop, done; + if (kJSArgcIncludesReceiver) { + __ Branch(&done, ge, old_sp, Operand(end)); + } else { + __ Branch(&done, gt, old_sp, Operand(end)); + } + __ bind(&loop); + __ Ld(value, MemOperand(old_sp, 0)); + __ Sd(value, MemOperand(dest, 0)); + __ Daddu(old_sp, old_sp, Operand(kSystemPointerSize)); + __ Daddu(dest, dest, Operand(kSystemPointerSize)); + if (kJSArgcIncludesReceiver) { + __ Branch(&loop, lt, old_sp, Operand(end)); + } else { + __ Branch(&loop, le, old_sp, Operand(end)); + } + __ bind(&done); + + // Update total number of arguments. + __ Daddu(argc_in_out, argc_in_out, count); +} + +} // namespace + // static void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Handle<Code> code) { // ----------- S t a t e ------------- // -- a1 : target - // -- a0 : number of parameters on the stack (not including the receiver) + // -- a0 : number of parameters on the stack // -- a2 : arguments list (a FixedArray) // -- a4 : len (number of elements to push from args) // -- a3 : new.target (for [[Construct]]) @@ -2076,24 +2170,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Move the arguments already in the stack, // including the receiver and the return address. - { - Label copy; - Register src = a6, dest = a7; - __ mov(src, sp); - __ dsll(t0, a4, kSystemPointerSizeLog2); - __ Dsubu(sp, sp, Operand(t0)); - // Update stack pointer. - __ mov(dest, sp); - __ Daddu(t0, a0, Operand(zero_reg)); - - __ bind(&copy); - __ Ld(t1, MemOperand(src, 0)); - __ Sd(t1, MemOperand(dest, 0)); - __ Dsubu(t0, t0, Operand(1)); - __ Daddu(src, src, Operand(kSystemPointerSize)); - __ Daddu(dest, dest, Operand(kSystemPointerSize)); - __ Branch(&copy, ge, t0, Operand(zero_reg)); - } + // a4: Number of arguments to make room for. + // a0: Number of arguments already on the stack. + // a7: Points to first free slot on the stack after arguments were shifted. + Generate_AllocateSpaceAndShiftExistingArguments(masm, a4, a0, a7, a6, t0, t1); // Push arguments onto the stack (thisArgument is already on the stack). { @@ -2103,7 +2183,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ daddiu(src, args, FixedArray::kHeaderSize - kHeapObjectTag); __ Branch(&done, eq, len, Operand(zero_reg), i::USE_DELAY_SLOT); - __ Daddu(a0, a0, len); // The 'len' argument for Call() or Construct().
__ dsll(scratch, len, kPointerSizeLog2); __ Dsubu(scratch, sp, Operand(scratch)); __ LoadRoot(t1, RootIndex::kTheHoleValue); @@ -2132,7 +2211,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, CallOrConstructMode mode, Handle<Code> code) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a3 : the new.target (for [[Construct]] calls) // -- a1 : the target to call (can be any Object) // -- a2 : start index (to support rest parameters) @@ -2158,7 +2237,10 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, Label stack_done, stack_overflow; __ Ld(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset)); - __ Subu(a7, a7, a2); + if (kJSArgcIncludesReceiver) { + __ Dsubu(a7, a7, Operand(kJSArgcReceiverSlots)); + } + __ Dsubu(a7, a7, a2); __ Branch(&stack_done, le, a7, Operand(zero_reg)); { // Check for stack overflow. @@ -2174,31 +2256,17 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, // Move the arguments already in the stack, // including the receiver and the return address. - { - Label copy; - Register src = t0, dest = a2; - __ mov(src, sp); - // Update stack pointer. - __ dsll(t1, a7, kSystemPointerSizeLog2); - __ Dsubu(sp, sp, Operand(t1)); - __ mov(dest, sp); - __ Daddu(t2, a0, Operand(zero_reg)); - - __ bind(&copy); - __ Ld(t1, MemOperand(src, 0)); - __ Sd(t1, MemOperand(dest, 0)); - __ Dsubu(t2, t2, Operand(1)); - __ Daddu(src, src, Operand(kSystemPointerSize)); - __ Daddu(dest, dest, Operand(kSystemPointerSize)); - __ Branch(&copy, ge, t2, Operand(zero_reg)); - } + // a7: Number of arguments to make room for. + // a0: Number of arguments already on the stack. + // a2: Points to first free slot on the stack after arguments were shifted. + Generate_AllocateSpaceAndShiftExistingArguments(masm, a7, a0, a2, t0, t1, + t2); // Copy arguments from the caller frame. // TODO(victorgomes): Consider using forward order as potentially more cache // friendly. { Label loop; - __ Daddu(a0, a0, a7); __ bind(&loop); { __ Subu(a7, a7, Operand(1)); @@ -2223,13 +2291,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, void Builtins::Generate_CallFunction(MacroAssembler* masm, ConvertReceiverMode mode) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSFunction) // ----------------------------------- __ AssertFunction(a1); - // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) - // Check that function is not a "classConstructor". Label class_constructor; __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset)); @@ -2250,7 +2316,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg)); { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSFunction) // -- a2 : the shared function info. // -- cp : the function context.
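The Generate_Call rewrite in the surrounding hunks (it appears once per port; the mips64 version follows directly below) swaps anonymous t-registers for named ones and moves the class-constructor rejection out of CallFunction into the dispatcher, keyed off the new CALLABLE_JS_FUNCTION instance-type range. A high-level sketch of the resulting branch order, in plain C++ with invented names and the map/instance-type checks reduced to an enum:

    #include <cstdio>

    // Stand-ins for the builtins and runtime functions involved.
    static void CallFunction() { std::puts("CallFunction"); }
    static void CallBoundFunction() { std::puts("CallBoundFunction"); }
    static void CallProxy() { std::puts("CallProxy"); }
    static void CallAsFunctionDelegate() { std::puts("delegate"); }
    static void ThrowCalledNonCallable() { std::puts("TypeError: not callable"); }
    static void ThrowConstructorNonCallableError() {
      std::puts("TypeError: class constructors must be invoked with 'new'");
    }

    enum class Kind {
      kCallableJSFunction,  // within FIRST/LAST_CALLABLE_JS_FUNCTION_TYPE
      kBoundFunction,
      kProxy,
      kClassConstructor,    // JS_CLASS_CONSTRUCTOR_TYPE: callable but rejected
      kOther
    };

    void CallDispatch(Kind kind, bool is_callable) {
      if (kind == Kind::kCallableJSFunction) return CallFunction();
      if (kind == Kind::kBoundFunction) return CallBoundFunction();
      if (!is_callable) return ThrowCalledNonCallable();
      if (kind == Kind::kProxy) return CallProxy();
      if (kind == Kind::kClassConstructor)
        return ThrowConstructorNonCallableError();
      // Anything else that is callable goes through the delegate.
      CallAsFunctionDelegate();
    }

Because class constructors now sit outside the callable instance-type range, they miss the fast path and reach the dedicated Runtime::kThrowConstructorNonCallableError call added under the class_constructor label.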
@@ -2302,7 +2368,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ bind(&done_convert); // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSFunction) // -- a2 : the shared function info. // -- cp : the function context. @@ -2324,7 +2390,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // static void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSBoundFunction) // ----------------------------------- __ AssertBoundFunction(a1); @@ -2340,7 +2406,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSBoundFunction) // -- a2 : the [[BoundArguments]] (implemented as FixedArray) // -- a4 : the number of [[BoundArguments]] @@ -2395,35 +2461,52 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { // static void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the target to call (can be any Object). // ----------------------------------- - - Label non_callable, non_smi; - __ JumpIfSmi(a1, &non_callable); - __ bind(&non_smi); - __ LoadMap(t1, a1); - __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8); + Register argc = a0; + Register target = a1; + Register map = t1; + Register instance_type = t2; + Register scratch = t8; + DCHECK(!AreAliased(argc, target, map, instance_type, scratch)); + + Label non_callable, class_constructor; + __ JumpIfSmi(target, &non_callable); + __ LoadMap(map, target); + __ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE, + scratch); __ Jump(masm->isolate()->builtins()->CallFunction(mode), - RelocInfo::CODE_TARGET, ls, t8, - Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE)); + RelocInfo::CODE_TARGET, ls, scratch, + Operand(LAST_CALLABLE_JS_FUNCTION_TYPE - + FIRST_CALLABLE_JS_FUNCTION_TYPE)); __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction), - RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); + RelocInfo::CODE_TARGET, eq, instance_type, + Operand(JS_BOUND_FUNCTION_TYPE)); // Check if target has a [[Call]] internal method. - __ Lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset)); - __ And(t1, t1, Operand(Map::Bits1::IsCallableBit::kMask)); - __ Branch(&non_callable, eq, t1, Operand(zero_reg)); + { + Register flags = t1; + __ Lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset)); + map = no_reg; + __ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask)); + __ Branch(&non_callable, eq, flags, Operand(zero_reg)); + } + + __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq, + instance_type, Operand(JS_PROXY_TYPE)); - __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), - RelocInfo::CODE_TARGET, eq, t2, Operand(JS_PROXY_TYPE)); + // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) + // Check that the function is not a "classConstructor". 
+ __ Branch(&class_constructor, eq, instance_type, + Operand(JS_CLASS_CONSTRUCTOR_TYPE)); // 2. Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). // Overwrite the original receiver with the (original) target. - __ StoreReceiver(a1, a0, kScratchReg); + __ StoreReceiver(target, argc, kScratchReg); // Let the "call_as_function_delegate" take care of the rest. - __ LoadNativeContextSlot(a1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX); + __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX); __ Jump(masm->isolate()->builtins()->CallFunction( ConvertReceiverMode::kNotNullOrUndefined), RelocInfo::CODE_TARGET); @@ -2432,14 +2515,22 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { __ bind(&non_callable); { FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(a1); + __ Push(target); __ CallRuntime(Runtime::kThrowCalledNonCallable); } + + // 4. The function is a "classConstructor", need to raise an exception. + __ bind(&class_constructor); + { + FrameScope frame(masm, StackFrame::INTERNAL); + __ Push(target); + __ CallRuntime(Runtime::kThrowConstructorNonCallableError); + } } void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the constructor to call (checked to be a JSFunction) // -- a3 : the new target (checked to be a constructor) // ----------------------------------- @@ -2469,7 +2560,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // static void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSBoundFunction) // -- a3 : the new target (checked to be a constructor) // ----------------------------------- @@ -2481,7 +2572,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset)); // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSBoundFunction) // -- a2 : the [[BoundArguments]] (implemented as FixedArray) // -- a3 : the new target (checked to be a constructor) @@ -2544,35 +2635,46 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { // static void Builtins::Generate_Construct(MacroAssembler* masm) { // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a1 : the constructor to call (can be any Object) // -- a3 : the new target (either the same as the constructor or // the JSFunction on which new was invoked initially) // ----------------------------------- + Register argc = a0; + Register target = a1; + Register map = t1; + Register instance_type = t2; + Register scratch = t8; + DCHECK(!AreAliased(argc, target, map, instance_type, scratch)); + // Check if target is a Smi. Label non_constructor, non_proxy; - __ JumpIfSmi(a1, &non_constructor); + __ JumpIfSmi(target, &non_constructor); // Check if target has a [[Construct]] internal method. 
- __ ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset)); - __ Lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset)); - __ And(t3, t3, Operand(Map::Bits1::IsConstructorBit::kMask)); - __ Branch(&non_constructor, eq, t3, Operand(zero_reg)); + __ ld(map, FieldMemOperand(target, HeapObject::kMapOffset)); + { + Register flags = t3; + __ Lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset)); + __ And(flags, flags, Operand(Map::Bits1::IsConstructorBit::kMask)); + __ Branch(&non_constructor, eq, flags, Operand(zero_reg)); + } // Dispatch based on instance type. - __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8); + __ GetInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE, scratch); __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction), - RelocInfo::CODE_TARGET, ls, t8, + RelocInfo::CODE_TARGET, ls, scratch, Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE)); // Only dispatch to bound functions after checking whether they are // constructors. __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction), - RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE)); + RelocInfo::CODE_TARGET, eq, instance_type, + Operand(JS_BOUND_FUNCTION_TYPE)); // Only dispatch to proxies after checking whether they are constructors. - __ Branch(&non_proxy, ne, t2, Operand(JS_PROXY_TYPE)); + __ Branch(&non_proxy, ne, instance_type, Operand(JS_PROXY_TYPE)); __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy), RelocInfo::CODE_TARGET); @@ -2580,9 +2682,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { __ bind(&non_proxy); { // Overwrite the original receiver with the (original) target. - __ StoreReceiver(a1, a0, kScratchReg); + __ StoreReceiver(target, argc, kScratchReg); // Let the "call_as_constructor_delegate" take care of the rest. - __ LoadNativeContextSlot(a1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX); + __ LoadNativeContextSlot(target, + Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX); __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET); } @@ -2702,6 +2805,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { __ Trap(); } +void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { + // TODO(v8:12191): Implement for this platform. + __ Trap(); +} + void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { // Only needed on x64. __ Trap(); @@ -3096,7 +3204,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- cp : context // -- a1 : api function address - // -- a2 : arguments count (not including the receiver) + // -- a2 : arguments count // -- a3 : call data // -- a0 : holder // -- sp[0] : receiver diff --git a/deps/v8/src/builtins/number.tq b/deps/v8/src/builtins/number.tq index 777dd210d6ea59..4136b9a69335d7 100644 --- a/deps/v8/src/builtins/number.tq +++ b/deps/v8/src/builtins/number.tq @@ -75,6 +75,9 @@ macro NumberToStringSmi(x: int32, radix: int32): String labels Slow { if (!isNegative) { // Fast case where the result is a one character string. 
if (x < radix) { + if (x == 0) { + return ZeroStringConstant(); + } return StringFromSingleCharCode(ToCharCode(n)); } } else { diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index 4ea4332e19ac9e..56dfcfa2627b3c 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -69,6 +69,32 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm, namespace { +enum class ArgumentsElementType { + kRaw, // Push arguments as they are. + kHandle // Dereference arguments before pushing. +}; + +void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc, + Register scratch, + ArgumentsElementType element_type) { + DCHECK(!AreAliased(array, argc, scratch)); + Label loop, done; + __ cmpi(argc, Operand::Zero()); + __ beq(&done); + __ ShiftLeftU64(scratch, argc, Operand(kSystemPointerSizeLog2)); + __ add(scratch, array, scratch); + __ mtctr(argc); + + __ bind(&loop); + __ LoadU64WithUpdate(ip, MemOperand(scratch, -kSystemPointerSize)); + if (element_type == ArgumentsElementType::kHandle) { + __ LoadU64(ip, MemOperand(ip)); + } + __ push(ip); + __ bdnz(&loop); + __ bind(&done); +} + void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r3 : number of arguments @@ -99,12 +125,15 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // correct position (including any undefined), instead of delaying this to // InvokeFunction. - // Set up pointer to last argument (skip receiver). + // Set up pointer to first argument (skip receiver). __ addi( r7, fp, Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize)); // Copy arguments and receiver to the expression stack. - __ PushArray(r7, r3, r8, r0); + // r7: Pointer to start of arguments. + // r3: Number of arguments. + Generate_PushArguments(masm, r7, r3, r8, ArgumentsElementType::kRaw); + // The receiver for the builtin/api call. __ PushRoot(RootIndex::kTheHoleValue); @@ -234,8 +263,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { Label stack_overflow; __ StackOverflowCheck(r3, r8, &stack_overflow); - // Copy arguments and receiver to the expression stack. - __ PushArray(r7, r3, r8, r0); + // Copy arguments to the expression stack. + // r7: Pointer to start of argument. + // r3: Number of arguments. + Generate_PushArguments(masm, r7, r3, r8, ArgumentsElementType::kRaw); // Push implicit receiver. __ Push(r9); @@ -711,25 +742,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ bind(&enough_stack_space); - // Copy arguments to the stack in a loop. + // Copy arguments to the stack. // r4: function // r7: argc // r8: argv, i.e. points to first arg - Label loop, done; - __ cmpi(r7, Operand::Zero()); - __ beq(&done); - - __ ShiftLeftU64(r9, r7, Operand(kSystemPointerSizeLog2)); - __ add(r8, r8, r9); // point to last arg - - __ mtctr(r7); - __ bind(&loop); - __ LoadU64WithUpdate( - r9, MemOperand(r8, -kSystemPointerSize)); // read next parameter - __ LoadU64(r0, MemOperand(r9)); // dereference handle - __ push(r0); // push parameter - __ bdnz(&loop); - __ bind(&done); + Generate_PushArguments(masm, r8, r7, r9, ArgumentsElementType::kHandle); // Push the receiver. __ Push(r6); @@ -1851,6 +1868,40 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { RelocInfo::CODE_TARGET); } +namespace { + +// Allocate new stack space for |count| arguments and shift all existing +// arguments already on the stack. 
|pointer_to_new_space_out| points to the +// first free slot on the stack to copy additional arguments to and +// |argc_in_out| is updated to include |count|. +void Generate_AllocateSpaceAndShiftExistingArguments( + MacroAssembler* masm, Register count, Register argc_in_out, + Register pointer_to_new_space_out, Register scratch1, Register scratch2) { + DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1, + scratch2)); + Register old_sp = scratch1; + Register new_space = scratch2; + __ addi(old_sp, sp, Operand(-kSystemPointerSize)); + __ ShiftLeftU64(new_space, count, Operand(kSystemPointerSizeLog2)); + __ AllocateStackSpace(new_space); + + Register dest = pointer_to_new_space_out; + __ addi(dest, sp, Operand(-kSystemPointerSize)); + __ addi(r0, argc_in_out, Operand(1)); + __ mtctr(r0); + Label loop; + __ bind(&loop); + __ LoadU64WithUpdate(r0, MemOperand(old_sp, kSystemPointerSize)); + __ StoreU64WithUpdate(r0, MemOperand(dest, kSystemPointerSize)); + __ bdnz(&loop); + + // Update total number of arguments, restore dest. + __ add(argc_in_out, argc_in_out, count); + __ addi(dest, dest, Operand(kSystemPointerSize)); +} + +} // namespace + // static // TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, @@ -1891,22 +1942,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Move the arguments already in the stack, // including the receiver and the return address. - { - Label copy; - Register src = r9, dest = r8; - __ addi(src, sp, Operand(-kSystemPointerSize)); - __ ShiftLeftU64(r0, r7, Operand(kSystemPointerSizeLog2)); - __ sub(sp, sp, r0); - // Update stack pointer. - __ addi(dest, sp, Operand(-kSystemPointerSize)); - __ addi(r0, r3, Operand(1)); - __ mtctr(r0); - - __ bind(&copy); - __ LoadU64WithUpdate(r0, MemOperand(src, kSystemPointerSize)); - __ StoreU64WithUpdate(r0, MemOperand(dest, kSystemPointerSize)); - __ bdnz(&copy); - } + // r7: Number of arguments to make room for. + // r3: Number of arguments already on the stack. + // r8: Points to first free slot on the stack after arguments were shifted. + Generate_AllocateSpaceAndShiftExistingArguments(masm, r7, r3, r8, ip, r9); // Push arguments onto the stack (thisArgument is already on the stack). { @@ -1923,10 +1962,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ bne(&skip); __ LoadRoot(scratch, RootIndex::kUndefinedValue); __ bind(&skip); - __ StoreU64WithUpdate(scratch, MemOperand(r8, kSystemPointerSize)); + __ StoreU64(scratch, MemOperand(r8)); + __ addi(r8, r8, Operand(kSystemPointerSize)); __ bdnz(&loop); __ bind(&no_args); - __ add(r3, r3, r7); } // Tail-call to the actual Call or Construct builtin. @@ -1995,29 +2034,17 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, // Move the arguments already in the stack, // including the receiver and the return address. - { - Label copy; - Register src = ip, dest = r5; // r7 and r10 are context and root. - __ addi(src, sp, Operand(-kSystemPointerSize)); - // Update stack pointer. - __ ShiftLeftU64(scratch, r8, Operand(kSystemPointerSizeLog2)); - __ sub(sp, sp, scratch); - __ addi(dest, sp, Operand(-kSystemPointerSize)); - __ addi(r0, r3, Operand(1)); - __ mtctr(r0); - - __ bind(&copy); - __ LoadU64WithUpdate(r0, MemOperand(src, kSystemPointerSize)); - __ StoreU64WithUpdate(r0, MemOperand(dest, kSystemPointerSize)); - __ bdnz(&copy); - } + // r8: Number of arguments to make room for. 
+ // r3: Number of arguments already on the stack. + // r5: Points to first free slot on the stack after arguments were shifted. + Generate_AllocateSpaceAndShiftExistingArguments(masm, r8, r3, r5, scratch, + ip); + // Copy arguments from the caller frame. // TODO(victorgomes): Consider using forward order as potentially more cache // friendly. { Label loop; - __ add(r3, r3, r8); - __ addi(r5, r5, Operand(kSystemPointerSize)); __ bind(&loop); { __ subi(r8, r8, Operand(1)); @@ -2047,8 +2074,12 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // ----------------------------------- __ AssertFunction(r4); + Label class_constructor; __ LoadTaggedPointerField( r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0); + __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset)); + __ TestBitMask(r6, SharedFunctionInfo::IsClassConstructorBit::kMask, r0); + __ bne(&class_constructor, cr0); // Enter the context of the function; ToObject has to run in the function // context, and we also need to take the global proxy from the function @@ -2126,6 +2157,14 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ LoadU16( r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset)); __ InvokeFunctionCode(r4, no_reg, r5, r3, InvokeType::kJump); + + // The function is a "classConstructor", need to raise an exception. + __ bind(&class_constructor); + { + FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL); + __ push(r4); + __ CallRuntime(Runtime::kThrowConstructorNonCallableError); + } } namespace { @@ -2507,6 +2546,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { __ Trap(); } +void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { + // TODO(v8:12191): Implement for this platform. + __ Trap(); +} + void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { // Only needed on x64. __ Trap(); diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc index 3676ae344198fe..51a08c12967366 100644 --- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc +++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc @@ -70,6 +70,34 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm, namespace { +enum class ArgumentsElementType { + kRaw, // Push arguments as they are. + kHandle // Dereference arguments before pushing. +}; + +void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc, + Register scratch, Register scratch2, + ArgumentsElementType element_type) { + DCHECK(!AreAliased(array, argc, scratch)); + Label loop, entry; + if (kJSArgcIncludesReceiver) { + __ Sub64(scratch, argc, Operand(kJSArgcReceiverSlots)); + } else { + __ mv(scratch, argc); + } + __ Branch(&entry); + __ bind(&loop); + __ CalcScaledAddress(scratch2, array, scratch, kSystemPointerSizeLog2); + __ Ld(scratch2, MemOperand(scratch2)); + if (element_type == ArgumentsElementType::kHandle) { + __ Ld(scratch2, MemOperand(scratch2)); + } + __ push(scratch2); + __ bind(&entry); + __ Add64(scratch, scratch, Operand(-1)); + __ Branch(&loop, greater_equal, scratch, Operand(zero_reg)); +} + void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- a0 : number of arguments @@ -89,15 +117,18 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { __ Push(cp, a0); __ SmiUntag(a0); - // Set up pointer to last argument (skip receiver). 
- UseScratchRegisterScope temps(masm); - temps.Include(t0); - Register scratch = temps.Acquire(); + // Set up pointer to first argument (skip receiver). __ Add64( - scratch, fp, + t2, fp, Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize)); - // Copy arguments and receiver to the expression stack. - __ PushArray(scratch, a0); + // t2: Pointer to start of arguments. + // a0: Number of arguments. + { + UseScratchRegisterScope temps(masm); + temps.Include(t0); + Generate_PushArguments(masm, t2, a0, temps.Acquire(), temps.Acquire(), + ArgumentsElementType::kRaw); + } // The receiver for the builtin/api call. __ PushRoot(RootIndex::kTheHoleValue); @@ -115,9 +146,11 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - __ SmiScale(kScratchReg, kScratchReg, kSystemPointerSizeLog2); - __ Add64(sp, sp, kScratchReg); - __ Add64(sp, sp, kSystemPointerSize); + __ DropArguments(kScratchReg, MacroAssembler::kCountIsSmi, + kJSArgcIncludesReceiver + ? MacroAssembler::kCountIncludesReceiver + : MacroAssembler::kCountExcludesReceiver, + kScratchReg); __ Ret(); } @@ -201,10 +234,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // since a0 will store the return value of callRuntime. __ Move(a6, a0); - // Set up pointer to last argument. - Register scratch = temps.Acquire(); + // Set up pointer to first argument (skip receiver).. __ Add64( - scratch, fp, + t2, fp, Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize)); // ----------- S t a t e ------------- @@ -234,7 +266,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // InvokeFunction. // Copy arguments and receiver to the expression stack. - __ PushArray(scratch, a0); + // t2: Pointer to start of argument. + // a0: Number of arguments. + { + UseScratchRegisterScope temps(masm); + Generate_PushArguments(masm, t2, a0, temps.Acquire(), temps.Acquire(), + ArgumentsElementType::kRaw); + } // We need two copies because we may have to return the original one // and the calling conventions dictate that the called function pops the // receiver. The second copy is pushed after the arguments, @@ -283,9 +321,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ SmiScale(a4, a1, kSystemPointerSizeLog2); - __ Add64(sp, sp, a4); - __ Add64(sp, sp, kSystemPointerSize); + __ DropArguments(a1, MacroAssembler::kCountIsSmi, + kJSArgcIncludesReceiver + ? MacroAssembler::kCountIncludesReceiver + : MacroAssembler::kCountExcludesReceiver, + a4); __ Ret(); __ bind(&check_receiver); @@ -411,6 +451,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); __ Lhu(a3, FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); + if (kJSArgcIncludesReceiver) { + __ Sub64(a3, a3, Operand(kJSArgcReceiverSlots)); + } __ LoadTaggedPointerField( t1, FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset)); @@ -754,24 +797,21 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ Push(a2); // Check if we have enough stack space to push all arguments. - __ Add64(a6, a4, 1); + if (kJSArgcIncludesReceiver) { + __ mv(a6, a4); + } else { + __ Add64(a6, a4, 1); + } Generate_CheckStackOverflow(masm, a6, a0, s2); - // Copy arguments to the stack in a loop. + // Copy arguments to the stack. 
// a4: argc // a5: argv, i.e. points to first arg - Label loop, entry; - __ CalcScaledAddress(s1, a5, a4, kSystemPointerSizeLog2); - __ BranchShort(&entry); - // s1 points past last arg. - __ bind(&loop); - __ Add64(s1, s1, -kSystemPointerSize); - __ Ld(s2, MemOperand(s1)); // Read next parameter. - __ Ld(s2, MemOperand(s2)); // Dereference handle. - __ push(s2); // Push parameter. - __ bind(&entry); - __ Branch(&loop, ne, a5, Operand(s1)); - + { + UseScratchRegisterScope temps(masm); + Generate_PushArguments(masm, a5, a4, temps.Acquire(), temps.Acquire(), + ArgumentsElementType::kHandle); + } // Push the receive. __ Push(a3); @@ -855,8 +895,10 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ Ld(actual_params_size, MemOperand(fp, StandardFrameConstants::kArgCOffset)); __ Sll64(actual_params_size, actual_params_size, kSystemPointerSizeLog2); - __ Add64(actual_params_size, actual_params_size, Operand(kSystemPointerSize)); - + if (!kJSArgcIncludesReceiver) { + __ Add64(actual_params_size, actual_params_size, + Operand(kSystemPointerSize)); + } // If actual is bigger than formal, then we should use it to free up the stack // arguments. __ Branch(&L1, le, actual_params_size, Operand(params_size), @@ -868,7 +910,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ LeaveFrame(StackFrame::INTERPRETED); // Drop receiver + arguments. - __ Add64(sp, sp, params_size); + __ DropArguments(params_size, MacroAssembler::kCountIsBytes, + MacroAssembler::kCountIncludesReceiver); } // Tail-call |function_id| if |actual_marker| == |expected_marker| @@ -1241,7 +1284,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // stack left to right. // // The live registers are: -// o a0 : actual argument count (not including the receiver) +// o a0 : actual argument count // o a1: the JS function object being called. // o a3: the incoming new target or generator object // o cp: our context @@ -1538,7 +1581,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( InterpreterPushArgsMode mode) { DCHECK(mode != InterpreterPushArgsMode::kArrayFunction); // ----------- S t a t e ------------- - // -- a0 : the number of arguments (not including the receiver) + // -- a0 : the number of arguments // -- a2 : the address of the first argument to be pushed. Subsequent // arguments should be consecutive above this, in the same order as // they are to be pushed onto the stack. @@ -1550,18 +1593,19 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( __ Sub64(a0, a0, Operand(1)); } - __ Add64(a3, a0, Operand(1)); // Add one for receiver. - - __ StackOverflowCheck(a3, a4, t0, &stack_overflow); - - if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) { - // Don't copy receiver. + const bool skip_receiver = + receiver_mode == ConvertReceiverMode::kNullOrUndefined; + if (kJSArgcIncludesReceiver && skip_receiver) { + __ Sub64(a3, a0, Operand(kJSArgcReceiverSlots)); + } else if (!kJSArgcIncludesReceiver && !skip_receiver) { + __ Add64(a3, a0, Operand(1)); + } else { __ Move(a3, a0); } + __ StackOverflowCheck(a3, a4, t0, &stack_overflow); // This function modifies a2 and a4. 
   // This function modifies a2 and a4.
   GenerateInterpreterPushArgs(masm, a3, a2, a4);
-
   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
     __ PushRoot(RootIndex::kUndefinedValue);
   }
@@ -1594,23 +1638,26 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
     MacroAssembler* masm, InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
-  //  -- a0 : argument count (not including receiver)
+  //  -- a0 : argument count
   //  -- a3 : new target
   //  -- a1 : constructor to call
   //  -- a2 : allocation site feedback if available, undefined otherwise.
   //  -- a4 : address of the first argument
   // -----------------------------------
   Label stack_overflow;
-  __ Add64(a6, a0, 1);
-  __ StackOverflowCheck(a6, a5, t0, &stack_overflow);
+  __ StackOverflowCheck(a0, a5, t0, &stack_overflow);
 
   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
     // The spread argument should not be pushed.
     __ Sub64(a0, a0, Operand(1));
   }
-
+  Register argc_without_receiver = a0;
+  if (kJSArgcIncludesReceiver) {
+    argc_without_receiver = a6;
+    __ Sub64(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
+  }
   // Push the arguments, This function modifies a4 and a5.
-  GenerateInterpreterPushArgs(masm, a0, a4, a5);
+  GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5);
 
   // Push a slot for the receiver.
   __ push(zero_reg);
@@ -1813,13 +1860,14 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
     // Overwrite the hole inserted by the deoptimizer with the return value from
     // the LAZY deopt point. t0 contains the arguments count, the return value
     // from LAZY is always the last argument.
-    __ Add64(a0, a0,
-             Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+    constexpr int return_value_offset =
+        BuiltinContinuationFrameConstants::kFixedSlotCount -
+        kJSArgcReceiverSlots;
+    __ Add64(a0, a0, Operand(return_value_offset));
     __ CalcScaledAddress(t0, sp, a0, kSystemPointerSizeLog2);
     __ Sd(scratch, MemOperand(t0));
     // Recover arguments count.
-    __ Sub64(a0, a0,
-             Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+    __ Sub64(a0, a0, Operand(return_value_offset));
   }
 
   __ Ld(fp, MemOperand(
@@ -1945,18 +1993,23 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
     __ Ld(arg_array, MemOperand(sp, 2 * kSystemPointerSize));
 
     Label done0, done1;
-    __ Branch(&done0, ne, argc, Operand(zero_reg), Label::Distance::kNear);
+    UseScratchRegisterScope temps(masm);
+    Register scratch = temps.Acquire();
+    __ Sub64(scratch, argc, JSParameterCount(0));
+    __ Branch(&done0, ne, scratch, Operand(zero_reg), Label::Distance::kNear);
     __ Move(arg_array, undefined_value);  // if argc == 0
     __ Move(this_arg, undefined_value);   // if argc == 0
     __ bind(&done0);                      // else (i.e., argc > 0)
 
-    __ Branch(&done1, ne, argc, Operand(1), Label::Distance::kNear);
+    __ Branch(&done1, ne, scratch, Operand(1), Label::Distance::kNear);
     __ Move(arg_array, undefined_value);  // if argc == 1
     __ bind(&done1);                      // else (i.e., argc > 1)
 
     __ Ld(receiver, MemOperand(sp));
-    __ CalcScaledAddress(sp, sp, argc, kSystemPointerSizeLog2);
-    __ Sd(this_arg, MemOperand(sp));
+    __ DropArgumentsAndPushNewReceiver(
+        argc, this_arg, MacroAssembler::kCountIsInteger,
+        kJSArgcIncludesReceiver ? MacroAssembler::kCountIncludesReceiver
+                                : MacroAssembler::kCountExcludesReceiver);
   }
 
   // ----------- S t a t e -------------
@@ -1984,7 +2037,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   // arguments to the receiver.
   __ bind(&no_arguments);
   {
-    __ Move(a0, zero_reg);
+    __ li(a0, JSParameterCount(0));
     DCHECK(receiver == a1);
     __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
   }
@@ -1999,7 +2052,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
   // a0: actual number of arguments
   {
     Label done;
-    __ Branch(&done, ne, a0, Operand(zero_reg), Label::Distance::kNear);
+    __ Branch(&done, ne, a0, Operand(JSParameterCount(0)),
+              Label::Distance::kNear);
     __ PushRoot(RootIndex::kUndefinedValue);
     __ Add64(a0, a0, Operand(1));
     __ bind(&done);
@@ -2041,23 +2095,28 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
     __ Ld(arguments_list, MemOperand(sp, 3 * kSystemPointerSize));
 
     Label done0, done1, done2;
-    __ Branch(&done0, ne, argc, Operand(zero_reg), Label::Distance::kNear);
+    UseScratchRegisterScope temps(masm);
+    Register scratch = temps.Acquire();
+    __ Sub64(scratch, argc, Operand(JSParameterCount(0)));
+    __ Branch(&done0, ne, scratch, Operand(zero_reg), Label::Distance::kNear);
     __ Move(arguments_list, undefined_value);  // if argc == 0
     __ Move(this_argument, undefined_value);   // if argc == 0
     __ Move(target, undefined_value);          // if argc == 0
     __ bind(&done0);                           // argc != 0
 
-    __ Branch(&done1, ne, argc, Operand(1), Label::Distance::kNear);
+    __ Branch(&done1, ne, scratch, Operand(1), Label::Distance::kNear);
     __ Move(arguments_list, undefined_value);  // if argc == 1
     __ Move(this_argument, undefined_value);   // if argc == 1
     __ bind(&done1);                           // argc > 1
 
-    __ Branch(&done2, ne, argc, Operand(2), Label::Distance::kNear);
+    __ Branch(&done2, ne, scratch, Operand(2), Label::Distance::kNear);
     __ Move(arguments_list, undefined_value);  // if argc == 2
     __ bind(&done2);                           // argc > 2
 
-    __ CalcScaledAddress(sp, sp, argc, kSystemPointerSizeLog2);
-    __ Sd(this_argument, MemOperand(sp, 0));  // Overwrite receiver
+    __ DropArgumentsAndPushNewReceiver(
+        argc, this_argument, MacroAssembler::kCountIsInteger,
+        kJSArgcIncludesReceiver ? MacroAssembler::kCountIncludesReceiver
+                                : MacroAssembler::kCountExcludesReceiver);
   }
 
   // ----------- S t a t e -------------
@@ -2104,23 +2163,28 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
     __ Ld(new_target, MemOperand(sp, 3 * kSystemPointerSize));
 
     Label done0, done1, done2;
-    __ Branch(&done0, ne, argc, Operand(zero_reg), Label::Distance::kNear);
+    UseScratchRegisterScope temps(masm);
+    Register scratch = temps.Acquire();
+    __ Sub64(scratch, argc, Operand(JSParameterCount(0)));
+    __ Branch(&done0, ne, scratch, Operand(zero_reg), Label::Distance::kNear);
     __ Move(arguments_list, undefined_value);  // if argc == 0
     __ Move(new_target, undefined_value);      // if argc == 0
     __ Move(target, undefined_value);          // if argc == 0
     __ bind(&done0);
 
-    __ Branch(&done1, ne, argc, Operand(1), Label::Distance::kNear);
+    __ Branch(&done1, ne, scratch, Operand(1), Label::Distance::kNear);
     __ Move(arguments_list, undefined_value);  // if argc == 1
     __ Move(new_target, target);               // if argc == 1
     __ bind(&done1);
 
-    __ Branch(&done2, ne, argc, Operand(2), Label::Distance::kNear);
+    __ Branch(&done2, ne, scratch, Operand(2), Label::Distance::kNear);
     __ Move(new_target, target);  // if argc == 2
     __ bind(&done2);
 
-    __ CalcScaledAddress(sp, sp, argc, kSystemPointerSizeLog2);
-    __ Sd(undefined_value, MemOperand(sp, 0));  // Overwrite receiver
+    __ DropArgumentsAndPushNewReceiver(
+        argc, undefined_value, MacroAssembler::kCountIsInteger,
+        kJSArgcIncludesReceiver ? MacroAssembler::kCountIncludesReceiver
+                                : MacroAssembler::kCountExcludesReceiver);
   }
 
   // ----------- S t a t e -------------
@@ -2143,6 +2207,56 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
                           RelocInfo::CODE_TARGET);
 }
 
+namespace {
+
+// Allocate new stack space for |count| arguments and shift all existing
+// arguments already on the stack. |pointer_to_new_space_out| points to the
+// first free slot on the stack to copy additional arguments to and
+// |argc_in_out| is updated to include |count|.
+void Generate_AllocateSpaceAndShiftExistingArguments(
+    MacroAssembler* masm, Register count, Register argc_in_out,
+    Register pointer_to_new_space_out) {
+  UseScratchRegisterScope temps(masm);
+  Register scratch1 = temps.Acquire();
+  Register scratch2 = temps.Acquire();
+  Register scratch3 = temps.Acquire();
+  DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
+                     scratch2));
+  Register old_sp = scratch1;
+  Register new_space = scratch2;
+  __ mv(old_sp, sp);
+  __ slli(new_space, count, kPointerSizeLog2);
+  __ Sub64(sp, sp, Operand(new_space));
+
+  Register end = scratch2;
+  Register value = scratch3;
+  Register dest = pointer_to_new_space_out;
+  __ mv(dest, sp);
+  __ CalcScaledAddress(end, old_sp, argc_in_out, kSystemPointerSizeLog2);
+  Label loop, done;
+  if (kJSArgcIncludesReceiver) {
+    __ Branch(&done, ge, old_sp, Operand(end));
+  } else {
+    __ Branch(&done, gt, old_sp, Operand(end));
+  }
+  __ bind(&loop);
+  __ Ld(value, MemOperand(old_sp, 0));
+  __ Sd(value, MemOperand(dest, 0));
+  __ Add64(old_sp, old_sp, Operand(kSystemPointerSize));
+  __ Add64(dest, dest, Operand(kSystemPointerSize));
+  if (kJSArgcIncludesReceiver) {
+    __ Branch(&loop, lt, old_sp, Operand(end));
+  } else {
+    __ Branch(&loop, le, old_sp, Operand(end));
+  }
+  __ bind(&done);
+
+  // Update total number of arguments.
+  __ Add64(argc_in_out, argc_in_out, count);
+}
+
+}  // namespace
+
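This helper (an s390 twin appears later in the patch) replaces two hand-rolled copy loops. In C terms it is a downward memmove of the argument slots already on the stack, followed by a count update. A minimal model, with a raw pointer standing in for the machine stack (illustrative only, not V8 API):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Toy model of Generate_AllocateSpaceAndShiftExistingArguments (assumption:
// the machine stack grows toward lower addresses; `sp` points at the top,
// where `argc` argument slots, receiver included, live contiguously).
// Returns the analogue of pointer_to_new_space_out.
uintptr_t* AllocateSpaceAndShiftExistingArguments(uintptr_t*& sp,
                                                  size_t& argc, size_t count) {
  uintptr_t* old_sp = sp;
  sp -= count;                                   // Allocate `count` new slots.
  std::memmove(sp, old_sp, argc * sizeof(*sp));  // Shift existing args down.
  argc += count;                                 // Update the total argc.
  return sp + (argc - count);  // First free slot, right after the old args.
}

The kJSArgcIncludesReceiver branches in the generated code only decide whether the receiver slot is already part of argc or has to be copied as one extra slot; the memmove picture is the same either way.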
 // static
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
                                                Handle<Code> code) {
@@ -2150,7 +2264,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
   temps.Include(t1, t0);
   // ----------- S t a t e -------------
   //  -- a1 : target
-  //  -- a0 : number of parameters on the stack (not including the receiver)
+  //  -- a0 : number of parameters on the stack
   //  -- a2 : arguments list (a FixedArray)
   //  -- a4 : len (number of elements to push from args)
   //  -- a3 : new.target (for [[Construct]])
@@ -2181,27 +2295,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 
   // Move the arguments already in the stack,
   // including the receiver and the return address.
-  {
-    Label copy;
-    Register src = a6, dest = a7;
-    UseScratchRegisterScope temps(masm);
-    Register size = temps.Acquire();
-    Register vlaue = temps.Acquire();
-    __ Move(src, sp);
-    __ Sll64(size, len, kSystemPointerSizeLog2);
-    __ Sub64(sp, sp, Operand(size));
-    // Update stack pointer.
-    __ Move(dest, sp);
-    __ Add64(size, a0, Operand(zero_reg));
-
-    __ bind(&copy);
-    __ Ld(vlaue, MemOperand(src, 0));
-    __ Sd(vlaue, MemOperand(dest, 0));
-    __ Sub64(size, size, Operand(1));
-    __ Add64(src, src, Operand(kSystemPointerSize));
-    __ Add64(dest, dest, Operand(kSystemPointerSize));
-    __ Branch(&copy, ge, size, Operand(zero_reg));
-  }
+  // a4: Number of arguments to make room for.
+  // a0: Number of arguments already on the stack.
+  // a7: Points to first free slot on the stack after arguments were shifted.
+  Generate_AllocateSpaceAndShiftExistingArguments(masm, a4, a0, a7);
 
   // Push arguments onto the stack (thisArgument is already on the stack).
   {
@@ -2211,7 +2308,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
     UseScratchRegisterScope temps(masm);
     Register hole_value = temps.Acquire();
     __ Add64(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
-    __ Add64(a0, a0, len);  // The 'len' argument for Call() or Construct().
     __ Branch(&done, eq, len, Operand(zero_reg), Label::Distance::kNear);
     __ Sll64(scratch, len, kTaggedSizeLog2);
     __ Sub64(scratch, sp, Operand(scratch));
@@ -2241,7 +2337,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
                                                       CallOrConstructMode mode,
                                                       Handle<Code> code) {
   // ----------- S t a t e -------------
-  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a0 : the number of arguments
   //  -- a3 : the new.target (for [[Construct]] calls)
   //  -- a1 : the target to call (can be any Object)
   //  -- a2 : start index (to support rest parameters)
@@ -2277,7 +2373,10 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
   __ Ld(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
 
   Label stack_done, stack_overflow;
-  __ Sub32(a7, a7, a2);
+  if (kJSArgcIncludesReceiver) {
+    __ Sub64(a7, a7, Operand(kJSArgcReceiverSlots));
+  }
+  __ Sub64(a7, a7, a2);
   __ Branch(&stack_done, le, a7, Operand(zero_reg));
   {
     // Check for stack overflow.
@@ -2293,33 +2392,16 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
 
     // Move the arguments already in the stack,
     // including the receiver and the return address.
-    {
-      Label copy;
-      UseScratchRegisterScope temps(masm);
-      Register src = temps.Acquire(), dest = a2, scratch = temps.Acquire();
-      Register count = temps.Acquire();
-      __ Move(src, sp);
-      // Update stack pointer.
-      __ Sll64(scratch, a7, kSystemPointerSizeLog2);
-      __ Sub64(sp, sp, Operand(scratch));
-      __ Move(dest, sp);
-      __ Move(count, a0);
-
-      __ bind(&copy);
-      __ Ld(scratch, MemOperand(src, 0));
-      __ Sd(scratch, MemOperand(dest, 0));
-      __ Sub64(count, count, Operand(1));
-      __ Add64(src, src, Operand(kSystemPointerSize));
-      __ Add64(dest, dest, Operand(kSystemPointerSize));
-      __ Branch(&copy, ge, count, Operand(zero_reg));
-    }
+    // a7: Number of arguments to make room for.
+    // a0: Number of arguments already on the stack.
+    // a2: Points to first free slot on the stack after arguments were shifted.
+    Generate_AllocateSpaceAndShiftExistingArguments(masm, a7, a0, a2);
 
     // Copy arguments from the caller frame.
     // TODO(victorgomes): Consider using forward order as potentially more cache
     // friendly.
     {
       Label loop;
-      __ Add64(a0, a0, a7);
       __ bind(&loop);
       {
         UseScratchRegisterScope temps(masm);
@@ -2346,13 +2428,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
 void Builtins::Generate_CallFunction(MacroAssembler* masm,
                                      ConvertReceiverMode mode) {
   // ----------- S t a t e -------------
-  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a0 : the number of arguments
   //  -- a1 : the function to call (checked to be a JSFunction)
   // -----------------------------------
   __ AssertFunction(a1);
 
-  // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
-  // Check that function is not a "classConstructor".
   Label class_constructor;
   __ LoadTaggedPointerField(
       a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -2375,7 +2455,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
   {
     // ----------- S t a t e -------------
-    //  -- a0 : the number of arguments (not including the receiver)
+    //  -- a0 : the number of arguments
     //  -- a1 : the function to call (checked to be a JSFunction)
    //  -- a2 : the shared function info.
     //  -- cp : the function context.
@@ -2429,7 +2509,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   __ bind(&done_convert);
 
   // ----------- S t a t e -------------
-  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a0 : the number of arguments
   //  -- a1 : the function to call (checked to be a JSFunction)
   //  -- a2 : the shared function info.
   //  -- cp : the function context.
@@ -2452,7 +2532,7 @@ namespace {
 
 void Generate_PushBoundArguments(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a0 : the number of arguments
   //  -- a1 : target (checked to be a JSBoundFunction)
   //  -- a3 : new.target (only in case of [[Construct]])
   // -----------------------------------
@@ -2469,7 +2549,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
   __ Branch(&no_bound_arguments, eq, bound_argc, Operand(zero_reg));
   {
     // ----------- S t a t e -------------
-    //  -- a0 : the number of arguments (not including the receiver)
+    //  -- a0 : the number of arguments
     //  -- a1 : target (checked to be a JSBoundFunction)
     //  -- a2 : the [[BoundArguments]] (implemented as FixedArray)
     //  -- a3 : new.target (only in case of [[Construct]])
@@ -2523,7 +2603,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
 // static
 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a0 : the number of arguments
   //  -- a1 : the function to call (checked to be a JSBoundFunction)
   // -----------------------------------
   __ AssertBoundFunction(a1);
@@ -2550,23 +2630,23 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
 // static
 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
   // ----------- S t a t e -------------
-  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a0 : the number of arguments
   //  -- a1 : the target to call (can be any Object).
   // -----------------------------------
-  Label non_callable, non_smi;
+  Label non_callable, class_constructor;
   UseScratchRegisterScope temps(masm);
   temps.Include(t1, t2);
   temps.Include(t4);
   Register map = temps.Acquire(), type = temps.Acquire(),
            range = temps.Acquire();
   __ JumpIfSmi(a1, &non_callable);
-  __ bind(&non_smi);
   __ LoadMap(map, a1);
-  __ GetInstanceTypeRange(map, type, FIRST_JS_FUNCTION_TYPE, range);
+  __ GetInstanceTypeRange(map, type, FIRST_CALLABLE_JS_FUNCTION_TYPE, range);
   __ Jump(masm->isolate()->builtins()->CallFunction(mode),
           RelocInfo::CODE_TARGET, Uless_equal, range,
-          Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+          Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
+                  FIRST_CALLABLE_JS_FUNCTION_TYPE));
   __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
           RelocInfo::CODE_TARGET, eq, type, Operand(JS_BOUND_FUNCTION_TYPE));
 
   Register scratch = map;
@@ -2579,6 +2659,10 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
   __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
           type, Operand(JS_PROXY_TYPE));
 
+  // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+  // Check that the function is not a "classConstructor".
+  __ Branch(&class_constructor, eq, type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
+
   // 2. Call to something else, which might have a [[Call]] internal method (if
   //    not we raise an exception).
   // Overwrite the original receiver with the (original) target.
@@ -2596,11 +2680,19 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
     __ Push(a1);
     __ CallRuntime(Runtime::kThrowCalledNonCallable);
   }
+
+  // 4. The function is a "classConstructor", need to raise an exception.
+  __ bind(&class_constructor);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    __ Push(a1);
+    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+    __ ebreak();
+  }
 }
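The notable change here is where class constructors get rejected: on this port Generate_Call now branches on a dedicated JS_CLASS_CONSTRUCTOR_TYPE instance type, while the s390 and x64 hunks further down instead test SharedFunctionInfo::IsClassConstructorBit at the top of CallFunction. The observable behavior is identical: `class C {}; C();` throws a TypeError instead of running. A hedged C++ rendering of the new dispatch order (illustrative enum values; the real check is emitted assembly over V8's instance-type ranges):

// Sketch (assumption): branch order of Generate_Call after this patch.
enum InstanceType : int {
  FIRST_CALLABLE_JS_FUNCTION_TYPE = 0,
  LAST_CALLABLE_JS_FUNCTION_TYPE = 1,  // class constructors sit outside
  JS_CLASS_CONSTRUCTOR_TYPE = 2,       // ...this range, with their own type
  JS_BOUND_FUNCTION_TYPE = 3,
  JS_PROXY_TYPE = 4,
};

const char* CallDispatch(InstanceType type, bool is_callable) {
  if (type >= FIRST_CALLABLE_JS_FUNCTION_TYPE &&
      type <= LAST_CALLABLE_JS_FUNCTION_TYPE)
    return "CallFunction";                  // ordinary callable functions
  if (type == JS_BOUND_FUNCTION_TYPE) return "CallBoundFunction";
  if (type == JS_PROXY_TYPE) return "CallProxy";
  if (type == JS_CLASS_CONSTRUCTOR_TYPE)    // new in this patch
    return "Runtime::kThrowConstructorNonCallableError";
  if (is_callable) return "CallFunction";   // e.g. callable embedder objects
  return "Runtime::kThrowCalledNonCallable";
}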
 
 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a0 : the number of arguments
   //  -- a1 : the constructor to call (checked to be a JSFunction)
   //  -- a3 : the new target (checked to be a constructor)
   // -----------------------------------
@@ -2632,7 +2724,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
 // static
 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a0 : the number of arguments
   //  -- a1 : the function to call (checked to be a JSBoundFunction)
   //  -- a3 : the new target (checked to be a constructor)
   // -----------------------------------
@@ -2662,7 +2754,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
 // static
 void Builtins::Generate_Construct(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a0 : the number of arguments
   //  -- a1 : the constructor to call (can be any Object)
   //  -- a3 : the new target (either the same as the constructor or
   //          the JSFunction on which new was invoked initially)
@@ -3051,6 +3143,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
   __ Trap();
 }
 
+void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
+  // TODO(v8:12191): Implement for this platform.
+  __ Trap();
+}
+
 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
   // Only needed on x64.
   __ Trap();
@@ -3188,7 +3285,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- cp                  : context
   //  -- a1                  : api function address
-  //  -- a2                  : arguments count (not including the receiver)
+  //  -- a2                  : arguments count
   //  -- a3                  : call data
   //  -- a0                  : holder
   //  --
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 65fffbba7956ae..3b51a086ec0fcf 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -69,6 +69,32 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
 
 namespace {
 
+enum class ArgumentsElementType {
+  kRaw,    // Push arguments as they are.
+  kHandle  // Dereference arguments before pushing.
+};
+
+void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
+                            Register scratch,
+                            ArgumentsElementType element_type) {
+  DCHECK(!AreAliased(array, argc, scratch));
+  Register counter = scratch;
+  Register value = ip;
+  Label loop, entry;
+  __ mov(counter, argc);
+  __ b(&entry);
+  __ bind(&loop);
+  __ ShiftLeftU64(value, counter, Operand(kSystemPointerSizeLog2));
+  __ LoadU64(value, MemOperand(array, value));
+  if (element_type == ArgumentsElementType::kHandle) {
+    __ LoadU64(value, MemOperand(value));
+  }
+  __ push(value);
+  __ bind(&entry);
+  __ SubS64(counter, counter, Operand(1));
+  __ bge(&loop);
+}
+
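Generate_PushArguments walks the array from the last element down to index 0 (the counter starts at argc and is pre-decremented by the SubS64/bge pair), so arguments land on the downward-growing stack in source order, with element 0 ending up on top. For JS entry it pushes through one extra indirection, because argv carries handles. A plain C++ model (illustrative only; kRaw/kHandle mirror the enum in the diff):

#include <cstdint>
#include <vector>

enum class ArgumentsElementType { kRaw, kHandle };

// Model: push_back stands in for pushing toward lower stack addresses.
// array[i] is either the value itself (kRaw) or a pointer to it, i.e. a
// handle (kHandle).
void PushArguments(std::vector<uintptr_t>& stack, const uintptr_t* array,
                   int argc, ArgumentsElementType element_type) {
  for (int counter = argc - 1; counter >= 0; --counter) {
    uintptr_t value = array[counter];
    if (element_type == ArgumentsElementType::kHandle) {
      value = *reinterpret_cast<const uintptr_t*>(value);  // Deref handle.
    }
    stack.push_back(value);
  }
}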
 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2     : number of arguments
@@ -98,11 +124,14 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
     //  correct position (including any undefined), instead of delaying this to
     //  InvokeFunction.
 
-    // Set up pointer to last argument (skip receiver).
+    // Set up pointer to first argument (skip receiver).
     __ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
                                  kSystemPointerSize));
     // Copy arguments and receiver to the expression stack.
-    __ PushArray(r6, r2, r1, r0);
+    // r6: Pointer to start of arguments.
+    // r2: Number of arguments.
+    Generate_PushArguments(masm, r6, r2, r1, ArgumentsElementType::kRaw);
+
     // The receiver for the builtin/api call.
     __ PushRoot(RootIndex::kTheHoleValue);
 
@@ -230,7 +259,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ StackOverflowCheck(r2, r7, &stack_overflow);
 
   // Copy arguments and receiver to the expression stack.
-  __ PushArray(r6, r2, r1, r0);
+  // r6: Pointer to start of arguments.
+  // r2: Number of arguments.
+  Generate_PushArguments(masm, r6, r2, r1, ArgumentsElementType::kRaw);
 
   // Push implicit receiver.
   __ Push(r8);
@@ -642,6 +673,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
   // pop the faked function when we return.
   Handle<Code> trampoline_code =
       masm->isolate()->builtins()->code_handle(entry_trampoline);
+  USE(pushed_stack_space);
   DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
   __ Call(trampoline_code, RelocInfo::CODE_TARGET);
 
@@ -758,7 +790,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
 
   __ bind(&enough_stack_space);
 
-  // Copy arguments to the stack in a loop from argv to sp.
+  // Copy arguments to the stack from argv to sp.
   // The arguments are actually placed in reverse order on sp
   // compared to argv (i.e. arg1 is highest memory in sp).
   // r2: argc
@@ -768,24 +800,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   // r7: scratch reg to hold scaled argc
   // r8: scratch reg to hold arg handle
   // r9: scratch reg to hold index into argv
-  Label argLoop, argExit;
-
-  __ ShiftLeftU64(r9, r2, Operand(kSystemPointerSizeLog2));
-  __ lay(r9, MemOperand(r6, r9, -kSystemPointerSize));  // point to last arg
-
-  __ ltgr(r7, r2);
-
-  __ beq(&argExit, Label::kNear);
-  __ bind(&argLoop);
-
-  __ LoadU64(r8, MemOperand(r9));  // read next parameter
-  __ LoadU64(r0, MemOperand(r8));  // dereference handle
-  __ Push(r0);
-  __ lay(r9, MemOperand(r9, -kSystemPointerSize));  // r9++;
-  __ SubS64(r7, r7, Operand(1));
-  __ bgt(&argLoop);
-
-  __ bind(&argExit);
+  Generate_PushArguments(masm, r6, r2, r1, ArgumentsElementType::kHandle);
 
   // Push the receiver.
   __ Push(r5);
@@ -1885,6 +1900,46 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
                           RelocInfo::CODE_TARGET);
 }
 
+namespace {
+
+// Allocate new stack space for |count| arguments and shift all existing
+// arguments already on the stack. |pointer_to_new_space_out| points to the
+// first free slot on the stack to copy additional arguments to and
+// |argc_in_out| is updated to include |count|.
+void Generate_AllocateSpaceAndShiftExistingArguments(
+    MacroAssembler* masm, Register count, Register argc_in_out,
+    Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
+  DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
+                     scratch2));
+  Register old_sp = scratch1;
+  Register new_space = scratch2;
+  __ mov(old_sp, sp);
+  __ ShiftLeftU64(new_space, count, Operand(kSystemPointerSizeLog2));
+  __ AllocateStackSpace(new_space);
+
+  Register end = scratch2;
+  Register value = r1;
+  Register dest = pointer_to_new_space_out;
+  __ mov(dest, sp);
+  __ ShiftLeftU64(r0, argc_in_out, Operand(kSystemPointerSizeLog2));
+  __ AddS64(end, old_sp, r0);
+  Label loop, done;
+  __ bind(&loop);
+  __ CmpS64(old_sp, end);
+  __ bgt(&done);
+  __ LoadU64(value, MemOperand(old_sp));
+  __ lay(old_sp, MemOperand(old_sp, kSystemPointerSize));
+  __ StoreU64(value, MemOperand(dest));
+  __ lay(dest, MemOperand(dest, kSystemPointerSize));
+  __ b(&loop);
+  __ bind(&done);
+
+  // Update total number of arguments.
+  __ AddS64(argc_in_out, argc_in_out, count);
+}
+
+}  // namespace
+
 // static
 // TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
@@ -1926,25 +1981,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
 
   // Move the arguments already in the stack,
   // including the receiver and the return address.
-  {
-    Label copy, check;
-    Register num = ip, src = r8, dest = r7;
-    __ mov(src, sp);
-    __ ShiftLeftU64(r1, r6, Operand(kSystemPointerSizeLog2));
-    __ SubS64(sp, sp, r1);
-    // Update stack pointer.
-    __ mov(dest, sp);
-    __ ltgr(num, r2);
-    __ b(&check);
-    __ bind(&copy);
-    __ LoadU64(r0, MemOperand(src));
-    __ lay(src, MemOperand(src, kSystemPointerSize));
-    __ StoreU64(r0, MemOperand(dest));
-    __ lay(dest, MemOperand(dest, kSystemPointerSize));
-    __ SubS64(num, num, Operand(1));
-    __ bind(&check);
-    __ b(ge, &copy);
-  }
+  // r6: Number of arguments to make room for.
+  // r2: Number of arguments already on the stack.
+  // r7: Points to first free slot on the stack after arguments were shifted.
+  Generate_AllocateSpaceAndShiftExistingArguments(masm, r6, r2, r7, ip, r8);
 
   // Push arguments onto the stack (thisArgument is already on the stack).
   {
@@ -1965,7 +2005,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
     __ lay(r7, MemOperand(r7, kSystemPointerSize));
     __ BranchOnCount(r1, &loop);
     __ bind(&no_args);
-    __ AddS64(r2, r2, r6);
   }
 
   // Tail-call to the actual Call or Construct builtin.
@@ -2035,33 +2074,17 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
 
     // Move the arguments already in the stack,
     // including the receiver and the return address.
-    {
-      Label copy, check;
-      Register num = r1, src = ip,
-               dest = r4;  // r7 and r10 are context and root.
-      __ mov(src, sp);
-      // Update stack pointer.
-      __ ShiftLeftU64(scratch, r7, Operand(kSystemPointerSizeLog2));
-      __ SubS64(sp, sp, scratch);
-      __ mov(dest, sp);
-      __ ltgr(num, r2);
-      __ b(&check);
-      __ bind(&copy);
-      __ LoadU64(r0, MemOperand(src));
-      __ lay(src, MemOperand(src, kSystemPointerSize));
-      __ StoreU64(r0, MemOperand(dest));
-      __ lay(dest, MemOperand(dest, kSystemPointerSize));
-      __ SubS64(num, num, Operand(1));
-      __ bind(&check);
-      __ b(ge, &copy);
-    }
+    // r7: Number of arguments to make room for.
+    // r2: Number of arguments already on the stack.
+    // r4: Points to first free slot on the stack after arguments were shifted.
+    Generate_AllocateSpaceAndShiftExistingArguments(masm, r7, r2, r4, scratch,
+                                                    ip);
 
     // Copy arguments from the caller frame.
     // TODO(victorgomes): Consider using forward order as potentially more cache
     // friendly.
     {
       Label loop;
-      __ AddS64(r2, r2, r7);
       __ bind(&loop);
       {
         __ SubS64(r7, r7, Operand(1));
@@ -2091,8 +2114,12 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   // -----------------------------------
   __ AssertFunction(r3);
 
+  Label class_constructor;
   __ LoadTaggedPointerField(
       r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadU32(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
+  __ TestBitMask(r5, SharedFunctionInfo::IsClassConstructorBit::kMask, r0);
+  __ bne(&class_constructor);
 
   // Enter the context of the function; ToObject has to run in the function
   // context, and we also need to take the global proxy from the function
@@ -2170,6 +2197,14 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   __ LoadU16(
       r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
   __ InvokeFunctionCode(r3, no_reg, r4, r2, InvokeType::kJump);
+
+  // The function is a "classConstructor", need to raise an exception.
+  __ bind(&class_constructor);
+  {
+    FrameAndConstantPoolScope frame(masm, StackFrame::INTERNAL);
+    __ push(r3);
+    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+  }
 }
 
 namespace {
@@ -2540,6 +2575,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
   __ Trap();
 }
 
+void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
+  // TODO(v8:12191): Implement for this platform.
+  __ Trap();
+}
+
 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
   // Only needed on x64.
   __ Trap();
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index d61a2705fb32a2..9dcecdab3302e6 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -226,6 +226,7 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
                   RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
                   RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
                   RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
+  PtrComprCageBase cage_base(isolate);
   for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
        ++builtin) {
     Code code = builtins->code(builtin);
@@ -242,8 +243,8 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
                                  UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
       } else {
         DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
-        Object object = rinfo->target_object();
-        if (!object.IsCode()) continue;
+        Object object = rinfo->target_object(cage_base);
+        if (!object.IsCode(cage_base)) continue;
         Code target = Code::cast(object);
         if (!target.is_builtin()) continue;
         Code new_target = builtins->code(target.builtin_id());
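Constructing the PtrComprCageBase once outside the loop means target_object() and IsCode() no longer have to re-derive the cage base per relocation entry. The underlying operation is just a base-plus-32-bit-offset reconstruction; a toy model (an assumption-laden simplification that ignores Smis and tag bits):

#include <cstdint>

// Toy model of pointer compression (illustrative, not V8 internals). A
// compressed tagged pointer stores only the low 32 bits; the cage base
// supplies the high bits shared by every object in the heap cage.
struct PtrComprCageBase {
  uintptr_t base;  // aligned start of the pointer cage
};

uintptr_t Decompress(PtrComprCageBase cage_base, uint32_t compressed) {
  return cage_base.base + compressed;  // high bits from base, low from value
}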
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index 519d98867b8285..45a396afe63808 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -292,7 +292,7 @@ transitioning macro ConstructByArrayBuffer(implicit context: Context)(
     //   in the step 12 branch.
     newByteLength = bufferByteLength - offset;
     newLength = elementsInfo.CalculateLength(newByteLength)
-        otherwise IfInvalidOffset;
+        otherwise IfInvalidLength;
 
     // 12. Else,
   } else {
diff --git a/deps/v8/src/builtins/typed-array-every.tq b/deps/v8/src/builtins/typed-array-every.tq
index f2701a040b9f87..fa1b7ab619dcba 100644
--- a/deps/v8/src/builtins/typed-array-every.tq
+++ b/deps/v8/src/builtins/typed-array-every.tq
@@ -9,13 +9,14 @@ const kBuiltinNameEvery: constexpr string = '%TypedArray%.prototype.every';
 
 // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every
 transitioning macro EveryAllElements(implicit context: Context)(
-    array: typed_array::AttachedJSTypedArray, length: uintptr,
+    attachedArrayAndLength: typed_array::AttachedJSTypedArrayAndLength,
     callbackfn: Callable, thisArg: JSAny): Boolean {
-  let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+  let witness =
+      typed_array::NewAttachedJSTypedArrayWitness(attachedArrayAndLength.array);
 
   // 5. Let k be 0.
   // 6. Repeat, while k < len
-  for (let k: uintptr = 0; k < length; k++) {
+  for (let k: uintptr = 0; k < attachedArrayAndLength.length; k++) {
     // 6a. Let Pk be ! ToString(𝔽(k)).
     // There is no need to cast ToString to load elements.
 
@@ -59,13 +60,12 @@ TypedArrayPrototypeEvery(
     // 3. Let len be IntegerIndexedObjectLength(O).
    const array: JSTypedArray = Cast<JSTypedArray>(receiver)
        otherwise NotTypedArray;
-    const length = LoadJSTypedArrayLengthAndCheckDetached(array)
+    const attachedArrayAndLength = EnsureAttachedAndReadLength(array)
        otherwise IsDetachedOrOutOfBounds;
 
     // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
     const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
     const thisArg = arguments[1];
-    return EveryAllElements(
-        %RawDownCast<AttachedJSTypedArray>(array), length, callbackfn, thisArg);
+    return EveryAllElements(attachedArrayAndLength, callbackfn, thisArg);
   } label NotTypedArray deferred {
     ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameEvery);
   } label IsDetachedOrOutOfBounds deferred {
diff --git a/deps/v8/src/builtins/typed-array-filter.tq b/deps/v8/src/builtins/typed-array-filter.tq
index 18fbce9f09f883..736dff0affb8e1 100644
--- a/deps/v8/src/builtins/typed-array-filter.tq
+++ b/deps/v8/src/builtins/typed-array-filter.tq
@@ -13,14 +13,13 @@ transitioning javascript builtin TypedArrayPrototypeFilter(
   try {
     // 1. Let O be the this value.
     // 2. Perform ? ValidateTypedArray(O).
+    // 3. Let len be IntegerIndexedObjectLength(O).
     const array: JSTypedArray = Cast<JSTypedArray>(receiver)
         otherwise ThrowTypeError(
         MessageTemplate::kNotTypedArray, kBuiltinNameFilter);
-    const src = typed_array::EnsureAttached(array) otherwise IsDetached;
-
-    // 3. Let len be O.[[ArrayLength]].
-    const len: uintptr = src.length;
+    const attachedArrayAndLength = EnsureAttachedAndReadLength(array)
+        otherwise IsDetachedOrOutOfBounds;
 
     // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
     const callbackfn = Cast<Callable>(arguments[0]) otherwise
         ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
@@ -32,19 +31,20 @@ transitioning javascript builtin TypedArrayPrototypeFilter(
     // TODO(v8:4153): Support huge TypedArrays here. (growable fixed arrays
     // can't be longer than kMaxSmiValue).
     let kept = growable_fixed_array::NewGrowableFixedArray();
-    let witness = typed_array::NewAttachedJSTypedArrayWitness(src);
+    let witness = typed_array::NewAttachedJSTypedArrayWitness(
+        attachedArrayAndLength.array);
 
     // 7. Let k be 0.
     // 8. Let captured be 0.
     // 9. Repeat, while k < len
-    for (let k: uintptr = 0; k < len; k++) {
+    for (let k: uintptr = 0; k < attachedArrayAndLength.length; k++) {
       let value: JSAny;
       // a. Let Pk be ! ToString(k).
       // b. Let kValue be ? Get(O, Pk).
       try {
-        witness.Recheck() otherwise goto IsDetached;
+        witness.RecheckIndex(k) otherwise goto IsDetachedOrOutOfBounds;
         value = witness.Load(k);
-      } label IsDetached deferred {
+      } label IsDetachedOrOutOfBounds deferred {
         value = Undefined;
       }
 
@@ -80,7 +80,7 @@ transitioning javascript builtin TypedArrayPrototypeFilter(
 
     // 13. Return A.
     return typedArray;
-  } label IsDetached deferred {
+  } label IsDetachedOrOutOfBounds deferred {
     ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameFilter);
   }
 }
diff --git a/deps/v8/src/builtins/typed-array-find.tq b/deps/v8/src/builtins/typed-array-find.tq
index b37b4ef8a91f41..c2456f9268fcc4 100644
--- a/deps/v8/src/builtins/typed-array-find.tq
+++ b/deps/v8/src/builtins/typed-array-find.tq
@@ -9,13 +9,14 @@ const kBuiltinNameFind: constexpr string = '%TypedArray%.prototype.find';
 
 // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.find
 transitioning macro FindAllElements(implicit context: Context)(
-    array: typed_array::AttachedJSTypedArray, predicate: Callable,
-    thisArg: JSAny): JSAny {
-  let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
-  const length: uintptr = witness.Get().length;
+    attachedArrayAndLength: typed_array::AttachedJSTypedArrayAndLength,
+    predicate: Callable, thisArg: JSAny): JSAny {
+  let witness =
+      typed_array::NewAttachedJSTypedArrayWitness(attachedArrayAndLength.array);
+
   // 5. Let k be 0.
   // 6. Repeat, while k < len
-  for (let k: uintptr = 0; k < length; k++) {
+  for (let k: uintptr = 0; k < attachedArrayAndLength.length; k++) {
     // 6a. Let Pk be ! ToString(𝔽(k)).
     // There is no need to cast ToString to load elements.
 
@@ -23,9 +24,9 @@ transitioning macro FindAllElements(implicit context: Context)(
     // kValue must be undefined when the buffer is detached.
     let value: JSAny;
     try {
-      witness.Recheck() otherwise goto IsDetached;
+      witness.RecheckIndex(k) otherwise goto IsDetachedOrOutOfBounds;
       value = witness.Load(k);
-    } label IsDetached deferred {
+    } label IsDetachedOrOutOfBounds deferred {
       value = Undefined;
     }
 
@@ -56,18 +57,22 @@ TypedArrayPrototypeFind(
   // arguments[0] = callback
   // arguments[1] = thisArg
   try {
+    // 1. Let O be the this value.
+    // 2. Perform ? ValidateTypedArray(O).
+    // 3. Let len be IntegerIndexedObjectLength(O).
     const array: JSTypedArray = Cast<JSTypedArray>(receiver)
         otherwise NotTypedArray;
-    const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
-
+    const attachedArrayAndLength = EnsureAttachedAndReadLength(array)
+        otherwise IsDetachedOrOutOfBounds;
+
     // 4. If IsCallable(predicate) is false, throw a TypeError exception.
     const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
     const thisArg = arguments[1];
-    return FindAllElements(uarray, predicate, thisArg);
+    return FindAllElements(attachedArrayAndLength, predicate, thisArg);
   } label NotCallable deferred {
     ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
   } label NotTypedArray deferred {
     ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFind);
-  } label IsDetached deferred {
+  } label IsDetachedOrOutOfBounds deferred {
     ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameFind);
   }
 }
diff --git a/deps/v8/src/builtins/typed-array-findindex.tq b/deps/v8/src/builtins/typed-array-findindex.tq
index aede90dc7f7be2..6a63008145a9af 100644
--- a/deps/v8/src/builtins/typed-array-findindex.tq
+++ b/deps/v8/src/builtins/typed-array-findindex.tq
@@ -9,13 +9,14 @@ const kBuiltinNameFindIndex: constexpr string =
     '%TypedArray%.prototype.findIndex';
 
 transitioning macro FindIndexAllElements(implicit context: Context)(
-    array: typed_array::AttachedJSTypedArray, predicate: Callable,
-    thisArg: JSAny): Number {
-  let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
-  const length: uintptr = witness.Get().length;
+    attachedArrayAndLength: typed_array::AttachedJSTypedArrayAndLength,
+    predicate: Callable, thisArg: JSAny): Number {
+  let witness =
+      typed_array::NewAttachedJSTypedArrayWitness(attachedArrayAndLength.array);
+
   // 5. Let k be 0.
   // 6. Repeat, while k < len
-  for (let k: uintptr = 0; k < length; k++) {
+  for (let k: uintptr = 0; k < attachedArrayAndLength.length; k++) {
     // 6a. Let Pk be ! ToString(𝔽(k)).
     // There is no need to cast ToString to load elements.
 
@@ -23,9 +24,9 @@ transitioning macro FindIndexAllElements(implicit context: Context)(
     // kValue must be undefined when the buffer is detached.
     let value: JSAny;
     try {
-      witness.Recheck() otherwise goto IsDetached;
+      witness.RecheckIndex(k) otherwise goto IsDetachedOrOutOfBounds;
      value = witness.Load(k);
-    } label IsDetached deferred {
+    } label IsDetachedOrOutOfBounds deferred {
       value = Undefined;
     }
 
@@ -47,21 +48,25 @@ transitioning macro FindIndexAllElements(implicit context: Context)(
 transitioning javascript builtin
 TypedArrayPrototypeFindIndex(
     js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
-  // arguments[0] = callback
+  // arguments[0] = predicate
   // arguments[1] = thisArg.
   try {
+    // 1. Let O be the this value.
+    // 2. Perform ? ValidateTypedArray(O).
+    // 3. Let len be IntegerIndexedObjectLength(O).
     const array: JSTypedArray = Cast<JSTypedArray>(receiver)
         otherwise NotTypedArray;
-    const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
-
+    const attachedArrayAndLength = EnsureAttachedAndReadLength(array)
+        otherwise IsDetachedOrOutOfBounds;
+
     // 4. If IsCallable(predicate) is false, throw a TypeError exception.
     const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
     const thisArg = arguments[1];
-    return FindIndexAllElements(uarray, predicate, thisArg);
+    return FindIndexAllElements(attachedArrayAndLength, predicate, thisArg);
   } label NotCallable deferred {
     ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
   } label NotTypedArray deferred {
     ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFindIndex);
-  } label IsDetached deferred {
+  } label IsDetachedOrOutOfBounds deferred {
     ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameFindIndex);
   }
 }
diff --git a/deps/v8/src/builtins/typed-array-findlast.tq b/deps/v8/src/builtins/typed-array-findlast.tq
index 15f67760c0f145..45695f83ac7f50 100644
--- a/deps/v8/src/builtins/typed-array-findlast.tq
+++ b/deps/v8/src/builtins/typed-array-findlast.tq
@@ -10,14 +10,13 @@ const kBuiltinNameFindLast: constexpr string =
 
 // https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast
 transitioning macro FindLastAllElements(implicit context: Context)(
-    array: typed_array::AttachedJSTypedArray, predicate: Callable,
-    thisArg: JSAny): JSAny {
-  let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
-  // 3. Let len be O.[[ArrayLength]].
-  const length: uintptr = witness.Get().length;
+    attachedArrayAndLength: typed_array::AttachedJSTypedArrayAndLength,
+    predicate: Callable, thisArg: JSAny): JSAny {
+  let witness =
+      typed_array::NewAttachedJSTypedArrayWitness(attachedArrayAndLength.array);
   // 5. Let k be len - 1.
   // 6. Repeat, while k ≥ 0
-  for (let k: uintptr = length; k-- > 0;) {
+  for (let k: uintptr = attachedArrayAndLength.length; k-- > 0;) {
     // 6a. Let Pk be ! ToString(𝔽(k)).
     // There is no need to cast ToString to load elements.
 
@@ -25,9 +24,9 @@ transitioning macro FindLastAllElements(implicit context: Context)(
     // kValue must be undefined when the buffer was detached.
     let value: JSAny;
     try {
-      witness.Recheck() otherwise goto IsDetached;
+      witness.RecheckIndex(k) otherwise goto IsDetachedOrOutOfBounds;
       value = witness.Load(k);
-    } label IsDetached deferred {
+    } label IsDetachedOrOutOfBounds deferred {
       value = Undefined;
     }
 
@@ -54,24 +53,25 @@ transitioning macro FindLastAllElements(implicit context: Context)(
 transitioning javascript builtin
 TypedArrayPrototypeFindLast(
     js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
-  // arguments[0] = callback
+  // arguments[0] = predicate
   // arguments[1] = thisArg
   try {
     // 1. Let O be the this value.
+    // 2. Perform ? ValidateTypedArray(O).
+    // 3. Let len be IntegerIndexedObjectLength(O).
     const array: JSTypedArray = Cast<JSTypedArray>(receiver)
         otherwise NotTypedArray;
-    // 2. Perform ? ValidateTypedArray(O).
-    const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
-
+    const attachedArrayAndLength = EnsureAttachedAndReadLength(array)
+        otherwise IsDetachedOrOutOfBounds;
     // 4. If IsCallable(predicate) is false, throw a TypeError exception.
     const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
     const thisArg = arguments[1];
-    return FindLastAllElements(uarray, predicate, thisArg);
+    return FindLastAllElements(attachedArrayAndLength, predicate, thisArg);
   } label NotCallable deferred {
     ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
   } label NotTypedArray deferred {
     ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFindLast);
-  } label IsDetached deferred {
+  } label IsDetachedOrOutOfBounds deferred {
     ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameFindLast);
   }
 }
diff --git a/deps/v8/src/builtins/typed-array-findlastindex.tq b/deps/v8/src/builtins/typed-array-findlastindex.tq
index 56d139d8b1b5e2..1edee5444feba0 100644
--- a/deps/v8/src/builtins/typed-array-findlastindex.tq
+++ b/deps/v8/src/builtins/typed-array-findlastindex.tq
@@ -10,14 +10,13 @@ const kBuiltinNameFindLastIndex: constexpr string =
 
 // https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex
 transitioning macro FindLastIndexAllElements(implicit context: Context)(
-    array: typed_array::AttachedJSTypedArray, predicate: Callable,
-    thisArg: JSAny): Number {
-  let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
-  // 3. Let len be O.[[ArrayLength]].
-  const length: uintptr = witness.Get().length;
+    attachedArrayAndLength: typed_array::AttachedJSTypedArrayAndLength,
+    predicate: Callable, thisArg: JSAny): Number {
+  let witness =
+      typed_array::NewAttachedJSTypedArrayWitness(attachedArrayAndLength.array);
   // 5. Let k be len - 1.
   // 6. Repeat, while k ≥ 0
-  for (let k: uintptr = length; k-- > 0;) {
+  for (let k: uintptr = attachedArrayAndLength.length; k-- > 0;) {
     // 6a. Let Pk be ! ToString(𝔽(k)).
     // There is no need to cast ToString to load elements.
 
@@ -25,9 +24,9 @@ transitioning macro FindLastIndexAllElements(implicit context: Context)(
     // kValue must be undefined when the buffer was detached.
     let value: JSAny;
     try {
-      witness.Recheck() otherwise goto IsDetached;
+      witness.RecheckIndex(k) otherwise goto IsDetachedOrOutOfBounds;
       value = witness.Load(k);
-    } label IsDetached deferred {
+    } label IsDetachedOrOutOfBounds deferred {
       value = Undefined;
     }
 
@@ -54,25 +53,25 @@ transitioning macro FindLastIndexAllElements(implicit context: Context)(
 transitioning javascript builtin
 TypedArrayPrototypeFindLastIndex(
     js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny {
-  // arguments[0] = callback
-  // arguments[1] = thisArg.
+  // arguments[0] = predicate
+  // arguments[1] = thisArg
   try {
     // 1. Let O be the this value.
+    // 2. Perform ? ValidateTypedArray(O).
+    // 3. Let len be IntegerIndexedObjectLength(O).
     const array: JSTypedArray = Cast<JSTypedArray>(receiver)
         otherwise NotTypedArray;
-    // 2. Perform ? ValidateTypedArray(O).
-    const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
-
+    const attachedArrayAndLength = EnsureAttachedAndReadLength(array)
+        otherwise IsDetachedOrOutOfBounds;
     // 4. If IsCallable(predicate) is false, throw a TypeError exception.
     const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
     const thisArg = arguments[1];
-
-    return FindLastIndexAllElements(uarray, predicate, thisArg);
+    return FindLastIndexAllElements(attachedArrayAndLength, predicate, thisArg);
   } label NotCallable deferred {
     ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
   } label NotTypedArray deferred {
     ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFindLastIndex);
-  } label IsDetached deferred {
+  } label IsDetachedOrOutOfBounds deferred {
     ThrowTypeError(
         MessageTemplate::kDetachedOperation, kBuiltinNameFindLastIndex);
   }
diff --git a/deps/v8/src/builtins/typed-array-foreach.tq b/deps/v8/src/builtins/typed-array-foreach.tq
index fa227bc75be77f..45b949b4ef1e36 100644
--- a/deps/v8/src/builtins/typed-array-foreach.tq
+++ b/deps/v8/src/builtins/typed-array-foreach.tq
@@ -8,13 +8,14 @@ namespace typed_array {
 const kBuiltinNameForEach: constexpr string = '%TypedArray%.prototype.forEach';
 
 transitioning macro ForEachAllElements(implicit context: Context)(
-    array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
-    thisArg: JSAny): Undefined {
-  let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
-  const length: uintptr = witness.Get().length;
+    attachedArrayAndLength: typed_array::AttachedJSTypedArrayAndLength,
+    callbackfn: Callable, thisArg: JSAny): Undefined {
+  let witness =
+      typed_array::NewAttachedJSTypedArrayWitness(attachedArrayAndLength.array);
+
   // 5. Let k be 0.
   // 6. Repeat, while k < len
-  for (let k: uintptr = 0; k < length; k++) {
+  for (let k: uintptr = 0; k < attachedArrayAndLength.length; k++) {
     // 6a. Let Pk be ! ToString(𝔽(k)).
     // There is no need to cast ToString to load elements.
 
@@ -22,9 +23,9 @@ transitioning macro ForEachAllElements(implicit context: Context)(
     // kValue must be undefined when the buffer is detached.
     let value: JSAny;
     try {
-      witness.Recheck() otherwise goto IsDetached;
+      witness.RecheckIndex(k) otherwise goto IsDetachedOrOutOfBounds;
       value = witness.Load(k);
-    } label IsDetached deferred {
+    } label IsDetachedOrOutOfBounds deferred {
       value = Undefined;
     }
 
@@ -50,18 +51,22 @@ TypedArrayPrototypeForEach(js-implicit context: NativeContext, receiver: JSAny)(
   // arguments[1] = this_arg.
 
   try {
+    // 1. Let O be the this value.
+    // 2. Perform ? ValidateTypedArray(O).
+    // 3. Let len be IntegerIndexedObjectLength(O).
     const array: JSTypedArray = Cast<JSTypedArray>(receiver)
         otherwise NotTypedArray;
-    const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
-
+    const attachedArrayAndLength = EnsureAttachedAndReadLength(array)
+        otherwise IsDetachedOrOutOfBounds;
+
     // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
     const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
     const thisArg = arguments[1];
-    return ForEachAllElements(uarray, callbackfn, thisArg);
+    return ForEachAllElements(attachedArrayAndLength, callbackfn, thisArg);
   } label NotCallable deferred {
     ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
   } label NotTypedArray deferred {
     ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameForEach);
-  } label IsDetached deferred {
+  } label IsDetachedOrOutOfBounds deferred {
     ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameForEach);
   }
 }
diff --git a/deps/v8/src/builtins/typed-array-reduce.tq b/deps/v8/src/builtins/typed-array-reduce.tq
index 0261599106d364..b231b1ff98398f 100644
--- a/deps/v8/src/builtins/typed-array-reduce.tq
+++ b/deps/v8/src/builtins/typed-array-reduce.tq
@@ -8,19 +8,19 @@ namespace typed_array {
 const kBuiltinNameReduce: constexpr string = '%TypedArray%.prototype.reduce';
 
 transitioning macro ReduceAllElements(implicit context: Context)(
-    array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
-    initialValue: JSAny|TheHole): JSAny {
-  let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
-  const length: uintptr = witness.Get().length;
+    attachedArrayAndLength: typed_array::AttachedJSTypedArrayAndLength,
+    callbackfn: Callable, initialValue: JSAny|TheHole): JSAny {
+  let witness =
+      typed_array::NewAttachedJSTypedArrayWitness(attachedArrayAndLength.array);
   let accumulator = initialValue;
-  for (let k: uintptr = 0; k < length; k++) {
+  for (let k: uintptr = 0; k < attachedArrayAndLength.length; k++) {
     let value: JSAny;
     try {
-      witness.Recheck()
-          otherwise goto IsDetached;
+      witness.RecheckIndex(k)
+          otherwise goto IsDetachedOrOutOfBounds;
       value = witness.Load(k);
-    } label IsDetached deferred {
+    } label IsDetachedOrOutOfBounds deferred {
       value = Undefined;
     }
     typeswitch (accumulator) {
@@ -53,18 +53,22 @@ TypedArrayPrototypeReduce(
   // arguments[0] = callback
   // arguments[1] = initialValue.
   try {
+    // 1. Let O be the this value.
+    // 2. Perform ? ValidateTypedArray(O).
+    // 3. Let len be IntegerIndexedObjectLength(O).
     const array: JSTypedArray = Cast<JSTypedArray>(receiver)
         otherwise NotTypedArray;
-    const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
-
+    const attachedArrayAndLength = EnsureAttachedAndReadLength(array)
+        otherwise IsDetachedOrOutOfBounds;
+
     // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
     const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
     const initialValue = arguments.length >= 2 ? arguments[1] : TheHole;
-    return ReduceAllElements(uarray, callbackfn, initialValue);
+    return ReduceAllElements(attachedArrayAndLength, callbackfn, initialValue);
   } label NotCallable deferred {
     ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
   } label NotTypedArray deferred {
     ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameReduce);
-  } label IsDetached deferred {
+  } label IsDetachedOrOutOfBounds deferred {
     ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameReduce);
   }
 }
diff --git a/deps/v8/src/builtins/typed-array-reduceright.tq b/deps/v8/src/builtins/typed-array-reduceright.tq
index 5449c4f1fcfcba..36f14a1b68ef38 100644
--- a/deps/v8/src/builtins/typed-array-reduceright.tq
+++ b/deps/v8/src/builtins/typed-array-reduceright.tq
@@ -10,18 +10,18 @@ const kBuiltinNameReduceRight: constexpr string =
 
 // https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduceright
 transitioning macro ReduceRightAllElements(implicit context: Context)(
-    array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
-    initialValue: JSAny|TheHole): JSAny {
-  let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
-  const length: uintptr = witness.Get().length;
+    attachedArrayAndLength: typed_array::AttachedJSTypedArrayAndLength,
+    callbackfn: Callable, initialValue: JSAny|TheHole): JSAny {
+  let witness =
+      typed_array::NewAttachedJSTypedArrayWitness(attachedArrayAndLength.array);
   let accumulator = initialValue;
-  for (let k: uintptr = length; k-- > 0;) {
+  for (let k: uintptr = attachedArrayAndLength.length; k-- > 0;) {
     let value: JSAny;
     try {
-      witness.Recheck()
-          otherwise goto IsDetached;
+      witness.RecheckIndex(k)
+          otherwise goto IsDetachedOrOutOfBounds;
       value = witness.Load(k);
-    } label IsDetached deferred {
+    } label IsDetachedOrOutOfBounds deferred {
       value = Undefined;
     }
     typeswitch (accumulator) {
@@ -55,19 +55,24 @@ TypedArrayPrototypeReduceRight(
   // arguments[0] = callback
   // arguments[1] = initialValue.
   try {
+    // 1. Let O be the this value.
+    // 2. Perform ? ValidateTypedArray(O).
+    // 3. Let len be IntegerIndexedObjectLength(O).
     const array: JSTypedArray = Cast<JSTypedArray>(receiver)
         otherwise NotTypedArray;
-    const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
-
+    const attachedArrayAndLength = EnsureAttachedAndReadLength(array)
+        otherwise IsDetachedOrOutOfBounds;
+
     // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
     const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
     const initialValue = arguments.length >= 2 ? arguments[1] : TheHole;
-    return ReduceRightAllElements(uarray, callbackfn, initialValue);
+    return ReduceRightAllElements(
+        attachedArrayAndLength, callbackfn, initialValue);
   } label NotCallable deferred {
     ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
   } label NotTypedArray deferred {
     ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameReduceRight);
-  } label IsDetached deferred {
+  } label IsDetachedOrOutOfBounds deferred {
     ThrowTypeError(
         MessageTemplate::kDetachedOperation, kBuiltinNameReduceRight);
   }
diff --git a/deps/v8/src/builtins/typed-array-some.tq b/deps/v8/src/builtins/typed-array-some.tq
index d9f37937b436d2..bb2d951ec7b76d 100644
--- a/deps/v8/src/builtins/typed-array-some.tq
+++ b/deps/v8/src/builtins/typed-array-some.tq
@@ -9,13 +9,14 @@ const kBuiltinNameSome: constexpr string = '%TypedArray%.prototype.some';
 
 // https://tc39.es/ecma262/#sec-%typedarray%.prototype.some
 transitioning macro SomeAllElements(implicit context: Context)(
-    array: typed_array::AttachedJSTypedArray, length: uintptr,
+    attachedArrayAndLength: typed_array::AttachedJSTypedArrayAndLength,
     callbackfn: Callable, thisArg: JSAny): Boolean {
-  let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
+  let witness =
+      typed_array::NewAttachedJSTypedArrayWitness(attachedArrayAndLength.array);
 
   // 5. Let k be 0.
   // 6. Repeat, while k < len
-  for (let k: uintptr = 0; k < length; k++) {
+  for (let k: uintptr = 0; k < attachedArrayAndLength.length; k++) {
     // 6a. Let Pk be ! ToString(𝔽(k)).
     // There is no need to cast ToString to load elements.
 
@@ -61,13 +62,12 @@ TypedArrayPrototypeSome(
     // 3. Let len be IntegerIndexedObjectLength(O).
     const array: JSTypedArray = Cast<JSTypedArray>(receiver)
         otherwise NotTypedArray;
-    const length = LoadJSTypedArrayLengthAndCheckDetached(array)
+    const attachedArrayAndLength = EnsureAttachedAndReadLength(array)
        otherwise IsDetachedOrOutOfBounds;
 
     // 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
     const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
     const thisArg = arguments[1];
-    return SomeAllElements(
-        %RawDownCast<AttachedJSTypedArray>(array), length, callbackfn, thisArg);
+    return SomeAllElements(attachedArrayAndLength, callbackfn, thisArg);
   } label NotTypedArray deferred {
     ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameSome);
   } label IsDetachedOrOutOfBounds deferred {
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index c64573cb3be925..5ddb1072ae1fab 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -195,21 +195,27 @@ macro EnsureAttached(array: JSTypedArray): AttachedJSTypedArray
   }
 }
 
-struct AttachedJSTypedArrayWitness {
-  macro Get(): AttachedJSTypedArray {
-    return this.unstable;
-  }
+struct AttachedJSTypedArrayAndLength {
+  array: AttachedJSTypedArray;
+  length: uintptr;
+}
+
+macro EnsureAttachedAndReadLength(array: JSTypedArray):
+    AttachedJSTypedArrayAndLength
+    labels DetachedOrOutOfBounds {
+  const length = LoadJSTypedArrayLengthAndCheckDetached(array)
+      otherwise DetachedOrOutOfBounds;
+  return AttachedJSTypedArrayAndLength{
+    array: %RawDownCast<AttachedJSTypedArray>(array),
+    length: length
+  };
+}
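All of the .tq changes above funnel through this pair of additions: validate once up front (EnsureAttachedAndReadLength) and then re-validate each element access against the current buffer state (the witness's RecheckIndex), so that a callback that detaches or shrinks the buffer mid-iteration observes undefined instead of an out-of-bounds read. A C++ analogue of the control flow (a sketch with assumed types; the authoritative version is the Torque above):

#include <cstddef>
#include <optional>

// Assumed model types, for illustration only.
struct JSTypedArrayModel {
  const double* data = nullptr;  // null once detached
  size_t length = 0;             // may shrink for resizable buffers
};

struct AttachedArrayAndLength {
  const JSTypedArrayModel* array;
  size_t length;  // length captured at validation time
};

// EnsureAttachedAndReadLength: validate once, snapshot the length.
std::optional<AttachedArrayAndLength> EnsureAttachedAndReadLength(
    const JSTypedArrayModel& a) {
  if (a.data == nullptr) return std::nullopt;  // detached: TypeError path
  return AttachedArrayAndLength{&a, a.length};
}

// RecheckIndex analogue: a user callback may detach or shrink the buffer
// between iterations, so each load re-validates against the *current*
// state and substitutes undefined (modeled here as NaN) when index k is
// no longer valid.
double LoadOrUndefined(const AttachedArrayAndLength& snap, size_t k) {
  const JSTypedArrayModel& cur = *snap.array;
  if (cur.data == nullptr || k >= cur.length) return 0.0 / 0.0;
  return cur.data[k];
}

Usage mirrors the loops in every/find/forEach/reduce: iterate k from 0 to the snapshot length, calling LoadOrUndefined(snap, k) on each step.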
- macro Recheck(): void labels Detached { - if (IsDetachedBuffer(this.stable.buffer)) goto Detached; - this.unstable = %RawDownCast(this.stable); - } - macro RecheckIndex(index: uintptr): void labels DetachedOrOutOfBounds { const length = LoadJSTypedArrayLengthAndCheckDetached(this.stable) otherwise DetachedOrOutOfBounds; diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq index e4aea1446d615c..aadb17c3a04663 100644 --- a/deps/v8/src/builtins/wasm.tq +++ b/deps/v8/src/builtins/wasm.tq @@ -254,6 +254,14 @@ builtin WasmRethrow(exception: Object): JSAny { tail runtime::WasmReThrow(LoadContextFromFrame(), exception); } +// We need this for frames that do not have the instance in the parameters. +// Currently, this is CapiCallWrapper frames. +builtin WasmRethrowExplicitContext( + exception: Object, explicitContext: Context): JSAny { + if (exception == Null) tail ThrowWasmTrapRethrowNull(); + tail runtime::WasmReThrow(explicitContext, exception); +} + builtin WasmTriggerTierUp(): JSAny { const instance: WasmInstanceObject = LoadInstanceFromFrame(); tail runtime::WasmTriggerTierUp(LoadContextFromFrame(), instance); @@ -284,11 +292,6 @@ builtin WasmAllocateJSArray(implicit context: Context)(size: Smi): JSArray { return AllocateJSArray(ElementsKind::PACKED_ELEMENTS, map, size, size); } -builtin WasmAllocatePair(first: Object, second: Object): Tuple2 { - const tuple2Map: Map = %GetClassMapConstant(); - return new Tuple2{map: tuple2Map, value1: first, value2: second}; -} - builtin WasmAllocateRtt(typeIndex: intptr, parent: Map): Map { tail runtime::WasmAllocateRtt( LoadContextFromFrame(), SmiTag(typeIndex), parent, @@ -370,6 +373,15 @@ builtin WasmArrayCopyWithChecks( SmiFromUint32(srcIndex), SmiFromUint32(length)); } +builtin WasmArrayCopy( + dstIndex: uint32, srcIndex: uint32, length: uint32, dstArray: WasmArray, + srcArray: WasmArray): JSAny { + if (length == 0) return Undefined; + tail runtime::WasmArrayCopy( + LoadContextFromFrame(), dstArray, SmiFromUint32(dstIndex), srcArray, + SmiFromUint32(srcIndex), SmiFromUint32(length)); +} + // Redeclaration with different typing (value is an Object, not JSAny). extern transitioning runtime CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, Object): void; @@ -453,6 +465,107 @@ builtin WasmI64AtomicWait64( } } +// Type feedback collection support for `call_ref`. + +extern macro GetCodeEntry(Code): RawPtr; +extern macro GetCodeEntry(CodeDataContainer): RawPtr; + +struct TargetAndInstance { + target: RawPtr; + instance: HeapObject; // WasmInstanceObject or WasmApiFunctionRef +} + +macro GetTargetAndInstance(funcref: JSFunction): TargetAndInstance { + const sfi = funcref.shared_function_info; + dcheck(Is(sfi.function_data)); + const funcData = UnsafeCast(sfi.function_data); + const ref = funcData.ref; + let target = funcData.foreign_address_ptr; + if (Signed(target) == IntPtrConstant(0)) { + const wrapper = + UnsafeCast(funcData).wasm_to_js_wrapper_code; + target = GetCodeEntry(wrapper); + } + return TargetAndInstance{target: target, instance: ref}; +} + +// Vector format: +// Two slots per call_ref instruction. These slots' values can be: +// - uninitialized: (undefined, ). Note: we use {undefined} as the +// sentinel as an optimization, as it's the default value for FixedArrays. +// - monomorphic: (funcref, call_ref_data) +// - polymorphic: (fixed_array, ). In this case, the array +// contains 2..4 pairs (funcref, call_ref_data) (like monomorphic data). 
+
+builtin CallRefIC(
+    vector: FixedArray, index: intptr, funcref: JSFunction): TargetAndInstance {
+  const value = vector.objects[index];
+  if (value == funcref) {
+    // Monomorphic hit. Check for this case first to maximize its performance.
+    const data = UnsafeCast<CallRefData>(vector.objects[index + 1]);
+    data.count = data.count + 1;
+    return TargetAndInstance{target: data.target, instance: data.instance};
+  }
+  // Check for polymorphic hit; its performance is second-most-important.
+  if (Is<FixedArray>(value)) {
+    const entries = UnsafeCast<FixedArray>(value);
+    for (let i: intptr = 0; i < entries.length_intptr; i += 2) {
+      if (entries.objects[i] == funcref) {
+        // Polymorphic hit.
+        const data = UnsafeCast<CallRefData>(entries.objects[i + 1]);
+        data.count = data.count + 1;
+        return TargetAndInstance{target: data.target, instance: data.instance};
+      }
+    }
+  }
+  // All other cases are some sort of miss and must compute the target/
+  // instance. They all fall through to returning the computed data.
+  const result = GetTargetAndInstance(funcref);
+  if (TaggedEqual(value, Undefined)) {
+    const data = new
+    CallRefData{instance: result.instance, target: result.target, count: 1};
+    vector.objects[index] = funcref;
+    vector.objects[index + 1] = data;
+  } else if (Is<FixedArray>(value)) {
+    // Polymorphic miss.
+    const entries = UnsafeCast<FixedArray>(value);
+    if (entries.length == SmiConstant(8)) {  // 4 entries, 2 slots each.
+      vector.objects[index] = ic::kMegamorphicSymbol;
+      vector.objects[index + 1] = ic::kMegamorphicSymbol;
+    } else {
+      const data = new
+      CallRefData{instance: result.instance, target: result.target, count: 1};
+      const newEntries = UnsafeCast<FixedArray>(AllocateFixedArray(
+          ElementsKind::PACKED_ELEMENTS, entries.length_intptr + 2,
+          AllocationFlag::kNone));
+      for (let i: intptr = 0; i < entries.length_intptr; i++) {
+        newEntries.objects[i] = entries.objects[i];
+      }
+      const newIndex = entries.length_intptr;
+      newEntries.objects[newIndex] = funcref;
+      newEntries.objects[newIndex + 1] = data;
+      vector.objects[index] = newEntries;
+    }
+  } else if (Is<JSFunction>(value)) {
+    // Monomorphic miss.
+    const data = new
+    CallRefData{instance: result.instance, target: result.target, count: 1};
+    const newEntries = UnsafeCast<FixedArray>(AllocateFixedArray(
+        ElementsKind::PACKED_ELEMENTS, 4, AllocationFlag::kNone));
+    newEntries.objects[0] = value;
+    newEntries.objects[1] = vector.objects[index + 1];
+    newEntries.objects[2] = funcref;
+    newEntries.objects[3] = data;
+    vector.objects[index] = newEntries;
+    // Clear the old pointer to the first entry's data object; the specific
+    // value we write doesn't matter.
+    vector.objects[index + 1] = Undefined;
+  }
+  // The "ic::IsMegamorphic(value)" case doesn't need to do anything.
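+  // (Editor's note: the megamorphic state is terminal -- once both slots hold
+  // the sentinel, no later call rewrites them, so this final case only has to
+  // return the freshly computed target/instance pair.)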
+ return result; +} + extern macro TryHasOwnProperty(HeapObject, Map, InstanceType, Name): never labels Found, NotFound, Bailout; type OnNonExistent constexpr 'OnNonExistent'; diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index f2f36243611055..7beedbc3fd702a 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -2377,8 +2377,12 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, StackArgumentsAccessor args(rax); __ AssertFunction(rdi); + Label class_constructor; __ LoadTaggedPointerField( rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); + __ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset), + Immediate(SharedFunctionInfo::IsClassConstructorBit::kMask)); + __ j(not_zero, &class_constructor); // ----------- S t a t e ------------- // -- rax : the number of arguments // -- rdx : the shared function info. @@ -2463,6 +2467,14 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ movzxwq( rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset)); __ InvokeFunctionCode(rdi, no_reg, rbx, rax, InvokeType::kJump); + + // The function is a "classConstructor", need to raise an exception. + __ bind(&class_constructor); + { + FrameScope frame(masm, StackFrame::INTERNAL); + __ Push(rdi); + __ CallRuntime(Runtime::kThrowConstructorNonCallableError); + } } namespace { @@ -3640,6 +3652,213 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { __ jmp(&compile_wrapper_done); } +namespace { +// Helper function for WasmReturnPromiseOnSuspend. +void LoadJumpBuffer(MacroAssembler* masm, Register jmpbuf) { + __ movq(rsp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset)); + // The stack limit is set separately under the ExecutionAccess lock. + // TODO(thibaudm): Reload live registers. +} +} // namespace + +void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { + // Set up the stackframe. + __ EnterFrame(StackFrame::JS_TO_WASM); + + // Parameters. + Register closure = kJSFunctionRegister; // rdi + Register param_count = kJavaScriptCallArgCountRegister; // rax + if (kJSArgcIncludesReceiver) { + __ decq(param_count); + } + + constexpr int kFrameMarkerOffset = -kSystemPointerSize; + constexpr int kParamCountOffset = kFrameMarkerOffset - kSystemPointerSize; + // The frame marker is not included in the slot count. + constexpr int kNumSpillSlots = + -(kParamCountOffset - kFrameMarkerOffset) / kSystemPointerSize; + __ subq(rsp, Immediate(kNumSpillSlots * kSystemPointerSize)); + + __ movq(MemOperand(rbp, kParamCountOffset), param_count); + + // ------------------------------------------- + // Get the instance and wasm call target. + // ------------------------------------------- + Register sfi = closure; + __ LoadAnyTaggedField( + sfi, + MemOperand( + closure, + wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction())); + Register function_data = sfi; + __ LoadAnyTaggedField( + function_data, + FieldOperand(sfi, SharedFunctionInfo::kFunctionDataOffset)); + Register wasm_instance = kWasmInstanceRegister; // rsi + __ LoadAnyTaggedField( + wasm_instance, + FieldOperand(function_data, WasmExportedFunctionData::kInstanceOffset)); + sfi = no_reg; + closure = no_reg; + // live: [rsi, rdi] + + // ------------------------------------------- + // Save current state in active jmpbuf. 
+ // ------------------------------------------- + Register active_continuation = rax; + Register foreign_jmpbuf = rbx; + __ LoadAnyTaggedField( + active_continuation, + FieldOperand(wasm_instance, + WasmInstanceObject::kActiveContinuationOffset)); + __ LoadAnyTaggedField( + foreign_jmpbuf, + FieldOperand(active_continuation, WasmContinuationObject::kJmpbufOffset)); + Register jmpbuf = rbx; + __ LoadExternalPointerField( + jmpbuf, FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset), + kForeignForeignAddressTag, r8); + __ movq(MemOperand(jmpbuf, wasm::kJmpBufSpOffset), rsp); + Register stack_limit_address = rcx; + __ movq(stack_limit_address, + FieldOperand(wasm_instance, + WasmInstanceObject::kRealStackLimitAddressOffset)); + Register stack_limit = rdx; + __ movq(stack_limit, MemOperand(stack_limit_address, 0)); + __ movq(MemOperand(jmpbuf, wasm::kJmpBufStackLimitOffset), stack_limit); + // TODO(thibaudm): Save live registers. + foreign_jmpbuf = no_reg; + stack_limit = no_reg; + stack_limit_address = no_reg; + // live: [rsi, rdi, rax] + + // ------------------------------------------- + // Allocate a new continuation. + // ------------------------------------------- + __ Push(wasm_instance); + __ Push(function_data); + __ Push(wasm_instance); + __ Move(kContextRegister, Smi::zero()); + // TODO(thibaudm): Handle GC. + __ CallRuntime(Runtime::kWasmAllocateContinuation); + __ Pop(function_data); + __ Pop(wasm_instance); + STATIC_ASSERT(kReturnRegister0 == rax); + Register target_continuation = rax; + // live: [rsi, rdi, rax] + + // ------------------------------------------- + // Load target continuation jmpbuf. + // ------------------------------------------- + foreign_jmpbuf = rbx; + __ LoadAnyTaggedField( + foreign_jmpbuf, + FieldOperand(target_continuation, WasmContinuationObject::kJmpbufOffset)); + Register target_jmpbuf = rbx; + __ LoadExternalPointerField( + target_jmpbuf, + FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset), + kForeignForeignAddressTag, r8); + // Switch stack! + LoadJumpBuffer(masm, target_jmpbuf); + __ movq(rbp, rsp); // New stack, there is no frame yet. + foreign_jmpbuf = no_reg; + target_jmpbuf = no_reg; + // live: [rsi, rdi] + + // ------------------------------------------- + // Load and call target wasm function. + // ------------------------------------------- + // TODO(thibaudm): Handle arguments. + // TODO(thibaudm): Handle GC. + // Set thread_in_wasm_flag. + Register thread_in_wasm_flag_addr = rax; + __ movq( + thread_in_wasm_flag_addr, + MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset())); + __ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(1)); + Register function_entry = function_data; + __ LoadExternalPointerField( + function_entry, + FieldOperand(function_data, + WasmExportedFunctionData::kForeignAddressOffset), + kForeignForeignAddressTag, r8); + __ Push(wasm_instance); + __ call(function_entry); + __ Pop(wasm_instance); + // Unset thread_in_wasm_flag. + __ movq( + thread_in_wasm_flag_addr, + MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset())); + __ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(0)); + thread_in_wasm_flag_addr = no_reg; + function_entry = no_reg; + function_data = no_reg; + // live: [rsi] + + // ------------------------------------------- + // Reload parent continuation. 
+ // ------------------------------------------- + active_continuation = rbx; + __ LoadAnyTaggedField( + active_continuation, + FieldOperand(wasm_instance, + WasmInstanceObject::kActiveContinuationOffset)); + Register parent = rdx; + __ LoadAnyTaggedField( + parent, + FieldOperand(active_continuation, WasmContinuationObject::kParentOffset)); + active_continuation = no_reg; + // live: [rsi] + + // ------------------------------------------- + // Update instance active continuation. + // ------------------------------------------- + Register object = WriteBarrierDescriptor::ObjectRegister(); + Register slot_address = WriteBarrierDescriptor::SlotAddressRegister(); + DCHECK_EQ(object, rdi); + DCHECK((slot_address == rbx || slot_address == r8)); + // Save reg clobbered by the write barrier. + __ movq(rax, parent); + __ movq(object, wasm_instance); + __ StoreTaggedField( + FieldOperand(object, WasmInstanceObject::kActiveContinuationOffset), + parent); + __ RecordWriteField(object, WasmInstanceObject::kActiveContinuationOffset, + parent, slot_address, SaveFPRegsMode::kIgnore); + // Restore reg clobbered by the write barrier. + __ movq(parent, rax); + foreign_jmpbuf = rax; + __ LoadAnyTaggedField( + foreign_jmpbuf, + FieldOperand(parent, WasmContinuationObject::kJmpbufOffset)); + jmpbuf = foreign_jmpbuf; + __ LoadExternalPointerField( + jmpbuf, FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset), + kForeignForeignAddressTag, r8); + // Switch stack! + LoadJumpBuffer(masm, jmpbuf); + __ leaq(rbp, Operand(rsp, (kNumSpillSlots + 1) * kSystemPointerSize)); + __ Push(wasm_instance); // Spill. + __ Push(wasm_instance); // First arg. + __ Move(kContextRegister, Smi::zero()); + __ CallRuntime(Runtime::kWasmSyncStackLimit); + __ Pop(wasm_instance); + parent = no_reg; + active_continuation = no_reg; + foreign_jmpbuf = no_reg; + wasm_instance = no_reg; + + // ------------------------------------------- + // Epilogue. 
+  // -------------------------------------------
+  __ movq(param_count, MemOperand(rbp, kParamCountOffset));
+  __ LeaveFrame(StackFrame::JS_TO_WASM);
+  __ DropArguments(param_count, r8, TurboAssembler::kCountIsInteger,
+                   TurboAssembler::kCountExcludesReceiver);
+  __ ret(0);
+}
+
 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
   MemOperand OSRTargetSlot(rbp, -wasm::kOSRTargetOffset);
   __ movq(kScratchRegister, OSRTargetSlot);
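Editor's note (not part of the patch): a portable restatement of the stack switch Generate_WasmReturnPromiseOnSuspend performs above. Field names follow the wasm::kJmpBuf* offsets used in the assembly; everything else is a stand-in:

#include <cstdint>

struct JumpBuffer {
  void* sp;           // wasm::kJmpBufSpOffset
  void* stack_limit;  // wasm::kJmpBufStackLimitOffset
};

// Save the current stack pointer into the active continuation's jmpbuf, then
// adopt the target continuation's stack. The real builtin also records the
// stack limit; the limit is updated separately under the ExecutionAccess
// lock, which is why LoadJumpBuffer above restores only the stack pointer.
void SwitchStacks(JumpBuffer* active, const JumpBuffer* target, void** sp) {
  active->sp = *sp;
  *sp = target->sp;  // switch stack!
}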
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index 6644faa7fb230a..a3c3ffdba6b36f 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -8,6 +8,5 @@ jkummerow@chromium.org
 leszeks@chromium.org
 mslekova@chromium.org
 mvstanton@chromium.org
-neis@chromium.org
 nicohartmann@chromium.org
 zhin@chromium.org
diff --git a/deps/v8/src/codegen/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h
index 2c0e69a753bfdc..0ee81b2f945de9 100644
--- a/deps/v8/src/codegen/arm/assembler-arm-inl.h
+++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h
@@ -91,7 +91,7 @@ Address RelocInfo::constant_pool_entry_address() {
 
 int RelocInfo::target_address_size() { return kPointerSize; }
 
-HeapObject RelocInfo::target_object() {
+HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
   DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
          IsDataEmbeddedObject(rmode_));
   if (IsDataEmbeddedObject(rmode_)) {
@@ -101,10 +101,6 @@ HeapObject RelocInfo::target_object() {
       Object(Assembler::target_address_at(pc_, constant_pool_)));
 }
 
-HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) {
-  return target_object();
-}
-
 Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
   if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
     return Handle<HeapObject>(reinterpret_cast<Address*>(
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index b49d9ed186d821..38d691007f3ff5 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -5172,29 +5172,9 @@ void Assembler::RecordConstPool(int size) {
   RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
 }
 
-void Assembler::FixOnHeapReferences(bool update_embedded_objects) {
-  if (!update_embedded_objects) return;
-  Address base = reinterpret_cast<Address>(buffer_->start());
-  for (auto p : saved_handles_for_raw_object_ptr_) {
-    Handle<HeapObject> object(reinterpret_cast<Address*>(p.second));
-    WriteUnalignedValue(base + p.first, *object);
-  }
-}
-
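Editor's note (not part of the patch): the removed helper above and the one below both boil down to patching pointer-sized values at arbitrary, possibly unaligned offsets inside the instruction stream. A sketch of that primitive, assuming nothing about V8's real helpers:

#include <cstdint>
#include <cstring>

template <typename V>
void WriteUnalignedValue(uintptr_t address, V value) {
  // Instruction streams give no alignment guarantee for embedded constants,
  // so patch via memcpy rather than a typed store.
  std::memcpy(reinterpret_cast<void*>(address), &value, sizeof(V));
}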
-void Assembler::FixOnHeapReferencesToHandles() {
-  Address base = reinterpret_cast<Address>(buffer_->start());
-  for (auto p : saved_handles_for_raw_object_ptr_) {
-    WriteUnalignedValue(base + p.first, p.second);
-  }
-  saved_handles_for_raw_object_ptr_.clear();
-}
-
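Editor's note (not part of the patch): GrowBuffer() below keeps the same growth policy on every port; a standalone restatement of just that arithmetic:

#include <algorithm>
#include <cassert>

int NextBufferSize(int old_size) {
  constexpr int MB = 1 << 20;
  // Double small buffers, but cap the increment at 1 MB for large ones.
  return std::min(2 * old_size, old_size + 1 * MB);
}

int main() {
  assert(NextBufferSize(4096) == 8192);          // small buffers double
  assert(NextBufferSize(4 << 20) == (5 << 20));  // large ones grow by 1 MB
}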
 void Assembler::GrowBuffer() {
   DCHECK_EQ(buffer_start_, buffer_->start());
 
-  bool previously_on_heap = buffer_->IsOnHeap();
-  int previous_on_heap_gc_count = OnHeapGCCount();
-
   // Compute new buffer size.
   int old_size = buffer_->size();
   int new_size = std::min(2 * old_size, old_size + 1 * MB);
@@ -5227,15 +5207,6 @@ void Assembler::GrowBuffer() {
       reinterpret_cast<Address>(reloc_info_writer.last_pc()) + pc_delta);
   reloc_info_writer.Reposition(new_reloc_start, new_last_pc);
 
-  // Fix on-heap references.
-  if (previously_on_heap) {
-    if (buffer_->IsOnHeap()) {
-      FixOnHeapReferences(previous_on_heap_gc_count != OnHeapGCCount());
-    } else {
-      FixOnHeapReferencesToHandles();
-    }
-  }
-
   // None of our relocation types are pc relative pointing outside the code
   // buffer nor pc absolute pointing inside the code buffer, so there is no need
   // to relocate any emitted relocation entries.
@@ -5470,15 +5441,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
       instr_at_put(entry.position(), SetLdrRegisterImmediateOffset(instr, delta));
       if (!entry.is_merged()) {
-        if (IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(entry.rmode())) {
-          int offset = pc_offset();
-          saved_handles_for_raw_object_ptr_.emplace_back(offset, entry.value());
-          Handle<HeapObject> object(reinterpret_cast<Address*>(entry.value()));
-          emit(object->ptr());
-          DCHECK(EmbeddedObjectMatches(offset, object));
-        } else {
-          emit(entry.value());
-        }
+        emit(entry.value());
       }
     }
 
diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index a34b9e1b6626cc..a7d224a09457ed 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -328,15 +328,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
     GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
   }
 
-  // This function is called when on-heap-compilation invariants are
-  // invalidated. For instance, when the assembler buffer grows or a GC happens
-  // between Code object allocation and Code object finalization.
-  void FixOnHeapReferences(bool update_embedded_objects = true);
-
-  // This function is called when we fall back from on-heap to off-heap
-  // compilation and patch on-heap references to handles.
-  void FixOnHeapReferencesToHandles();
-
   // Label operations & relative jumps (PPUM Appendix D)
   //
   // Takes a branch opcode (cc) and a label (L) and generates
@@ -1197,13 +1188,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
     }
   }
 
-#ifdef DEBUG
-  bool EmbeddedObjectMatches(int pc_offset, Handle<Object> object) {
-    return *reinterpret_cast<uint32_t*>(buffer_->start() + pc_offset) ==
-           (IsOnHeap() ? object->ptr() : object.address());
-  }
-#endif
-
   // Move a 32-bit immediate into a register, potentially via the constant pool.
void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al); diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h index 83d82fe3cea089..14960a3193bcff 100644 --- a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h +++ b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h @@ -117,7 +117,7 @@ constexpr auto CallTrampolineDescriptor::registers() { // static constexpr auto CallVarargsDescriptor::registers() { - // r0 : number of arguments (on the stack, not including receiver) + // r0 : number of arguments (on the stack) // r1 : the target to call // r4 : arguments list length (untagged) // r2 : arguments list (FixedArray) @@ -135,13 +135,13 @@ constexpr auto CallForwardVarargsDescriptor::registers() { // static constexpr auto CallFunctionTemplateDescriptor::registers() { // r1 : function template info - // r2 : number of arguments (on the stack, not including receiver) + // r2 : number of arguments (on the stack) return RegisterArray(r1, r2); } // static constexpr auto CallWithSpreadDescriptor::registers() { - // r0 : number of arguments (on the stack, not including receiver) + // r0 : number of arguments (on the stack) // r1 : the target to call // r2 : the object to spread return RegisterArray(r1, r0, r2); @@ -156,7 +156,7 @@ constexpr auto CallWithArrayLikeDescriptor::registers() { // static constexpr auto ConstructVarargsDescriptor::registers() { - // r0 : number of arguments (on the stack, not including receiver) + // r0 : number of arguments (on the stack) // r1 : the target to call // r3 : the new target // r4 : arguments list length (untagged) @@ -175,7 +175,7 @@ constexpr auto ConstructForwardVarargsDescriptor::registers() { // static constexpr auto ConstructWithSpreadDescriptor::registers() { - // r0 : number of arguments (on the stack, not including receiver) + // r0 : number of arguments (on the stack) // r1 : the target to call // r3 : the new target // r2 : the object to spread @@ -241,7 +241,7 @@ constexpr auto InterpreterDispatchDescriptor::registers() { // static constexpr auto InterpreterPushArgsThenCallDescriptor::registers() { - return RegisterArray(r0, // argument count (not including receiver) + return RegisterArray(r0, // argument count r2, // address of first argument r1); // the target callable to be call } @@ -249,7 +249,7 @@ constexpr auto InterpreterPushArgsThenCallDescriptor::registers() { // static constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() { return RegisterArray( - r0, // argument count (not including receiver) + r0, // argument count r4, // address of the first argument r1, // constructor to call r3, // new target diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc index aebfaab9320b8f..5c46c64b3eebee 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc @@ -2218,6 +2218,20 @@ void MacroAssembler::AssertFunction(Register object) { Check(ls, AbortReason::kOperandIsNotAFunction); } +void MacroAssembler::AssertCallableFunction(Register object) { + if (!FLAG_debug_code) return; + ASM_CODE_COMMENT(this); + STATIC_ASSERT(kSmiTag == 0); + tst(object, Operand(kSmiTagMask)); + Check(ne, AbortReason::kOperandIsASmiAndNotAFunction); + push(object); + LoadMap(object, object); + CompareInstanceTypeRange(object, object, FIRST_CALLABLE_JS_FUNCTION_TYPE, + LAST_CALLABLE_JS_FUNCTION_TYPE); + pop(object); + Check(ls, 
AbortReason::kOperandIsNotACallableFunction); +} + void MacroAssembler::AssertBoundFunction(Register object) { if (!FLAG_debug_code) return; ASM_CODE_COMMENT(this); diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h index 3dc3e208f59d23..73efa120028f2c 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.h +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h @@ -851,6 +851,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Abort execution if argument is not a JSFunction, enabled via --debug-code. void AssertFunction(Register object); + // Abort execution if argument is not a callable JSFunction, enabled via + // --debug-code. + void AssertCallableFunction(Register object); + // Abort execution if argument is not a JSBoundFunction, // enabled via --debug-code. void AssertBoundFunction(Register object); diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h index 41d07b10b18983..c5a1d4fd8ad0aa 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h @@ -655,31 +655,25 @@ Address RelocInfo::constant_pool_entry_address() { return Assembler::target_pointer_address_at(pc_); } -HeapObject RelocInfo::target_object() { +HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) { DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_)); if (IsDataEmbeddedObject(rmode_)) { return HeapObject::cast(Object(ReadUnalignedValue
(pc_))); } else if (IsCompressedEmbeddedObject(rmode_)) { - CHECK(!host_.is_null()); - return HeapObject::cast(Object(DecompressTaggedAny( - host_.address(), - Assembler::target_compressed_address_at(pc_, constant_pool_)))); + Tagged_t compressed = + Assembler::target_compressed_address_at(pc_, constant_pool_); + DCHECK(!HAS_SMI_TAG(compressed)); + Object obj(DecompressTaggedPointer(cage_base, compressed)); + // Embedding of compressed Code objects must not happen when external code + // space is enabled, because CodeDataContainers must be used instead. + DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !obj.IsCode(cage_base)); + return HeapObject::cast(obj); } else { return HeapObject::cast( Object(Assembler::target_address_at(pc_, constant_pool_))); } } -HeapObject RelocInfo::target_object_no_host(PtrComprCageBase cage_base) { - if (IsCompressedEmbeddedObject(rmode_)) { - return HeapObject::cast(Object(DecompressTaggedAny( - cage_base, - Assembler::target_compressed_address_at(pc_, constant_pool_)))); - } else { - return target_object(); - } -} - Handle RelocInfo::target_object_handle(Assembler* origin) { if (IsDataEmbeddedObject(rmode_)) { return Handle::cast(ReadUnalignedValue>(pc_)); diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc index f6a035a9e7737b..627c7ae0213351 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc @@ -2627,7 +2627,7 @@ void Assembler::fmov(const VRegister& vd, float imm) { DCHECK(vd.Is1S()); Emit(FMOV_s_imm | Rd(vd) | ImmFP(imm)); } else { - DCHECK(vd.Is2S() | vd.Is4S()); + DCHECK(vd.Is2S() || vd.Is4S()); Instr op = NEONModifiedImmediate_MOVI; Instr q = vd.Is4S() ? NEON_Q : 0; Emit(q | op | ImmNEONFP(imm) | NEONCmode(0xF) | Rd(vd)); @@ -4275,42 +4275,7 @@ bool Assembler::IsImmFP64(double imm) { return true; } -void Assembler::FixOnHeapReferences(bool update_embedded_objects) { - Address base = reinterpret_cast
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index f6a035a9e7737b..627c7ae0213351 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -2627,7 +2627,7 @@ void Assembler::fmov(const VRegister& vd, float imm) {
     DCHECK(vd.Is1S());
     Emit(FMOV_s_imm | Rd(vd) | ImmFP(imm));
   } else {
-    DCHECK(vd.Is2S() | vd.Is4S());
+    DCHECK(vd.Is2S() || vd.Is4S());
     Instr op = NEONModifiedImmediate_MOVI;
     Instr q = vd.Is4S() ? NEON_Q : 0;
     Emit(q | op | ImmNEONFP(imm) | NEONCmode(0xF) | Rd(vd));
@@ -4275,42 +4275,7 @@ bool Assembler::IsImmFP64(double imm) {
   return true;
 }
 
-void Assembler::FixOnHeapReferences(bool update_embedded_objects) {
-  Address base = reinterpret_cast<Address>(buffer_->start());
-  if (update_embedded_objects) {
-    for (auto p : saved_handles_for_raw_object_ptr_) {
-      Handle<HeapObject> object = GetEmbeddedObject(p.second);
-      WriteUnalignedValue(base + p.first, object->ptr());
-    }
-  }
-  for (auto p : saved_offsets_for_runtime_entries_) {
-    Instruction* instr = reinterpret_cast<Instruction*>(base + p.first);
-    Address target = p.second * kInstrSize + options().code_range_start;
-    DCHECK(is_int26(p.second));
-    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
-    instr->SetBranchImmTarget(reinterpret_cast<Instruction*>(target));
-  }
-}
-
-void Assembler::FixOnHeapReferencesToHandles() {
-  Address base = reinterpret_cast<Address>(buffer_->start());
-  for (auto p : saved_handles_for_raw_object_ptr_) {
-    WriteUnalignedValue(base + p.first, p.second);
-  }
-  saved_handles_for_raw_object_ptr_.clear();
-  for (auto p : saved_offsets_for_runtime_entries_) {
-    Instruction* instr = reinterpret_cast<Instruction*>(base + p.first);
-    DCHECK(is_int26(p.second));
-    DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
-    instr->SetInstructionBits(instr->Mask(UnconditionalBranchMask) | p.second);
-  }
-  saved_offsets_for_runtime_entries_.clear();
-}
-
 void Assembler::GrowBuffer() {
-  bool previously_on_heap = buffer_->IsOnHeap();
-  int previous_on_heap_gc_count = OnHeapGCCount();
-
   // Compute new buffer size.
   int old_size = buffer_->size();
   int new_size = std::min(2 * old_size, old_size + 1 * MB);
@@ -4353,15 +4318,6 @@ void Assembler::GrowBuffer() {
     WriteUnalignedValue<intptr_t>(address, internal_ref);
   }
 
-  // Fix on-heap references.
-  if (previously_on_heap) {
-    if (buffer_->IsOnHeap()) {
-      FixOnHeapReferences(previous_on_heap_gc_count != OnHeapGCCount());
-    } else {
-      FixOnHeapReferencesToHandles();
-    }
-  }
-
   // Pending relocation entries are also relative, no need to relocate.
 }
diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index 8cdca7bfa83ef7..dac90f8058c9dd 100644
--- a/deps/v8/src/codegen/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -204,15 +204,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
     GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
   }
 
-  // This function is called when on-heap-compilation invariants are
-  // invalidated. For instance, when the assembler buffer grows or a GC happens
-  // between Code object allocation and Code object finalization.
-  void FixOnHeapReferences(bool update_embedded_objects = true);
-
-  // This function is called when we fall back from on-heap to off-heap
-  // compilation and patch on-heap references to handles.
-  void FixOnHeapReferencesToHandles();
-
   // Insert the smallest number of nop instructions
   // possible to align the pc offset to a multiple
   // of m. m must be a power of 2 (>= 4).
@@ -2689,12 +2680,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   static size_t GetApproxMaxDistToConstPoolForTesting() {
     return ConstantPool::kApproxDistToPool64;
   }
-
-  bool EmbeddedObjectMatches(int pc_offset, Handle<Object> object,
-                             EmbeddedObjectIndex index) {
-    return *reinterpret_cast<uint64_t*>(buffer_->start() + pc_offset) ==
-           (IsOnHeap() ?
object->ptr() : index); - } #endif class FarBranchInfo { diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h index e8fe4ef1d327b5..f1fa16673c1464 100644 --- a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h @@ -117,7 +117,7 @@ constexpr auto CallTrampolineDescriptor::registers() { // static constexpr auto CallVarargsDescriptor::registers() { - // x0 : number of arguments (on the stack, not including receiver) + // x0 : number of arguments (on the stack) // x1 : the target to call // x4 : arguments list length (untagged) // x2 : arguments list (FixedArray) @@ -135,13 +135,13 @@ constexpr auto CallForwardVarargsDescriptor::registers() { // static constexpr auto CallFunctionTemplateDescriptor::registers() { // x1 : function template info - // x2 : number of arguments (on the stack, not including receiver) + // x2 : number of arguments (on the stack) return RegisterArray(x1, x2); } // static constexpr auto CallWithSpreadDescriptor::registers() { - // x0 : number of arguments (on the stack, not including receiver) + // x0 : number of arguments (on the stack) // x1 : the target to call // x2 : the object to spread return RegisterArray(x1, x0, x2); @@ -156,7 +156,7 @@ constexpr auto CallWithArrayLikeDescriptor::registers() { // static constexpr auto ConstructVarargsDescriptor::registers() { - // x0 : number of arguments (on the stack, not including receiver) + // x0 : number of arguments (on the stack) // x1 : the target to call // x3 : the new target // x4 : arguments list length (untagged) @@ -175,7 +175,7 @@ constexpr auto ConstructForwardVarargsDescriptor::registers() { // static constexpr auto ConstructWithSpreadDescriptor::registers() { - // x0 : number of arguments (on the stack, not including receiver) + // x0 : number of arguments (on the stack) // x1 : the target to call // x3 : the new target // x2 : the object to spread @@ -249,7 +249,7 @@ constexpr auto InterpreterDispatchDescriptor::registers() { // static constexpr auto InterpreterPushArgsThenCallDescriptor::registers() { - return RegisterArray(x0, // argument count (not including receiver) + return RegisterArray(x0, // argument count x2, // address of first argument x1); // the target callable to be call } @@ -257,7 +257,7 @@ constexpr auto InterpreterPushArgsThenCallDescriptor::registers() { // static constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() { return RegisterArray( - x0, // argument count (not including receiver) + x0, // argument count x4, // address of the first argument x1, // constructor to call x3, // new target diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index 91d972ea0004a1..bcf2e4574ab030 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -1551,6 +1551,19 @@ void MacroAssembler::AssertFunction(Register object) { Check(ls, AbortReason::kOperandIsNotAFunction); } +void MacroAssembler::AssertCallableFunction(Register object) { + if (!FLAG_debug_code) return; + ASM_CODE_COMMENT(this); + AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction); + + UseScratchRegisterScope temps(this); + Register temp = temps.AcquireX(); + LoadMap(temp, object); + CompareInstanceTypeRange(temp, temp, FIRST_CALLABLE_JS_FUNCTION_TYPE, + LAST_CALLABLE_JS_FUNCTION_TYPE); + Check(ls, 
AbortReason::kOperandIsNotACallableFunction); +} + void MacroAssembler::AssertBoundFunction(Register object) { if (!FLAG_debug_code) return; ASM_CODE_COMMENT(this); @@ -1843,10 +1856,6 @@ int64_t TurboAssembler::CalculateTargetOffset(Address target, void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond) { int64_t offset = CalculateTargetOffset(target, rmode, pc_); - if (RelocInfo::IsRuntimeEntry(rmode) && IsOnHeap()) { - saved_offsets_for_runtime_entries_.emplace_back(pc_offset(), offset); - offset = CalculateTargetOffset(target, RelocInfo::NONE, pc_); - } JumpHelper(offset, rmode, cond); } @@ -1891,10 +1900,6 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) { BlockPoolsScope scope(this); if (CanUseNearCallOrJump(rmode)) { int64_t offset = CalculateTargetOffset(target, rmode, pc_); - if (IsOnHeap() && RelocInfo::IsRuntimeEntry(rmode)) { - saved_offsets_for_runtime_entries_.emplace_back(pc_offset(), offset); - offset = CalculateTargetOffset(target, RelocInfo::NONE, pc_); - } DCHECK(IsNearCallOffset(offset)); near_call(static_cast(offset), rmode); } else { @@ -2099,9 +2104,13 @@ void TurboAssembler::LoadCodeDataContainerEntry( void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin( Register destination, Register code_data_container_object) { ASM_CODE_COMMENT(this); - LoadTaggedPointerField(destination, - FieldMemOperand(code_data_container_object, - CodeDataContainer::kCodeOffset)); + CHECK(V8_EXTERNAL_CODE_SPACE_BOOL); + // Given the fields layout we can read the Code reference as a full word. + STATIC_ASSERT(!V8_EXTERNAL_CODE_SPACE_BOOL || + (CodeDataContainer::kCodeCageBaseUpper32BitsOffset == + CodeDataContainer::kCodeOffset + kTaggedSize)); + Ldr(destination, FieldMemOperand(code_data_container_object, + CodeDataContainer::kCodeOffset)); } void TurboAssembler::CallCodeDataContainerObject( @@ -3063,6 +3072,43 @@ void MacroAssembler::RecordWriteField(Register object, int offset, Bind(&done); } +void TurboAssembler::EncodeCagedPointer(const Register& value) { + ASM_CODE_COMMENT(this); +#ifdef V8_CAGED_POINTERS + Sub(value, value, kPtrComprCageBaseRegister); + Mov(value, Operand(value, LSL, kCagedPointerShift)); +#else + UNREACHABLE(); +#endif +} + +void TurboAssembler::DecodeCagedPointer(const Register& value) { + ASM_CODE_COMMENT(this); +#ifdef V8_CAGED_POINTERS + Add(value, kPtrComprCageBaseRegister, + Operand(value, LSR, kCagedPointerShift)); +#else + UNREACHABLE(); +#endif +} + +void TurboAssembler::LoadCagedPointerField(const Register& destination, + const MemOperand& field_operand) { + ASM_CODE_COMMENT(this); + Ldr(destination, field_operand); + DecodeCagedPointer(destination); +} + +void TurboAssembler::StoreCagedPointerField( + const Register& value, const MemOperand& dst_field_operand) { + ASM_CODE_COMMENT(this); + UseScratchRegisterScope temps(this); + Register scratch = temps.AcquireX(); + Mov(scratch, value); + EncodeCagedPointer(scratch); + Str(scratch, dst_field_operand); +} + void TurboAssembler::LoadExternalPointerField(Register destination, MemOperand field_operand, ExternalPointerTag tag, diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h index 8f60217d9e92c8..165d702c31e9a0 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -1343,6 +1343,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { DCHECK(allow_macro_instructions()); cmlt(vd, vn, imm); } + void Cmle(const 
VRegister& vd, const VRegister& vn, int imm) {
+    DCHECK(allow_macro_instructions());
+    cmle(vd, vn, imm);
+  }
 
   inline void Neg(const Register& rd, const Operand& operand);
   inline void Negs(const Register& rd, const Operand& operand);
@@ -1437,6 +1441,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // ---------------------------------------------------------------------------
   // V8 Heap sandbox support
 
+  // Transform a CagedPointer from/to its encoded form, which is used when the
+  // pointer is stored on the heap and ensures that the pointer will always
+  // point into the virtual memory cage.
+  void EncodeCagedPointer(const Register& value);
+  void DecodeCagedPointer(const Register& value);
+
+  // Load and decode a CagedPointer from the heap.
+  void LoadCagedPointerField(const Register& destination,
+                             const MemOperand& field_operand);
+  // Encode and store a CagedPointer to the heap.
+  void StoreCagedPointerField(const Register& value,
+                              const MemOperand& dst_field_operand);
+
   // Loads a field containing off-heap pointer and does necessary decoding
   // if V8 heap sandbox is enabled.
   void LoadExternalPointerField(Register destination, MemOperand field_operand,
@@ -1618,11 +1635,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   inline void Umsubl(const Register& rd, const Register& rn, const Register& rm,
                      const Register& ra);
 
-  void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
-    DCHECK(allow_macro_instructions());
-    cmle(vd, vn, imm);
-  }
-
   void Ld1(const VRegister& vt, const MemOperand& src) {
     DCHECK(allow_macro_instructions());
     ld1(vt, src);
@@ -1858,6 +1870,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   // Abort execution if argument is not a JSFunction, enabled via --debug-code.
   void AssertFunction(Register object);
 
+  // Abort execution if argument is not a callable JSFunction, enabled via
+  // --debug-code.
+  void AssertCallableFunction(Register object);
+
   // Abort execution if argument is not a JSGeneratorObject (or subclass),
   // enabled via --debug-code.
   void AssertGeneratorObject(Register object);
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index cacbfbd679fa36..dd5c8b2d9aa338 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -140,48 +140,6 @@ class ExternalAssemblerBufferImpl : public AssemblerBuffer {
   const int size_;
 };
 
-class OnHeapAssemblerBuffer : public AssemblerBuffer {
- public:
-  OnHeapAssemblerBuffer(Isolate* isolate, Handle<Code> code, int size,
-                        int gc_count)
-      : isolate_(isolate), code_(code), size_(size), gc_count_(gc_count) {}
-
-  byte* start() const override {
-    return reinterpret_cast<byte*>(code_->raw_instruction_start());
-  }
-
-  int size() const override { return size_; }
-
-  std::unique_ptr<AssemblerBuffer> Grow(int new_size) override {
-    DCHECK_LT(size(), new_size);
-    Heap* heap = isolate_->heap();
-    if (Code::SizeFor(new_size) <
-        heap->MaxRegularHeapObjectSize(AllocationType::kCode)) {
-      MaybeHandle<Code> code =
-          isolate_->factory()->NewEmptyCode(CodeKind::BASELINE, new_size);
-      if (!code.is_null()) {
-        return std::make_unique<OnHeapAssemblerBuffer>(
-            isolate_, code.ToHandleChecked(), new_size, heap->gc_count());
-      }
-    }
-    // We fall back to the slow path using the default assembler buffer and
-    // compile the code off the GC heap.
-    return std::make_unique<DefaultAssemblerBuffer>(new_size);
-  }
-
-  bool IsOnHeap() const override { return true; }
-
-  int OnHeapGCCount() const override { return gc_count_; }
-
-  MaybeHandle<Code> code() const override { return code_; }
-
- private:
-  Isolate* isolate_;
-  Handle<Code> code_;
-  const int size_;
-  const int gc_count_;
-};
-
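Editor's note (not part of the patch): the removed class's Grow() made a simple either/or choice; a compact restatement with stand-in types that are not V8 API:

#include <memory>

struct Buffer { virtual ~Buffer() = default; };
struct OnHeapBuffer : Buffer { explicit OnHeapBuffer(int) {} };
struct OffHeapBuffer : Buffer { explicit OffHeapBuffer(int) {} };

std::unique_ptr<Buffer> Grow(int new_size, bool fits_regular_code_object,
                             bool allocation_succeeded) {
  if (fits_regular_code_object && allocation_succeeded) {
    return std::make_unique<OnHeapBuffer>(new_size);  // keep code on the heap
  }
  // Fall back to the slow path: finish compiling off the GC heap.
  return std::make_unique<OffHeapBuffer>(new_size);
}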
 static thread_local std::aligned_storage_t<sizeof(ExternalAssemblerBufferImpl),
                                            alignof(ExternalAssemblerBufferImpl)>
     tls_singleton_storage;
@@ -218,16 +176,6 @@ std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size) {
   return std::make_unique<DefaultAssemblerBuffer>(size);
 }
 
-std::unique_ptr<AssemblerBuffer> NewOnHeapAssemblerBuffer(Isolate* isolate,
-                                                          int estimated) {
-  int size = std::max(AssemblerBase::kMinimalBufferSize, estimated);
-  MaybeHandle<Code> code =
-      isolate->factory()->NewEmptyCode(CodeKind::BASELINE, size);
-  if (code.is_null()) return {};
-  return std::make_unique<OnHeapAssemblerBuffer>(
-      isolate, code.ToHandleChecked(), size, isolate->heap()->gc_count());
-}
-
 // -----------------------------------------------------------------------------
 // Implementation of AssemblerBase
 
@@ -248,12 +196,6 @@ AssemblerBase::AssemblerBase(const AssemblerOptions& options,
   if (!buffer_) buffer_ = NewAssemblerBuffer(kDefaultBufferSize);
   buffer_start_ = buffer_->start();
   pc_ = buffer_start_;
-  if (IsOnHeap()) {
-    saved_handles_for_raw_object_ptr_.reserve(
-        kSavedHandleForRawObjectsInitialSize);
-    saved_offsets_for_runtime_entries_.reserve(
-        kSavedOffsetForRuntimeEntriesInitialSize);
-  }
 }
 
 AssemblerBase::~AssemblerBase() = default;
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index f1e5b85f1f6798..50711046e6b1f4 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -202,11 +202,6 @@ class AssemblerBuffer {
   // destructed), but not written.
   virtual std::unique_ptr<AssemblerBuffer> Grow(int new_size)
       V8_WARN_UNUSED_RESULT = 0;
-  virtual bool IsOnHeap() const { return false; }
-  virtual MaybeHandle<Code> code() const { return MaybeHandle<Code>(); }
-  // Return the GC count when the buffer was allocated (only if the buffer is on
-  // the GC heap).
-  virtual int OnHeapGCCount() const { return 0; }
 };
 
 // Allocate an AssemblerBuffer which uses an existing buffer. This buffer cannot
@@ -219,10 +214,6 @@ std::unique_ptr<AssemblerBuffer> ExternalAssemblerBuffer(void* buffer,
 V8_EXPORT_PRIVATE
 std::unique_ptr<AssemblerBuffer> NewAssemblerBuffer(int size);
 
-V8_EXPORT_PRIVATE
-std::unique_ptr<AssemblerBuffer> NewOnHeapAssemblerBuffer(Isolate* isolate,
-                                                          int size);
-
 class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
  public:
   AssemblerBase(const AssemblerOptions& options,
@@ -286,15 +277,6 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
 #endif
   }
 
-  bool IsOnHeap() const { return buffer_->IsOnHeap(); }
-
-  int OnHeapGCCount() const { return buffer_->OnHeapGCCount(); }
-
-  MaybeHandle<Code> code() const {
-    DCHECK(IsOnHeap());
-    return buffer_->code();
-  }
-
   byte* buffer_start() const { return buffer_->start(); }
   int buffer_size() const { return buffer_->size(); }
   int instruction_size() const { return pc_offset(); }
@@ -419,14 +401,6 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
 
   CodeCommentsWriter code_comments_writer_;
 
-  // Relocation information when code allocated directly on heap.
-  // These constants correspond to the 99th percentile of a selected number of
-  // JS frameworks and benchmarks, including jquery, lodash, d3 and
-  // speedometer3.
-  const int kSavedHandleForRawObjectsInitialSize = 60;
-  const int kSavedOffsetForRuntimeEntriesInitialSize = 100;
-  std::vector<std::pair<uint32_t, Address>> saved_handles_for_raw_object_ptr_;
-  std::vector<std::pair<uint32_t, intptr_t>> saved_offsets_for_runtime_entries_;
-
  private:
   // Before we copy code into the code space, we sometimes cannot encode
   // call/jump code targets as we normally would, as the difference between the
diff --git a/deps/v8/src/codegen/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h
index 128858a47fa11a..c2374536f72777 100644
--- a/deps/v8/src/codegen/bailout-reason.h
+++ b/deps/v8/src/codegen/bailout-reason.h
@@ -48,6 +48,7 @@ namespace internal {
   V(kOperandIsNotAConstructor, "Operand is not a constructor")                 \
   V(kOperandIsNotAFixedArray, "Operand is not a fixed array")                  \
   V(kOperandIsNotAFunction, "Operand is not a function")                       \
+  V(kOperandIsNotACallableFunction, "Operand is not a callable function")      \
   V(kOperandIsNotAGeneratorObject, "Operand is not a generator object")        \
   V(kOperandIsNotACodeT, "Operand is not a CodeT")                             \
   V(kOperandIsNotASmi, "Operand is not a smi")                                 \
diff --git a/deps/v8/src/codegen/code-factory.cc b/deps/v8/src/codegen/code-factory.cc
index dcf19a0ad51a51..494f23de76ef0d 100644
--- a/deps/v8/src/codegen/code-factory.cc
+++ b/deps/v8/src/codegen/code-factory.cc
@@ -96,15 +96,11 @@ Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
 }
 
 Callable CodeFactory::StoreOwnIC(Isolate* isolate) {
-  // TODO(ishell): Currently we use StoreOwnIC only for storing properties that
-  // already exist in the boilerplate therefore we can use StoreIC.
-  return Builtins::CallableFor(isolate, Builtin::kStoreICTrampoline);
+  return Builtins::CallableFor(isolate, Builtin::kStoreOwnICTrampoline);
 }
 
 Callable CodeFactory::StoreOwnICInOptimizedCode(Isolate* isolate) {
-  // TODO(ishell): Currently we use StoreOwnIC only for storing properties that
-  // already exist in the boilerplate therefore we can use StoreIC.
-  return Builtins::CallableFor(isolate, Builtin::kStoreIC);
+  return Builtins::CallableFor(isolate, Builtin::kStoreOwnIC);
 }
 
 // static
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index e61933b05ac387..4a9c06bdd89d40 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -1539,6 +1539,32 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(TNode<Object> value,
   }
 }
 
+#ifdef V8_CAGED_POINTERS
+
+TNode<RawPtrT> CodeStubAssembler::LoadCagedPointerFromObject(
+    TNode<HeapObject> object, TNode<IntPtrT> field_offset) {
+  return LoadObjectField<RawPtrT>(object, field_offset);
+}
+
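Editor's note (not part of the patch): the DEBUG block in StoreCagedPointerToObject below is a plain half-open range check; restated in C++ with illustrative names:

#include <cassert>
#include <cstdint>

void CheckPointerIsInsideCage(uintptr_t pointer, uintptr_t cage_base,
                              uintptr_t cage_end) {
  assert(pointer >= cage_base);  // CSA: UintPtrGreaterThanOrEqual
  assert(pointer < cage_end);    // CSA: UintPtrLessThan
}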
+void CodeStubAssembler::StoreCagedPointerToObject(TNode<HeapObject> object,
+                                                  TNode<IntPtrT> offset,
+                                                  TNode<RawPtrT> pointer) {
+#ifdef DEBUG
+  // Verify pointer points into the cage.
+  TNode<ExternalReference> cage_base_address =
+      ExternalConstant(ExternalReference::virtual_memory_cage_base_address());
+  TNode<ExternalReference> cage_end_address =
+      ExternalConstant(ExternalReference::virtual_memory_cage_end_address());
+  TNode<UintPtrT> cage_base = Load<UintPtrT>(cage_base_address);
+  TNode<UintPtrT> cage_end = Load<UintPtrT>(cage_end_address);
+  CSA_CHECK(this, UintPtrGreaterThanOrEqual(pointer, cage_base));
+  CSA_CHECK(this, UintPtrLessThan(pointer, cage_end));
+#endif
+  StoreObjectFieldNoWriteBarrier(object, offset, pointer);
+}
+
+#endif  // V8_CAGED_POINTERS
+
 TNode<ExternalPointerT> CodeStubAssembler::ChangeUint32ToExternalPointer(
     TNode<Uint32T> value) {
   STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
@@ -14440,6 +14466,19 @@ TNode<CodeT> CodeStubAssembler::GetSharedFunctionInfoCode(
   return sfi_code.value();
 }
 
+TNode<RawPtrT> CodeStubAssembler::GetCodeEntry(TNode<CodeT> code) {
+#ifdef V8_EXTERNAL_CODE_SPACE
+  TNode<CodeDataContainer> cdc = CodeDataContainerFromCodeT(code);
+  return LoadExternalPointerFromObject(
+      cdc, IntPtrConstant(CodeDataContainer::kCodeEntryPointOffset),
+      kCodeEntryPointTag);
+#else
+  TNode<IntPtrT> object = BitcastTaggedToWord(code);
+  return ReinterpretCast<RawPtrT>(
+      IntPtrAdd(object, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)));
+#endif
+}
+
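Editor's note (not part of the patch): GetCodeEntry above, and the FromCodeT change later in this patch, rely on the compressed Code field and the cage-base field being adjacent 32-bit halves, so one full-word little-endian load yields the decompressed pointer. A sketch of that layout trick; the struct is illustrative, not V8's real object layout:

#include <cstdint>
#include <cstring>

struct CodeDataContainerFields {
  uint32_t code;                    // compressed Code reference (low half)
  uint32_t code_cage_base_upper32;  // upper 32 bits of the cage base
};

uint64_t ReadCodeAsFullWord(const CodeDataContainerFields& c) {
  uint64_t word;
  // Single full-word load spanning both 32-bit fields (little-endian).
  std::memcpy(&word, &c.code, sizeof(word));
  return word;  // == (upper32 << 32) | compressed, i.e. the full pointer
}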
 TNode<JSFunction> CodeStubAssembler::AllocateFunctionWithMapAndContext(
     TNode<Map> map, TNode<SharedFunctionInfo> shared_info,
     TNode<Context> context) {
@@ -14713,42 +14752,8 @@ TNode<JSArray> CodeStubAssembler::ArrayCreate(TNode<Context> context,
 
 void CodeStubAssembler::SetPropertyLength(TNode<Context> context,
                                           TNode<Object> array,
                                           TNode<Number> length) {
-  Label fast(this), runtime(this), done(this);
-  // There's no need to set the length, if
-  // 1) the array is a fast JS array and
-  // 2) the new length is equal to the old length.
-  // as the set is not observable. Otherwise fall back to the run-time.
-
-  // 1) Check that the array has fast elements.
-  // TODO(delphick): Consider changing this since it does an unnecessary
-  // check for SMIs.
-  // TODO(delphick): Also we could hoist this to after the array construction
-  // and copy the args into array in the same way as the Array constructor.
-  BranchIfFastJSArray(array, context, &fast, &runtime);
-
-  BIND(&fast);
-  {
-    TNode<JSArray> fast_array = CAST(array);
-
-    TNode<Smi> length_smi = CAST(length);
-    TNode<Smi> old_length = LoadFastJSArrayLength(fast_array);
-    CSA_DCHECK(this, TaggedIsPositiveSmi(old_length));
-
-    // 2) If the created array's length matches the required length, then
-    //    there's nothing else to do. Otherwise use the runtime to set the
-    //    property as that will insert holes into excess elements or shrink
-    //    the backing store as appropriate.
-    Branch(SmiNotEqual(length_smi, old_length), &runtime, &done);
-  }
-
-  BIND(&runtime);
-  {
-    SetPropertyStrict(context, array, CodeStubAssembler::LengthStringConstant(),
-                      length);
-    Goto(&done);
-  }
-
-  BIND(&done);
+  SetPropertyStrict(context, array, CodeStubAssembler::LengthStringConstant(),
+                    length);
 }
 
 TNode<Smi> CodeStubAssembler::RefillMathRandom(
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 1cb0b4cf6e1740..4d16af8a3d63e9 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -9,7 +9,6 @@
 #include "src/base/macros.h"
 #include "src/codegen/bailout-reason.h"
-#include "src/common/external-pointer.h"
 #include "src/common/globals.h"
 #include "src/common/message-template.h"
 #include "src/compiler/code-assembler.h"
@@ -27,6 +26,7 @@
 #include "src/objects/swiss-name-dictionary.h"
 #include "src/objects/tagged-index.h"
 #include "src/roots/roots.h"
+#include "src/security/external-pointer.h"
 #include "torque-generated/exported-macros-assembler.h"
 
 namespace v8 {
@@ -804,7 +804,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<Code> FromCodeT(TNode<CodeT> code) {
 #ifdef V8_EXTERNAL_CODE_SPACE
-    return LoadObjectField<Code>(code, CodeDataContainer::kCodeOffset);
+#if V8_TARGET_BIG_ENDIAN
+#error "This code requires updating for big-endian architectures"
+#endif
+    // Given the fields layout we can read the Code reference as a full word.
+    STATIC_ASSERT(CodeDataContainer::kCodeCageBaseUpper32BitsOffset ==
+                  CodeDataContainer::kCodeOffset + kTaggedSize);
+    TNode<Object> o = BitcastWordToTagged(Load<RawPtrT>(
+        code, IntPtrConstant(CodeDataContainer::kCodeOffset - kHeapObjectTag)));
+    return CAST(o);
 #else
     return code;
 #endif
@@ -837,6 +845,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 #endif
   }
 
+  TNode<RawPtrT> GetCodeEntry(TNode<CodeT> code);
+
   // The following Call wrappers call an object according to the semantics that
   // one finds in the EcmaScript spec, operating on an Callable (e.g. a
   // JSFunction or proxy) rather than a Code object.
@@ -1032,6 +1042,33 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // Works only with V8_ENABLE_FORCE_SLOW_PATH compile time flag. Nop otherwise.
   void GotoIfForceSlowPath(Label* if_true);
 
+#ifdef V8_CAGED_POINTERS
+
+  //
+  // Caged pointer related functionality.
+  //
+
+  // Load a caged pointer value from an object.
+  TNode<RawPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
+                                            int offset) {
+    return LoadCagedPointerFromObject(object, IntPtrConstant(offset));
+  }
+
+  TNode<RawPtrT> LoadCagedPointerFromObject(TNode<HeapObject> object,
+                                            TNode<IntPtrT> offset);
+
+  // Store a caged pointer value to an object.
+  void StoreCagedPointerToObject(TNode<HeapObject> object, int offset,
+                                 TNode<RawPtrT> pointer) {
+    StoreCagedPointerToObject(object, IntPtrConstant(offset), pointer);
+  }
+
+  void StoreCagedPointerToObject(TNode<HeapObject> object,
+                                 TNode<IntPtrT> offset,
+                                 TNode<RawPtrT> pointer);
+
+#endif  // V8_CAGED_POINTERS
+
   //
   // ExternalPointerT-related functionality.
   //
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index 9fab1cd40f2917..b7eafaf0d984aa 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -60,6 +60,7 @@
 #include "src/parsing/scanner-character-streams.h"
 #include "src/snapshot/code-serializer.h"
 #include "src/utils/ostreams.h"
+#include "src/web-snapshot/web-snapshot.h"
 #include "src/zone/zone-list-inl.h"  // crbug.com/v8/8816
 
 namespace v8 {
@@ -208,8 +209,9 @@ struct ScopedTimer {
 // static
 void Compiler::LogFunctionCompilation(Isolate* isolate,
                                       CodeEventListener::LogEventsAndTags tag,
                                       Handle<SharedFunctionInfo> shared, Handle