diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 894988269..cfed03782 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,7 +18,9 @@ jobs: steps: - uses: actions/checkout@v2 - name: setup - run: cmake -E make_directory ${{runner.workspace}}/build + run: | + git config --global core.longpaths true + cmake -E make_directory ${{runner.workspace}}/build - name: configure run: cmake ${{runner.workspace}}/v8-cmake working-directory: ${{runner.workspace}}/build diff --git a/CMakeLists.txt b/CMakeLists.txt index 4467e77e2..620d98bb8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1022,6 +1022,7 @@ add_custom_command( ${torque_outputs} ) + add_custom_command( COMMAND ${CMAKE_COMMAND} -E make_directory ${torque_dirs} diff --git a/update_v8.json b/update_v8.json index a61b8c357..7875e51e8 100644 --- a/update_v8.json +++ b/update_v8.json @@ -1,9 +1,9 @@ [ { "url": "https://chromium.googlesource.com/v8/v8.git", - "commit": "07c05c4b8c5e8916e604c09692d7b6e1813c606e", + "commit": "3236505a90c64a5fbb1e1f2584b9e551d84a08dc", "/* comment */": "Dependency v8 must be first.", - "branch": "branch-heads/11.5", + "branch": "branch-heads/11.6", "path": "" }, { @@ -26,7 +26,7 @@ }, { "url": "https://chromium.googlesource.com/chromium/src/third_party/zlib.git", - "commit": "14dd4c4455602c9b71a1a89b5cafd1f4030d2e3f", + "commit": "e6795474e4885ae55b6c6c0a612d0889e2ebd8d9", "branch": "master", "path": "third_party/zlib" }, diff --git a/v8/.gitignore b/v8/.gitignore index 1687038dc..65fe25d1b 100644 --- a/v8/.gitignore +++ b/v8/.gitignore @@ -64,6 +64,8 @@ /test/wasm-spec-tests/tests.tar.gz /third_party/* !/third_party/antlr4 +!/third_party/cpu_features +/third_party/cpu_features/src !/third_party/inspector_protocol !/third_party/jsoncpp /third_party/jsoncpp/source diff --git a/v8/.vpython3 b/v8/.vpython3 index 1187542f5..c4adffad1 100644 --- a/v8/.vpython3 +++ b/v8/.vpython3 @@ -75,7 +75,24 @@ wheel: < version: "version:3.19.3" > +# requests and its dependencies. wheel: < - name: "infra/python/wheels/requests-py2_py3" - version: "version:2.13.0" + name: "infra/python/wheels/requests-py3" + version: "version:2.31.0" +> +wheel: < + name: "infra/python/wheels/urllib3-py2_py3" + version: "version:1.26.6" +> +wheel: < + name: "infra/python/wheels/idna-py2_py3" + version: "version:2.8" +> +wheel: < + name: "infra/python/wheels/certifi-py2_py3" + version: "version:2020.11.8" +> +wheel: < + name: "infra/python/wheels/charset_normalizer-py3" + version: "version:2.0.4" > diff --git a/v8/AUTHORS b/v8/AUTHORS index 2b393ad38..476c0955c 100644 --- a/v8/AUTHORS +++ b/v8/AUTHORS @@ -180,6 +180,7 @@ Maciej Małecki Marcel Laverdet Marcin Cieślak Marcin Wiącek +Marisa Kirisame Martin Bidlingmaier Mateusz Czeladka Matheus Marchini @@ -241,6 +242,7 @@ Sakthipriyan Vairamani (thefourtheye) Sander Mathijs van Veen Sandro Santilli Sanjoy Das +Sam James Seo Sanghyeon Shawn Anastasio Shawn Presser diff --git a/v8/BUILD.bazel b/v8/BUILD.bazel index dcc64bc2f..ba1a415eb 100644 --- a/v8/BUILD.bazel +++ b/v8/BUILD.bazel @@ -57,7 +57,6 @@ load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression # v8_use_mips_abi_hardfloat # v8_enable_gdbjit # v8_check_header_includes -# v8_enable_shared_ro_heap # v8_enable_lazy_source_positions # v8_enable_third_party_heap # v8_third_party_heap_files @@ -228,6 +227,13 @@ v8_flag( default = True, ) +# Shared RO heap. Flag has to be set to false when +# v8_enable_pointer_compression_shared_cage is set to false. 
+v8_flag( + name = "v8_enable_shared_ro_heap", + default = True, +) + # Enable shared cage if v8_enable_pointer_compression # and v8_enable_pointer_compression_shared_cage. selects.config_setting_group( @@ -413,6 +419,10 @@ v8_config( "V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE", ], "//conditions:default": [], + }) + select({ + ":is_v8_enable_shared_ro_heap": [ + "V8_SHARED_RO_HEAP", + ], }) + select({ ":is_v8_enable_short_builtin_calls": [ "V8_SHORT_BUILTIN_CALLS", @@ -843,10 +853,12 @@ filegroup( "src/builtins/iterator.tq", "src/builtins/iterator-from.tq", "src/builtins/iterator-helpers.tq", + "src/builtins/map-groupby.tq", "src/builtins/math.tq", "src/builtins/number.tq", "src/builtins/object.tq", "src/builtins/object-fromentries.tq", + "src/builtins/object-groupby.tq", "src/builtins/promise-abstract-operations.tq", "src/builtins/promise-all.tq", "src/builtins/promise-all-element-closure.tq", @@ -881,6 +893,8 @@ filegroup( "src/builtins/regexp-source.tq", "src/builtins/regexp-split.tq", "src/builtins/regexp-test.tq", + "src/builtins/set-intersection.tq", + "src/builtins/set-union.tq", "src/builtins/string-at.tq", "src/builtins/string-endswith.tq", "src/builtins/string-html.tq", @@ -999,6 +1013,7 @@ filegroup( "third_party/v8/builtins/array-sort.tq", ] + select({ ":is_v8_enable_webassembly": [ + "src/builtins/js-to-wasm.tq", "src/builtins/wasm.tq", "src/debug/debug-wasm-objects.tq", "src/wasm/wasm-objects.tq", @@ -1550,6 +1565,8 @@ filegroup( "src/heap/marking-worklist-inl.h", "src/heap/memory-allocator.cc", "src/heap/memory-allocator.h", + "src/heap/memory-balancer.cc", + "src/heap/memory-balancer.h", "src/heap/memory-chunk.cc", "src/heap/memory-chunk.h", "src/heap/memory-chunk-inl.h", @@ -1575,6 +1592,7 @@ filegroup( "src/heap/paged-spaces.h", "src/heap/paged-spaces-inl.h", "src/heap/parallel-work-item.h", + "src/heap/parked-scope-inl.h", "src/heap/parked-scope.h", "src/heap/pretenuring-handler.cc", "src/heap/pretenuring-handler.h", @@ -1954,6 +1972,7 @@ filegroup( "src/objects/synthetic-module.cc", "src/objects/synthetic-module.h", "src/objects/synthetic-module-inl.h", + "src/objects/tagged.h", "src/objects/tagged-field.h", "src/objects/tagged-field-inl.h", "src/objects/tagged-impl.cc", @@ -2134,6 +2153,11 @@ filegroup( "src/sandbox/external-pointer-table.cc", "src/sandbox/external-pointer-table.h", "src/sandbox/external-pointer-table-inl.h", + "src/sandbox/code-pointer-table.cc", + "src/sandbox/code-pointer-table.h", + "src/sandbox/code-pointer-table-inl.h", + "src/sandbox/code-pointer.h", + "src/sandbox/code-pointer-inl.h", "src/sandbox/external-entity-table.h", "src/sandbox/external-entity-table-inl.h", "src/sandbox/sandbox.cc", @@ -2162,6 +2186,7 @@ filegroup( "src/snapshot/read-only-deserializer.h", "src/snapshot/read-only-serializer.cc", "src/snapshot/read-only-serializer.h", + "src/snapshot/read-only-serializer-deserializer.h", "src/snapshot/references.h", "src/snapshot/roots-serializer.cc", "src/snapshot/roots-serializer.h", @@ -2799,6 +2824,7 @@ filegroup( "src/compiler/js-graph.h", "src/compiler/js-heap-broker.cc", "src/compiler/js-heap-broker.h", + "src/compiler/js-heap-broker-inl.h", "src/compiler/js-inlining.cc", "src/compiler/js-inlining.h", "src/compiler/js-inlining-heuristic.cc", @@ -2969,7 +2995,6 @@ filegroup( "src/compiler/turboshaft/store-store-elimination-phase.h", "src/compiler/turboshaft/store-store-elimination-reducer.h", "src/compiler/turboshaft/structural-optimization-reducer.h", - "src/compiler/turboshaft/tag-untag-lowering-reducer.h", 
"src/compiler/turboshaft/tracing.h", "src/compiler/turboshaft/type-assertions-phase.cc", "src/compiler/turboshaft/type-assertions-phase.h", @@ -2990,6 +3015,7 @@ filegroup( "src/compiler/turboshaft/utils.h", "src/compiler/turboshaft/value-numbering-reducer.h", "src/compiler/turboshaft/variable-reducer.h", + "src/compiler/turboshaft/wasm-js-lowering-reducer.h", "src/compiler/type-cache.cc", "src/compiler/type-cache.h", "src/compiler/type-narrowing-reducer.cc", @@ -3088,6 +3114,8 @@ filegroup( "src/compiler/wasm-load-elimination.h", "src/compiler/wasm-loop-peeling.cc", "src/compiler/wasm-loop-peeling.h", + "src/compiler/wasm-js-lowering.cc", + "src/compiler/wasm-js-lowering.h", "src/compiler/wasm-typer.cc", "src/compiler/wasm-typer.h", ], @@ -3600,6 +3628,7 @@ py_binary( "third_party/inspector_protocol/templates/TypeBuilder_cpp.template", "third_party/inspector_protocol/templates/TypeBuilder_h.template", ], + imports = ["third_party/inspector_protocol/"], python_version = "PY3", deps = [ requirement("jinja2"), diff --git a/v8/BUILD.gn b/v8/BUILD.gn index a69e7b2ed..c13cf053a 100644 --- a/v8/BUILD.gn +++ b/v8/BUILD.gn @@ -319,6 +319,10 @@ declare_args() { # Sets -DV8_ENABLE_SANDBOX. v8_enable_sandbox = "" + # Enable experimental code pointer sandboxing for the V8 sandbox. + # Sets -DV8_CODE_POINTER_SANDBOXING + v8_code_pointer_sandboxing = false + # Expose the memory corruption API to JavaScript. Useful for testing the sandbox. # WARNING This will expose builtins that (by design) cause memory corruption. # Sets -DV8_EXPOSE_MEMORY_CORRUPTION_API @@ -368,13 +372,6 @@ declare_args() { # (incomplete and experimental). v8_enable_cet_shadow_stack = false - # Get VMEX priviledge at startup. - # It allows to run V8 without "deprecated-ambient-replace-as-executable". - # Sets -DV8_USE_VMEX_RESOURCE. - # TODO(victorgomes): Remove this flag once Chormium no longer needs - # the deprecated feature. - v8_fuchsia_use_vmex_resource = is_fuchsia && !build_with_chromium - # Enables pointer compression for 8GB heaps. # Sets -DV8_COMPRESS_POINTERS_8GB. v8_enable_pointer_compression_8gb = "" @@ -393,6 +390,10 @@ declare_args() { # iOS (non-simulator) does not have executable pages for 3rd party # applications yet so disable jit. v8_jitless = v8_enable_lite_mode || target_is_ios_device + + # Enable Maglev's graph printer. + # Sets -DV8_MAGLEV_GRAPH_PRINTER. + v8_enable_maglev_graph_printer = !build_with_chromium } # Derived defaults. 
@@ -472,13 +473,13 @@ if (v8_enable_short_builtin_calls == "") { if (v8_enable_external_code_space == "") { v8_enable_external_code_space = v8_enable_pointer_compression && - (v8_current_cpu == "x64" || - (target_os != "fuchsia" && v8_current_cpu == "arm64")) + (v8_current_cpu == "x64" || v8_current_cpu == "arm64") } if (v8_enable_maglev == "") { v8_enable_maglev = v8_enable_turbofan && - (v8_current_cpu == "x64" || v8_current_cpu == "arm64") && - v8_enable_pointer_compression + (v8_current_cpu == "arm" || + ((v8_current_cpu == "x64" || v8_current_cpu == "arm64") && + v8_enable_pointer_compression)) } assert(v8_enable_turbofan || !v8_enable_maglev, "Maglev is not available when Turbofan is disabled.") @@ -641,6 +642,9 @@ assert(!v8_enable_sandbox || v8_enable_pointer_compression_shared_cage, assert(!v8_enable_sandbox || v8_enable_external_code_space, "The sandbox requires the external code space") +assert(!v8_code_pointer_sandboxing || v8_enable_sandbox, + "Code pointer sandboxing requires the sandbox") + assert(!v8_expose_memory_corruption_api || v8_enable_sandbox, "The Memory Corruption API requires the sandbox") @@ -674,10 +678,6 @@ if (v8_enable_single_generation == true) { "Requires unconditional write barriers or none (which disables incremental marking)") } -if (v8_fuchsia_use_vmex_resource) { - assert(target_os == "fuchsia", "VMEX resource only available on Fuchsia") -} - assert(!v8_enable_snapshot_compression || v8_use_zlib, "Snapshot compression requires zlib") @@ -720,7 +720,9 @@ config("internal_config") { } if (v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") { - libs = [ "atomic" ] + if (!is_clang) { + libs = [ "atomic" ] + } } } @@ -792,7 +794,9 @@ config("external_config") { } if (current_cpu == "riscv64" || current_cpu == "riscv32") { - libs = [ "atomic" ] + if (!is_clang) { + libs = [ "atomic" ] + } } } @@ -1138,9 +1142,6 @@ config("features") { if (v8_advanced_bigint_algorithms) { defines += [ "V8_ADVANCED_BIGINT_ALGORITHMS" ] } - if (v8_fuchsia_use_vmex_resource) { - defines += [ "V8_USE_VMEX_RESOURCE" ] - } if (v8_expose_memory_corruption_api) { defines += [ "V8_EXPOSE_MEMORY_CORRUPTION_API" ] } @@ -1165,6 +1166,12 @@ config("features") { if (v8_enable_wasm_simd256_revec) { defines += [ "V8_ENABLE_WASM_SIMD256_REVEC" ] } + if (v8_code_pointer_sandboxing) { + defines += [ "V8_CODE_POINTER_SANDBOXING" ] + } + if (v8_enable_maglev_graph_printer) { + defines += [ "V8_ENABLE_MAGLEV_GRAPH_PRINTER" ] + } } config("toolchain") { @@ -1309,6 +1316,12 @@ config("toolchain") { defines += [ "V8_TARGET_ARCH_RISCV32" ] defines += [ "__riscv_xlen=32" ] defines += [ "CAN_USE_FPU_INSTRUCTIONS" ] + + # TODO(riscv32): Add condition riscv_use_rvv and riscv_rvv_vlen here after + # 4538202 merge. + if (target_is_simulator) { + defines += [ "CAN_USE_RVV_INSTRUCTIONS" ] + } } if (v8_current_cpu == "x86") { @@ -1397,15 +1410,18 @@ config("toolchain") { "-Wmissing-field-initializers", "-Wunreachable-code", - # Google3 enables this warning, so we should also enable it to find issue - # earlier. See https://reviews.llvm.org/D56731 for details about this - # warning. - "-Wctad-maybe-unsupported", - # TODO(v8:12245): Fix shadowing instances and remove. "-Wno-shadow", ] + # TODO(fuchsia:127411): Re-enable once FIDL bindings are compatible. + if (!is_fuchsia) { + # Google3 enables this warning, so we should also enable it to find issue + # earlier. See https://reviews.llvm.org/D56731 for details about this + # warning. 
+ cflags += [ "-Wctad-maybe-unsupported" ] + } + if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" || v8_current_cpu == "riscv64") { cflags += [ "-Wshorten-64-to-32" ] @@ -1749,6 +1765,7 @@ if (v8_postmortem_support) { "src/objects/string-inl.h", "src/objects/struct.h", "src/objects/struct-inl.h", + "src/objects/tagged.h", ] outputs = [ "$target_gen_dir/debug-support.cc" ] @@ -1815,9 +1832,11 @@ torque_files = [ "src/builtins/iterator.tq", "src/builtins/iterator-from.tq", "src/builtins/iterator-helpers.tq", + "src/builtins/map-groupby.tq", "src/builtins/math.tq", "src/builtins/number.tq", "src/builtins/object-fromentries.tq", + "src/builtins/object-groupby.tq", "src/builtins/object.tq", "src/builtins/promise-abstract-operations.tq", "src/builtins/promise-all.tq", @@ -1853,6 +1872,8 @@ torque_files = [ "src/builtins/regexp-split.tq", "src/builtins/regexp-test.tq", "src/builtins/regexp.tq", + "src/builtins/set-intersection.tq", + "src/builtins/set-union.tq", "src/builtins/string-at.tq", "src/builtins/string-endswith.tq", "src/builtins/string-html.tq", @@ -1992,6 +2013,7 @@ if (v8_enable_i18n_support) { if (v8_enable_webassembly) { torque_files += [ + "src/builtins/js-to-wasm.tq", "src/builtins/wasm.tq", "src/debug/debug-wasm-objects.tq", "src/wasm/wasm-objects.tq", @@ -3069,6 +3091,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/js-create-lowering.h", "src/compiler/js-generic-lowering.h", "src/compiler/js-graph.h", + "src/compiler/js-heap-broker-inl.h", "src/compiler/js-heap-broker.h", "src/compiler/js-inlining-heuristic.h", "src/compiler/js-inlining.h", @@ -3168,7 +3191,6 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/store-store-elimination-phase.h", "src/compiler/turboshaft/store-store-elimination-reducer.h", "src/compiler/turboshaft/structural-optimization-reducer.h", - "src/compiler/turboshaft/tag-untag-lowering-reducer.h", "src/compiler/turboshaft/tracing.h", "src/compiler/turboshaft/type-assertions-phase.h", "src/compiler/turboshaft/type-inference-analysis.h", @@ -3348,6 +3370,7 @@ v8_header_set("v8_internal_headers") { "src/heap/marking-worklist.h", "src/heap/marking.h", "src/heap/memory-allocator.h", + "src/heap/memory-balancer.h", "src/heap/memory-chunk-inl.h", "src/heap/memory-chunk-layout.h", "src/heap/memory-chunk.h", @@ -3364,6 +3387,7 @@ v8_header_set("v8_internal_headers") { "src/heap/paged-spaces-inl.h", "src/heap/paged-spaces.h", "src/heap/parallel-work-item.h", + "src/heap/parked-scope-inl.h", "src/heap/parked-scope.h", "src/heap/pretenuring-handler-inl.h", "src/heap/pretenuring-handler.h", @@ -3636,6 +3660,7 @@ v8_header_set("v8_internal_headers") { "src/objects/tagged-index.h", "src/objects/tagged-value-inl.h", "src/objects/tagged-value.h", + "src/objects/tagged.h", "src/objects/template-objects-inl.h", "src/objects/template-objects.h", "src/objects/templates-inl.h", @@ -3720,6 +3745,10 @@ v8_header_set("v8_internal_headers") { "src/runtime/runtime.h", "src/sandbox/bounded-size-inl.h", "src/sandbox/bounded-size.h", + "src/sandbox/code-pointer-inl.h", + "src/sandbox/code-pointer-table-inl.h", + "src/sandbox/code-pointer-table.h", + "src/sandbox/code-pointer.h", "src/sandbox/external-entity-table-inl.h", "src/sandbox/external-entity-table.h", "src/sandbox/external-pointer-inl.h", @@ -3739,6 +3768,7 @@ v8_header_set("v8_internal_headers") { "src/snapshot/embedded/embedded-file-writer-interface.h", "src/snapshot/object-deserializer.h", "src/snapshot/read-only-deserializer.h", + 
"src/snapshot/read-only-serializer-deserializer.h", "src/snapshot/read-only-serializer.h", "src/snapshot/references.h", "src/snapshot/roots-serializer.h", @@ -3843,7 +3873,9 @@ v8_header_set("v8_internal_headers") { "src/maglev/maglev-register-frame-array.h", "src/maglev/maglev.h", ] - if (v8_current_cpu == "arm64") { + if (v8_current_cpu == "arm") { + sources += [ "src/maglev/arm/maglev-assembler-arm-inl.h" ] + } else if (v8_current_cpu == "arm64") { sources += [ "src/maglev/arm64/maglev-assembler-arm64-inl.h" ] } else if (v8_current_cpu == "x64") { sources += [ "src/maglev/x64/maglev-assembler-x64-inl.h" ] @@ -3858,6 +3890,7 @@ v8_header_set("v8_internal_headers") { "src/asmjs/asm-scanner.h", "src/asmjs/asm-types.h", "src/compiler/int64-lowering.h", + "src/compiler/turboshaft/wasm-js-lowering-reducer.h", "src/compiler/wasm-address-reassociation.h", "src/compiler/wasm-call-descriptors.h", "src/compiler/wasm-compiler-definitions.h", @@ -3868,6 +3901,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/wasm-graph-assembler.h", "src/compiler/wasm-inlining-into-js.h", "src/compiler/wasm-inlining.h", + "src/compiler/wasm-js-lowering.h", "src/compiler/wasm-load-elimination.h", "src/compiler/wasm-loop-peeling.h", "src/compiler/wasm-typer.h", @@ -4525,6 +4559,7 @@ if (v8_enable_webassembly) { "src/compiler/wasm-graph-assembler.cc", "src/compiler/wasm-inlining-into-js.cc", "src/compiler/wasm-inlining.cc", + "src/compiler/wasm-js-lowering.cc", "src/compiler/wasm-load-elimination.cc", "src/compiler/wasm-loop-peeling.cc", "src/compiler/wasm-typer.cc", @@ -4876,6 +4911,7 @@ v8_source_set("v8_base_without_compiler") { "src/heap/marking-worklist.cc", "src/heap/marking.cc", "src/heap/memory-allocator.cc", + "src/heap/memory-balancer.cc", "src/heap/memory-chunk-layout.cc", "src/heap/memory-chunk.cc", "src/heap/memory-measurement.cc", @@ -5087,6 +5123,7 @@ v8_source_set("v8_base_without_compiler") { "src/runtime/runtime-typedarray.cc", "src/runtime/runtime-weak-refs.cc", "src/runtime/runtime.cc", + "src/sandbox/code-pointer-table.cc", "src/sandbox/external-pointer-table.cc", "src/sandbox/sandbox.cc", "src/sandbox/testing.cc", @@ -5161,7 +5198,12 @@ v8_source_set("v8_base_without_compiler") { "src/maglev/maglev-regalloc.cc", "src/maglev/maglev.cc", ] - if (v8_current_cpu == "arm64") { + if (v8_current_cpu == "arm") { + sources += [ + "src/maglev/arm/maglev-assembler-arm.cc", + "src/maglev/arm/maglev-ir-arm.cc", + ] + } else if (v8_current_cpu == "arm64") { sources += [ "src/maglev/arm64/maglev-assembler-arm64.cc", "src/maglev/arm64/maglev-ir-arm64.cc", @@ -5576,7 +5618,9 @@ v8_source_set("v8_base_without_compiler") { v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" || v8_current_cpu == "s390" || v8_current_cpu == "s390x" || v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") { - libs += [ "atomic" ] + if (!is_clang) { + libs += [ "atomic" ] + } } if (v8_enable_vtunetracemark && (is_linux || is_chromeos || is_win)) { @@ -5923,8 +5967,8 @@ v8_component("v8_libbase") { "src/base/platform/platform-fuchsia.cc", ] deps += [ - "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.kernel", - "//third_party/fuchsia-sdk/sdk/pkg/fdio", + "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.kernel:fuchsia.kernel_cpp", + "//third_party/fuchsia-sdk/sdk/pkg/component_incoming_cpp", "//third_party/fuchsia-sdk/sdk/pkg/zx", ] } else if (is_mac) { @@ -5976,7 +6020,9 @@ v8_component("v8_libbase") { } if (v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") { - libs += [ "atomic" ] + if (!is_clang) { + libs += [ "atomic" ] 
+ } } if (is_tsan && !build_with_chromium) { @@ -6086,7 +6132,9 @@ v8_component("v8_libplatform") { } if (v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") { - libs = [ "atomic" ] + if (!is_clang) { + libs = [ "atomic" ] + } } } @@ -7025,7 +7073,9 @@ v8_executable("cppgc_hello_world") { sources = [ "samples/cppgc/hello-world.cc" ] if (v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") { - libs = [ "atomic" ] + if (!is_clang) { + libs = [ "atomic" ] + } } configs = [ diff --git a/v8/COMMON_OWNERS b/v8/COMMON_OWNERS index 486688cfc..7652be245 100644 --- a/v8/COMMON_OWNERS +++ b/v8/COMMON_OWNERS @@ -26,6 +26,7 @@ nicohartmann@chromium.org nikolaos@chromium.org omerkatz@chromium.org pthier@chromium.org +sroettger@google.com syg@chromium.org szuend@chromium.org tebbi@chromium.org diff --git a/v8/DEPS b/v8/DEPS index 860c61eba..bfb6d8ed3 100644 --- a/v8/DEPS +++ b/v8/DEPS @@ -14,13 +14,12 @@ vars = { # # Available images: # Emulation: - # - qemu.x64 (pulls terminal.qemu-x64-release) - # - qemu.arm64 (pulls terminal.qemu-arm64-release) + # - terminal.qemu-x64 + # - terminal.qemu-arm64 # - workstation.qemu-x64-release # Hardware: - # - generic.x64 (pulls terminal.x64-debug) - # - generic.arm64 (pulls terminal.arm64-debug) - # - chromebook.x64 (pulls terminal.chromebook-x64-debug) + # - minimal.x64 + # - core.x64-dfv2 # # Since the images are hundreds of MB, default to only downloading the image # most commonly useful for developers. Bots and developers that need to use @@ -50,25 +49,25 @@ vars = { 'check_v8_header_includes': False, # By default, download the fuchsia sdk from the public sdk directory. - 'fuchsia_sdk_cipd_prefix': 'fuchsia/sdk/gn/', + 'fuchsia_sdk_cipd_prefix': 'fuchsia/sdk/core/', # reclient CIPD package version - 'reclient_version': 're_client_version:0.105.0.d6a0caf-gomaip', + 'reclient_version': 're_client_version:0.108.0.7cdbbe9-gomaip', # GN CIPD package version. - 'gn_version': 'git_revision:e9e83d9095d3234adf68f3e2866f25daf766d5c7', + 'gn_version': 'git_revision:4bd1a77e67958fb7f6739bd4542641646f264e5d', # ninja CIPD package version # https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja 'ninja_version': 'version:2@1.11.1.chromium.6', # luci-go CIPD package version. - 'luci_go': 'git_revision:6ac770095bc0e289138f6f29aa91ff2f4886b81c', + 'luci_go': 'git_revision:39f255d5875293d3e1d978888b819ac124a8b0cc', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:12.20230520.1.1', + 'fuchsia_version': 'version:12.20230601.2.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -108,11 +107,9 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '147f65333c38ddd1ebf554e89965c243c8ce50b3', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '7f93a1e7ae8de96f113834f37d01b869a74b7dd3', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '04466898300bf849d1650cb35dcf2eb353e8e1e1', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '5d2ccbf93c7d956a9aff1d0acd21155e6b515ceb', - 'buildtools/clang_format/script': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + 'f97059df7f8b205064625cdb5f97b56668a125ef', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '3739a3619309af3b788379ad0936ca00b981616e', 'buildtools/linux64': { 'packages': [ { @@ -134,11 +131,11 @@ deps = { 'condition': 'host_os == "mac"', }, 'buildtools/third_party/libc++/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + 'f8279b01085b800724f5c5629dc365b9f040dc53', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '055b2e17ae4f0e2c025ad0c7508b01787df17758', 'buildtools/third_party/libc++abi/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '982e218cb8a2d11979e241f483fff904468f6057', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '8d21803b9076b16d46c32e2f10da191ee758520c', 'buildtools/third_party/libunwind/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '55ceecab990d33d26ef0566ed59a057852c1d3f2', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'b5a43ecdac82a248f8a700a68c722b4d98708377', 'buildtools/win': { 'packages': [ { @@ -164,13 +161,9 @@ deps = { 'test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'beb4f26eb4e6f6f2bf71c8441521aaa950e62052', - 'third_party/android_ndk': { - 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '8388a2be5421311dc75c5f937aae13d821a27f3d', - 'condition': 'checkout_android', - }, + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '2f0193d4cf583091f7717b7851270a75a78826e4', 'third_party/android_platform': { - 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'f312145c4191affc66e7a1d46194f0d6c9dec438', + 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'a72ec125fae7ab32bdd76f9de3bb3fa89436ea28', 'condition': 'checkout_android', }, 'third_party/android_sdk/public': { @@ -211,25 +204,35 @@ deps = { 'condition': 'checkout_android', 'dep_type': 'cipd', }, + 'third_party/android_toolchain': { + 'packages': [ + { + 'package': 'chromium/third_party/android_toolchain/android_toolchain', + 'version': 'version:2@r25c.cr1', + }, + ], + 'condition': 'checkout_android', + 'dep_type': 'cipd', + }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + 'c6222c27ba3f3253c68caaa5b7274f1f7884cd94', + 'url': Var('chromium_url') + '/catapult.git' + '@' + 
'cef5cf05b2410be6cae210e4ae7de0ab808736c3', 'condition': 'checkout_android', }, + 'third_party/clang-format/script': + Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + 'f97059df7f8b205064625cdb5f97b56668a125ef', 'third_party/colorama/src': { 'url': Var('chromium_url') + '/external/colorama.git' + '@' + '3de9f013df4b470069d03d250224062e8cf15c49', 'condition': 'checkout_android', }, + 'third_party/cpu_features/src': { + 'url': Var('chromium_url') + '/external/github.com/google/cpu_features.git' + '@' + '936b9ab5515dead115606559502e3864958f7f6e', + 'condition': 'checkout_android', + }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '4d73c057d82ac5994d992f0b1f57ca6d513c3554', - 'third_party/fuchsia-sdk/sdk': { - 'packages': [ - { - 'package': Var('fuchsia_sdk_cipd_prefix') + '${{platform}}', - 'version': Var('fuchsia_version'), - }, - ], + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '3ffad8166e1c233624dcac4e5a12a59944f1231a', + 'third_party/fuchsia-gn-sdk': { + 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-gn-sdk.git' + '@' + '0d6902558d92fe3d49ba9a8f638ddea829be595b', 'condition': 'checkout_fuchsia', - 'dep_type': 'cipd', }, 'third_party/google_benchmark/src': { 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + 'b177433f3ee2513b1075140c723d73ab8901790f', @@ -237,7 +240,7 @@ deps = { 'third_party/googletest/src': Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'af29db7ec28d6df1c7f0f745186884091e602e07', 'third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'a2961dc659b4ae847a9c6120718cc2517ee57d9e', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'e8c3bc9ea97d4423ad0515e5f1c064f486dae8b1', 'third_party/instrumented_libraries': Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '0f536d22dbed454b1254c7e6d7130eab28fba1fa', 'third_party/ittapi': { @@ -273,9 +276,9 @@ deps = { 'condition': 'checkout_android', }, 'third_party/zlib': - Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '14dd4c4455602c9b71a1a89b5cafd1f4030d2e3f', + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'e6795474e4885ae55b6c6c0a612d0889e2ebd8d9', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '4ee099ac1c0d6e86e53cedfdcfd7cd2d45e126ca', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '573d371dff6d798388c9b400718e4b30ad683830', 'tools/luci-go': { 'packages': [ { @@ -589,6 +592,17 @@ hooks = [ 'action': ['python3', 'build/util/lastchange.py', '-o', 'build/util/LASTCHANGE'], }, + { + 'name': 'Download Fuchsia SDK from GCS', + 'pattern': '.', + 'condition': 'checkout_fuchsia', + 'action': [ + 'python3', + 'build/fuchsia/update_sdk.py', + '--cipd-prefix={fuchsia_sdk_cipd_prefix}', + '--version={fuchsia_version}', + ], + }, { 'name': 'Download Fuchsia system images', 'pattern': '.', diff --git a/v8/gni/v8.gni b/v8/gni/v8.gni index 959afd3f8..6a9f9c661 100644 --- a/v8/gni/v8.gni +++ b/v8/gni/v8.gni @@ -2,6 +2,7 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. +import("//build/config/chrome_build.gni") import("//build/config/compiler/pgo/pgo.gni") import("//build/config/gclient_args.gni") import("//build/config/ios/config.gni") @@ -98,8 +99,8 @@ declare_args() { cppgc_is_standalone = false - # Enable object names in cppgc for debug purposes. 
- cppgc_enable_object_names = false + # Enable object names in cppgc for profiling purposes. + cppgc_enable_object_names = is_chrome_for_testing # Enable young generation in cppgc. cppgc_enable_young_generation = false diff --git a/v8/include/cppgc/internal/api-constants.h b/v8/include/cppgc/internal/api-constants.h index 7c1b1e823..4e2a637e4 100644 --- a/v8/include/cppgc/internal/api-constants.h +++ b/v8/include/cppgc/internal/api-constants.h @@ -40,15 +40,6 @@ constexpr size_t kGuardPageSize = 4096; static constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2; -#if defined(CPPGC_CAGED_HEAP) -#if defined(CPPGC_2GB_CAGE) -constexpr size_t kCagedHeapReservationSize = static_cast(2) * kGB; -#else // !defined(CPPGC_2GB_CAGE) -constexpr size_t kCagedHeapReservationSize = static_cast(4) * kGB; -#endif // !defined(CPPGC_2GB_CAGE) -constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize; -#endif // defined(CPPGC_CAGED_HEAP) - #if defined(CPPGC_POINTER_COMPRESSION) #if defined(CPPGC_ENABLE_LARGER_CAGE) constexpr unsigned kPointerCompressionShift = 3; @@ -57,6 +48,26 @@ constexpr unsigned kPointerCompressionShift = 1; #endif // !defined(CPPGC_ENABLE_LARGER_CAGE) #endif // !defined(CPPGC_POINTER_COMPRESSION) +#if defined(CPPGC_CAGED_HEAP) +#if defined(CPPGC_2GB_CAGE) +constexpr size_t kCagedHeapDefaultReservationSize = + static_cast(2) * kGB; +constexpr size_t kCagedHeapMaxReservationSize = + kCagedHeapDefaultReservationSize; +#else // !defined(CPPGC_2GB_CAGE) +constexpr size_t kCagedHeapDefaultReservationSize = + static_cast(4) * kGB; +#if defined(CPPGC_POINTER_COMPRESSION) +constexpr size_t kCagedHeapMaxReservationSize = + size_t{1} << (31 + kPointerCompressionShift); +#else // !defined(CPPGC_POINTER_COMPRESSION) +constexpr size_t kCagedHeapMaxReservationSize = + kCagedHeapDefaultReservationSize; +#endif // !defined(CPPGC_POINTER_COMPRESSION) +#endif // !defined(CPPGC_2GB_CAGE) +constexpr size_t kCagedHeapReservationAlignment = kCagedHeapMaxReservationSize; +#endif // defined(CPPGC_CAGED_HEAP) + static constexpr size_t kDefaultAlignment = sizeof(void*); // Maximum support alignment for a type as in `alignof(T)`. diff --git a/v8/include/cppgc/internal/caged-heap-local-data.h b/v8/include/cppgc/internal/caged-heap-local-data.h index 7d689f87e..1eb87dfb5 100644 --- a/v8/include/cppgc/internal/caged-heap-local-data.h +++ b/v8/include/cppgc/internal/caged-heap-local-data.h @@ -46,7 +46,11 @@ class V8_EXPORT AgeTable final { enum class AdjacentCardsPolicy : uint8_t { kConsider, kIgnore }; static constexpr size_t kCardSizeInBytes = - api_constants::kCagedHeapReservationSize / kRequiredSize; + api_constants::kCagedHeapDefaultReservationSize / kRequiredSize; + + static constexpr size_t CalculateAgeTableSizeForHeapSize(size_t heap_size) { + return heap_size / kCardSizeInBytes; + } void SetAge(uintptr_t cage_offset, Age age) { table_[card(cage_offset)] = age; @@ -81,16 +85,18 @@ class V8_EXPORT AgeTable final { #endif // !V8_HAS_BUILTIN_CTZ static_assert((1 << kGranularityBits) == kCardSizeInBytes); const size_t entry = offset >> kGranularityBits; - CPPGC_DCHECK(table_.size() > entry); + CPPGC_DCHECK(CagedHeapBase::GetAgeTableSize() > entry); return entry; } - std::array table_; +#if defined(V8_CC_GNU) + // gcc disallows flexible arrays in otherwise empty classes. 
+ Age table_[0]; +#else // !defined(V8_CC_GNU) + Age table_[]; +#endif // !defined(V8_CC_GNU) }; -static_assert(sizeof(AgeTable) == 1 * api_constants::kMB, - "Size of AgeTable is 1MB"); - #endif // CPPGC_YOUNG_GENERATION struct CagedHeapLocalData final { @@ -98,6 +104,10 @@ struct CagedHeapLocalData final { return *reinterpret_cast(CagedHeapBase::GetBase()); } + static constexpr size_t CalculateLocalDataSizeForHeapSize(size_t heap_size) { + return AgeTable::CalculateAgeTableSizeForHeapSize(heap_size); + } + #if defined(CPPGC_YOUNG_GENERATION) AgeTable age_table; #endif diff --git a/v8/include/cppgc/internal/caged-heap.h b/v8/include/cppgc/internal/caged-heap.h index 4db42aee0..0c987a954 100644 --- a/v8/include/cppgc/internal/caged-heap.h +++ b/v8/include/cppgc/internal/caged-heap.h @@ -33,24 +33,31 @@ class V8_EXPORT CagedHeapBase { V8_INLINE static bool AreWithinCage(const void* addr1, const void* addr2) { #if defined(CPPGC_2GB_CAGE) - static constexpr size_t kHalfWordShift = sizeof(uint32_t) * CHAR_BIT - 1; + static constexpr size_t kHeapBaseShift = sizeof(uint32_t) * CHAR_BIT - 1; #else //! defined(CPPGC_2GB_CAGE) - static constexpr size_t kHalfWordShift = sizeof(uint32_t) * CHAR_BIT; +#if defined(CPPGC_POINTER_COMPRESSION) + static constexpr size_t kHeapBaseShift = + 31 + api_constants::kPointerCompressionShift; +#else // !defined(CPPGC_POINTER_COMPRESSION) + static constexpr size_t kHeapBaseShift = sizeof(uint32_t) * CHAR_BIT; +#endif // !defined(CPPGC_POINTER_COMPRESSION) #endif //! defined(CPPGC_2GB_CAGE) - static_assert((static_cast(1) << kHalfWordShift) == - api_constants::kCagedHeapReservationSize); + static_assert((static_cast(1) << kHeapBaseShift) == + api_constants::kCagedHeapMaxReservationSize); CPPGC_DCHECK(g_heap_base_); return !(((reinterpret_cast(addr1) ^ g_heap_base_) | (reinterpret_cast(addr2) ^ g_heap_base_)) >> - kHalfWordShift); + kHeapBaseShift); } V8_INLINE static uintptr_t GetBase() { return g_heap_base_; } + V8_INLINE static size_t GetAgeTableSize() { return g_age_table_size_; } private: friend class CagedHeap; static uintptr_t g_heap_base_; + static size_t g_age_table_size_; }; } // namespace internal diff --git a/v8/include/cppgc/internal/gc-info.h b/v8/include/cppgc/internal/gc-info.h index 08ffd411a..c8cb99acb 100644 --- a/v8/include/cppgc/internal/gc-info.h +++ b/v8/include/cppgc/internal/gc-info.h @@ -24,89 +24,90 @@ struct V8_EXPORT EnsureGCInfoIndexTrait final { // Acquires a new GC info object and updates `registered_index` with the index // that identifies that new info accordingly. 
template - V8_INLINE static void EnsureIndex( + V8_INLINE static GCInfoIndex EnsureIndex( std::atomic& registered_index) { - EnsureGCInfoIndexTraitDispatch{}(registered_index); + return EnsureGCInfoIndexTraitDispatch{}(registered_index); } private: - template ::value, - bool = FinalizerTrait::HasFinalizer(), + template ::HasFinalizer(), bool = NameTrait::HasNonHiddenName()> struct EnsureGCInfoIndexTraitDispatch; - static void V8_PRESERVE_MOST - EnsureGCInfoIndexPolymorphic(std::atomic&, TraceCallback, - FinalizationCallback, NameCallback); - static void V8_PRESERVE_MOST EnsureGCInfoIndexPolymorphic( + static GCInfoIndex V8_PRESERVE_MOST + EnsureGCInfoIndex(std::atomic&, TraceCallback, + FinalizationCallback, NameCallback); + static GCInfoIndex V8_PRESERVE_MOST EnsureGCInfoIndex( std::atomic&, TraceCallback, FinalizationCallback); - static void V8_PRESERVE_MOST EnsureGCInfoIndexPolymorphic( - std::atomic&, TraceCallback, NameCallback); - static void V8_PRESERVE_MOST - EnsureGCInfoIndexPolymorphic(std::atomic&, TraceCallback); - static void V8_PRESERVE_MOST - EnsureGCInfoIndexNonPolymorphic(std::atomic&, TraceCallback, - FinalizationCallback, NameCallback); - static void V8_PRESERVE_MOST EnsureGCInfoIndexNonPolymorphic( - std::atomic&, TraceCallback, FinalizationCallback); - static void V8_PRESERVE_MOST EnsureGCInfoIndexNonPolymorphic( - std::atomic&, TraceCallback, NameCallback); - static void V8_PRESERVE_MOST - EnsureGCInfoIndexNonPolymorphic(std::atomic&, TraceCallback); + static GCInfoIndex V8_PRESERVE_MOST + EnsureGCInfoIndex(std::atomic&, TraceCallback, NameCallback); + static GCInfoIndex V8_PRESERVE_MOST + EnsureGCInfoIndex(std::atomic&, TraceCallback); }; -#define DISPATCH(is_polymorphic, has_finalizer, has_non_hidden_name, function) \ - template \ - struct EnsureGCInfoIndexTrait::EnsureGCInfoIndexTraitDispatch< \ - T, is_polymorphic, has_finalizer, has_non_hidden_name> { \ - V8_INLINE void operator()(std::atomic& registered_index) { \ - function; \ - } \ +#define DISPATCH(has_finalizer, has_non_hidden_name, function) \ + template \ + struct EnsureGCInfoIndexTrait::EnsureGCInfoIndexTraitDispatch< \ + T, has_finalizer, has_non_hidden_name> { \ + V8_INLINE GCInfoIndex \ + operator()(std::atomic& registered_index) { \ + return function; \ + } \ }; -// --------------------------------------------------------------------- // -// DISPATCH(is_polymorphic, has_finalizer, has_non_hidden_name, function) -// --------------------------------------------------------------------- // -DISPATCH(true, true, true, // - EnsureGCInfoIndexPolymorphic(registered_index, // - TraceTrait::Trace, // - FinalizerTrait::kCallback, // - NameTrait::GetName)) // -DISPATCH(true, true, false, // - EnsureGCInfoIndexPolymorphic(registered_index, // - TraceTrait::Trace, // - FinalizerTrait::kCallback)) // -DISPATCH(true, false, true, // - EnsureGCInfoIndexPolymorphic(registered_index, // - TraceTrait::Trace, // - NameTrait::GetName)) // -DISPATCH(true, false, false, // - EnsureGCInfoIndexPolymorphic(registered_index, // - TraceTrait::Trace)) // -DISPATCH(false, true, true, // - EnsureGCInfoIndexNonPolymorphic(registered_index, // - TraceTrait::Trace, // - FinalizerTrait::kCallback, // - NameTrait::GetName)) // -DISPATCH(false, true, false, // - EnsureGCInfoIndexNonPolymorphic(registered_index, // - TraceTrait::Trace, // - FinalizerTrait::kCallback)) // -DISPATCH(false, false, true, // - EnsureGCInfoIndexNonPolymorphic(registered_index, // - TraceTrait::Trace, // - NameTrait::GetName)) // -DISPATCH(false, false, false, 
// - EnsureGCInfoIndexNonPolymorphic(registered_index, // - TraceTrait::Trace)) // +// ------------------------------------------------------- // +// DISPATCH(has_finalizer, has_non_hidden_name, function) // +// ------------------------------------------------------- // +DISPATCH(true, true, // + EnsureGCInfoIndex(registered_index, // + TraceTrait::Trace, // + FinalizerTrait::kCallback, // + NameTrait::GetName)) // +DISPATCH(true, false, // + EnsureGCInfoIndex(registered_index, // + TraceTrait::Trace, // + FinalizerTrait::kCallback)) // +DISPATCH(false, true, // + EnsureGCInfoIndex(registered_index, // + TraceTrait::Trace, // + NameTrait::GetName)) // +DISPATCH(false, false, // + EnsureGCInfoIndex(registered_index, // + TraceTrait::Trace)) // #undef DISPATCH +// Trait determines how the garbage collector treats objects wrt. to traversing, +// finalization, and naming. +template +struct GCInfoTrait final { + V8_INLINE static GCInfoIndex Index() { + static_assert(sizeof(T), "T must be fully defined"); + static std::atomic + registered_index; // Uses zero initialization. + GCInfoIndex index = registered_index.load(std::memory_order_acquire); + if (V8_UNLIKELY(!index)) { + index = EnsureGCInfoIndexTrait::EnsureIndex(registered_index); + CPPGC_DCHECK(index != 0); + CPPGC_DCHECK(index == registered_index.load(std::memory_order_acquire)); + } + return index; + } + + static constexpr bool CheckCallbacksAreDefined() { + // No USE() macro available. + (void)static_cast(TraceTrait::Trace); + (void)static_cast(FinalizerTrait::kCallback); + (void)static_cast(NameTrait::GetName); + return true; + } +}; + // Fold types based on finalizer behavior. Note that finalizer characteristics // align with trace behavior, i.e., destructors are virtual when trace methods // are and vice versa. template -struct GCInfoFolding { +struct GCInfoFolding final { static constexpr bool kHasVirtualDestructorAtBase = std::has_virtual_destructor::value; static constexpr bool kBothTypesAreTriviallyDestructible = @@ -121,34 +122,24 @@ struct GCInfoFolding { static constexpr bool kWantsDetailedObjectNames = false; #endif // !CPPGC_SUPPORTS_OBJECT_NAMES - // Folding would regresses name resolution when deriving names from C++ - // class names as it would just folds a name to the base class name. - using ResultType = std::conditional_t<(kHasVirtualDestructorAtBase || - kBothTypesAreTriviallyDestructible || - kHasCustomFinalizerDispatchAtBase) && - !kWantsDetailedObjectNames, - ParentMostGarbageCollectedType, T>; -}; + // Always true. Forces the compiler to resolve callbacks which ensures that + // both modes don't break without requiring compiling a separate + // configuration. Only a single GCInfo (for `ResultType` below) will actually + // be instantiated but existence (and well-formedness) of all callbacks is + // checked. + static constexpr bool kCheckTypeGuardAlwaysTrue = + GCInfoTrait::CheckCallbacksAreDefined() && + GCInfoTrait::CheckCallbacksAreDefined(); -// Trait determines how the garbage collector treats objects wrt. to traversing, -// finalization, and naming. -template -struct GCInfoTrait final { - V8_INLINE static GCInfoIndex Index() { - static_assert(sizeof(T), "T must be fully defined"); - static std::atomic - registered_index; // Uses zero initialization. - GCInfoIndex index = registered_index.load(std::memory_order_acquire); - if (V8_UNLIKELY(!index)) { - EnsureGCInfoIndexTrait::EnsureIndex(registered_index); - // Slow path call uses V8_PRESERVE_MOST which does not support return - // values (also preserves RAX). 
Avoid out parameter by just reloading the - // value here which at this point is guaranteed to be set. - index = registered_index.load(std::memory_order_acquire); - CPPGC_DCHECK(index != 0); - } - return index; - } + // Folding would regress name resolution when deriving names from C++ + // class names as it would just folds a name to the base class name. + using ResultType = + std::conditional_t; }; } // namespace internal diff --git a/v8/include/cppgc/member.h b/v8/include/cppgc/member.h index b6382a023..457f163bc 100644 --- a/v8/include/cppgc/member.h +++ b/v8/include/cppgc/member.h @@ -597,8 +597,33 @@ using UncompressedMember = internal::BasicMember< T, internal::StrongMemberTag, internal::DijkstraWriteBarrierPolicy, internal::DefaultMemberCheckingPolicy, internal::RawPointer>; +#if defined(CPPGC_POINTER_COMPRESSION) +/** + * CompressedMember. Default implementation of cppgc::Member on builds with + * pointer compression. + */ +template +using CompressedMember = internal::BasicMember< + T, internal::StrongMemberTag, internal::DijkstraWriteBarrierPolicy, + internal::DefaultMemberCheckingPolicy, internal::CompressedPointer>; +#endif // defined(CPPGC_POINTER_COMPRESSION) + } // namespace subtle +namespace internal { + +struct Dummy; + +static constexpr size_t kSizeOfMember = sizeof(Member); +static constexpr size_t kSizeOfUncompressedMember = + sizeof(subtle::UncompressedMember); +#if defined(CPPGC_POINTER_COMPRESSION) +static constexpr size_t kSizeofCompressedMember = + sizeof(subtle::CompressedMember); +#endif // defined(CPPGC_POINTER_COMPRESSION) + +} // namespace internal + } // namespace cppgc #endif // INCLUDE_CPPGC_MEMBER_H_ diff --git a/v8/include/cppgc/platform.h b/v8/include/cppgc/platform.h index 5a0a40ec8..ae96579dd 100644 --- a/v8/include/cppgc/platform.h +++ b/v8/include/cppgc/platform.h @@ -136,8 +136,13 @@ class V8_EXPORT Platform { * \param page_allocator The allocator used for maintaining meta data. Must stay * always alive and not change between multiple calls to InitializeProcess. If * no allocator is provided, a default internal version will be used. + * \param desired_heap_size Desired amount of virtual address space to reserve + * for the heap, in bytes. Actual size will be clamped to minimum and maximum + * values based on compile-time settings and may be rounded up. If this + * parameter is zero, a default value will be used. */ -V8_EXPORT void InitializeProcess(PageAllocator* page_allocator = nullptr); +V8_EXPORT void InitializeProcess(PageAllocator* page_allocator = nullptr, + size_t desired_heap_size = 0); /** * Must be called after destroying the last used heap. Some process-global diff --git a/v8/include/cppgc/trace-trait.h b/v8/include/cppgc/trace-trait.h index 694fbfdcc..5fc863d2e 100644 --- a/v8/include/cppgc/trace-trait.h +++ b/v8/include/cppgc/trace-trait.h @@ -53,6 +53,14 @@ struct TraceDescriptor { TraceCallback callback; }; +/** + * Callback for getting a TraceDescriptor for a given address. + * + * \param address Possibly inner address of an object. + * \returns a TraceDescriptor for the provided address. 
+ */ +using TraceDescriptorCallback = TraceDescriptor (*)(const void* address); + namespace internal { struct V8_EXPORT TraceTraitFromInnerAddressImpl { diff --git a/v8/include/cppgc/visitor.h b/v8/include/cppgc/visitor.h index 9f43ad5e9..1d6b39a14 100644 --- a/v8/include/cppgc/visitor.h +++ b/v8/include/cppgc/visitor.h @@ -5,10 +5,13 @@ #ifndef INCLUDE_CPPGC_VISITOR_H_ #define INCLUDE_CPPGC_VISITOR_H_ +#include + #include "cppgc/custom-space.h" #include "cppgc/ephemeron-pair.h" #include "cppgc/garbage-collected.h" #include "cppgc/internal/logging.h" +#include "cppgc/internal/member-storage.h" #include "cppgc/internal/pointer-policies.h" #include "cppgc/liveness-broker.h" #include "cppgc/member.h" @@ -113,6 +116,30 @@ class V8_EXPORT Visitor { } #endif // defined(CPPGC_POINTER_COMPRESSION) + template + void TraceMultiple(const subtle::UncompressedMember* start, size_t len) { + static_assert(sizeof(T), "Pointee type must be fully defined."); + static_assert(internal::IsGarbageCollectedOrMixinType::value, + "T must be GarbageCollected or GarbageCollectedMixin type"); + VisitMultipleUncompressedMember(start, len, + &TraceTrait::GetTraceDescriptor); + } + + template , subtle::UncompressedMember>>* = nullptr> + void TraceMultiple(const Member* start, size_t len) { + static_assert(sizeof(T), "Pointee type must be fully defined."); + static_assert(internal::IsGarbageCollectedOrMixinType::value, + "T must be GarbageCollected or GarbageCollectedMixin type"); +#if defined(CPPGC_POINTER_COMPRESSION) + static_assert(std::is_same_v, subtle::CompressedMember>, + "Member and CompressedMember must be the same."); + VisitMultipleCompressedMember(start, len, + &TraceTrait::GetTraceDescriptor); +#endif // defined(CPPGC_POINTER_COMPRESSION) + } + /** * Trace method for inlined objects that are not allocated themselves but * otherwise follow managed heap layout and have a Trace() method. @@ -131,6 +158,26 @@ class V8_EXPORT Visitor { TraceTrait::Trace(this, &object); } + template + void TraceMultiple(const T* start, size_t len) { +#if V8_ENABLE_CHECKS + // This object is embedded in potentially multiple nested objects. The + // outermost object must not be in construction as such objects are (a) not + // processed immediately, and (b) only processed conservatively if not + // otherwise possible. + CheckObjectNotInConstruction(start); +#endif // V8_ENABLE_CHECKS + for (size_t i = 0; i < len; ++i) { + const T* object = &start[i]; + if constexpr (std::is_polymorphic_v) { + // The object's vtable may be uninitialized in which case the object is + // not traced. + if (*reinterpret_cast(object) == 0) continue; + } + TraceTrait::Trace(this, object); + } + } + /** * Registers a weak callback method on the object of type T. See * LivenessBroker for an usage example. @@ -314,6 +361,39 @@ class V8_EXPORT Visitor { WeakCallback callback, const void* data) {} virtual void HandleMovableReference(const void**) {} + virtual void VisitMultipleUncompressedMember( + const void* start, size_t len, + TraceDescriptorCallback get_trace_descriptor) { + // Default implementation merely delegates to Visit(). 
+ const char* it = static_cast(start); + const char* end = it + len * internal::kSizeOfUncompressedMember; + for (; it < end; it += internal::kSizeOfUncompressedMember) { + const auto* current = reinterpret_cast(it); + const void* object = current->LoadAtomic(); + if (!object) continue; + + Visit(object, get_trace_descriptor(object)); + } + } + +#if defined(CPPGC_POINTER_COMPRESSION) + virtual void VisitMultipleCompressedMember( + const void* start, size_t len, + TraceDescriptorCallback get_trace_descriptor) { + // Default implementation merely delegates to Visit(). + const char* it = static_cast(start); + const char* end = it + len * internal::kSizeofCompressedMember; + for (; it < end; it += internal::kSizeofCompressedMember) { + const auto* current = + reinterpret_cast(it); + const void* object = current->LoadAtomic(); + if (!object) continue; + + Visit(object, get_trace_descriptor(object)); + } + } +#endif // defined(CPPGC_POINTER_COMPRESSION) + private: template static void WeakCallbackMethodDelegate(const LivenessBroker& info, diff --git a/v8/include/js_protocol.pdl b/v8/include/js_protocol.pdl index 0dbdc01d3..7a3c772cb 100644 --- a/v8/include/js_protocol.pdl +++ b/v8/include/js_protocol.pdl @@ -1443,7 +1443,7 @@ domain Runtime # resulting `objectId` is still provided. deprecated optional boolean generateWebDriverValue # Specifies the result serialization. If provided, overrides - # `returnByValue` and `generateWebDriverValue`. + # `generatePreview`, `returnByValue` and `generateWebDriverValue`. experimental optional SerializationOptions serializationOptions returns @@ -1538,7 +1538,7 @@ domain Runtime # resulting `objectId` is still provided. deprecated optional boolean generateWebDriverValue # Specifies the result serialization. If provided, overrides - # `returnByValue` and `generateWebDriverValue`. + # `generatePreview`, `returnByValue` and `generateWebDriverValue`. experimental optional SerializationOptions serializationOptions returns # Evaluation result. diff --git a/v8/include/v8-handle-base.h b/v8/include/v8-handle-base.h index 113ffa2da..da19db77d 100644 --- a/v8/include/v8-handle-base.h +++ b/v8/include/v8-handle-base.h @@ -49,11 +49,6 @@ class ValueHelper final { return *reinterpret_cast(slot); } - template - V8_INLINE static T* ValueAsSlot(T* const& value) { - return reinterpret_cast(const_cast(&value)); - } - #else // !V8_ENABLE_DIRECT_LOCAL template @@ -66,11 +61,6 @@ class ValueHelper final { return reinterpret_cast(slot); } - template - V8_INLINE static T* ValueAsSlot(T* const& value) { - return value; - } - #endif // V8_ENABLE_DIRECT_LOCAL }; diff --git a/v8/include/v8-internal.h b/v8/include/v8-internal.h index f4b9c13ab..f59eaea3c 100644 --- a/v8/include/v8-internal.h +++ b/v8/include/v8-internal.h @@ -12,7 +12,6 @@ #include #include -#include "v8-version.h" // NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory) namespace v8 { @@ -80,7 +79,7 @@ struct SmiTagging<4> { static_cast(kUintptrAllBitsSet << (kSmiValueSize - 1)); static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1); - V8_INLINE static int SmiToInt(Address value) { + V8_INLINE static constexpr int SmiToInt(Address value) { int shift_bits = kSmiTagSize + kSmiShiftSize; // Truncate and shift down (requires >> to be sign extending). 
return static_cast(static_cast(value)) >> shift_bits; @@ -105,7 +104,7 @@ struct SmiTagging<8> { static_cast(kUintptrAllBitsSet << (kSmiValueSize - 1)); static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1); - V8_INLINE static int SmiToInt(Address value) { + V8_INLINE static constexpr int SmiToInt(Address value) { int shift_bits = kSmiTagSize + kSmiShiftSize; // Shift down and throw away top 32 bits. return static_cast(static_cast(value) >> shift_bits); @@ -259,8 +258,10 @@ static const uint32_t kExternalPointerIndexShift = 5; #endif // V8_TARGET_OS_ANDROID // The maximum number of entries in an external pointer table. +static const int kExternalPointerTableEntrySize = 8; +static const int kExternalPointerTableEntrySizeLog2 = 3; static const size_t kMaxExternalPointers = - kExternalPointerTableReservationSize / kApiSystemPointerSize; + kExternalPointerTableReservationSize / kExternalPointerTableEntrySize; static_assert((1 << (32 - kExternalPointerIndexShift)) == kMaxExternalPointers, "kExternalPointerTableReservationSize and " "kExternalPointerIndexShift don't match"); @@ -281,15 +282,18 @@ static const size_t kMaxExternalPointers = 0; // that it is smaller than the size of the table. using ExternalPointerHandle = uint32_t; -// ExternalPointers point to objects located outside the sandbox. When -// sandboxed external pointers are enabled, these are stored on heap as -// ExternalPointerHandles, otherwise they are simply raw pointers. +// ExternalPointers point to objects located outside the sandbox. When the V8 +// sandbox is enabled, these are stored on heap as ExternalPointerHandles, +// otherwise they are simply raw pointers. #ifdef V8_ENABLE_SANDBOX using ExternalPointer_t = ExternalPointerHandle; #else using ExternalPointer_t = Address; #endif +constexpr ExternalPointer_t kNullExternalPointer = 0; +constexpr ExternalPointerHandle kNullExternalPointerHandle = 0; + // When the sandbox is enabled, external pointers are stored in an external // pointer table and are referenced from HeapObjects through an index (a // "handle"). When stored in the table, the pointers are tagged with per-type @@ -471,6 +475,42 @@ PER_ISOLATE_EXTERNAL_POINTER_TAGS(CHECK_NON_SHARED_EXTERNAL_POINTER_TAGS) #undef SHARED_EXTERNAL_POINTER_TAGS #undef EXTERNAL_POINTER_TAGS +// A handle to a code pointer stored in a code pointer table. +using CodePointerHandle = uint32_t; + +// CodePointers point to machine code (JIT or AOT compiled). When +// the V8 sandbox is enabled, these are stored as CodePointerHandles on the heap +// (i.e. as index into a code pointer table). Otherwise, they are simply raw +// pointers. +#ifdef V8_CODE_POINTER_SANDBOXING +using CodePointer_t = CodePointerHandle; +#else +using CodePointer_t = Address; +#endif + +constexpr CodePointerHandle kNullCodePointerHandle = 0; + +// The size of the virtual memory reservation for code pointer table. +// This determines the maximum number of entries in a table. Using a maximum +// size allows omitting bounds checks on table accesses if the indices are +// guaranteed (e.g. through shifting) to be below the maximum index. This +// value must be a power of two. +static const size_t kCodePointerTableReservationSize = 512 * MB; + +// The code pointer table indices stored in HeapObjects as external +// pointers are shifted to the left by this amount to guarantee that they are +// smaller than the maximum table size. +static const uint32_t kCodePointerIndexShift = 6; + +// The maximum number of entries in an external pointer table. 
+static const int kCodePointerTableEntrySize = 8; +static const int kCodePointerTableEntrySizeLog2 = 3; +static const size_t kMaxCodePointers = + kCodePointerTableReservationSize / kCodePointerTableEntrySize; +static_assert( + (1 << (32 - kCodePointerIndexShift)) == kMaxCodePointers, + "kCodePointerTableReservationSize and kCodePointerIndexShift don't match"); + // {obj} must be the raw tagged pointer representation of a HeapObject // that's guaranteed to never be in ReadOnlySpace. V8_EXPORT internal::Isolate* IsolateFromNeverReadOnlySpaceObject(Address obj); @@ -647,11 +687,11 @@ class Internals { #endif } - V8_INLINE static bool HasHeapObjectTag(Address value) { + V8_INLINE static constexpr bool HasHeapObjectTag(Address value) { return (value & kHeapObjectTagMask) == static_cast
(kHeapObjectTag); } - V8_INLINE static int SmiValue(Address value) { + V8_INLINE static constexpr int SmiValue(Address value) { return PlatformSmiTagging::SmiToInt(value); } diff --git a/v8/include/v8-script.h b/v8/include/v8-script.h index 33825921d..bca78970b 100644 --- a/v8/include/v8-script.h +++ b/v8/include/v8-script.h @@ -651,7 +651,9 @@ class V8_EXPORT ScriptCompiler { static ScriptStreamingTask* StartStreaming( Isolate* isolate, StreamedSource* source, ScriptType type = ScriptType::kClassic, - CompileOptions options = kNoCompileOptions); + CompileOptions options = kNoCompileOptions, + CompileHintCallback compile_hint_callback = nullptr, + void* compile_hint_callback_data = nullptr); static ConsumeCodeCacheTask* StartConsumingCodeCache( Isolate* isolate, std::unique_ptr source); diff --git a/v8/include/v8-util.h b/v8/include/v8-util.h index f6d9073de..c845c9924 100644 --- a/v8/include/v8-util.h +++ b/v8/include/v8-util.h @@ -302,9 +302,9 @@ class PersistentValueMapBase { } static PersistentContainerValue ClearAndLeak(Global* persistent) { - V* v = persistent->template value(); + internal::Address* address = persistent->slot(); persistent->Clear(); - return reinterpret_cast(v); + return reinterpret_cast(address); } static PersistentContainerValue Leak(Global* persistent) { diff --git a/v8/include/v8-version.h b/v8/include/v8-version.h index cac0e9f9b..cbb46079b 100644 --- a/v8/include/v8-version.h +++ b/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 11 -#define V8_MINOR_VERSION 5 -#define V8_BUILD_NUMBER 150 -#define V8_PATCH_LEVEL 2 +#define V8_MINOR_VERSION 6 +#define V8_BUILD_NUMBER 189 +#define V8_PATCH_LEVEL 4 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/v8/include/v8config.h b/v8/include/v8config.h index c1729b60d..3c203332f 100644 --- a/v8/include/v8config.h +++ b/v8/include/v8config.h @@ -529,9 +529,6 @@ path. Add it with -I to the command line // A macro used to change the calling conventions to preserve all registers (no // caller-saved registers). Use this for cold functions called from hot // functions. -// Note: The attribute is considered experimental, so apply with care. Also, -// "preserve_most" is currently not handling the return value correctly, so only -// use it for functions returning void (see https://reviews.llvm.org/D141020). // Use like: // V8_NOINLINE V8_PRESERVE_MOST void UnlikelyMethod(); #if V8_HAS_ATTRIBUTE_PRESERVE_MOST @@ -761,7 +758,7 @@ V8 shared library set USING_V8_SHARED. #elif defined(__mips64) #define V8_HOST_ARCH_MIPS64 1 #define V8_HOST_ARCH_64_BIT 1 -#elif defined(__loongarch64) +#elif defined(__loongarch_lp64) #define V8_HOST_ARCH_LOONG64 1 #define V8_HOST_ARCH_64_BIT 1 #elif defined(__PPC64__) || defined(_ARCH_PPC64) @@ -811,7 +808,7 @@ V8 shared library set USING_V8_SHARED. 
#define V8_TARGET_ARCH_ARM 1 #elif defined(__mips64) #define V8_TARGET_ARCH_MIPS64 1 -#elif defined(__loongarch64) +#elif defined(__loongarch_lp64) #define V8_TARGET_ARCH_LOONG64 1 #elif defined(_ARCH_PPC64) #define V8_TARGET_ARCH_PPC64 1 diff --git a/v8/infra/mb/mb_config.pyl b/v8/infra/mb/mb_config.pyl index 8acd2665c..836eb4b13 100644 --- a/v8/infra/mb/mb_config.pyl +++ b/v8/infra/mb/mb_config.pyl @@ -182,17 +182,26 @@ 'release_x64_asan_symbolized_expose_memory_corruption', }, 'client.v8.perf' : { + # Arm 'V8 Arm - builder - perf': 'official_arm', 'V8 Arm - builder - pgo - perf': 'official_arm_pgo', + # Android Arm 'V8 Android Arm - builder - perf': 'official_android_arm', + 'v8_android_arm_compile_perf_try': 'official_android_arm', 'V8 Android Arm - builder - pgo - perf': 'official_android_arm_pgo', 'V8 Android Arm64 - builder - perf': 'official_android_arm64', + 'v8_android_arm64_compile_perf_try': 'official_android_arm64', 'V8 Android Arm64 - builder - pgo - perf': 'official_android_arm64_pgo', + # Linux 'V8 Linux - builder - perf': 'official_x86', + 'v8_linux_compile_perf_try': 'official_x86', 'V8 Linux - builder - pgo - perf': 'official_x86_pgo', 'V8 Linux64 - builder - perf': 'official_x64', + 'v8_linux64_compile_perf_try': 'official_x64', 'V8 Linux64 - builder - pgo - perf': 'official_x64_pgo', + # Mac Arm 'V8 Mac Arm64 - builder - perf': 'official_mac_arm64', + 'v8_mac_arm64_compile_perf_try': 'official_mac_arm64', 'V8 Mac Arm64 - builder - pgo - perf': 'official_mac_arm64_pgo', }, 'client.v8.ports': { diff --git a/v8/infra/testing/builders.pyl b/v8/infra/testing/builders.pyl index f1695e64f..29adba047 100644 --- a/v8/infra/testing/builders.pyl +++ b/v8/infra/testing/builders.pyl @@ -48,7 +48,7 @@ # Fuchsia 'v8_fuchsia_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'fuchsia-unittests'}, @@ -56,7 +56,7 @@ }, 'V8 Fuchsia': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'fuchsia-unittests'}, @@ -67,7 +67,7 @@ 'v8_linux_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -112,7 +112,7 @@ }, 'v8_linux_gc_stress_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'mjsunit', 'variant': 'slow_path', 'test_args': ['--gc-stress'], 'shards': 2}, @@ -122,7 +122,7 @@ 'v8_linux_nodcheck_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -146,7 +146,7 @@ }, 'v8_linux_noi18n_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'mozilla', 'variant': 'default'}, @@ -157,7 +157,7 @@ 'v8_linux_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -186,7 +186,7 @@ 'v8_linux_optional_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ # Code serializer. 
@@ -292,7 +292,7 @@ }, 'v8_linux_verify_csa_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -302,7 +302,7 @@ # Linux32 with arm simulators 'v8_linux_arm_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access', 'shards': 3}, @@ -314,7 +314,7 @@ }, 'v8_linux_arm_lite_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 4}, @@ -322,7 +322,7 @@ }, 'v8_linux_arm_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access', 'shards': 2}, @@ -357,7 +357,7 @@ }, 'v8_linux64_asan_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'test262', 'shards': 12}, @@ -368,7 +368,7 @@ }, 'v8_linux64_cfi_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -380,7 +380,7 @@ }, 'v8_linux64_coverage_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default'}, @@ -389,7 +389,7 @@ }, 'v8_linux64_coverage_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default'}, @@ -399,7 +399,7 @@ 'v8_linux64_cppgc_non_default_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -407,7 +407,7 @@ }, 'v8_linux64_css_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 5}, @@ -416,7 +416,7 @@ 'v8_linux64_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks', 'shards': 2}, @@ -450,7 +450,7 @@ 'v8_linux64_dict_tracking_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -458,7 +458,7 @@ }, 'v8_linux64_disable_runtime_call_stats_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing'}, @@ -467,7 +467,7 @@ 'v8_linux64_external_code_space_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -475,14 +475,14 @@ }, 'v8_linux64_fuzzilli_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, # TODO(almuthanna): Add a new test config for the fuzzilli suite. 'tests': [], }, 'v8_linux64_fyi_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ # Infra staging. 
@@ -501,7 +501,7 @@ }, 'v8_linux64_gc_stress_custom_snapshot_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ { @@ -514,7 +514,7 @@ 'v8_linux64_gc_stress_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 5}, @@ -549,7 +549,7 @@ 'v8_linux64_minor_mc_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'minor_mc'}, @@ -557,11 +557,16 @@ {'name': 'mozilla', 'variant': 'minor_mc'}, {'name': 'test262', 'variant': 'minor_mc', 'shards': 2}, {'name': 'mjsunit', 'variant': 'minor_mc'}, + {'name': 'v8testing', 'variant': 'concurrent_minor_mc'}, + {'name': 'benchmarks', 'variant': 'concurrent_minor_mc'}, + {'name': 'mozilla', 'variant': 'concurrent_minor_mc'}, + {'name': 'test262', 'variant': 'concurrent_minor_mc', 'shards': 2}, + {'name': 'mjsunit', 'variant': 'concurrent_minor_mc'}, ], }, 'v8_linux64_msan_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'test262', 'variant': 'default', 'shards': 4}, @@ -571,7 +576,7 @@ 'v8_linux64_nodcheck_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -600,7 +605,7 @@ }, 'v8_linux64_perfetto_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -608,7 +613,7 @@ }, 'v8_linux64_no_pointer_compression_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -617,7 +622,7 @@ 'v8_linux64_no_sandbox_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 5}, @@ -626,7 +631,7 @@ 'v8_linux64_official_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -634,7 +639,7 @@ }, 'v8_linux64_single_generation_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -643,7 +648,7 @@ 'v8_linux64_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ # TODO(machenbach): Add benchmarks. 
@@ -683,7 +688,7 @@ 'v8_linux64_predictable_rel': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -694,7 +699,7 @@ 'v8_linux64_no_sandbox_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -702,7 +707,7 @@ }, 'v8_linux64_tsan_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks', 'shards': 2}, @@ -716,7 +721,7 @@ }, 'v8_linux64_tsan_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks', 'shards': 2}, @@ -728,7 +733,7 @@ }, 'v8_linux64_tsan_no_cm_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ { @@ -746,7 +751,7 @@ }, 'v8_linux64_tsan_isolates_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7}, @@ -754,7 +759,7 @@ }, 'v8_linux64_ubsan_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -762,7 +767,7 @@ }, 'v8_linux64_verify_csa_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -772,7 +777,7 @@ # Linux64 with arm64 simulators 'v8_linux_arm64_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access', 'shards': 2}, @@ -784,7 +789,7 @@ }, 'v8_linux_arm64_gc_stress_dbg': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 12}, @@ -792,7 +797,7 @@ }, 'v8_linux_arm64_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access', 'shards': 4}, @@ -805,7 +810,7 @@ }, 'v8_linux_arm64_cfi_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'test262', 'variant': 'default', 'shards': 3}, @@ -814,7 +819,7 @@ }, 'v8_linux64_arm64_no_pointer_compression_rel': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 4}, @@ -824,7 +829,7 @@ # Linux64 with Loongson simulators 'v8_linux64_loong64_rel': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -834,7 +839,7 @@ # Linux with RISC-V simulators 'v8_linux_riscv32_rel': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -842,7 +847,7 @@ }, 'v8_linux64_riscv64_rel': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -941,7 +946,7 @@ 'v8_mac64_asan_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-12', + 'os': 'Mac-13', }, 'tests': [ {'name': 'v8testing', 'shards': 8}, @@ -950,7 +955,7 @@ 'v8_mac64_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-12', + 'os': 'Mac-13', }, 'tests': [ {'name': 'mozilla'}, @@ -963,7 +968,7 @@ 'v8_mac64_gc_stress_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-12', + 'os': 'Mac-13', }, 'tests': [ {'name': 'd8testing', 'test_args': ['--gc-stress'], 
'shards': 6}, @@ -972,7 +977,7 @@ 'v8_mac64_noopt_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-12', + 'os': 'Mac-13', }, 'tests': [ {'name': 'v8testing', 'shards': 6}, @@ -981,7 +986,7 @@ 'v8_mac64_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-12', + 'os': 'Mac-13', }, 'tests': [ {'name': 'mozilla'}, @@ -994,7 +999,7 @@ 'v8_mac_arm64_rel': { 'swarming_dimensions' : { 'cpu': 'arm64', - 'os': 'Mac-11', + 'os': 'Mac-13', 'pool': 'chromium.tests', }, 'tests': [ @@ -1010,7 +1015,7 @@ 'v8_mac_arm64_dbg': { 'swarming_dimensions' : { 'cpu': 'arm64', - 'os': 'Mac-11', + 'os': 'Mac-13', 'pool': 'chromium.tests', }, 'tests': [ @@ -1026,7 +1031,7 @@ 'v8_mac_arm64_full_dbg': { 'swarming_dimensions' : { 'cpu': 'arm64', - 'os': 'Mac-11', + 'os': 'Mac-13', 'pool': 'chromium.tests', }, 'tests': [ @@ -1042,7 +1047,7 @@ 'v8_mac_arm64_no_pointer_compression_dbg': { 'swarming_dimensions' : { 'cpu': 'arm64', - 'os': 'Mac-11', + 'os': 'Mac-13', 'pool': 'chromium.tests', }, 'tests': [ @@ -1052,30 +1057,30 @@ 'v8_mac_arm64_sim_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-12', + 'os': 'Mac-13', }, 'tests': [ - {'name': 'v8testing', 'shards': 8}, + {'name': 'v8testing', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'variant': 'future', 'shards': 2}, ], }, 'v8_mac_arm64_sim_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-12', + 'os': 'Mac-13', }, 'tests': [ - {'name': 'v8testing', 'shards': 8}, + {'name': 'v8testing', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'variant': 'future', 'shards': 2}, ], }, 'v8_mac_arm64_sim_nodcheck_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-12', + 'os': 'Mac-13', }, 'tests': [ - {'name': 'v8testing', 'shards': 8}, + {'name': 'v8testing', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'variant': 'future', 'shards': 2}, ], }, @@ -1085,7 +1090,7 @@ # Main. 
'V8 Fuzzer': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1099,7 +1104,7 @@ 'V8 Linux': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -1171,7 +1176,7 @@ }, 'V8 Linux - arm64 - sim - CFI': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'test262', 'variant': 'default', 'shards': 3}, @@ -1180,7 +1185,7 @@ }, 'V8 Linux - arm64 - sim - MSAN': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'test262', 'variant': 'default', 'shards': 4}, @@ -1190,7 +1195,7 @@ 'V8 Linux - debug': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -1293,7 +1298,7 @@ }, 'V8 Linux - full debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 6}, @@ -1301,7 +1306,7 @@ }, 'V8 Linux - gc stress': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ { @@ -1319,7 +1324,7 @@ }, 'V8 Linux - noi18n - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'mozilla', 'variant': 'default'}, @@ -1330,7 +1335,7 @@ 'V8 Linux64 - predictable': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -1340,7 +1345,7 @@ }, 'V8 Linux - shared': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1350,7 +1355,7 @@ }, 'V8 Linux - verify csa': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing'}, @@ -1358,7 +1363,7 @@ }, 'V8 Linux PGO instrumentation - builder' : { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'pgo_instrumentation'} @@ -1367,7 +1372,7 @@ 'V8 Linux64': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -1432,7 +1437,7 @@ 'V8 Linux64 - official': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -1440,7 +1445,7 @@ }, 'V8 Linux64 - cfi': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -1452,7 +1457,7 @@ }, 'V8 Linux64 - coverage': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default'}, @@ -1461,7 +1466,7 @@ }, 'V8 Linux64 - coverage - debug': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default'}, @@ -1470,7 +1475,7 @@ }, 'V8 Linux64 - custom snapshot - debug': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'mjsunit', 'test_args': ['--no-harness']}, @@ -1479,7 +1484,7 @@ 'V8 Linux64 - debug': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -1527,7 +1532,7 @@ 'V8 Linux64 - minor mc - debug': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 
'tests': [ {'name': 'v8testing', 'variant': 'minor_mc'}, @@ -1535,11 +1540,16 @@ {'name': 'mozilla', 'variant': 'minor_mc'}, {'name': 'test262', 'variant': 'minor_mc', 'shards': 2}, {'name': 'mjsunit', 'variant': 'minor_mc'}, + {'name': 'v8testing', 'variant': 'concurrent_minor_mc'}, + {'name': 'benchmarks', 'variant': 'concurrent_minor_mc'}, + {'name': 'mozilla', 'variant': 'concurrent_minor_mc'}, + {'name': 'test262', 'variant': 'concurrent_minor_mc', 'shards': 2}, + {'name': 'mjsunit', 'variant': 'concurrent_minor_mc'}, ], }, 'V8 Linux64 - disable runtime call stats': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing'}, @@ -1547,7 +1557,7 @@ }, 'V8 Linux64 - debug - fyi': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ # Infra staging. @@ -1567,7 +1577,7 @@ 'V8 Linux64 - cppgc-non-default - debug': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -1575,7 +1585,7 @@ }, 'V8 Linux64 - debug - perfetto': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1588,7 +1598,7 @@ }, 'V8 Linux64 - debug - single generation': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1602,7 +1612,7 @@ 'V8 Linux64 - external code space - debug': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -1610,7 +1620,7 @@ }, 'V8 Linux64 - fyi': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ # Infra staging. 
@@ -1638,7 +1648,7 @@ 'V8 Linux64 - gc stress': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ { @@ -1668,7 +1678,7 @@ }, 'V8 Linux64 - internal snapshot': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing'}, @@ -1676,7 +1686,7 @@ }, 'V8 Linux64 - no pointer compression': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -1685,7 +1695,7 @@ 'V8 Linux64 - no sandbox': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing'}, @@ -1694,7 +1704,7 @@ 'V8 Linux64 - no sandbox - debug': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -1702,7 +1712,7 @@ }, 'V8 Linux64 - shared': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1712,7 +1722,7 @@ }, 'V8 Linux64 - verify csa': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing'}, @@ -1720,7 +1730,7 @@ }, 'V8 Linux64 ASAN': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'test262', 'shards': 7}, @@ -1731,7 +1741,7 @@ }, 'V8 Linux64 css - debug': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'shards': 5}, @@ -1739,7 +1749,7 @@ }, 'V8 Linux64 GC Stress - custom snapshot': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ { @@ -1751,7 +1761,7 @@ }, 'V8 Linux64 PGO instrumentation - builder' : { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'pgo_instrumentation'} @@ -1759,7 +1769,7 @@ }, 'V8 Linux64 TSAN': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks', 'shards': 2}, @@ -1773,7 +1783,7 @@ }, 'V8 Linux64 TSAN - debug': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'benchmarks', 'shards': 2}, @@ -1785,7 +1795,7 @@ }, 'V8 Linux64 TSAN - stress-incremental-marking': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1818,7 +1828,7 @@ }, 'V8 Linux64 TSAN - isolates': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7}, @@ -1826,7 +1836,7 @@ }, 'V8 Linux64 TSAN - no-concurrent-marking': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ { @@ -1844,7 +1854,7 @@ }, 'V8 Linux64 UBSan': { 'swarming_dimensions' : { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1856,7 +1866,7 @@ 'V8 Mac64': { 'swarming_dimensions': { 'cpu': 'x86-64', - 'os': 'Mac-12', + 'os': 'Mac-13', }, 'tests': [ {'name': 'mozilla'}, @@ -1869,7 +1879,7 @@ 'V8 Mac64 - debug': { 'swarming_dimensions': { 'cpu': 'x86-64', - 'os': 'Mac-12', + 'os': 'Mac-13', }, 'tests': [ {'name': 'mozilla'}, @@ -1882,7 +1892,7 @@ 'V8 Mac64 ASAN': { 'swarming_dimensions': { 'cpu': 'x86-64', - 'os': 'Mac-12', + 'os': 'Mac-13', }, 'tests': [ {'name': 'v8testing', 'shards': 10}, @@ -1891,7 +1901,7 @@ 'V8 Mac64 GC Stress': { 'swarming_dimensions': { 
'cpu': 'x86-64', - 'os': 'Mac-12', + 'os': 'Mac-13', }, 'tests': [ {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 6}, @@ -1899,7 +1909,7 @@ }, 'V8 Mac - arm64 - release': { 'swarming_dimensions' : { - 'os': 'Mac-11', + 'os': 'Mac-13', 'cpu': 'arm64', 'pool': 'chromium.tests', }, @@ -1921,7 +1931,7 @@ }, 'V8 Mac - arm64 - debug': { 'swarming_dimensions' : { - 'os': 'Mac-11', + 'os': 'Mac-13', 'cpu': 'arm64', 'pool': 'chromium.tests', }, @@ -1944,7 +1954,7 @@ 'V8 Mac - arm64 - no pointer compression debug': { 'swarming_dimensions' : { 'cpu': 'arm64', - 'os': 'Mac-11', + 'os': 'Mac-13', 'pool': 'chromium.tests', }, 'tests': [ @@ -1954,7 +1964,7 @@ 'V8 Mac - arm64 - sim - debug': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-12', + 'os': 'Mac-13', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1962,14 +1972,14 @@ 'priority': 35, }, 'tests': [ - {'name': 'v8testing', 'shards': 10}, + {'name': 'v8testing', 'variant': 'default', 'shards': 4}, {'name': 'v8testing', 'variant': 'future', 'shards': 4}, ], }, 'V8 Mac - arm64 - sim - release': { 'swarming_dimensions' : { 'cpu': 'x86-64', - 'os': 'Mac-12', + 'os': 'Mac-13', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1977,7 +1987,7 @@ 'priority': 35, }, 'tests': [ - {'name': 'v8testing', 'shards': 8}, + {'name': 'v8testing', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'variant': 'future', 'shards': 2}, ], }, @@ -2155,7 +2165,7 @@ }, 'V8 Linux - arm - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -2207,7 +2217,7 @@ }, 'V8 Linux - arm - sim - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access', 'shards': 3}, @@ -2260,7 +2270,7 @@ }, 'V8 Linux - arm - sim - lite': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 2}, @@ -2268,7 +2278,7 @@ }, 'V8 Linux - arm - sim - lite - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 4}, @@ -2276,7 +2286,7 @@ }, 'V8 Linux - arm64 - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -2289,14 +2299,14 @@ }, 'V8 Linux - arm64 - sim - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, # TODO(machenbach): Remove longer timeout when this builder scales better. 
'swarming_task_attrs': { 'hard_timeout': 3600, }, 'tests': [ - {'name': 'mjsunit_sp_frame_access'}, + {'name': 'mjsunit_sp_frame_access', 'shards': 2}, {'name': 'mozilla', 'shards': 2}, {'name': 'test262', 'variant': 'default', 'shards': 2}, {'name': 'v8testing', 'shards': 12}, @@ -2305,7 +2315,7 @@ }, 'V8 Linux - arm64 - sim - gc stress': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -2322,7 +2332,7 @@ }, 'V8 Linux - loong64 - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -2335,7 +2345,7 @@ }, 'V8 Linux - mips64el - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -2349,7 +2359,7 @@ }, 'V8 Linux - ppc64 - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -2362,7 +2372,7 @@ }, 'V8 Linux - riscv32 - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -2375,7 +2385,7 @@ }, 'V8 Linux - riscv64 - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -2388,7 +2398,7 @@ }, 'V8 Linux - s390x - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -2421,7 +2431,7 @@ }, 'V8 Linux64 - arm64 - sim - no pointer compression': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -2436,7 +2446,7 @@ # Clusterfuzz. 'V8 NumFuzz': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'swarming_task_attrs': { 'expiration': 13800, @@ -2458,7 +2468,7 @@ }, 'V8 NumFuzz - TSAN': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'swarming_task_attrs': { 'expiration': 13800, @@ -2518,7 +2528,7 @@ }, 'V8 NumFuzz - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'swarming_task_attrs': { 'expiration': 13800, @@ -2585,7 +2595,7 @@ }, 'v8_numfuzz_rel': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ { @@ -2602,7 +2612,7 @@ }, 'v8_numfuzz_tsan_rel': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ { @@ -2655,7 +2665,7 @@ }, 'v8_numfuzz_dbg': { 'swarming_dimensions': { - 'os': 'Ubuntu-18.04', + 'os': 'Ubuntu-22.04', }, 'tests': [ { @@ -2711,4 +2721,40 @@ }, ], }, + # Try perf bots + 'v8_android_arm_perf_try': { + 'tests': [ + { + 'name': 'perf_integration', + }, + ], + }, + 'v8_android_arm64_perf_try': { + 'tests': [ + { + 'name': 'perf_integration', + }, + ], + }, + 'v8_linux_perf_try': { + 'tests': [ + { + 'name': 'perf_integration', + }, + ], + }, + 'v8_linux64_perf_try': { + 'tests': [ + { + 'name': 'perf_integration', + }, + ], + }, + 'v8_mac_arm64_perf_try': { + 'tests': [ + { + 'name': 'perf_integration', + }, + ], + }, } diff --git a/v8/src/DEPS b/v8/src/DEPS index c195782d6..848e97e7c 100644 --- a/v8/src/DEPS +++ b/v8/src/DEPS @@ -38,6 +38,7 @@ include_rules = [ "+src/heap/memory-chunk.h", "+src/heap/memory-chunk-inl.h", "+src/heap/paged-spaces-inl.h", + "+src/heap/parked-scope-inl.h", "+src/heap/parked-scope.h", "+src/heap/read-only-heap-inl.h", "+src/heap/read-only-heap.h", diff --git a/v8/src/api/api-inl.h 
b/v8/src/api/api-inl.h index 130b18e5d..54c12812d 100644 --- a/v8/src/api/api-inl.h +++ b/v8/src/api/api-inl.h @@ -300,39 +300,37 @@ inline bool V8_EXPORT TryToCopyAndConvertArrayToCppBuffer(Local src, namespace internal { -void HandleScopeImplementer::EnterContext(Context context) { +void HandleScopeImplementer::EnterContext(NativeContext context) { DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity()); DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size()); - DCHECK(context.IsNativeContext()); entered_contexts_.push_back(context); is_microtask_context_.push_back(0); } -void HandleScopeImplementer::EnterMicrotaskContext(Context context) { +void HandleScopeImplementer::EnterMicrotaskContext(NativeContext context) { DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity()); DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size()); - DCHECK(context.IsNativeContext()); entered_contexts_.push_back(context); is_microtask_context_.push_back(1); } -Handle HandleScopeImplementer::LastEnteredContext() { +Handle HandleScopeImplementer::LastEnteredContext() { DCHECK_EQ(entered_contexts_.capacity(), is_microtask_context_.capacity()); DCHECK_EQ(entered_contexts_.size(), is_microtask_context_.size()); for (size_t i = 0; i < entered_contexts_.size(); ++i) { size_t j = entered_contexts_.size() - i - 1; if (!is_microtask_context_.at(j)) { - return Handle(entered_contexts_.at(j), isolate_); + return handle(entered_contexts_.at(j), isolate_); } } - return Handle::null(); + return {}; } -Handle HandleScopeImplementer::LastEnteredOrMicrotaskContext() { - if (entered_contexts_.empty()) return Handle::null(); - return Handle(entered_contexts_.back(), isolate_); +Handle HandleScopeImplementer::LastEnteredOrMicrotaskContext() { + if (entered_contexts_.empty()) return {}; + return handle(entered_contexts_.back(), isolate_); } } // namespace internal diff --git a/v8/src/api/api-natives.cc b/v8/src/api/api-natives.cc index f4fe0ae1d..aadb9ffb7 100644 --- a/v8/src/api/api-natives.cc +++ b/v8/src/api/api-natives.cc @@ -146,7 +146,7 @@ void EnableAccessChecks(Isolate* isolate, Handle object) { // Copy map so it won't interfere constructor's initial map. Handle new_map = Map::Copy(isolate, old_map, "EnableAccessChecks"); new_map->set_is_access_check_needed(true); - new_map->set_may_have_interesting_symbols(true); + new_map->set_may_have_interesting_properties(true); JSObject::MigrateToMap(isolate, object, new_map); } @@ -628,7 +628,7 @@ MaybeHandle ApiNatives::InstantiateRemoteObject( TERMINAL_FAST_ELEMENTS_KIND); object_map->SetConstructor(*constructor); object_map->set_is_access_check_needed(true); - object_map->set_may_have_interesting_symbols(true); + object_map->set_may_have_interesting_properties(true); Handle object = isolate->factory()->NewJSObjectFromMap(object_map); JSObject::ForceSetPrototype(isolate, object, @@ -757,13 +757,13 @@ Handle ApiNatives::CreateApiFunction( // Mark as needs_access_check if needed. if (obj->needs_access_check()) { map->set_is_access_check_needed(true); - map->set_may_have_interesting_symbols(true); + map->set_may_have_interesting_properties(true); } // Set interceptor information in the map. 
if (!obj->GetNamedPropertyHandler().IsUndefined(isolate)) { map->set_has_named_interceptor(true); - map->set_may_have_interesting_symbols(true); + map->set_may_have_interesting_properties(true); } if (!obj->GetIndexedPropertyHandler().IsUndefined(isolate)) { map->set_has_indexed_interceptor(true); diff --git a/v8/src/api/api.cc b/v8/src/api/api.cc index f19e07ff7..86585cf7b 100644 --- a/v8/src/api/api.cc +++ b/v8/src/api/api.cc @@ -653,7 +653,7 @@ size_t SnapshotCreator::AddData(i::Address object) { size_t SnapshotCreator::AddData(Local context, i::Address object) { DCHECK_NE(object, i::kNullAddress); DCHECK(!SnapshotCreatorData::cast(data_)->created_); - i::Handle ctx = Utils::OpenHandle(*context); + i::Handle ctx = Utils::OpenHandle(*context); i::Isolate* i_isolate = ctx->GetIsolate(); i::HandleScope scope(i_isolate); i::Handle obj(i::Object(object), i_isolate); @@ -672,7 +672,7 @@ size_t SnapshotCreator::AddData(Local context, i::Address object) { namespace { void ConvertSerializedObjectsToFixedArray(Local context) { - i::Handle ctx = Utils::OpenHandle(*context); + i::Handle ctx = Utils::OpenHandle(*context); i::Isolate* i_isolate = ctx->GetIsolate(); if (!ctx->serialized_objects().IsArrayList()) { ctx->set_serialized_objects( @@ -1194,7 +1194,7 @@ bool Data::IsContext() const { return Utils::OpenHandle(this)->IsContext(); } void Context::Enter() { i::DisallowGarbageCollection no_gc; - i::Context env = *Utils::OpenHandle(this); + i::NativeContext env = *Utils::OpenHandle(this); i::Isolate* i_isolate = env.GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); i::HandleScopeImplementer* impl = i_isolate->handle_scope_implementer(); @@ -1204,7 +1204,7 @@ void Context::Enter() { } void Context::Exit() { - i::Handle env = Utils::OpenHandle(this); + i::Handle env = Utils::OpenHandle(this); i::Isolate* i_isolate = env->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); i::HandleScopeImplementer* impl = i_isolate->handle_scope_implementer(); @@ -1221,7 +1221,8 @@ Context::BackupIncumbentScope::BackupIncumbentScope( : backup_incumbent_context_(backup_incumbent_context) { DCHECK(!backup_incumbent_context_.IsEmpty()); - i::Handle env = Utils::OpenHandle(*backup_incumbent_context_); + i::Handle env = + Utils::OpenHandle(*backup_incumbent_context_); i::Isolate* i_isolate = env->GetIsolate(); js_stack_comparable_address_ = @@ -1232,7 +1233,8 @@ Context::BackupIncumbentScope::BackupIncumbentScope( } Context::BackupIncumbentScope::~BackupIncumbentScope() { - i::Handle env = Utils::OpenHandle(*backup_incumbent_context_); + i::Handle env = + Utils::OpenHandle(*backup_incumbent_context_); i::Isolate* i_isolate = env->GetIsolate(); i::SimulatorStack::UnregisterJSStackComparableAddress(i_isolate); @@ -1247,7 +1249,7 @@ static_assert(i::Internals::kEmbedderDataSlotExternalPointerOffset == static i::Handle EmbedderDataFor(Context* context, int index, bool can_grow, const char* location) { - i::Handle env = Utils::OpenHandle(context); + i::Handle env = Utils::OpenHandle(context); i::Isolate* i_isolate = env->GetIsolate(); DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate); bool ok = Utils::ApiCheck(env->IsNativeContext(), location, @@ -1268,7 +1270,7 @@ static i::Handle EmbedderDataFor(Context* context, } uint32_t Context::GetNumberOfEmbedderDataFields() { - i::Handle context = Utils::OpenHandle(this); + i::Handle context = Utils::OpenHandle(this); DCHECK_NO_SCRIPT_NO_EXCEPTION(context->GetIsolate()); Utils::ApiCheck(context->IsNativeContext(), "Context::GetNumberOfEmbedderDataFields", @@ -1398,7 +1400,7 @@ 
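The extra StartStreaming parameters declared in v8-script.h above, together with the kConsumeCompileHints option accepted by the ApiCheck in the api.cc hunk that follows, let an embedder feed previously recorded compile hints into background streaming compilation. A rough embedder-side sketch; the CompileHintCallback shape of bool(int position, void* data) is an assumption here and is not shown in this diff.

#include <set>
#include <v8.h>

// Assumed callback shape: return true if the function whose body starts at
// |position| should be compiled eagerly during streaming.
static bool ShouldEagerCompile(int position, void* data) {
  auto* hints = static_cast<std::set<int>*>(data);
  return hints->count(position) > 0;
}

v8::ScriptCompiler::ScriptStreamingTask* StartStreamingWithHints(
    v8::Isolate* isolate, v8::ScriptCompiler::StreamedSource* source,
    std::set<int>* recorded_hints) {
  // The two trailing arguments are the new parameters from this diff; they
  // default to nullptr, so existing callers keep compiling unchanged.
  return v8::ScriptCompiler::StartStreaming(
      isolate, source, v8::ScriptType::kClassic,
      v8::ScriptCompiler::kConsumeCompileHints, &ShouldEagerCompile,
      recorded_hints);
}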
void Template::SetAccessorProperty(v8::Local name, static void InitializeFunctionTemplate(i::FunctionTemplateInfo info, bool do_not_cache) { InitializeTemplate(info, Consts::FUNCTION_TEMPLATE, do_not_cache); - info.set_flag(0); + info.set_flag(0, kRelaxedStore); } namespace { @@ -1615,6 +1617,7 @@ void FunctionTemplate::SetCallHandler( i::HandleScope scope(i_isolate); i::Handle obj = i_isolate->factory()->NewCallHandlerInfo( side_effect_type == SideEffectType::kHasNoSideEffect); + obj->set_owner_template(*info); obj->set_callback(i_isolate, reinterpret_cast(callback)); if (data.IsEmpty()) { data = v8::Undefined(reinterpret_cast(i_isolate)); @@ -2066,6 +2069,7 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback, EnsureNotPublished(cons, "v8::ObjectTemplate::SetCallAsFunctionHandler"); i::Handle obj = i_isolate->factory()->NewCallHandlerInfo(); + obj->set_owner_template(*Utils::OpenHandle(this)); obj->set_callback(i_isolate, reinterpret_cast(callback)); if (data.IsEmpty()) { data = v8::Undefined(reinterpret_cast(i_isolate)); @@ -2928,17 +2932,19 @@ void ScriptCompiler::ScriptStreamingTask::Run() { data_->task->Run(); } ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreaming( Isolate* v8_isolate, StreamedSource* source, v8::ScriptType type, - CompileOptions options) { - Utils::ApiCheck(options == kNoCompileOptions || options == kEagerCompile || - options == kProduceCompileHints, - "v8::ScriptCompiler::StartStreaming", - "Invalid CompileOptions"); + CompileOptions options, CompileHintCallback compile_hint_callback, + void* compile_hint_callback_data) { + Utils::ApiCheck( + options == kNoCompileOptions || options == kEagerCompile || + options == kProduceCompileHints || options == kConsumeCompileHints, + "v8::ScriptCompiler::StartStreaming", "Invalid CompileOptions"); if (!i::v8_flags.script_streaming) return nullptr; i::Isolate* i_isolate = reinterpret_cast(v8_isolate); i::ScriptStreamingData* data = source->impl(); std::unique_ptr task = - std::make_unique(data, i_isolate, type, - options); + std::make_unique(data, i_isolate, type, options, + compile_hint_callback, + compile_hint_callback_data); data->task = std::move(task); return new ScriptCompiler::ScriptStreamingTask(data); } @@ -4919,8 +4925,8 @@ Local v8::Object::FindInstanceInPrototypeChain( auto self = Utils::OpenHandle(this); auto i_isolate = self->GetIsolate(); i::PrototypeIterator iter(i_isolate, *self, i::kStartAtReceiver); - auto tmpl_info = *Utils::OpenHandle(*tmpl); - while (!tmpl_info.IsTemplateFor(iter.GetCurrent())) { + i::Tagged tmpl_info = *Utils::OpenHandle(*tmpl); + while (!tmpl_info->IsTemplateFor(iter.GetCurrent())) { iter.Advance(); if (iter.IsAtEnd()) return Local(); if (!iter.GetCurrent().IsJSObject()) return Local(); @@ -5269,13 +5275,13 @@ Maybe v8::Object::HasRealNamedCallbackProperty(Local context, bool v8::Object::HasNamedLookupInterceptor() const { auto self = *Utils::OpenHandle(this); - if (self.IsJSObject()) return false; + if (!self.IsJSObject()) return false; return i::JSObject::cast(self).HasNamedInterceptor(); } bool v8::Object::HasIndexedLookupInterceptor() const { auto self = *Utils::OpenHandle(this); - if (self.IsJSObject()) return false; + if (!self.IsJSObject()) return false; return i::JSObject::cast(self).HasIndexedInterceptor(); } @@ -5372,7 +5378,7 @@ Local v8::Object::Clone() { MaybeLocal v8::Object::GetCreationContext() { auto self = Utils::OpenHandle(this); - i::Handle context; + i::Handle context; if (self->GetCreationContext().ToHandle(&context)) { return 
Utils::ToLocal(context); } @@ -6624,8 +6630,8 @@ template struct InvokeBootstrapper; template <> -struct InvokeBootstrapper { - i::Handle Invoke( +struct InvokeBootstrapper { + i::Handle Invoke( i::Isolate* i_isolate, i::MaybeHandle maybe_global_proxy, v8::Local global_proxy_template, @@ -6777,7 +6783,7 @@ Local NewContext( i::HandleScope scope(i_isolate); ExtensionConfiguration no_extensions; if (extensions == nullptr) extensions = &no_extensions; - i::Handle env = CreateEnvironment( + i::Handle env = CreateEnvironment( i_isolate, extensions, global_template, global_object, context_snapshot_index, embedder_fields_deserializer, microtask_queue); if (env.is_null()) { @@ -6844,18 +6850,18 @@ MaybeLocal v8::Context::NewRemoteContext( } void v8::Context::SetSecurityToken(Local token) { - i::Handle env = Utils::OpenHandle(this); + i::Handle env = Utils::OpenHandle(this); i::Handle token_handle = Utils::OpenHandle(*token); env->set_security_token(*token_handle); } void v8::Context::UseDefaultSecurityToken() { - i::Handle env = Utils::OpenHandle(this); + i::Handle env = Utils::OpenHandle(this); env->set_security_token(env->global_object()); } Local v8::Context::GetSecurityToken() { - i::Handle env = Utils::OpenHandle(this); + i::Handle env = Utils::OpenHandle(this); i::Isolate* i_isolate = env->GetIsolate(); i::Object security_token = env->security_token(); i::Handle token_handle(security_token, i_isolate); @@ -7146,7 +7152,7 @@ class ObjectVisitorDeepFreezer : i::ObjectVisitor { } // namespace Maybe Context::DeepFreeze(DeepFreezeDelegate* delegate) { - i::Handle env = Utils::OpenHandle(this); + i::Handle env = Utils::OpenHandle(this); i::Isolate* i_isolate = env->GetIsolate(); // TODO(behamilton): Incorporate compatibility improvements similar to NodeJS: @@ -7164,19 +7170,19 @@ Maybe Context::DeepFreeze(DeepFreezeDelegate* delegate) { } v8::Isolate* Context::GetIsolate() { - i::Handle env = Utils::OpenHandle(this); + i::Handle env = Utils::OpenHandle(this); return reinterpret_cast(env->GetIsolate()); } v8::MicrotaskQueue* Context::GetMicrotaskQueue() { - i::Handle env = Utils::OpenHandle(this); + i::Handle env = Utils::OpenHandle(this); Utils::ApiCheck(env->IsNativeContext(), "v8::Context::GetMicrotaskQueue", "Must be called on a native context"); return i::Handle::cast(env)->microtask_queue(); } void Context::SetMicrotaskQueue(v8::MicrotaskQueue* queue) { - i::Handle context = Utils::OpenHandle(this); + i::Handle context = Utils::OpenHandle(this); i::Isolate* i_isolate = context->GetIsolate(); Utils::ApiCheck(context->IsNativeContext(), "v8::Context::SetMicrotaskQueue", "Must be called on a native context"); @@ -7197,7 +7203,7 @@ void Context::SetMicrotaskQueue(v8::MicrotaskQueue* queue) { } v8::Local Context::Global() { - i::Handle context = Utils::OpenHandle(this); + i::Handle context = Utils::OpenHandle(this); i::Isolate* i_isolate = context->GetIsolate(); i::Handle global(context->global_proxy(), i_isolate); // TODO(chromium:324812): This should always return the global proxy @@ -7210,21 +7216,21 @@ v8::Local Context::Global() { } void Context::DetachGlobal() { - i::Handle context = Utils::OpenHandle(this); + i::Handle context = Utils::OpenHandle(this); i::Isolate* i_isolate = context->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); i_isolate->DetachGlobal(context); } Local Context::GetExtrasBindingObject() { - i::Handle context = Utils::OpenHandle(this); + i::Handle context = Utils::OpenHandle(this); i::Isolate* i_isolate = context->GetIsolate(); i::Handle 
binding(context->extras_binding_object(), i_isolate); return Utils::ToLocal(binding); } void Context::AllowCodeGenerationFromStrings(bool allow) { - i::Handle context = Utils::OpenHandle(this); + i::Handle context = Utils::OpenHandle(this); i::Isolate* i_isolate = context->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); context->set_allow_code_gen_from_strings( @@ -7233,25 +7239,25 @@ void Context::AllowCodeGenerationFromStrings(bool allow) { } bool Context::IsCodeGenerationFromStringsAllowed() const { - i::Context context = *Utils::OpenHandle(this); + i::NativeContext context = *Utils::OpenHandle(this); return !context.allow_code_gen_from_strings().IsFalse(context.GetIsolate()); } void Context::SetErrorMessageForCodeGenerationFromStrings(Local error) { - i::Handle context = Utils::OpenHandle(this); + i::Handle context = Utils::OpenHandle(this); i::Handle error_handle = Utils::OpenHandle(*error); context->set_error_message_for_code_gen_from_strings(*error_handle); } void Context::SetErrorMessageForWasmCodeGeneration(Local error) { - i::Handle context = Utils::OpenHandle(this); + i::Handle context = Utils::OpenHandle(this); i::Handle error_handle = Utils::OpenHandle(*error); context->set_error_message_for_wasm_code_gen(*error_handle); } void Context::SetAbortScriptExecution( Context::AbortScriptExecutionCallback callback) { - i::Handle context = Utils::OpenHandle(this); + i::Handle context = Utils::OpenHandle(this); i::Isolate* i_isolate = context->GetIsolate(); if (callback == nullptr) { context->set_script_execution_callback( @@ -7263,7 +7269,7 @@ void Context::SetAbortScriptExecution( } Local Context::GetContinuationPreservedEmbedderData() const { - i::Handle context = Utils::OpenHandle(this); + i::Handle context = Utils::OpenHandle(this); i::Isolate* i_isolate = context->GetIsolate(); i::Handle data( context->native_context().continuation_preserved_embedder_data(), @@ -7272,7 +7278,7 @@ Local Context::GetContinuationPreservedEmbedderData() const { } void Context::SetContinuationPreservedEmbedderData(Local data) { - i::Handle context = Utils::OpenHandle(this); + i::Handle context = Utils::OpenHandle(this); i::Isolate* i_isolate = context->GetIsolate(); if (data.IsEmpty()) data = v8::Undefined(reinterpret_cast(i_isolate)); @@ -7285,7 +7291,7 @@ void v8::Context::SetPromiseHooks(Local init_hook, Local after_hook, Local resolve_hook) { #ifdef V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS - i::Handle context = Utils::OpenHandle(this); + i::Handle context = Utils::OpenHandle(this); i::Isolate* i_isolate = context->GetIsolate(); i::Handle init = i_isolate->factory()->undefined_value(); @@ -7340,7 +7346,7 @@ MaybeLocal metrics::Recorder::GetContext( metrics::Recorder::ContextId metrics::Recorder::GetContextId( Local context) { - i::Handle i_context = Utils::OpenHandle(*context); + i::Handle i_context = Utils::OpenHandle(*context); i::Isolate* i_isolate = i_context->GetIsolate(); return i_isolate->GetOrRegisterRecorderContextId( handle(i_context->native_context(), i_isolate)); @@ -7730,16 +7736,7 @@ bool v8::String::MakeExternal( bool v8::String::CanMakeExternal(Encoding encoding) const { i::String obj = *Utils::OpenHandle(this); - if (obj.IsThinString()) { - obj = i::ThinString::cast(obj).actual(); - } - - if (!obj.SupportsExternalization(encoding)) { - return false; - } - - // Only old space strings should be externalized. 
- return !i::Heap::InYoungGeneration(obj); + return obj.SupportsExternalization(encoding); } bool v8::String::StringEquals(Local that) const { @@ -9227,23 +9224,21 @@ v8::Local Isolate::GetCurrentContext() { i::Isolate* i_isolate = reinterpret_cast(this); i::Context context = i_isolate->context(); if (context.is_null()) return Local(); - i::Context native_context = context.native_context(); - if (native_context.is_null()) return Local(); - return Utils::ToLocal(i::Handle(native_context, i_isolate)); + i::NativeContext native_context = context.native_context(); + return Utils::ToLocal(handle(native_context, i_isolate)); } v8::Local Isolate::GetEnteredOrMicrotaskContext() { i::Isolate* i_isolate = reinterpret_cast(this); - i::Handle last = + i::Handle last = i_isolate->handle_scope_implementer()->LastEnteredOrMicrotaskContext(); if (last.is_null()) return Local(); - DCHECK(last->IsNativeContext()); - return Utils::ToLocal(i::Handle::cast(last)); + return Utils::ToLocal(last); } v8::Local Isolate::GetIncumbentContext() { i::Isolate* i_isolate = reinterpret_cast(this); - i::Handle context = i_isolate->GetIncumbentContext(); + i::Handle context = i_isolate->GetIncumbentContext(); return Utils::ToLocal(context); } @@ -11291,14 +11286,17 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) { FullObjectSlot(handle_scope_data_.next)); } - DetachableVector* context_lists[2] = {&saved_contexts_, - &entered_contexts_}; - for (unsigned i = 0; i < arraysize(context_lists); i++) { - context_lists[i]->shrink_to_fit(); - if (context_lists[i]->empty()) continue; - FullObjectSlot start(&context_lists[i]->front()); + saved_contexts_.shrink_to_fit(); + if (!saved_contexts_.empty()) { + FullObjectSlot start(&saved_contexts_.front()); v->VisitRootPointers(Root::kHandleScope, nullptr, start, - start + static_cast(context_lists[i]->size())); + start + static_cast(saved_contexts_.size())); + } + entered_contexts_.shrink_to_fit(); + if (!entered_contexts_.empty()) { + FullObjectSlot start(&entered_contexts_.front()); + v->VisitRootPointers(Root::kHandleScope, nullptr, start, + start + static_cast(entered_contexts_.size())); } // The shape of |entered_contexts_| and |is_microtask_context_| stacks must // be in sync. @@ -11388,12 +11386,46 @@ void InvokeAccessorGetterCallback( getter(property, info); } -void InvokeFunctionCallback(const v8::FunctionCallbackInfo& info) { +namespace { + +inline void InvokeFunctionCallback( + const v8::FunctionCallbackInfo& info, CallApiCallbackMode mode) { i::Isolate* i_isolate = reinterpret_cast(info.GetIsolate()); RCS_SCOPE(i_isolate, RuntimeCallCounterId::kFunctionCallback); - // TODO(v8:13825): perform side effect checks if necessary once - // CallHandlerInfo is passed here. + switch (mode) { + case CallApiCallbackMode::kGeneric: { + if (V8_UNLIKELY(i_isolate->should_check_side_effects())) { + // Load FunctionTemplateInfo from API_CALLBACK_EXIT frame. + // If this ever becomes a performance bottleneck, one can pass function + // template info here explicitly. + StackFrameIterator it(i_isolate); + CHECK(it.frame()->is_api_callback_exit()); + ApiCallbackExitFrame* frame = ApiCallbackExitFrame::cast(it.frame()); + FunctionTemplateInfo fti = FunctionTemplateInfo::cast(frame->target()); + CallHandlerInfo call_handler_info = + CallHandlerInfo::cast(fti.call_code(kAcquireLoad)); + if (!i_isolate->debug()->PerformSideEffectCheckForCallback( + handle(call_handler_info, i_isolate))) { + // Failed side effect check. 
+ return; + } + } + break; + } + case CallApiCallbackMode::kWithSideEffects: { + Handle has_side_effects; + if (V8_UNLIKELY(i_isolate->should_check_side_effects()) && + !i_isolate->debug()->PerformSideEffectCheckForCallback( + has_side_effects)) { + // Failed side effect check. + return; + } + break; + } + case CallApiCallbackMode::kNoSideEffects: + break; + } Address arg = i_isolate->isolate_data()->api_callback_thunk_argument(); if (USE_SIMULATOR_BOOL) { @@ -11403,9 +11435,25 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo& info) { ExternalCallbackScope call_scope(i_isolate, FUNCTION_ADDR(callback)); callback(info); } +} // namespace + +void InvokeFunctionCallbackGeneric( + const v8::FunctionCallbackInfo& info) { + InvokeFunctionCallback(info, CallApiCallbackMode::kGeneric); +} + +void InvokeFunctionCallbackNoSideEffects( + const v8::FunctionCallbackInfo& info) { + InvokeFunctionCallback(info, CallApiCallbackMode::kNoSideEffects); +} + +void InvokeFunctionCallbackWithSideEffects( + const v8::FunctionCallbackInfo& info) { + InvokeFunctionCallback(info, CallApiCallbackMode::kWithSideEffects); +} void InvokeFinalizationRegistryCleanupFromTask( - Handle context, + Handle native_context, Handle finalization_registry, Handle callback) { i::Isolate* i_isolate = finalization_registry->native_context().GetIsolate(); @@ -11416,7 +11464,7 @@ void InvokeFinalizationRegistryCleanupFromTask( // API call. This method is implemented here to avoid duplication of the // exception handling and microtask running logic in CallDepthScope. if (i_isolate->is_execution_terminating()) return; - Local api_context = Utils::ToLocal(context); + Local api_context = Utils::ToLocal(native_context); CallDepthScope call_depth_scope(i_isolate, api_context); VMState state(i_isolate); Handle argv[] = {callback}; diff --git a/v8/src/api/api.h b/v8/src/api/api.h index a3fa6047e..05f641334 100644 --- a/v8/src/api/api.h +++ b/v8/src/api/api.h @@ -96,7 +96,7 @@ class RegisteredExtension { #define TO_LOCAL_LIST(V) \ V(ToLocal, AccessorPair, debug::AccessorPair) \ - V(ToLocal, Context, Context) \ + V(ToLocal, NativeContext, Context) \ V(ToLocal, Object, Value) \ V(ToLocal, Module, Module) \ V(ToLocal, Name, Name) \ @@ -168,7 +168,7 @@ class RegisteredExtension { V(Module, Module) \ V(Function, JSReceiver) \ V(Message, JSMessageObject) \ - V(Context, Context) \ + V(Context, NativeContext) \ V(External, Object) \ V(StackTrace, FixedArray) \ V(StackFrame, StackFrameInfo) \ @@ -320,17 +320,17 @@ class HandleScopeImplementer { inline internal::Address* GetSpareOrNewBlock(); inline void DeleteExtensions(internal::Address* prev_limit); - inline void EnterContext(Context context); + inline void EnterContext(NativeContext context); inline void LeaveContext(); - inline bool LastEnteredContextWas(Context context); + inline bool LastEnteredContextWas(NativeContext context); inline size_t EnteredContextCount() const { return entered_contexts_.size(); } - inline void EnterMicrotaskContext(Context context); + inline void EnterMicrotaskContext(NativeContext context); // Returns the last entered context or an empty handle if no // contexts have been entered. - inline Handle LastEnteredContext(); - inline Handle LastEnteredOrMicrotaskContext(); + inline Handle LastEnteredContext(); + inline Handle LastEnteredOrMicrotaskContext(); inline void SaveContext(Context context); inline Context RestoreContext(); @@ -386,7 +386,7 @@ class HandleScopeImplementer { // `is_microtask_context_[i]` is 1. 
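The api.cc changes above split the single function-callback thunk into three variants keyed by CallApiCallbackMode; only the generic variant walks back to the API_CALLBACK_EXIT frame to locate the CallHandlerInfo for the debugger's side-effect check, while the no-side-effects variant skips the check outright. Which variant a callback ends up on follows from the SideEffectType recorded on its CallHandlerInfo when the template is set up (see the SetCallHandler hunk earlier). A hedged embedder-side sketch; the FunctionTemplate::New argument order is taken from the public header, not from this diff.

#include <v8.h>

// A trivial callback that only writes its return value, so it is safe to run
// during debug-evaluate with side-effect checks enabled.
static void GetAnswer(const v8::FunctionCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(42);
}

v8::Local<v8::FunctionTemplate> MakeSideEffectFreeTemplate(
    v8::Isolate* isolate) {
  // Marking the template kHasNoSideEffect is what lets calls be routed through
  // the no-side-effects path instead of performing the runtime check.
  return v8::FunctionTemplate::New(
      isolate, GetAnswer, v8::Local<v8::Value>(), v8::Local<v8::Signature>(),
      /*length=*/0, v8::ConstructorBehavior::kAllow,
      v8::SideEffectType::kHasNoSideEffect);
}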
// TODO(tzik): Remove |is_microtask_context_| after the deprecated // v8::Isolate::GetEnteredContext() is removed. - DetachableVector entered_contexts_; + DetachableVector entered_contexts_; DetachableVector is_microtask_context_; // Used as a stack to keep track of saved contexts. @@ -428,7 +428,7 @@ void HandleScopeImplementer::LeaveContext() { is_microtask_context_.pop_back(); } -bool HandleScopeImplementer::LastEnteredContextWas(Context context) { +bool HandleScopeImplementer::LastEnteredContextWas(NativeContext context) { return !entered_contexts_.empty() && entered_contexts_.back() == context; } @@ -488,10 +488,15 @@ void InvokeAccessorGetterCallback( // enabled the side-effects checking mode. // It gets additional argument, the v8::FunctionCallback address, via // IsolateData::api_callback_thunk_argument slot. -void InvokeFunctionCallback(const v8::FunctionCallbackInfo& info); +void InvokeFunctionCallbackGeneric( + const v8::FunctionCallbackInfo& info); +void InvokeFunctionCallbackNoSideEffects( + const v8::FunctionCallbackInfo& info); +void InvokeFunctionCallbackWithSideEffects( + const v8::FunctionCallbackInfo& info); void InvokeFinalizationRegistryCleanupFromTask( - Handle context, + Handle native_context, Handle finalization_registry, Handle callback); diff --git a/v8/src/asmjs/asm-parser.cc b/v8/src/asmjs/asm-parser.cc index a01ce9596..48b8ed06e 100644 --- a/v8/src/asmjs/asm-parser.cc +++ b/v8/src/asmjs/asm-parser.cc @@ -228,7 +228,8 @@ void AsmJsParser::AddGlobalImport(base::Vector name, AsmType* type, // Allocate a separate variable for the import. // TODO(asmjs): Consider using the imported global directly instead of // allocating a separate global variable for immutable (i.e. const) imports. - DeclareGlobal(info, mutable_variable, type, vtype); + DeclareGlobal(info, mutable_variable, type, vtype, + WasmInitExpr::DefaultValue(vtype)); // Record the need to initialize the global from the import. global_imports_.push_back({name, vtype, info}); diff --git a/v8/src/asmjs/asm-parser.h b/v8/src/asmjs/asm-parser.h index 92dd17a86..179043a02 100644 --- a/v8/src/asmjs/asm-parser.h +++ b/v8/src/asmjs/asm-parser.h @@ -313,7 +313,7 @@ class AsmJsParser { VarInfo* GetVarInfo(AsmJsScanner::token_t token); uint32_t VarIndex(VarInfo* info); void DeclareGlobal(VarInfo* info, bool mutable_variable, AsmType* type, - ValueType vtype, WasmInitExpr init = WasmInitExpr()); + ValueType vtype, WasmInitExpr init); void DeclareStdlibFunc(VarInfo* info, VarKind kind, AsmType* type); void AddGlobalImport(base::Vector name, AsmType* type, ValueType vtype, bool mutable_variable, VarInfo* info); diff --git a/v8/src/ast/ast-value-factory.cc b/v8/src/ast/ast-value-factory.cc index 6cf5796c6..4afdab9f6 100644 --- a/v8/src/ast/ast-value-factory.cc +++ b/v8/src/ast/ast-value-factory.cc @@ -44,14 +44,14 @@ namespace { // For using StringToIndex. 
class OneByteStringStream { public: - explicit OneByteStringStream(base::Vector lb) + explicit OneByteStringStream(base::Vector lb) : literal_bytes_(lb), pos_(0) {} bool HasMore() { return pos_ < literal_bytes_.length(); } uint16_t GetNext() { return literal_bytes_[pos_++]; } private: - base::Vector literal_bytes_; + base::Vector literal_bytes_; int pos_; }; @@ -335,7 +335,7 @@ const AstRawString* AstValueFactory::GetTwoByteStringInternal( uint32_t raw_hash_field = StringHasher::HashSequentialString( literal.begin(), literal.length(), hash_seed_); return GetString(raw_hash_field, false, - base::Vector::cast(literal)); + base::Vector::cast(literal)); } const AstRawString* AstValueFactory::GetString( @@ -386,7 +386,7 @@ template EXPORT_TEMPLATE_DEFINE( const AstRawString* AstValueFactory::GetString( uint32_t raw_hash_field, bool is_one_byte, - base::Vector literal_bytes) { + base::Vector literal_bytes) { // literal_bytes here points to whatever the user passed, and this is OK // because we use vector_compare (which checks the contents) to compare // against the AstRawStrings which are in the string_table_. We should not @@ -397,10 +397,11 @@ const AstRawString* AstValueFactory::GetString( [&]() { // Copy literal contents for later comparison. int length = literal_bytes.length(); - byte* new_literal_bytes = ast_raw_string_zone()->NewArray(length); + uint8_t* new_literal_bytes = + ast_raw_string_zone()->NewArray(length); memcpy(new_literal_bytes, literal_bytes.begin(), length); AstRawString* new_string = ast_raw_string_zone()->New( - is_one_byte, base::Vector(new_literal_bytes, length), + is_one_byte, base::Vector(new_literal_bytes, length), raw_hash_field); CHECK_NOT_NULL(new_string); AddString(new_string); diff --git a/v8/src/ast/ast-value-factory.h b/v8/src/ast/ast-value-factory.h index b15e31358..bea71bd67 100644 --- a/v8/src/ast/ast-value-factory.h +++ b/v8/src/ast/ast-value-factory.h @@ -101,7 +101,7 @@ class AstRawString final : public ZoneObject { friend Zone; // Members accessed only by the AstValueFactory & related classes: - AstRawString(bool is_one_byte, base::Vector literal_bytes, + AstRawString(bool is_one_byte, base::Vector literal_bytes, uint32_t raw_hash_field) : next_(nullptr), literal_bytes_(literal_bytes), @@ -130,7 +130,7 @@ class AstRawString final : public ZoneObject { Handle string_; }; - base::Vector literal_bytes_; // Memory owned by Zone. + base::Vector literal_bytes_; // Memory owned by Zone. uint32_t raw_hash_field_; bool is_one_byte_; #ifdef DEBUG @@ -398,7 +398,7 @@ class AstValueFactory { const AstRawString* GetTwoByteStringInternal( base::Vector literal); const AstRawString* GetString(uint32_t raw_hash_field, bool is_one_byte, - base::Vector literal_bytes); + base::Vector literal_bytes); // All strings are copied here. 
AstRawStringMap string_table_; diff --git a/v8/src/ast/modules.cc b/v8/src/ast/modules.cc index e80222f92..2b07460c2 100644 --- a/v8/src/ast/modules.cc +++ b/v8/src/ast/modules.cc @@ -137,15 +137,15 @@ Handle SourceTextModuleDescriptor::AstModuleRequest::Serialize( AllocationType::kOld); { DisallowGarbageCollection no_gc; - auto raw_import_assertions = *import_assertions_array; + Tagged raw_import_assertions = *import_assertions_array; int i = 0; for (auto iter = import_assertions()->cbegin(); iter != import_assertions()->cend(); ++iter, i += ModuleRequest::kAssertionEntrySize) { - raw_import_assertions.set(i, *iter->first->string()); - raw_import_assertions.set(i + 1, *iter->second.first->string()); - raw_import_assertions.set(i + 2, - Smi::FromInt(iter->second.second.beg_pos)); + raw_import_assertions->set(i, *iter->first->string()); + raw_import_assertions->set(i + 1, *iter->second.first->string()); + raw_import_assertions->set(i + 2, + Smi::FromInt(iter->second.second.beg_pos)); } } return v8::internal::ModuleRequest::New(isolate, specifier()->string(), diff --git a/v8/src/ast/scopes.cc b/v8/src/ast/scopes.cc index 1ebe09bc5..215dba2e0 100644 --- a/v8/src/ast/scopes.cc +++ b/v8/src/ast/scopes.cc @@ -751,6 +751,13 @@ void DeclarationScope::DeclareThis(AstValueFactory* ast_value_factory) { THIS_VARIABLE, derived_constructor ? kNeedsInitialization : kCreatedInitialized, kNotAssigned); + // Derived constructors have hole checks when calling super. Mark the 'this' + // variable as having hole initialization forced so that TDZ elision analysis + // applies and numbers the variable. + if (derived_constructor) { + receiver_->ForceHoleInitialization( + Variable::kHasHoleCheckUseInUnknownScope); + } locals_.Add(receiver_); } @@ -2282,9 +2289,10 @@ void Scope::ResolveVariable(VariableProxy* proxy) { namespace { -void SetNeedsHoleCheck(Variable* var, VariableProxy* proxy) { +void SetNeedsHoleCheck(Variable* var, VariableProxy* proxy, + Variable::ForceHoleInitializationFlag flag) { proxy->set_needs_hole_check(); - var->ForceHoleInitialization(); + var->ForceHoleInitialization(flag); } void UpdateNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) { @@ -2303,7 +2311,7 @@ void UpdateNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) { // unknown at compilation time whether the binding referred to in the // exporting module itself requires hole checks. if (var->location() == VariableLocation::MODULE && !var->IsExport()) { - SetNeedsHoleCheck(var, proxy); + SetNeedsHoleCheck(var, proxy, Variable::kHasHoleCheckUseInUnknownScope); return; } @@ -2326,7 +2334,8 @@ void UpdateNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) { // The scope of the variable needs to be checked, in case the use is // in a sub-block which may be linear. 
if (var->scope()->GetClosureScope() != scope->GetClosureScope()) { - SetNeedsHoleCheck(var, proxy); + SetNeedsHoleCheck(var, proxy, + Variable::kHasHoleCheckUseInDifferentClosureScope); return; } @@ -2336,7 +2345,7 @@ void UpdateNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) { if (var->scope()->is_nonlinear() || var->initializer_position() >= proxy->position()) { - SetNeedsHoleCheck(var, proxy); + SetNeedsHoleCheck(var, proxy, Variable::kHasHoleCheckUseInSameClosureScope); return; } } diff --git a/v8/src/ast/variables.cc b/v8/src/ast/variables.cc index 3b474f951..6f5d94257 100644 --- a/v8/src/ast/variables.cc +++ b/v8/src/ast/variables.cc @@ -49,7 +49,8 @@ void Variable::AssignHoleCheckBitmapIndex(ZoneVector& list, DCHECK_EQ(next_index, list.size() + 1); DCHECK_NE(kUncacheableHoleCheckBitmapIndex, next_index); DCHECK_LT(next_index, kHoleCheckBitmapBits); - hole_check_bitmap_index_ = next_index; + hole_check_analysis_bit_field_ = HoleCheckBitmapIndexField::update( + hole_check_analysis_bit_field_, next_index); list.push_back(this); } diff --git a/v8/src/ast/variables.h b/v8/src/ast/variables.h index 590390b31..30d41ea80 100644 --- a/v8/src/ast/variables.h +++ b/v8/src/ast/variables.h @@ -30,16 +30,18 @@ class Variable final : public ZoneObject { next_(nullptr), index_(-1), initializer_position_(kNoSourcePosition), - hole_check_bitmap_index_(kUncacheableHoleCheckBitmapIndex), bit_field_(MaybeAssignedFlagField::encode(maybe_assigned_flag) | InitializationFlagField::encode(initialization_flag) | VariableModeField::encode(mode) | IsUsedField::encode(false) | ForceContextAllocationBit::encode(false) | - ForceHoleInitializationField::encode(false) | LocationField::encode(VariableLocation::UNALLOCATED) | VariableKindField::encode(kind) | - IsStaticFlagField::encode(is_static_flag)) { + IsStaticFlagField::encode(is_static_flag)), + hole_check_analysis_bit_field_(HoleCheckBitmapIndexField::encode( + kUncacheableHoleCheckBitmapIndex) | + ForceHoleInitializationFlagField::encode( + kHoleInitializationNotForced)) { // Var declared variables never need initialization. DCHECK(!(mode == VariableMode::kVar && initialization_flag == kNeedsInitialization)); @@ -164,18 +166,38 @@ class Variable final : public ZoneObject { return initialization_flag() == kNeedsInitialization; } + enum ForceHoleInitializationFlag { + kHoleInitializationNotForced = 0, + kHasHoleCheckUseInDifferentClosureScope = 1 << 0, + kHasHoleCheckUseInSameClosureScope = 1 << 1, + kHasHoleCheckUseInUnknownScope = kHasHoleCheckUseInDifferentClosureScope | + kHasHoleCheckUseInSameClosureScope + }; + ForceHoleInitializationFlag force_hole_initialization_flag_field() const { + return ForceHoleInitializationFlagField::decode( + hole_check_analysis_bit_field_); + } + bool IsHoleInitializationForced() const { - return ForceHoleInitializationField::decode(bit_field_); + return force_hole_initialization_flag_field() != + kHoleInitializationNotForced; + } + + bool HasHoleCheckUseInSameClosureScope() const { + return force_hole_initialization_flag_field() & + kHasHoleCheckUseInSameClosureScope; } // Called during scope analysis when a VariableProxy is found to // reference this Variable in such a way that a hole check will // be required at runtime. 
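  // As a rough illustration: in
  //
  //   {
  //     f();                        // may run before `x` is initialized
  //     let x = 1;
  //     function f() { return x; }
  //   }
  //
  // the read of `x` inside `f` happens in a different closure scope than the
  // declaration, so it is recorded as kHasHoleCheckUseInDifferentClosureScope
  // and keeps its runtime TDZ (hole) check, whereas a use that provably
  // follows the initializer in the same closure scope can have the check
  // elided.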
- void ForceHoleInitialization() { + void ForceHoleInitialization(ForceHoleInitializationFlag flag) { DCHECK_EQ(kNeedsInitialization, initialization_flag()); + DCHECK_NE(kHoleInitializationNotForced, flag); DCHECK(IsLexicalVariableMode(mode()) || IsPrivateMethodOrAccessorVariableMode(mode())); - bit_field_ = ForceHoleInitializationField::update(bit_field_, true); + hole_check_analysis_bit_field_ |= + ForceHoleInitializationFlagField::encode(flag); } // The first N-1 lexical bindings that need hole checks in a compilation are @@ -193,28 +215,29 @@ class Variable final : public ZoneObject { std::numeric_limits::digits; void ResetHoleCheckBitmapIndex() { - hole_check_bitmap_index_ = kUncacheableHoleCheckBitmapIndex; + hole_check_analysis_bit_field_ = HoleCheckBitmapIndexField::update( + hole_check_analysis_bit_field_, kUncacheableHoleCheckBitmapIndex); } void RememberHoleCheckInBitmap(HoleCheckBitmap& bitmap, ZoneVector& list) { DCHECK(v8_flags.ignition_elide_redundant_tdz_checks); - if (V8_UNLIKELY(hole_check_bitmap_index_ == - kUncacheableHoleCheckBitmapIndex)) { - uint8_t next_index = list.size() + 1; + uint8_t index = HoleCheckBitmapIndex(); + if (V8_UNLIKELY(index == kUncacheableHoleCheckBitmapIndex)) { + index = list.size() + 1; // The bitmap is full. - if (next_index == kHoleCheckBitmapBits) return; - AssignHoleCheckBitmapIndex(list, next_index); + if (index == kHoleCheckBitmapBits) return; + AssignHoleCheckBitmapIndex(list, index); } - bitmap |= HoleCheckBitmap{1} << hole_check_bitmap_index_; + bitmap |= HoleCheckBitmap{1} << index; DCHECK_EQ( 0, bitmap & (HoleCheckBitmap{1} << kUncacheableHoleCheckBitmapIndex)); } bool HasRememberedHoleCheck(HoleCheckBitmap bitmap) const { - bool result = bitmap & (HoleCheckBitmap{1} << hole_check_bitmap_index_); - DCHECK_IMPLIES(hole_check_bitmap_index_ == kUncacheableHoleCheckBitmapIndex, - !result); + uint8_t index = HoleCheckBitmapIndex(); + bool result = bitmap & (HoleCheckBitmap{1} << index); + DCHECK_IMPLIES(index == kUncacheableHoleCheckBitmapIndex, !result); return result; } @@ -305,13 +328,17 @@ class Variable final : public ZoneObject { Variable* next_; int index_; int initializer_position_; - uint8_t hole_check_bitmap_index_; uint16_t bit_field_; + uint16_t hole_check_analysis_bit_field_; void set_maybe_assigned() { bit_field_ = MaybeAssignedFlagField::update(bit_field_, kMaybeAssigned); } + uint8_t HoleCheckBitmapIndex() const { + return HoleCheckBitmapIndexField::decode(hole_check_analysis_bit_field_); + } + void AssignHoleCheckBitmapIndex(ZoneVector& list, uint8_t next_index); @@ -321,11 +348,14 @@ class Variable final : public ZoneObject { using ForceContextAllocationBit = LocationField::Next; using IsUsedField = ForceContextAllocationBit::Next; using InitializationFlagField = IsUsedField::Next; - using ForceHoleInitializationField = InitializationFlagField::Next; using MaybeAssignedFlagField = - ForceHoleInitializationField::Next; + InitializationFlagField::Next; using IsStaticFlagField = MaybeAssignedFlagField::Next; + using HoleCheckBitmapIndexField = base::BitField16; + using ForceHoleInitializationFlagField = + HoleCheckBitmapIndexField::Next; + Variable** next() { return &next_; } friend List; friend base::ThreadedListTraits; diff --git a/v8/src/base/macros.h b/v8/src/base/macros.h index fc29d5c02..25a533c5a 100644 --- a/v8/src/base/macros.h +++ b/v8/src/base/macros.h @@ -174,14 +174,18 @@ V8_INLINE Dest bit_cast(Source const& source) { #define DISABLE_CFI_PERF V8_CLANG_NO_SANITIZE("cfi") // DISABLE_CFI_ICALL -- Disable Control 
Flow Integrity indirect call checks, -// useful because calls into JITed code can not be CFI verified. +// useful because calls into JITed code can not be CFI verified. Same for +// UBSan's function pointer type checks. #ifdef V8_OS_WIN // On Windows, also needs __declspec(guard(nocf)) for CFG. #define DISABLE_CFI_ICALL \ V8_CLANG_NO_SANITIZE("cfi-icall") \ + V8_CLANG_NO_SANITIZE("function") \ __declspec(guard(nocf)) #else -#define DISABLE_CFI_ICALL V8_CLANG_NO_SANITIZE("cfi-icall") +#define DISABLE_CFI_ICALL \ + V8_CLANG_NO_SANITIZE("cfi-icall") \ + V8_CLANG_NO_SANITIZE("function") #endif namespace v8 { diff --git a/v8/src/base/memory.h b/v8/src/base/memory.h index e2676a82e..3441dddd2 100644 --- a/v8/src/base/memory.h +++ b/v8/src/base/memory.h @@ -11,7 +11,6 @@ namespace v8 { namespace base { using Address = uintptr_t; -using byte = uint8_t; // Memory provides an interface to 'raw' memory. It encapsulates the casts // that typically are needed when incompatible pointer types are used. @@ -23,7 +22,7 @@ inline T& Memory(Address addr) { return *reinterpret_cast(addr); } template -inline T& Memory(byte* addr) { +inline T& Memory(uint8_t* addr) { return Memory(reinterpret_cast
(addr)); } @@ -47,8 +46,8 @@ static inline V ReadLittleEndianValue(Address p) { return ReadUnalignedValue(p); #elif defined(V8_TARGET_BIG_ENDIAN) V ret{}; - const byte* src = reinterpret_cast(p); - byte* dst = reinterpret_cast(&ret); + const uint8_t* src = reinterpret_cast(p); + uint8_t* dst = reinterpret_cast(&ret); for (size_t i = 0; i < sizeof(V); i++) { dst[i] = src[sizeof(V) - i - 1]; } @@ -61,8 +60,8 @@ static inline void WriteLittleEndianValue(Address p, V value) { #if defined(V8_TARGET_LITTLE_ENDIAN) WriteUnalignedValue(p, value); #elif defined(V8_TARGET_BIG_ENDIAN) - byte* src = reinterpret_cast(&value); - byte* dst = reinterpret_cast(p); + uint8_t* src = reinterpret_cast(&value); + uint8_t* dst = reinterpret_cast(p); for (size_t i = 0; i < sizeof(V); i++) { dst[i] = src[sizeof(V) - i - 1]; } diff --git a/v8/src/base/platform/OWNERS b/v8/src/base/platform/OWNERS index 9e6d78335..9cd6bebc8 100644 --- a/v8/src/base/platform/OWNERS +++ b/v8/src/base/platform/OWNERS @@ -2,4 +2,5 @@ hpayer@chromium.org mlippautz@chromium.org victorgomes@chromium.org +per-file memory-protection-key.*=sroettger@google.com per-file platform-fuchsia.cc=wez@chromium.org diff --git a/v8/src/base/platform/memory-protection-key.cc b/v8/src/base/platform/memory-protection-key.cc index d014556e9..ac5eee89a 100644 --- a/v8/src/base/platform/memory-protection-key.cc +++ b/v8/src/base/platform/memory-protection-key.cc @@ -5,169 +5,60 @@ #include "src/base/platform/memory-protection-key.h" #if V8_HAS_PKU_JIT_WRITE_PROTECT -#include // For {mprotect()} protection macros. -#include // For {uname()}. + +#include // For {mprotect()} protection macros. #undef MAP_TYPE // Conflicts with MAP_TYPE in Torque-generated instance-types.h -#endif #include "src/base/logging.h" #include "src/base/macros.h" -// Runtime-detection of PKU support with {dlsym()}. -// -// For now, we support memory protection keys/PKEYs/PKU only for Linux on x64 -// based on glibc functions {pkey_alloc()}, {pkey_free()}, etc. -// Those functions are only available since glibc version 2.27: -// https://man7.org/linux/man-pages/man2/pkey_alloc.2.html -// However, if we check the glibc verison with V8_GLIBC_PREPREQ here at compile -// time, this causes two problems due to dynamic linking of glibc: -// 1) If the compiling system _has_ a new enough glibc, the binary will include -// calls to {pkey_alloc()} etc., and then the runtime system must supply a -// new enough glibc version as well. That is, this potentially breaks runtime -// compatability on older systems (e.g., Ubuntu 16.04 with glibc 2.23). -// 2) If the compiling system _does not_ have a new enough glibc, PKU support -// will not be compiled in, even though the runtime system potentially _does_ -// have support for it due to a new enough Linux kernel and glibc version. -// That is, this results in non-optimal security (PKU available, but not used). -// Hence, we do _not_ check the glibc version during compilation, and instead -// only at runtime try to load {pkey_alloc()} etc. with {dlsym()}. -// TODO(dlehmann): Move this import and freestanding functions below to -// base/platform/platform.h {OS} (lower-level functions) and -// {base::PageAllocator} (exported API). -#if V8_HAS_PKU_JIT_WRITE_PROTECT -#include -#endif +// Declare all the pkey functions as weak to support older glibc versions where +// they don't exist yet. 
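// Because the symbols are weak, they resolve to null on systems whose glibc
// does not provide them, so support can be probed with a plain pointer check;
// roughly:
//
//   if (pkey_mprotect == nullptr) {
//     // PKU not available at runtime; keep JIT write protection disabled.
//   }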
+int pkey_mprotect(void* addr, size_t len, int prot, int pkey) V8_WEAK; +int pkey_get(int key) V8_WEAK; +int pkey_set(int, unsigned) V8_WEAK; namespace v8 { namespace base { namespace { -using pkey_alloc_t = int (*)(unsigned, unsigned); -using pkey_free_t = int (*)(int); -using pkey_mprotect_t = int (*)(void*, size_t, int, int); -using pkey_get_t = int (*)(int); -using pkey_set_t = int (*)(int, unsigned); - -pkey_alloc_t pkey_alloc = nullptr; -pkey_free_t pkey_free = nullptr; -pkey_mprotect_t pkey_mprotect = nullptr; -pkey_get_t pkey_get = nullptr; -pkey_set_t pkey_set = nullptr; #if DEBUG bool pkey_api_initialized = false; #endif int GetProtectionFromMemoryPermission(PageAllocator::Permission permission) { -#if V8_HAS_PKU_JIT_WRITE_PROTECT // Mappings for PKU are either RWX (for code), no access (for uncommitted - // memory), or RW (for assembler buffers). + // memory), or RO for globals. switch (permission) { case PageAllocator::kNoAccess: return PROT_NONE; - case PageAllocator::kReadWrite: - return PROT_READ | PROT_WRITE; + case PageAllocator::kRead: + return PROT_READ; case PageAllocator::kReadWriteExecute: return PROT_READ | PROT_WRITE | PROT_EXEC; default: UNREACHABLE(); } -#endif - // Other platforms do not use PKU. - UNREACHABLE(); } } // namespace -void MemoryProtectionKey::InitializeMemoryProtectionKeySupport() { +bool MemoryProtectionKey::InitializeMemoryProtectionKeySupport() { // Flip {pkey_api_initialized} (in debug mode) and check the new value. DCHECK_EQ(true, pkey_api_initialized = !pkey_api_initialized); -#if V8_HAS_PKU_JIT_WRITE_PROTECT - // PKU was broken on Linux kernels before 5.13 (see - // https://lore.kernel.org/all/20210623121456.399107624@linutronix.de/). - // A fix is also included in the 5.4.182 and 5.10.103 versions ("x86/fpu: - // Correct pkru/xstate inconsistency" by Brian Geffon ). - // Thus check the kernel version we are running on, and bail out if does not - // contain the fix. - struct utsname uname_buffer; - CHECK_EQ(0, uname(&uname_buffer)); - int kernel, major, minor; - // Conservatively return if the release does not match the format we expect. - if (sscanf(uname_buffer.release, "%d.%d.%d", &kernel, &major, &minor) != 3) { - return; - } - bool kernel_has_pkru_fix = - kernel > 5 || (kernel == 5 && major >= 13) || // anything >= 5.13 - (kernel == 5 && major == 4 && minor >= 182) || // 5.4 >= 5.4.182 - (kernel == 5 && major == 10 && minor >= 103); // 5.10 >= 5.10.103 - if (!kernel_has_pkru_fix) return; - - // Try to find the pkey functions in glibc. - void* pkey_alloc_ptr = dlsym(RTLD_DEFAULT, "pkey_alloc"); - if (!pkey_alloc_ptr) return; - - // If {pkey_alloc} is available, the others must also be available. - void* pkey_free_ptr = dlsym(RTLD_DEFAULT, "pkey_free"); - void* pkey_mprotect_ptr = dlsym(RTLD_DEFAULT, "pkey_mprotect"); - void* pkey_get_ptr = dlsym(RTLD_DEFAULT, "pkey_get"); - void* pkey_set_ptr = dlsym(RTLD_DEFAULT, "pkey_set"); - CHECK(pkey_free_ptr && pkey_mprotect_ptr && pkey_get_ptr && pkey_set_ptr); - - pkey_alloc = reinterpret_cast(pkey_alloc_ptr); - pkey_free = reinterpret_cast(pkey_free_ptr); - pkey_mprotect = reinterpret_cast(pkey_mprotect_ptr); - pkey_get = reinterpret_cast(pkey_get_ptr); - pkey_set = reinterpret_cast(pkey_set_ptr); -#endif -} -// TODO(dlehmann) Security: Are there alternatives to disabling CFI altogether -// for the functions below? Since they are essentially an arbitrary indirect -// call gadget, disabling CFI should be only a last resort. 
In Chromium, there -// was {base::ProtectedMemory} to protect the function pointer from being -// overwritten, but t seems it was removed to not begin used and AFAICT no such -// thing exists in V8 to begin with. See -// https://www.chromium.org/developers/testing/control-flow-integrity and -// https://crrev.com/c/1884819. -// What is the general solution for CFI + {dlsym()}? -// An alternative would be to not rely on glibc and instead implement PKEY -// directly on top of Linux syscalls + inline asm, but that is quite some low- -// level code (probably in the order of 100 lines). -// static -DISABLE_CFI_ICALL -int MemoryProtectionKey::AllocateKey() { - DCHECK(pkey_api_initialized); - if (!pkey_alloc) return kNoMemoryProtectionKey; - - // If there is support in glibc, try to allocate a new key. - // This might still return -1, e.g., because the kernel does not support - // PKU or because there is no more key available. - // Different reasons for why {pkey_alloc()} failed could be checked with - // errno, e.g., EINVAL vs ENOSPC vs ENOSYS. See manpages and glibc manual - // (the latter is the authorative source): - // https://www.gnu.org/software/libc/manual/html_mono/libc.html#Memory-Protection-Keys - static_assert(kNoMemoryProtectionKey == -1); - return pkey_alloc(/* flags, unused */ 0, kDisableAccess); -} - -// static -DISABLE_CFI_ICALL -void MemoryProtectionKey::FreeKey(int key) { - DCHECK(pkey_api_initialized); - // Only free the key if one was allocated. - if (key == kNoMemoryProtectionKey) return; + if (!pkey_mprotect) return false; + // If {pkey_mprotect} is available, the others must also be available. + CHECK(pkey_get && pkey_set); - // On platforms without PKU support, we should have already returned because - // the key must be {kNoMemoryProtectionKey}. - DCHECK_NOT_NULL(pkey_free); - CHECK_EQ(/* success */ 0, pkey_free(key)); + return true; } // static -DISABLE_CFI_ICALL bool MemoryProtectionKey::SetPermissionsAndKey( - v8::PageAllocator* page_allocator, base::AddressRegion region, - v8::PageAllocator::Permission page_permissions, int key) { + base::AddressRegion region, v8::PageAllocator::Permission page_permissions, + int key) { DCHECK(pkey_api_initialized); DCHECK_NE(key, kNoMemoryProtectionKey); CHECK_NOT_NULL(pkey_mprotect); @@ -175,29 +66,12 @@ bool MemoryProtectionKey::SetPermissionsAndKey( void* address = reinterpret_cast(region.begin()); size_t size = region.size(); - // Copied with slight modifications from base/platform/platform-posix.cc - // {OS::SetPermissions()}. - // TODO(dlehmann): Move this block into its own function at the right - // abstraction boundary (likely some static method in platform.h {OS}) - // once the whole PKU code is moved into base/platform/. - DCHECK_EQ(0, region.begin() % page_allocator->CommitPageSize()); - DCHECK_EQ(0, size % page_allocator->CommitPageSize()); - int protection = GetProtectionFromMemoryPermission(page_permissions); - int ret = pkey_mprotect(address, size, protection, key); - - if (ret == 0 && page_permissions == PageAllocator::kNoAccess) { - // Similar to {OS::SetPermissions}, also discard the pages after switching - // to no access. This is advisory; ignore errors and continue execution. 
- USE(page_allocator->DiscardSystemPages(address, size)); - } - - return ret == /* success */ 0; + return pkey_mprotect(address, size, protection, key) == 0; } // static -DISABLE_CFI_ICALL void MemoryProtectionKey::SetPermissionsForKey(int key, Permission permissions) { DCHECK(pkey_api_initialized); @@ -210,7 +84,6 @@ void MemoryProtectionKey::SetPermissionsForKey(int key, } // static -DISABLE_CFI_ICALL MemoryProtectionKey::Permission MemoryProtectionKey::GetKeyPermission(int key) { DCHECK(pkey_api_initialized); DCHECK_NE(kNoMemoryProtectionKey, key); @@ -226,3 +99,5 @@ MemoryProtectionKey::Permission MemoryProtectionKey::GetKeyPermission(int key) { } // namespace base } // namespace v8 + +#endif // V8_HAS_PKU_JIT_WRITE_PROTECT diff --git a/v8/src/base/platform/memory-protection-key.h b/v8/src/base/platform/memory-protection-key.h index 7d89a4142..24c81ffa7 100644 --- a/v8/src/base/platform/memory-protection-key.h +++ b/v8/src/base/platform/memory-protection-key.h @@ -5,10 +5,9 @@ #ifndef V8_BASE_PLATFORM_MEMORY_PROTECTION_KEY_H_ #define V8_BASE_PLATFORM_MEMORY_PROTECTION_KEY_H_ +#include "src/base/build_config.h" + #if V8_HAS_PKU_JIT_WRITE_PROTECT -#include // For static_assert of permission values. -#undef MAP_TYPE // Conflicts with MAP_TYPE in Torque-generated instance-types.h -#endif #include "include/v8-platform.h" #include "src/base/address-region.h" @@ -22,7 +21,7 @@ namespace base { // This class has static methods for the different platform specific // functions related to memory protection key support. -// TODO(dlehmann): Consider adding this to {base::PageAllocator} (higher-level, +// TODO(sroettger): Consider adding this to {base::PageAllocator} (higher-level, // exported API) once the API is more stable and we have converged on a better // design (e.g., typed class wrapper around int memory protection key). class V8_BASE_EXPORT MemoryProtectionKey { @@ -51,20 +50,7 @@ class V8_BASE_EXPORT MemoryProtectionKey { // Call exactly once per process to determine if PKU is supported on this // platform and initialize global data structures. - static void InitializeMemoryProtectionKeySupport(); - - // Allocates a memory protection key on platforms with PKU support, returns - // {kNoMemoryProtectionKey} on platforms without support or when allocation - // failed at runtime. - static int AllocateKey(); - - // Frees the given memory protection key, to make it available again for the - // next call to {AllocateKey()}. Note that this does NOT - // invalidate access rights to pages that are still tied to that key. That is, - // if the key is reused and pages with that key are still accessable, this - // might be a security issue. See - // https://www.gnu.org/software/libc/manual/html_mono/libc.html#Memory-Protection-Keys - static void FreeKey(int key); + static bool InitializeMemoryProtectionKeySupport(); // Associates a memory protection {key} with the given {region}. // If {key} is {kNoMemoryProtectionKey} this behaves like "plain" @@ -78,7 +64,7 @@ class V8_BASE_EXPORT MemoryProtectionKey { // permissions of the page, not the key. For changing the permissions of the // key, use {SetPermissionsForKey()} instead. static bool SetPermissionsAndKey( - v8::PageAllocator* page_allocator, base::AddressRegion region, + base::AddressRegion region, v8::PageAllocator::Permission page_permissions, int key); // Set the key's permissions. {key} must be valid, i.e. 
not @@ -92,4 +78,6 @@ class V8_BASE_EXPORT MemoryProtectionKey { } // namespace base } // namespace v8 +#endif // V8_HAS_PKU_JIT_WRITE_PROTECT + #endif // V8_BASE_PLATFORM_MEMORY_PROTECTION_KEY_H_ diff --git a/v8/src/base/platform/platform-fuchsia.cc b/v8/src/base/platform/platform-fuchsia.cc index 885bffa34..ad9b9dc3f 100644 --- a/v8/src/base/platform/platform-fuchsia.cc +++ b/v8/src/base/platform/platform-fuchsia.cc @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#include -#include +#include +#include #include #include #include @@ -24,22 +24,26 @@ static zx_handle_t g_vmex_resource = ZX_HANDLE_INVALID; static void* g_root_vmar_base = nullptr; -#ifdef V8_USE_VMEX_RESOURCE +// If VmexResource is unavailable or does not return a valid handle then +// this will be observed as failures in vmo_replace_as_executable() calls. void SetVmexResource() { DCHECK_EQ(g_vmex_resource, ZX_HANDLE_INVALID); - zx::resource vmex_resource; - fuchsia::kernel::VmexResourceSyncPtr vmex_resource_svc; - zx_status_t status = fdio_service_connect( - "/svc/fuchsia.kernel.VmexResource", - vmex_resource_svc.NewRequest().TakeChannel().release()); - DCHECK_EQ(status, ZX_OK); - status = vmex_resource_svc->Get(&vmex_resource); - USE(status); - DCHECK_EQ(status, ZX_OK); - DCHECK(vmex_resource.is_valid()); - g_vmex_resource = vmex_resource.release(); -} -#endif + + auto vmex_resource_client = + component::Connect(); + if (vmex_resource_client.is_error()) { + return; + } + + fidl::SyncClient sync_vmex_resource_client( + std::move(vmex_resource_client.value())); + auto result = sync_vmex_resource_client->Get(); + if (result.is_error()) { + return; + } + + g_vmex_resource = result->resource().release(); +} zx_vm_option_t GetProtectionFromMemoryPermission(OS::MemoryPermission access) { switch (access) { @@ -246,9 +250,7 @@ void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) { CHECK_EQ(ZX_OK, status); g_root_vmar_base = reinterpret_cast(info.base); -#ifdef V8_USE_VMEX_RESOURCE SetVmexResource(); -#endif } // static diff --git a/v8/src/base/platform/platform-posix.cc b/v8/src/base/platform/platform-posix.cc index da42cda6c..2dd97bee0 100644 --- a/v8/src/base/platform/platform-posix.cc +++ b/v8/src/base/platform/platform-posix.cc @@ -9,6 +9,8 @@ #include #include #include + +#include "src/base/logging.h" #if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__) #include // for pthread_set_name_np #endif @@ -923,10 +925,12 @@ void OS::FPrint(FILE* out, const char* format, ...) { void OS::VFPrint(FILE* out, const char* format, va_list args) { #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) - __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args); -#else - vfprintf(out, format, args); + if (out == stdout) { + __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args); + return; + } #endif + vfprintf(out, format, args); } @@ -1245,28 +1249,52 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) { #if !defined(V8_OS_FREEBSD) && !defined(V8_OS_DARWIN) && !defined(_AIX) && \ !defined(V8_OS_SOLARIS) +namespace { +#if DEBUG +bool MainThreadIsCurrentThread() { + // This method assumes the first time is called is from the main thread. + // It returns true for subsequent calls only if they are called from the + // same thread. 
+ static int main_thread_id = -1; + if (main_thread_id == -1) { + main_thread_id = OS::GetCurrentThreadId(); + } + return main_thread_id == OS::GetCurrentThreadId(); +} +#endif // DEBUG +} // namespace + // static Stack::StackSlot Stack::ObtainCurrentThreadStackStart() { pthread_attr_t attr; int error = pthread_getattr_np(pthread_self(), &attr); - if (!error) { - void* base; - size_t size; - error = pthread_attr_getstack(&attr, &base, &size); - CHECK(!error); - pthread_attr_destroy(&attr); - return reinterpret_cast(base) + size; - } - + if (error) { + DCHECK(MainThreadIsCurrentThread()); #if defined(V8_LIBC_GLIBC) - // pthread_getattr_np can fail for the main thread. In this case - // just like NaCl we rely on the __libc_stack_end to give us - // the start of the stack. - // See https://code.google.com/p/nativeclient/issues/detail?id=3431. - return __libc_stack_end; + // pthread_getattr_np can fail for the main thread. + // For the main thread we prefer using __libc_stack_end (if it exists) since + // it generally provides a tighter limit for CSS. + return __libc_stack_end; #else - return nullptr; + return nullptr; +#endif // !defined(V8_LIBC_GLIBC) + } + void* base; + size_t size; + error = pthread_attr_getstack(&attr, &base, &size); + CHECK(!error); + pthread_attr_destroy(&attr); + void* stack_start = reinterpret_cast(base) + size; +#if defined(V8_LIBC_GLIBC) + // __libc_stack_end is process global and thus is only valid for + // the main thread. Check whether this is the main thread by checking + // __libc_stack_end is within the thread's stack. + if ((base <= __libc_stack_end) && (__libc_stack_end <= stack_start)) { + DCHECK(MainThreadIsCurrentThread()); + return __libc_stack_end; + } #endif // !defined(V8_LIBC_GLIBC) + return stack_start; } #endif // !defined(V8_OS_FREEBSD) && !defined(V8_OS_DARWIN) && diff --git a/v8/src/base/small-vector.h b/v8/src/base/small-vector.h index ebec11f6b..d109f324d 100644 --- a/v8/src/base/small-vector.h +++ b/v8/src/base/small-vector.h @@ -169,7 +169,7 @@ class SmallVector { resize_no_init(old_size + count); pos = begin_ + offset; T* old_end = begin_ + old_size; - DCHECK_LT(old_end, end_); + DCHECK_LE(old_end, end_); std::move_backward(pos, old_end, end_); std::fill_n(pos, count, value); return pos; @@ -183,7 +183,7 @@ class SmallVector { resize_no_init(old_size + count); pos = begin_ + offset; T* old_end = begin_ + old_size; - DCHECK_LT(old_end, end_); + DCHECK_LE(old_end, end_); std::move_backward(pos, old_end, end_); std::copy(begin, end, pos); return pos; diff --git a/v8/src/base/template-utils.h b/v8/src/base/template-utils.h index d72e34c5a..8577d31fc 100644 --- a/v8/src/base/template-utils.h +++ b/v8/src/base/template-utils.h @@ -73,18 +73,71 @@ using prepend_tuple_type = decltype(std::tuple_cat( namespace detail { +template +constexpr bool NIsNotGreaterThanTupleSize = + N <= std::tuple_size_v>; + template -auto tuple_drop_impl(const T& tpl, std::index_sequence) { +constexpr auto tuple_slice_impl(const T& tpl, std::index_sequence) { return std::tuple{std::get(tpl)...}; } +template +constexpr auto tuple_for_each_impl(const Tuple& tpl, Function&& function, + std::index_sequence) { + (function(std::get(tpl)), ...); +} + +template +constexpr auto tuple_for_each_with_index_impl(const Tuple& tpl, + Function&& function, + std::index_sequence) { + (function(std::get(tpl), std::integral_constant()), + ...); +} + } // namespace detail +// Get the first N elements from a tuple. 
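// For example (illustrative only, using the helpers declared below):
//
//   std::tuple<int, double, const char*> t{1, 2.0, "x"};
//   auto head = tuple_head<2>(t);  // std::tuple<int, double>{1, 2.0}
//   auto rest = tuple_drop<1>(t);  // std::tuple<double, const char*>{2.0, "x"}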
+template +constexpr auto tuple_head(Tuple&& tpl) { + constexpr size_t total_size = std::tuple_size_v>; + static_assert(N <= total_size); + return detail::tuple_slice_impl<0>(std::forward(tpl), + std::make_index_sequence()); +} + // Drop the first N elements from a tuple. -template -auto tuple_drop(const T& tpl) { - return detail::tuple_drop_impl( - tpl, std::make_index_sequence - N>()); +template < + size_t N, typename Tuple, + // If the user accidentally passes in an N that is larger than the tuple + // size, the unsigned subtraction will create a giant index sequence and + // crash the compiler. To avoid this and fail early, disable this function + // for invalid N. + typename = std::enable_if_t>> +constexpr auto tuple_drop(Tuple&& tpl) { + constexpr size_t total_size = std::tuple_size_v>; + static_assert(N <= total_size); + return detail::tuple_slice_impl( + std::forward(tpl), std::make_index_sequence()); +} + +// Calls `function(v)` for each `v` in the tuple. +template +constexpr void tuple_for_each(Tuple&& tpl, Function&& function) { + detail::tuple_for_each_impl( + std::forward(tpl), function, + std::make_index_sequence>>()); +} + +// Calls `function(v, i)` for each `v` in the tuple, with index `i`. The index +// `i` is passed as an std::integral_constant, rather than a raw size_t, +// to allow it to be used +template +constexpr void tuple_for_each_with_index(Tuple&& tpl, Function&& function) { + detail::tuple_for_each_with_index_impl( + std::forward(tpl), function, + std::make_index_sequence>>()); } #ifdef __clang__ diff --git a/v8/src/base/vector.h b/v8/src/base/vector.h index 143d33d69..2c6324061 100644 --- a/v8/src/base/vector.h +++ b/v8/src/base/vector.h @@ -81,20 +81,27 @@ class Vector { const T& at(size_t index) const { return operator[](index); } T& first() { return start_[0]; } + const T& first() const { return start_[0]; } T& last() { DCHECK_LT(0, length_); return start_[length_ - 1]; } + const T& last() const { + DCHECK_LT(0, length_); + return start_[length_ - 1]; + } // Returns a pointer to the start of the data in the vector. constexpr T* begin() const { return start_; } + constexpr const T* cbegin() const { return start_; } // For consistency with other containers, do also provide a {data} accessor. constexpr T* data() const { return start_; } // Returns a pointer past the end of the data in the vector. constexpr T* end() const { return start_ + length_; } + constexpr const T* cend() const { return start_ + length_; } constexpr std::reverse_iterator rbegin() const { return std::make_reverse_iterator(end()); diff --git a/v8/src/base/vlq.h b/v8/src/base/vlq.h index f17652bb0..225dcdf55 100644 --- a/v8/src/base/vlq.h +++ b/v8/src/base/vlq.h @@ -23,10 +23,10 @@ static constexpr uint32_t kDataMask = kContinueBit - 1; // writing it. template inline typename std::enable_if< - std::is_same()(0)), byte*>::value, + std::is_same()(0)), uint8_t*>::value, void>::type VLQEncodeUnsigned(Function&& process_byte, uint32_t value) { - byte* written_byte = process_byte(value); + uint8_t* written_byte = process_byte(value); if (value <= kDataMask) { // Value fits in first byte, early return. return; @@ -53,7 +53,7 @@ inline uint32_t VLQConvertToUnsigned(int32_t value) { // process_byte function. 
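// As a sketch of the byte format (assuming kContinueBit is the top bit of a
// byte): every byte carries 7 payload bits and the high bit marks that more
// bytes follow; for the signed variant, bit 0 of the payload holds the sign.
// For instance, VLQEncode(&buffer, -150) maps -150 to (150 << 1) | 1 == 301
// and then emits the two bytes 0xAD and 0x02.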
template inline typename std::enable_if< - std::is_same()(0)), byte*>::value, + std::is_same()(0)), uint8_t*>::value, void>::type VLQEncode(Function&& process_byte, int32_t value) { uint32_t bits = VLQConvertToUnsigned(value); @@ -62,9 +62,9 @@ VLQEncode(Function&& process_byte, int32_t value) { // Wrapper of VLQEncode for std::vector backed storage containers. template -inline void VLQEncode(std::vector* data, int32_t value) { +inline void VLQEncode(std::vector* data, int32_t value) { VLQEncode( - [data](byte value) { + [data](uint8_t value) { data->push_back(value); return &data->back(); }, @@ -73,9 +73,9 @@ inline void VLQEncode(std::vector* data, int32_t value) { // Wrapper of VLQEncodeUnsigned for std::vector backed storage containers. template -inline void VLQEncodeUnsigned(std::vector* data, uint32_t value) { +inline void VLQEncodeUnsigned(std::vector* data, uint32_t value) { VLQEncodeUnsigned( - [data](byte value) { + [data](uint8_t value) { data->push_back(value); return &data->back(); }, @@ -86,10 +86,10 @@ inline void VLQEncodeUnsigned(std::vector* data, uint32_t value) { // successive calls to the given function. template inline typename std::enable_if< - std::is_same()()), byte>::value, + std::is_same()()), uint8_t>::value, uint32_t>::type VLQDecodeUnsigned(GetNextFunction&& get_next) { - byte cur_byte = get_next(); + uint8_t cur_byte = get_next(); // Single byte fast path; no need to mask. if (cur_byte <= kDataMask) { return cur_byte; @@ -106,13 +106,13 @@ VLQDecodeUnsigned(GetNextFunction&& get_next) { // Decodes a variable-length encoded unsigned value stored in contiguous memory // starting at data_start + index, updating index to where the next encoded // value starts. -inline uint32_t VLQDecodeUnsigned(byte* data_start, int* index) { +inline uint32_t VLQDecodeUnsigned(uint8_t* data_start, int* index) { return VLQDecodeUnsigned([&] { return data_start[(*index)++]; }); } // Decodes a variable-length encoded value stored in contiguous memory starting // at data_start + index, updating index to where the next encoded value starts. -inline int32_t VLQDecode(byte* data_start, int* index) { +inline int32_t VLQDecode(uint8_t* data_start, int* index) { uint32_t bits = VLQDecodeUnsigned(data_start, index); bool is_negative = (bits & 1) == 1; int32_t result = bits >> 1; diff --git a/v8/src/baseline/arm/baseline-assembler-arm-inl.h b/v8/src/baseline/arm/baseline-assembler-arm-inl.h index b63499c85..41bafd975 100644 --- a/v8/src/baseline/arm/baseline-assembler-arm-inl.h +++ b/v8/src/baseline/arm/baseline-assembler-arm-inl.h @@ -498,25 +498,8 @@ void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) { void BaselineAssembler::Switch(Register reg, int case_value_base, Label** labels, int num_labels) { - ASM_CODE_COMMENT(masm_); - Label fallthrough; - if (case_value_base != 0) { - __ sub(reg, reg, Operand(case_value_base)); - } - - // Mostly copied from code-generator-arm.cc - ScratchRegisterScope scope(this); - JumpIf(kUnsignedGreaterThanEqual, reg, Operand(num_labels), &fallthrough); - // Ensure to emit the constant pool first if necessary. 
- __ CheckConstPool(true, true); - __ BlockConstPoolFor(num_labels); - int entry_size_log2 = 2; - __ add(pc, pc, Operand(reg, LSL, entry_size_log2), LeaveCC, lo); - __ b(&fallthrough); - for (int i = 0; i < num_labels; ++i) { - __ b(labels[i]); - } - __ bind(&fallthrough); + __ MacroAssembler::Switch(Register::no_reg(), reg, case_value_base, labels, + num_labels); } #undef __ diff --git a/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h index 6aa1419e9..0d2f15c02 100644 --- a/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h +++ b/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h @@ -448,8 +448,9 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, FeedbackSlot slot, Label* on_result, Label::Distance) { - __ TryLoadOptimizedOsrCode(scratch_and_result, feedback_vector, slot, - on_result, Label::Distance::kFar); + __ TryLoadOptimizedOsrCode(scratch_and_result, CodeKind::MAGLEV, + feedback_vector, slot, on_result, + Label::Distance::kFar); } void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( diff --git a/v8/src/baseline/baseline-batch-compiler.cc b/v8/src/baseline/baseline-batch-compiler.cc index 222360be6..a12ee3b4a 100644 --- a/v8/src/baseline/baseline-batch-compiler.cc +++ b/v8/src/baseline/baseline-batch-compiler.cc @@ -75,6 +75,7 @@ class BaselineCompilerTask { } shared_function_info_->set_baseline_code(*code, kReleaseStore); + shared_function_info_->set_age(0); if (v8_flags.trace_baseline_concurrent_compilation) { CodeTracer::Scope scope(isolate->GetCodeTracer()); std::stringstream ss; @@ -163,7 +164,6 @@ class ConcurrentBaselineCompiler { outgoing_queue_(outcoming_queue) {} void Run(JobDelegate* delegate) override { - RwxMemoryWriteScope::SetDefaultPermissionsForNewThread(); LocalIsolate local_isolate(isolate_, ThreadKind::kBackground); UnparkedScope unparked_scope(&local_isolate); LocalHandleScope handle_scope(&local_isolate); diff --git a/v8/src/baseline/baseline-compiler.h b/v8/src/baseline/baseline-compiler.h index 801a809b3..37e5f5336 100644 --- a/v8/src/baseline/baseline-compiler.h +++ b/v8/src/baseline/baseline-compiler.h @@ -48,7 +48,7 @@ class BytecodeOffsetTableBuilder { private: size_t previous_pc_ = 0; - std::vector bytes_; + std::vector bytes_; }; class BaselineCompiler { diff --git a/v8/src/baseline/bytecode-offset-iterator.h b/v8/src/baseline/bytecode-offset-iterator.h index 4ca0c7d2b..dbf17688b 100644 --- a/v8/src/baseline/bytecode-offset-iterator.h +++ b/v8/src/baseline/bytecode-offset-iterator.h @@ -79,7 +79,7 @@ class V8_EXPORT_PRIVATE BytecodeOffsetIterator { } Handle mapping_table_; - byte* data_start_address_; + uint8_t* data_start_address_; int data_length_; int current_index_; Address current_pc_start_offset_; diff --git a/v8/src/baseline/riscv/baseline-assembler-riscv-inl.h b/v8/src/baseline/riscv/baseline-assembler-riscv-inl.h index fd295da3e..2b7a9603b 100644 --- a/v8/src/baseline/riscv/baseline-assembler-riscv-inl.h +++ b/v8/src/baseline/riscv/baseline-assembler-riscv-inl.h @@ -160,18 +160,14 @@ void BaselineAssembler::JumpIfPointer(Condition cc, Register value, } void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi, Label* target, Label::Distance distance) { - ScratchRegisterScope temps(this); - Register temp = temps.AcquireScratch(); - __ li(temp, Operand(smi)); - __ SmiUntag(temp); - __ Branch(target, cc, value, Operand(temp), distance); + __ CompareTaggedAndBranch(target, cc, value, Operand(smi)); } void 
BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs, Label* target, Label::Distance distance) { // todo: compress pointer __ AssertSmi(lhs); __ AssertSmi(rhs); - __ Branch(target, cc, lhs, Operand(rhs), distance); + __ CompareTaggedAndBranch(target, cc, lhs, Operand(rhs), distance); } void BaselineAssembler::JumpIfTagged(Condition cc, Register value, MemOperand operand, Label* target, @@ -180,7 +176,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value, ScratchRegisterScope temps(this); Register scratch = temps.AcquireScratch(); __ LoadWord(scratch, operand); - __ Branch(target, cc, value, Operand(scratch), distance); + __ CompareTaggedAndBranch(target, cc, value, Operand(scratch), distance); } void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand, Register value, Label* target, @@ -189,7 +185,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand, ScratchRegisterScope temps(this); Register scratch = temps.AcquireScratch(); __ LoadWord(scratch, operand); - __ Branch(target, cc, scratch, Operand(value), distance); + __ CompareTaggedAndBranch(target, cc, scratch, Operand(value), distance); } void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte, Label* target, Label::Distance distance) { diff --git a/v8/src/baseline/riscv/baseline-compiler-riscv-inl.h b/v8/src/baseline/riscv/baseline-compiler-riscv-inl.h index d3b85ac49..0a84881ac 100644 --- a/v8/src/baseline/riscv/baseline-compiler-riscv-inl.h +++ b/v8/src/baseline/riscv/baseline-compiler-riscv-inl.h @@ -72,11 +72,10 @@ void BaselineCompiler::PrologueFillFrame() { void BaselineCompiler::VerifyFrameSize() { ASM_CODE_COMMENT(&masm_); - __ masm()->AddWord(kScratchReg, sp, + __ masm()->AddWord(t0, sp, Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp + bytecode_->frame_size())); - __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg, - Operand(fp)); + __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, t0, Operand(fp)); } #undef __ diff --git a/v8/src/baseline/s390/baseline-assembler-s390-inl.h b/v8/src/baseline/s390/baseline-assembler-s390-inl.h index 517453c60..257426897 100644 --- a/v8/src/baseline/s390/baseline-assembler-s390-inl.h +++ b/v8/src/baseline/s390/baseline-assembler-s390-inl.h @@ -16,7 +16,7 @@ namespace baseline { namespace detail { -static constexpr Register kScratchRegisters[] = {r8, r9, ip, r1}; +static constexpr Register kScratchRegisters[] = {r8, ip, r1}; static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters); #ifdef DEBUG diff --git a/v8/src/baseline/x64/baseline-assembler-x64-inl.h b/v8/src/baseline/x64/baseline-assembler-x64-inl.h index 9f475ad4a..1f2217f91 100644 --- a/v8/src/baseline/x64/baseline-assembler-x64-inl.h +++ b/v8/src/baseline/x64/baseline-assembler-x64-inl.h @@ -386,8 +386,9 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, FeedbackSlot slot, Label* on_result, Label::Distance distance) { - __ MacroAssembler::TryLoadOptimizedOsrCode( - scratch_and_result, feedback_vector, slot, on_result, distance); + __ MacroAssembler::TryLoadOptimizedOsrCode(scratch_and_result, + CodeKind::MAGLEV, feedback_vector, + slot, on_result, distance); } void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( diff --git a/v8/src/builtins/accessors.cc b/v8/src/builtins/accessors.cc index 5cf53111d..3cdd974f5 100644 --- a/v8/src/builtins/accessors.cc +++ b/v8/src/builtins/accessors.cc @@ -33,18 +33,18 @@ Handle Accessors::MakeAccessor( Handle info = 
factory->NewAccessorInfo(); { DisallowGarbageCollection no_gc; - auto raw = *info; - raw.set_all_can_read(false); - raw.set_all_can_write(false); - raw.set_is_special_data_property(true); - raw.set_is_sloppy(false); - raw.set_replace_on_access(false); - raw.set_getter_side_effect_type(SideEffectType::kHasSideEffect); - raw.set_setter_side_effect_type(SideEffectType::kHasSideEffect); - raw.set_name(*name); - raw.set_getter(isolate, reinterpret_cast<Address>
(getter)); + Tagged<AccessorInfo> raw = *info; + raw->set_all_can_read(false); + raw->set_all_can_write(false); + raw->set_is_special_data_property(true); + raw->set_is_sloppy(false); + raw->set_replace_on_access(false); + raw->set_getter_side_effect_type(SideEffectType::kHasSideEffect); + raw->set_setter_side_effect_type(SideEffectType::kHasSideEffect); + raw->set_name(*name); + raw->set_getter(isolate, reinterpret_cast<Address>
(getter)); if (setter == nullptr) setter = &ReconfigureToDataProperty; - raw.set_setter(isolate, reinterpret_cast<Address>
(setter)); + raw->set_setter(isolate, reinterpret_cast<Address>
(setter)); } return info; } diff --git a/v8/src/builtins/arm/builtins-arm.cc b/v8/src/builtins/arm/builtins-arm.cc index e80def01c..31995fe5b 100644 --- a/v8/src/builtins/arm/builtins-arm.cc +++ b/v8/src/builtins/arm/builtins-arm.cc @@ -771,9 +771,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ mov(r5, r4); __ mov(r6, r4); __ mov(r8, r4); - if (kR9Available == 1) { - __ mov(r9, r4); - } + __ mov(r9, r4); // Invoke the code. Handle builtin = is_construct @@ -920,12 +918,18 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, namespace { -void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array, - Register scratch) { - DCHECK(!AreAliased(bytecode_array, scratch)); +void ResetSharedFunctionInfoAge(MacroAssembler* masm, Register sfi, + Register scratch) { + DCHECK(!AreAliased(sfi, scratch)); __ mov(scratch, Operand(0)); - __ strh(scratch, - FieldMemOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset)); + __ strh(scratch, FieldMemOperand(sfi, SharedFunctionInfo::kAgeOffset)); +} + +void ResetJSFunctionAge(MacroAssembler* masm, Register js_function, + Register scratch1, Register scratch2) { + __ Move(scratch1, + FieldMemOperand(js_function, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, scratch1, scratch2); } void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm, @@ -999,6 +1003,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { BaselineOutOfLinePrologueDescriptor::kCalleeContext); Register callee_js_function = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); + { + UseScratchRegisterScope temps(masm); + ResetJSFunctionAge(masm, callee_js_function, temps.Acquire(), + temps.Acquire()); + } __ Push(callee_context, callee_js_function); DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister); DCHECK_EQ(callee_js_function, kJSFunctionRegister); @@ -1009,10 +1018,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // the frame, so load it into a register. Register bytecodeArray = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); - { - UseScratchRegisterScope temps(masm); - ResetBytecodeAge(masm, bytecodeArray, temps.Acquire()); - } __ Push(argc, bytecodeArray); // Baseline code frames store the feedback vector where interpreter would @@ -1122,6 +1127,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Get the bytecode array from the function object and load it into // kInterpreterBytecodeArrayRegister. __ ldr(r4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, r4, r8); __ ldr(kInterpreterBytecodeArrayRegister, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset)); @@ -1185,8 +1191,6 @@ void Builtins::Generate_InterpreterEntryTrampoline( FrameScope frame_scope(masm, StackFrame::MANUAL); __ PushStandardFrame(closure); - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, r9); - // Load the initial bytecode offset. __ mov(kInterpreterBytecodeOffsetRegister, Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); @@ -2742,6 +2746,141 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { // Only needed on x64. 
__ Trap(); } + +void Builtins::Generate_NewGenericJSToWasmWrapper(MacroAssembler* masm) { + __ EnterFrame(StackFrame::JS_TO_WASM); + Register wrapper_buffer = + WasmNewJSToWasmWrapperDescriptor::WrapperBufferRegister(); + // Push the wrapper_buffer stack, it's needed later for the results. + __ Push(wrapper_buffer); + + Register result_size = r0; + __ ldr(result_size, + MemOperand( + wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferStackReturnBufferSize)); + __ sub(sp, sp, Operand(result_size, LSL, kPointerSizeLog2)); + + __ str(sp, + MemOperand( + wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferStackReturnBufferStart)); + // Push stack parameters on the stack. + Register params_end = r9; + __ ldr(params_end, + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferParamEnd)); + + Register params_start = r4; + __ ldr(params_start, + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferParamStart)); + + // The first GP parameter is the instance, which we handle specially. + int stack_params_offset = + (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize + + arraysize(wasm::kFpParamRegisters) * kDoubleSize; + int param_padding = stack_params_offset & kSystemPointerSize; + stack_params_offset += param_padding; + Register last_stack_param = r0; + __ add(last_stack_param, params_start, Operand(stack_params_offset)); + + Label loop_start; + __ bind(&loop_start); + + Label finish_stack_params; + __ cmp(last_stack_param, params_end); + __ b(ge, &finish_stack_params); + + // Push parameter + { + Register scratch = r1; + __ ldr(scratch, MemOperand(params_end, -kSystemPointerSize, PreIndex)); + __ push(scratch); + } + + __ jmp(&loop_start); + + __ bind(&finish_stack_params); + + size_t next_offset = 0; + for (size_t i = 1; i < arraysize(wasm::kGpParamRegisters); ++i) { + // Check that {params_start} does not overlap with any of the parameter + // registers, so that we don't overwrite it by accident with the loads + // below. + DCHECK_NE(params_start, wasm::kGpParamRegisters[i]); + __ ldr(wasm::kGpParamRegisters[i], MemOperand(params_start, next_offset)); + next_offset += kSystemPointerSize; + } + + next_offset += param_padding; + for (size_t i = 0; i < arraysize(wasm::kFpParamRegisters); ++i) { + __ vldr(wasm::kFpParamRegisters[i], MemOperand(params_start, next_offset)); + next_offset += kDoubleSize; + } + DCHECK_EQ(next_offset, stack_params_offset); + + // Load the instance into r3. + __ ldr(kWasmInstanceRegister, + MemOperand(fp, JSToWasmWrapperConstants::kInstanceOffset)); + + { + Register thread_in_wasm_flag_addr = r1; + __ ldr(thread_in_wasm_flag_addr, + MemOperand(kRootRegister, + Isolate::thread_in_wasm_flag_address_offset())); + Register scratch = r9; + __ mov(scratch, Operand(1)); + __ str(scratch, MemOperand(thread_in_wasm_flag_addr, 0)); + } + + Register function_entry = r1; + __ ldr(function_entry, + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferCallTarget)); + __ Call(function_entry); + { + Register thread_in_wasm_flag_addr = r4; + __ ldr(thread_in_wasm_flag_addr, + MemOperand(kRootRegister, + Isolate::thread_in_wasm_flag_address_offset())); + Register scratch = r9; + __ mov(scratch, Operand(0)); + __ str(scratch, MemOperand(thread_in_wasm_flag_addr, 0)); + } + + // `wrapper_buffer` is a parameter for `JSToWasmHandleReturns`, it therefore + // has to be in r2. 
+ wrapper_buffer = r2; + __ ldr(wrapper_buffer, MemOperand(fp, -2 * kSystemPointerSize)); + + __ vstr( + wasm::kFpReturnRegisters[0], + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferFPReturnRegister1)); + __ vstr( + wasm::kFpReturnRegisters[1], + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferFPReturnRegister2)); + __ str(wasm::kGpReturnRegisters[0], + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferGPReturnRegister1)); + __ str(wasm::kGpReturnRegisters[1], + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferGPReturnRegister2)); + + // r0: wasm instance. + // r1: the result JSArray for multi-return. + // r2: pointer to the byte buffer which contains all parameters. + __ ldr(r1, MemOperand(fp, JSToWasmWrapperConstants::kResultArrayOffset)); + __ ldr(r0, MemOperand(fp, JSToWasmWrapperConstants::kInstanceOffset)); + __ Call(BUILTIN_CODE(masm->isolate(), JSToWasmHandleReturns), + RelocInfo::CODE_TARGET); + + __ LeaveFrame(StackFrame::JS_TO_WASM); + __ add(sp, sp, Operand(2 * kSystemPointerSize), LeaveCC); + __ Jump(lr); +} #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, @@ -3009,7 +3148,6 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, Register return_value = r0; Register scratch = r8; - CHECK(kR9Available); Register scratch2 = r9; // Allocate HandleScope in callee-saved registers. @@ -3165,13 +3303,20 @@ MemOperand ExitFrameCallerStackSlotOperand(int index) { } // namespace -void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { +void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, + CallApiCallbackMode mode) { // ----------- S t a t e ------------- - // -- cp : context + // CallApiCallbackMode::kGeneric mode: + // -- r2 : arguments count (not including the receiver) + // -- r3 : call handler info + // -- r0 : holder + // CallApiCallbackMode::kNoSideEffects/kWithSideEffectsSideEffects modes: // -- r1 : api function address // -- r2 : arguments count (not including the receiver) // -- r3 : call data // -- r0 : holder + // Both modes: + // -- cp : context // -- sp[0] : receiver // -- sp[8] : first argument // -- ... 
@@ -3180,13 +3325,33 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { Register function_callback_info_arg = arg_reg_1; - Register api_function_address = r1; - Register argc = r2; - Register call_data = r3; - Register holder = r0; + Register api_function_address = no_reg; + Register argc = no_reg; + Register call_data = no_reg; + Register callback = no_reg; + Register holder = no_reg; Register scratch = r4; - - DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch)); + Register scratch2 = r5; + + switch (mode) { + case CallApiCallbackMode::kGeneric: + api_function_address = r1; + argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); + callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + holder = CallApiCallbackGenericDescriptor::HolderRegister(); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + api_function_address = + CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister(); + argc = CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister(); + call_data = CallApiCallbackOptimizedDescriptor::CallDataRegister(); + holder = CallApiCallbackOptimizedDescriptor::HolderRegister(); + break; + } + DCHECK(!AreAliased(api_function_address, argc, holder, call_data, callback, + scratch, scratch2)); using FCI = FunctionCallbackInfo; using FCA = FunctionCallbackArguments; @@ -3211,7 +3376,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // sp[7 * kSystemPointerSize]: <= FCA:::values_ // Reserve space on the stack. - __ AllocateStackSpace(FCA::kArgsLength * kPointerSize); + __ AllocateStackSpace(FCA::kArgsLength * kSystemPointerSize); // kHolder __ str(holder, MemOperand(sp, FCA::kHolderIndex * kSystemPointerSize)); @@ -3228,7 +3393,17 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { __ str(scratch, MemOperand(sp, FCA::kReturnValueIndex * kSystemPointerSize)); // kData. - __ str(call_data, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + switch (mode) { + case CallApiCallbackMode::kGeneric: + __ ldr(scratch2, FieldMemOperand(callback, CallHandlerInfo::kDataOffset)); + __ str(scratch2, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + __ str(call_data, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + break; + } // kNewTarget. __ str(scratch, MemOperand(sp, FCA::kNewTargetIndex * kSystemPointerSize)); @@ -3246,9 +3421,35 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { static_assert(FCI::kImplicitArgsOffset == 0); static_assert(FCI::kValuesOffset == 1 * kPointerSize); static_assert(FCI::kLengthOffset == 2 * kPointerSize); + const int exit_frame_params_size = + mode == CallApiCallbackMode::kGeneric ? 2 : 0; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); + if (mode == CallApiCallbackMode::kGeneric) { + ASM_CODE_COMMENT_STRING(masm, "Push API_CALLBACK_EXIT frame arguments"); + __ AllocateStackSpace(exit_frame_params_size * kSystemPointerSize); + + // Argc parameter as a Smi. + static_assert(ApiCallbackExitFrameConstants::kArgcOffset == + 3 * kSystemPointerSize); + __ SmiTag(scratch, argc); + __ str(scratch, MemOperand(sp, 1 * kSystemPointerSize)); + + // Target parameter. 
+ static_assert(ApiCallbackExitFrameConstants::kTargetOffset == + 2 * kSystemPointerSize); + __ ldr(scratch, + FieldMemOperand(callback, CallHandlerInfo::kOwnerTemplateOffset)); + __ str(scratch, MemOperand(sp, 0 * kSystemPointerSize)); + + __ ldr(api_function_address, + FieldMemOperand(callback, + CallHandlerInfo::kMaybeRedirectedCallbackOffset)); + + __ EnterExitFrame(kApiStackSpace, StackFrame::API_CALLBACK_EXIT); + } else { + __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); + } { ASM_CODE_COMMENT_STRING(masm, "Initialize FunctionCallbackInfo"); @@ -3270,8 +3471,9 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // from the API function here. MemOperand stack_space_operand = ExitFrameStackSlotOperand(FCI::kLengthOffset + kSlotsToDropOnStackSize); - __ mov(scratch, - Operand((FCA::kArgsLength + 1 /* receiver */) * kPointerSize)); + __ mov(scratch, Operand((FCA::kArgsLength + 1 /* receiver */ + + exit_frame_params_size) * + kPointerSize)); __ add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2)); __ str(scratch, stack_space_operand); @@ -3280,13 +3482,14 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { DCHECK(!AreAliased(api_function_address, function_callback_info_arg)); - ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(mode); // Pass api function address to thunk wrapper in case profiler or side-effect // checking is enabled. Register thunk_arg = api_function_address; - MemOperand return_value_operand = - ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex); + MemOperand return_value_operand = ExitFrameCallerStackSlotOperand( + FCA::kReturnValueIndex + exit_frame_params_size); static constexpr int kUseStackSpaceOperand = 0; CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, thunk_arg, @@ -3692,6 +3895,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, Register code_obj = r4; __ ldr(code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + + if (is_osr) { + ResetSharedFunctionInfoAge(masm, code_obj, r3); + } + __ ldr(code_obj, FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); @@ -3788,8 +3996,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ Pop(kInterpreterAccumulatorRegister); if (is_osr) { - UseScratchRegisterScope temps(masm); - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, temps.Acquire()); Generate_OSREntry(masm, code_obj); } else { __ Jump(code_obj); diff --git a/v8/src/builtins/arm64/builtins-arm64.cc b/v8/src/builtins/arm64/builtins-arm64.cc index 8b3fecb45..b4f7db2dd 100644 --- a/v8/src/builtins/arm64/builtins-arm64.cc +++ b/v8/src/builtins/arm64/builtins-arm64.cc @@ -5,6 +5,7 @@ #if V8_TARGET_ARCH_ARM64 #include "src/api/api-arguments.h" +#include "src/builtins/builtins-descriptors.h" #include "src/codegen/code-factory.h" #include "src/codegen/interface-descriptors-inl.h" // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop. 
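A rough illustration (not part of the patch) of the stack-space accounting introduced by the CallApiCallbackImpl changes above: in kGeneric mode the new API_CALLBACK_EXIT frame carries two extra parameters (the Smi-tagged argument count and the target), so the stack space recorded for CallApiFunctionAndReturn to drop grows by exit_frame_params_size slots. The constant values and helper name in the sketch below are assumptions chosen for illustration; only the formula mirrors the generated code.

#include <cstdint>

// Hypothetical stand-ins for the constants used in the diff above.
constexpr int64_t kSystemPointerSize = 8;  // assumption: 64-bit target
constexpr int64_t kFCAArgsLength = 6;      // stands in for FCA::kArgsLength
constexpr int64_t kReceiverSlot = 1;

// Mirrors the arm epilogue: (FCA::kArgsLength + 1 /* receiver */ +
// exit_frame_params_size) * kPointerSize plus argc << kPointerSizeLog2.
// exit_frame_params_size is 2 for CallApiCallbackMode::kGeneric, else 0.
int64_t ApiCallbackStackBytesToDrop(int64_t argc,
                                    int64_t exit_frame_params_size) {
  return (kFCAArgsLength + kReceiverSlot + exit_frame_params_size + argc) *
         kSystemPointerSize;
}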
@@ -1085,9 +1086,17 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, namespace { -void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array) { - __ Strh(wzr, - FieldMemOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset)); +void ResetSharedFunctionInfoAge(MacroAssembler* masm, Register sfi) { + __ Strh(wzr, FieldMemOperand(sfi, SharedFunctionInfo::kAgeOffset)); +} + +void ResetJSFunctionAge(MacroAssembler* masm, Register js_function, + Register scratch) { + const Register shared_function_info(scratch); + __ LoadTaggedField( + shared_function_info, + FieldMemOperand(js_function, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, shared_function_info); } void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm, @@ -1156,6 +1165,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { BaselineOutOfLinePrologueDescriptor::kCalleeContext); Register callee_js_function = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); + { + UseScratchRegisterScope temps(masm); + ResetJSFunctionAge(masm, callee_js_function, temps.AcquireX()); + } __ Push(callee_context, callee_js_function); DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister); DCHECK_EQ(callee_js_function, kJSFunctionRegister); @@ -1166,7 +1179,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // the frame, so load it into a register. Register bytecode_array = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); - ResetBytecodeAge(masm, bytecode_array); __ Push(argc, bytecode_array); // Baseline code frames store the feedback vector where interpreter would @@ -1277,6 +1289,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( // kInterpreterBytecodeArrayRegister. __ LoadTaggedField( x4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, x4); __ LoadTaggedField( kInterpreterBytecodeArrayRegister, FieldMemOperand(x4, SharedFunctionInfo::kFunctionDataOffset)); @@ -1346,8 +1359,6 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ mov(fp, sp); __ Push(cp, closure); - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister); - // Load the initial bytecode offset. __ Mov(kInterpreterBytecodeOffsetRegister, Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); @@ -3246,9 +3257,8 @@ void LoadTargetJumpBuffer(MacroAssembler* masm, Register target_continuation, LoadJumpBuffer(masm, target_jmpbuf, false, tmp); } -void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance, - Register return_reg, Register tmp1, - Register tmp2) { +void ReloadParentContinuation(MacroAssembler* masm, Register return_reg, + Register tmp1, Register tmp2) { Register active_continuation = tmp1; __ LoadRoot(active_continuation, RootIndex::kActiveContinuation); @@ -3288,12 +3298,11 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance, __ Mov(tmp1, 1); __ Str(tmp1, MemOperand(fp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset)); - __ Stp(wasm_instance, return_reg, - MemOperand(sp, -2 * kSystemPointerSize, PreIndex)); // Spill. + __ Stp(padreg, return_reg, + MemOperand(sp, -2 * kSystemPointerSize, PreIndex)); // Spill. 
__ Move(kContextRegister, Smi::zero()); __ CallRuntime(Runtime::kWasmSyncStackLimit); - __ Ldp(wasm_instance, return_reg, - MemOperand(sp, 2 * kSystemPointerSize, PostIndex)); + __ Ldp(padreg, return_reg, MemOperand(sp, 2 * kSystemPointerSize, PostIndex)); } void RestoreParentSuspender(MacroAssembler* masm, Register tmp1, @@ -3485,6 +3494,8 @@ class RegisterAllocator { Register Name = no_reg; \ regs.Pinned(Reg, &Name); +#define ASSIGN_PINNED(Name, Reg) regs.Pinned(Reg, &Name); + #define DEFINE_SCOPED(Name) \ DEFINE_REG(Name) \ RegisterAllocator::Scoped scope_##Name(&regs, &Name); @@ -3587,7 +3598,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { target_continuation, FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset)); FREE_REG(suspender); - // Save the old stack's rbp in r9, and use it to access the parameters in + // Save the old stack's fp in x9, and use it to access the parameters in // the parent frame. // We also distribute the spill slots across the two stacks as needed by // creating a "shadow frame": @@ -4266,14 +4277,13 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { // ------------------------------------------- // Resetting after the Wasm call. // ------------------------------------------- - // Restore rsp to free the reserved stack slots for the sections. + // Restore sp to free the reserved stack slots for the sections. __ Add(sp, fp, kLastSpillOffset - kSystemPointerSize); // Unset thread_in_wasm_flag. __ Ldr( thread_in_wasm_flag_addr, - MemOperand(kRootRegister, - Isolate::thread_in_wasm_flag_address_offset())); + MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset())); __ Str(xzr, MemOperand(thread_in_wasm_flag_addr, 0)); regs.ResetExcept(original_fp, wasm_instance); @@ -4296,11 +4306,41 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { Label return_done; __ bind(&return_done); + regs.ResetExcept(return_reg); + Label return_promise; if (stack_switch) { + // The return value of the wasm function becomes the parameter of the + // FulfillPromise builtin, and the promise is the return value of this + // wrapper.
+ DEFINE_PINNED(return_value, x1); + __ Move(return_value, return_reg); DEFINE_SCOPED(tmp); DEFINE_SCOPED(tmp2); - ReloadParentContinuation(masm, wasm_instance, return_reg, tmp, tmp2); + Register promise = return_reg; + __ LoadRoot(promise, RootIndex::kActiveSuspender); + __ LoadTaggedField( + promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset)); + + __ Move(kContextRegister, MemOperand(fp, kFunctionDataOffset)); + __ LoadTaggedField( + kContextRegister, + FieldMemOperand(kContextRegister, + WasmExportedFunctionData::kInstanceOffset)); + __ LoadTaggedField( + kContextRegister, + FieldMemOperand(kContextRegister, + WasmInstanceObject::kNativeContextOffset)); + __ Mov(tmp, 1); + __ Str(tmp, + MemOperand(fp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset)); + __ Push(padreg, promise); + __ CallBuiltin(Builtin::kFulfillPromise); + __ Pop(promise, padreg); + + __ bind(&return_promise); + ReloadParentContinuation(masm, promise, tmp, tmp2); RestoreParentSuspender(masm, tmp, tmp2); + FREE_REG(return_value); } __ bind(&suspend); // No need to process the return value if the stack is suspended, there is @@ -4454,15 +4494,66 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { __ Stp(wasm_instance, function_data, MemOperand(sp, -2 * kSystemPointerSize, PreIndex)); // Push the arguments for the runtime call. - __ Push(wasm_instance, function_data); + __ Push(xzr, function_data); // Set up context. __ Move(kContextRegister, Smi::zero()); // Call the runtime function that kicks off compilation. - __ CallRuntime(Runtime::kWasmCompileWrapper, 2); + __ CallRuntime(Runtime::kWasmCompileWrapper, 1); __ Ldp(wasm_instance, function_data, MemOperand(sp, 2 * kSystemPointerSize, PostIndex)); __ jmp(&compile_wrapper_done); } + + // Catch handler for the stack-switching wrapper: reject the promise with the + // thrown exception. + if (stack_switch) { + int catch_handler = __ pc_offset(); + // Restore sp to free the reserved stack slots for the sections. + __ Add(sp, fp, kLastSpillOffset - kSystemPointerSize); + + thread_in_wasm_flag_addr = x2; + // Unset thread_in_wasm_flag. + __ Ldr(thread_in_wasm_flag_addr, + MemOperand(kRootRegister, + Isolate::thread_in_wasm_flag_address_offset())); + __ Str(xzr, MemOperand(thread_in_wasm_flag_addr, 0)); + + // The exception becomes the parameter of the RejectPromise builtin, and the + // promise is the return value of this wrapper. 
+ __ Move(x1, kReturnRegister0); + Register promise = kReturnRegister0; + __ LoadRoot(promise, RootIndex::kActiveSuspender); + __ LoadTaggedField( + promise, FieldMemOperand(promise, WasmSuspenderObject::kPromiseOffset)); + + __ Move(kContextRegister, MemOperand(fp, kFunctionDataOffset)); + __ LoadTaggedField( + kContextRegister, + FieldMemOperand(kContextRegister, + WasmExportedFunctionData::kInstanceOffset)); + __ LoadTaggedField( + kContextRegister, + FieldMemOperand(kContextRegister, + WasmInstanceObject::kNativeContextOffset)); + __ Mov(x2, 1); + __ Str(x2, + MemOperand(fp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset)); + __ Push(padreg, promise); + static const Builtin_RejectPromise_InterfaceDescriptor desc; + static_assert(desc.GetRegisterParameter(0) == x0 && // promise + desc.GetRegisterParameter(1) == x1 && // reason + desc.GetRegisterParameter(2) == x2 // debugEvent + ); + __ LoadRoot(x2, RootIndex::kTrueValue); + __ CallBuiltin(Builtin::kRejectPromise); + __ Pop(promise, padreg); + + // Run the rest of the wrapper normally (switch to the old stack, + // deconstruct the frame, ...). + __ jmp(&return_promise); + + masm->isolate()->builtins()->SetJSPIPromptHandlerOffset(catch_handler); + } } } // namespace @@ -4480,8 +4571,8 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { // Set up the stackframe. __ EnterFrame(StackFrame::STACK_SWITCH); - DEFINE_PINNED(promise, x0); - DEFINE_PINNED(suspender, x1); + DEFINE_PINNED(suspender, x0); + DEFINE_PINNED(context, kContextRegister); __ Sub(sp, sp, RoundUp(-(BuiltinWasmWrapperConstants::kGCScanSlotCountOffset - TypedFrameConstants::kFixedFrameSizeFromFp), 16)); @@ -4517,7 +4608,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { __ StoreTaggedField( scratch, FieldMemOperand(suspender, WasmSuspenderObject::kStateOffset)); - regs.ResetExcept(promise, suspender, continuation); + regs.ResetExcept(suspender, continuation); DEFINE_REG(suspender_continuation); __ LoadTaggedField( @@ -4555,7 +4646,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { MacroAssembler::RootRegisterOffsetForRootIndex( RootIndex::kActiveSuspender); __ Str(parent, MemOperand(kRootRegister, active_suspender_offset)); - regs.ResetExcept(promise, caller); + regs.ResetExcept(suspender, caller); // ------------------------------------------- // Load jump buffer. 
@@ -4565,17 +4656,16 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { ASSIGN_REG(scratch); __ Mov(scratch, 2); __ Str(scratch, GCScanSlotPlace); - __ Stp(caller, promise, - MemOperand(sp, -2 * kSystemPointerSize, PreIndex)); - __ Move(kContextRegister, Smi::zero()); + __ Stp(caller, suspender, MemOperand(sp, -2 * kSystemPointerSize, PreIndex)); __ CallRuntime(Runtime::kWasmSyncStackLimit); - __ Ldp(caller, promise, - MemOperand(sp, 2 * kSystemPointerSize, PostIndex)); + __ Ldp(caller, suspender, MemOperand(sp, 2 * kSystemPointerSize, PostIndex)); ASSIGN_REG(jmpbuf); __ LoadExternalPointerField( jmpbuf, FieldMemOperand(caller, WasmContinuationObject::kJmpbufOffset), kWasmContinuationJmpbufTag); - __ Mov(kReturnRegister0, promise); + __ LoadTaggedField( + kReturnRegister0, + FieldMemOperand(suspender, WasmSuspenderObject::kPromiseOffset)); __ Str(xzr, GCScanSlotPlace); LoadJumpBuffer(masm, jmpbuf, true, scratch); __ Trap(); @@ -4718,9 +4808,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) { MemOperand(fp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset); __ Mov(scratch, 1); __ Str(scratch, GCScanSlotPlace); - __ Stp(target_continuation, scratch, // Scratch for padding. - MemOperand(sp, -2*kSystemPointerSize, PreIndex)); - __ Move(kContextRegister, Smi::zero()); + __ Stp(target_continuation, scratch, // Scratch for padding. + MemOperand(sp, -2 * kSystemPointerSize, PreIndex)); __ CallRuntime(Runtime::kWasmSyncStackLimit); __ Ldp(target_continuation, scratch, MemOperand(sp, 2*kSystemPointerSize, PostIndex)); @@ -4744,6 +4833,9 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) { if (on_resume == wasm::OnResume::kThrow) { // Switch to the continuation's stack without restoring the PC. LoadJumpBuffer(masm, target_jmpbuf, false, scratch); + // Pop this frame now. The unwinder expects that the first STACK_SWITCH + // frame is the outermost one. + __ LeaveFrame(StackFrame::STACK_SWITCH); // Forward the onRejected value to kThrow. __ Push(xzr, kReturnRegister0); __ CallRuntime(Runtime::kThrow); @@ -4772,6 +4864,184 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { // Only needed on x64. __ Trap(); } + +void Builtins::Generate_NewGenericJSToWasmWrapper(MacroAssembler* masm) { + auto regs = RegisterAllocator::WithAllocatableGeneralRegisters(); + __ EnterFrame(StackFrame::JS_TO_WASM); + DEFINE_PINNED(wrapper_buffer, + WasmNewJSToWasmWrapperDescriptor::WrapperBufferRegister()); + // Push the wrapper_buffer stack, it's needed later for the results. + __ Push(wrapper_buffer, xzr); + { + DEFINE_SCOPED(result_size); + __ Ldr(result_size, + MemOperand( + wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferStackReturnBufferSize)); + // The `result_size` is the number of slots needed on the stack to store the + // return values of the wasm function. If `result_size` is an odd number, we + // have to add `1` to preserve stack pointer alignment. 
+ __ Add(result_size, result_size, 1); + __ Bic(result_size, result_size, 1); + __ Sub(sp, sp, Operand(result_size, LSL, kSystemPointerSizeLog2)); + } + { + DEFINE_SCOPED(scratch); + __ Mov(scratch, sp); + __ Str(scratch, + MemOperand( + wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferStackReturnBufferStart)); + } + DEFINE_PINNED(param1, wasm::kGpParamRegisters[1]); + DEFINE_PINNED(param2, wasm::kGpParamRegisters[2]); + DEFINE_PINNED(param3, wasm::kGpParamRegisters[3]); + DEFINE_PINNED(param4, wasm::kGpParamRegisters[4]); + DEFINE_PINNED(param5, wasm::kGpParamRegisters[5]); + DEFINE_PINNED(param6, wasm::kGpParamRegisters[6]); + + // The first GP parameter is the instance, which we handle specially. + int stack_params_offset = + (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize + + arraysize(wasm::kFpParamRegisters) * kDoubleSize; + + { + DEFINE_SCOPED(params_start); + __ Ldr(params_start, + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferParamStart)); + + { + // Push stack parameters on the stack. + DEFINE_SCOPED(params_end); + __ Ldr(params_end, + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferParamEnd)); + DEFINE_SCOPED(last_stack_param); + + __ Add(last_stack_param, params_start, Immediate(stack_params_offset)); + + Label loop_start; + { + DEFINE_SCOPED(scratch); + // Check if there is an even number of parameters, so no alignment + // needed. + __ Sub(scratch, params_end, last_stack_param); + __ TestAndBranchIfAllClear(scratch, 0x8, &loop_start); + + // Push the first parameter with alignment. + __ Ldr(scratch, MemOperand(params_end, -kSystemPointerSize, PreIndex)); + __ Push(xzr, scratch); + } + __ bind(&loop_start); + + Label finish_stack_params; + __ Cmp(last_stack_param, params_end); + __ B(ge, &finish_stack_params); + + // Push parameter + { + DEFINE_SCOPED(scratch1); + DEFINE_SCOPED(scratch2); + __ Ldp(scratch2, scratch1, + MemOperand(params_end, -2 * kSystemPointerSize, PreIndex)); + __ Push(scratch1, scratch2); + } + __ jmp(&loop_start); + + __ bind(&finish_stack_params); + } + + size_t next_offset = 0; + for (size_t i = 1; i < arraysize(wasm::kGpParamRegisters); i += 2) { + // Check that {params_start} does not overlap with any of the parameter + // registers, so that we don't overwrite it by accident with the loads + // below. 
+ DCHECK_NE(params_start, wasm::kGpParamRegisters[i]); + DCHECK_NE(params_start, wasm::kGpParamRegisters[i + 1]); + __ Ldp(wasm::kGpParamRegisters[i], wasm::kGpParamRegisters[i + 1], + MemOperand(params_start, next_offset)); + next_offset += 2 * kSystemPointerSize; + } + + for (size_t i = 0; i < arraysize(wasm::kFpParamRegisters); i += 2) { + __ Ldp(wasm::kFpParamRegisters[i], wasm::kFpParamRegisters[i + 1], + MemOperand(params_start, next_offset)); + next_offset += 2 * kDoubleSize; + } + DCHECK_EQ(next_offset, stack_params_offset); + } + + DEFINE_PINNED(wasm_instance, kWasmInstanceRegister); + __ Ldr(wasm_instance, + MemOperand(fp, JSToWasmWrapperConstants::kInstanceOffset)); + + { + DEFINE_SCOPED(thread_in_wasm_flag_addr); + __ Ldr(thread_in_wasm_flag_addr, + MemOperand(kRootRegister, + Isolate::thread_in_wasm_flag_address_offset())); + DEFINE_SCOPED(scratch); + __ Mov(scratch, 1); + __ Str(scratch, MemOperand(thread_in_wasm_flag_addr, 0)); + } + + { + DEFINE_SCOPED(call_target); + __ Ldr(call_target, + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferCallTarget)); + __ Call(call_target); + } + FREE_REG(wasm_instance); + FREE_REG(param6); + FREE_REG(param5); + FREE_REG(param4); + FREE_REG(param3); + FREE_REG(param2); + FREE_REG(param1); + // The wrapper_buffer has to be in x2 as the correct parameter register. + FREE_REG(wrapper_buffer); + ASSIGN_PINNED(wrapper_buffer, x2); + DEFINE_PINNED(gp_result1, x0); + DEFINE_PINNED(gp_result2, x1); + { + DEFINE_SCOPED(thread_in_wasm_flag_addr); + __ Ldr(thread_in_wasm_flag_addr, + MemOperand(kRootRegister, + Isolate::thread_in_wasm_flag_address_offset())); + __ Str(xzr, MemOperand(thread_in_wasm_flag_addr, 0)); + } + + __ Ldr(wrapper_buffer, MemOperand(fp, -3 * kSystemPointerSize)); + + __ Str(wasm::kFpReturnRegisters[0], + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferFPReturnRegister1)); + __ Str(wasm::kFpReturnRegisters[1], + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferFPReturnRegister2)); + __ Str(wasm::kGpReturnRegisters[0], + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferGPReturnRegister1)); + __ Str(wasm::kGpReturnRegisters[1], + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferGPReturnRegister2)); + + // Call the return value builtin with + // x0: wasm instance. + // x1: the result JSArray for multi-return. + // x2: pointer to the byte buffer which contains all parameters. 
+ __ Ldr(x1, MemOperand(fp, JSToWasmWrapperConstants::kResultArrayOffset)); + __ Ldr(x0, MemOperand(fp, JSToWasmWrapperConstants::kInstanceOffset)); + __ Call(BUILTIN_CODE(masm->isolate(), JSToWasmHandleReturns), + RelocInfo::CODE_TARGET); + + __ LeaveFrame(StackFrame::JS_TO_WASM); + __ DropArguments(2, MacroAssembler::kCountIncludesReceiver); + __ Ret(); +} + #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, @@ -5204,13 +5474,20 @@ MemOperand ExitFrameCallerStackSlotOperand(int index) { } // namespace -void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { +void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, + CallApiCallbackMode mode) { // ----------- S t a t e ------------- - // -- cp : context + // CallApiCallbackMode::kGeneric mode: + // -- x2 : arguments count (not including the receiver) + // -- x3 : call handler info + // -- x0 : holder + // CallApiCallbackMode::kNoSideEffects/kWithSideEffectsSideEffects modes: // -- x1 : api function address // -- x2 : arguments count (not including the receiver) // -- x3 : call data // -- x0 : holder + // Both modes: + // -- cp : context // -- sp[0] : receiver // -- sp[8] : first argument // -- ... @@ -5219,13 +5496,33 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { Register function_callback_info_arg = arg_reg_1; - Register api_function_address = x1; - Register argc = x2; - Register call_data = x3; - Register holder = x0; + Register api_function_address = no_reg; + Register argc = no_reg; + Register call_data = no_reg; + Register callback = no_reg; + Register holder = no_reg; Register scratch = x4; + Register scratch2 = x5; - DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch)); + switch (mode) { + case CallApiCallbackMode::kGeneric: + api_function_address = x1; + argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); + callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + holder = CallApiCallbackGenericDescriptor::HolderRegister(); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + api_function_address = + CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister(); + argc = CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister(); + call_data = CallApiCallbackOptimizedDescriptor::CallDataRegister(); + holder = CallApiCallbackOptimizedDescriptor::HolderRegister(); + break; + } + DCHECK(!AreAliased(api_function_address, argc, holder, call_data, callback, + scratch, scratch2)); using FCI = FunctionCallbackInfo; using FCA = FunctionCallbackArguments; @@ -5269,7 +5566,18 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { __ Str(scratch, MemOperand(sp, FCA::kReturnValueIndex * kSystemPointerSize)); // kData. - __ Str(call_data, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + switch (mode) { + case CallApiCallbackMode::kGeneric: + __ LoadTaggedField( + scratch2, FieldMemOperand(callback, CallHandlerInfo::kDataOffset)); + __ Str(scratch2, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + __ Str(call_data, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + break; + } // kNewTarget. 
__ Str(scratch, MemOperand(sp, FCA::kNewTargetIndex * kSystemPointerSize)); @@ -5287,9 +5595,38 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { static_assert(FCI::kImplicitArgsOffset == 0); static_assert(FCI::kValuesOffset == 1 * kSystemPointerSize); static_assert(FCI::kLengthOffset == 2 * kSystemPointerSize); + const int exit_frame_params_size = + mode == CallApiCallbackMode::kGeneric ? 2 : 0; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(scratch, kApiStackSpace, StackFrame::EXIT); + if (mode == CallApiCallbackMode::kGeneric) { + ASM_CODE_COMMENT_STRING(masm, "Push API_CALLBACK_EXIT frame arguments"); + __ Claim(exit_frame_params_size, kSystemPointerSize); + + // Argc parameter as a Smi. + static_assert(ApiCallbackExitFrameConstants::kArgcOffset == + 3 * kSystemPointerSize); + __ SmiTag(scratch, argc); + __ Str(scratch, MemOperand(sp, 1 * kSystemPointerSize)); + + // Target parameter. + static_assert(ApiCallbackExitFrameConstants::kTargetOffset == + 2 * kSystemPointerSize); + __ LoadTaggedField( + scratch, + FieldMemOperand(callback, CallHandlerInfo::kOwnerTemplateOffset)); + __ Str(scratch, MemOperand(sp, 0 * kSystemPointerSize)); + + __ LoadExternalPointerField( + api_function_address, + FieldMemOperand(callback, + CallHandlerInfo::kMaybeRedirectedCallbackOffset), + kCallHandlerInfoCallbackTag); + + __ EnterExitFrame(scratch, kApiStackSpace, StackFrame::API_CALLBACK_EXIT); + } else { + __ EnterExitFrame(scratch, kApiStackSpace, StackFrame::EXIT); + } { ASM_CODE_COMMENT_STRING(masm, "Initialize FunctionCallbackInfo"); @@ -5315,7 +5652,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // register containing the slot count. MemOperand stack_space_operand = ExitFrameStackSlotOperand(FCI::kLengthOffset + kSlotsToDropOnStackSize); - __ Add(scratch, argc, Operand(FCA::kArgsLengthWithReceiver)); + __ Add(scratch, argc, + Operand(FCA::kArgsLengthWithReceiver + exit_frame_params_size)); __ Str(scratch, stack_space_operand); __ RecordComment("v8::FunctionCallback's argument."); @@ -5324,7 +5662,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { DCHECK(!AreAliased(api_function_address, function_callback_info_arg)); - ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(mode); // Pass api function address to thunk wrapper in case profiler or side-effect // checking is enabled. Register thunk_arg = api_function_address; @@ -5332,8 +5671,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // The current frame needs to be aligned. 
DCHECK_EQ(FCA::kArgsLength % 2, 0); - MemOperand return_value_operand = - ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex); + MemOperand return_value_operand = ExitFrameCallerStackSlotOperand( + FCA::kReturnValueIndex + exit_frame_params_size); static constexpr int kUseStackSpaceOperand = 0; CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, thunk_arg, @@ -5780,6 +6119,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ LoadTaggedField( code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + + if (is_osr) { + ResetSharedFunctionInfoAge(masm, code_obj); + } + __ LoadTaggedField( code_obj, FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); @@ -5876,7 +6220,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ Pop(kInterpreterAccumulatorRegister, padreg); if (is_osr) { - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister); Generate_OSREntry(masm, code_obj); } else { __ Jump(code_obj); diff --git a/v8/src/builtins/array-every.tq b/v8/src/builtins/array-every.tq index 2514a18b7..afcbbca55 100644 --- a/v8/src/builtins/array-every.tq +++ b/v8/src/builtins/array-every.tq @@ -138,7 +138,7 @@ ArrayEvery( o, callbackfn, thisArg, Undefined, o, kValue, len, Undefined); } } label TypeError deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } } } diff --git a/v8/src/builtins/array-filter.tq b/v8/src/builtins/array-filter.tq index bd892a2e7..94f18d05a 100644 --- a/v8/src/builtins/array-filter.tq +++ b/v8/src/builtins/array-filter.tq @@ -193,7 +193,7 @@ ArrayFilter( return ArrayFilterLoopContinuation( o, callbackfn, thisArg, output, o, k, len, to); } label TypeError deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } } } diff --git a/v8/src/builtins/array-find.tq b/v8/src/builtins/array-find.tq index 9b53f9c70..921baebb2 100644 --- a/v8/src/builtins/array-find.tq +++ b/v8/src/builtins/array-find.tq @@ -145,7 +145,7 @@ ArrayPrototypeFind( return ArrayFindLoopContinuation(o, callbackfn, thisArg, o, k, len); } } label NotCallableError deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } } } diff --git a/v8/src/builtins/array-findindex.tq b/v8/src/builtins/array-findindex.tq index ed70a1225..0041121d2 100644 --- a/v8/src/builtins/array-findindex.tq +++ b/v8/src/builtins/array-findindex.tq @@ -145,7 +145,7 @@ ArrayPrototypeFindIndex( return ArrayFindIndexLoopContinuation(o, callbackfn, thisArg, o, k, len); } } label NotCallableError deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } } } diff --git a/v8/src/builtins/array-findlast.tq b/v8/src/builtins/array-findlast.tq index a359ec915..c47a6d32d 100644 --- a/v8/src/builtins/array-findlast.tq +++ b/v8/src/builtins/array-findlast.tq @@ -104,7 +104,7 @@ ArrayPrototypeFindLast( return ArrayFindLastLoopContinuation(predicate, thisArg, o, k); } } label NotCallableError deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } } } diff --git a/v8/src/builtins/array-findlastindex.tq b/v8/src/builtins/array-findlastindex.tq index 3b5498f96..f89783704 100644 --- a/v8/src/builtins/array-findlastindex.tq +++ b/v8/src/builtins/array-findlastindex.tq @@ -105,7 +105,7 @@ ArrayPrototypeFindLastIndex( return 
ArrayFindLastIndexLoopContinuation(predicate, thisArg, o, k); } } label NotCallableError deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } } } diff --git a/v8/src/builtins/array-foreach.tq b/v8/src/builtins/array-foreach.tq index 938210dcd..4fef65aac 100644 --- a/v8/src/builtins/array-foreach.tq +++ b/v8/src/builtins/array-foreach.tq @@ -122,7 +122,7 @@ ArrayForEach( return ArrayForEachLoopContinuation( o, callbackfn, thisArg, Undefined, o, k, len, Undefined); } label TypeError deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } } } diff --git a/v8/src/builtins/array-from.tq b/v8/src/builtins/array-from.tq index 103892d74..7d94228a1 100644 --- a/v8/src/builtins/array-from.tq +++ b/v8/src/builtins/array-from.tq @@ -35,7 +35,7 @@ ArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(...arguments): } else { // a. If IsCallable(mapfn) is false, throw a TypeError exception. if (!Is(mapfn)) deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, mapfn); + ThrowCalledNonCallable(mapfn); } // b. Let mapping be true. mapping = true; @@ -78,7 +78,7 @@ ArrayFrom(js-implicit context: NativeContext, receiver: JSAny)(...arguments): // done to true and a target array which somehow never ran out of // memory, e.g. a proxy that discarded the values. Ignoring this case // just means we would repeatedly call CreateDataProperty with index = - // 2^53 + // 2^53. dcheck(k < kMaxSafeInteger); // ii. Let Pk be ! ToString(k). diff --git a/v8/src/builtins/array-join.tq b/v8/src/builtins/array-join.tq index 7fd9c07a1..7d0711230 100644 --- a/v8/src/builtins/array-join.tq +++ b/v8/src/builtins/array-join.tq @@ -88,7 +88,7 @@ transitioning builtin ConvertToLocaleString( return ToString_Inline(result); } label TypeError { - ThrowTypeError(MessageTemplate::kCalledNonCallable, prop); + ThrowCalledNonCallable(prop); } } diff --git a/v8/src/builtins/array-map.tq b/v8/src/builtins/array-map.tq index 1958d1eb5..3ec6e118f 100644 --- a/v8/src/builtins/array-map.tq +++ b/v8/src/builtins/array-map.tq @@ -14,7 +14,7 @@ ArrayMapPreLoopLazyDeoptContinuation( const numberLength = Cast(length) otherwise unreachable; const callbackfn = Cast(callback) - otherwise ThrowTypeError(MessageTemplate::kCalledNonCallable, callback); + otherwise ThrowCalledNonCallable(callback); return ArrayMapLoopContinuation( jsreceiver, callbackfn, thisArg, outputArray, jsreceiver, kZero, numberLength); @@ -269,7 +269,7 @@ ArrayMap( return ArrayMapLoopContinuation(o, callbackfn, thisArg, array, o, k, len); } label TypeError deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } } } diff --git a/v8/src/builtins/array-reduce-right.tq b/v8/src/builtins/array-reduce-right.tq index 90e0e496f..a8e566ed1 100644 --- a/v8/src/builtins/array-reduce-right.tq +++ b/v8/src/builtins/array-reduce-right.tq @@ -191,7 +191,7 @@ ArrayReduceRight( o, callbackfn, accumulator, o, value, len); } } label NoCallableError deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } } } diff --git a/v8/src/builtins/array-reduce.tq b/v8/src/builtins/array-reduce.tq index 8ab85a0cb..a50b2000c 100644 --- a/v8/src/builtins/array-reduce.tq +++ b/v8/src/builtins/array-reduce.tq @@ -190,7 +190,7 @@ ArrayReduce( o, callbackfn, accumulator, o, value, len); } } label NoCallableError deferred { - 
ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } } } diff --git a/v8/src/builtins/array-some.tq b/v8/src/builtins/array-some.tq index 69467bba2..5082f5fe2 100644 --- a/v8/src/builtins/array-some.tq +++ b/v8/src/builtins/array-some.tq @@ -138,7 +138,7 @@ ArraySome( o, callbackfn, thisArg, Undefined, o, kValue, len, Undefined); } } label TypeError deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } } } diff --git a/v8/src/builtins/base.tq b/v8/src/builtins/base.tq index 2c8ba75f8..97e9f0457 100644 --- a/v8/src/builtins/base.tq +++ b/v8/src/builtins/base.tq @@ -58,6 +58,8 @@ type TaggedZeroPattern extends TaggedIndex; // A value with the size of Tagged which may contain arbitrary data. type Uninitialized extends Tagged; +type BuiltinsName extends int31 constexpr 'Builtin'; + extern macro MakeWeak(HeapObject): WeakHeapObject; extern macro GetHeapObjectAssumeWeak(MaybeObject): HeapObject labels IfCleared; extern macro GetHeapObjectIfStrong(MaybeObject): HeapObject labels IfNotStrong; @@ -441,6 +443,7 @@ extern enum MessageTemplate { kWasmTrapFuncSigMismatch, kWasmTrapDataSegmentOutOfBounds, kWasmTrapElementSegmentOutOfBounds, + kWasmTrapJSTypeError, kWasmTrapTableOutOfBounds, kWasmTrapRethrowNull, kWasmTrapNullDereference, @@ -458,6 +461,9 @@ extern enum MessageTemplate { kPropertyDescObject, kMustBePositive, kIteratorReduceNoInitial, + kSizeIsNaN, + kArgumentIsNonObject, + kKeysMethodInvalid, ... } @@ -538,9 +544,11 @@ extern macro DefaultStringConstant(): String; extern macro EmptyStringConstant(): EmptyString; extern macro ErrorsStringConstant(): String; extern macro FalseConstant(): False; +extern macro HasStringConstant(): String; extern macro Int32FalseConstant(): bool; extern macro Int32TrueConstant(): bool; extern macro IteratorSymbolConstant(): PublicSymbol; +extern macro KeysStringConstant(): String; extern macro LengthStringConstant(): String; extern macro MatchSymbolConstant(): Symbol; extern macro MessageStringConstant(): String; @@ -551,6 +559,7 @@ extern macro NullConstant(): Null; extern macro NumberStringConstant(): String; extern macro ReturnStringConstant(): String; extern macro SearchSymbolConstant(): Symbol; +extern macro SizeStringConstant(): String; extern macro StringStringConstant(): String; extern macro TheHoleConstant(): TheHole; extern macro ToPrimitiveSymbolConstant(): PublicSymbol; @@ -570,6 +579,9 @@ const kLengthString: String = LengthStringConstant(); const kMessageString: String = MessageStringConstant(); const kNextString: String = NextStringConstant(); const kReturnString: String = ReturnStringConstant(); +const kSizeString: String = SizeStringConstant(); +const kHasString: String = HasStringConstant(); +const kKeysString: String = KeysStringConstant(); const kNaN: NaN = NanConstant(); const kZero: Zero = %RawDownCast(SmiConstant(0)); @@ -697,6 +709,8 @@ extern transitioning macro ToThisValue(implicit context: Context)( JSAny, constexpr PrimitiveType, constexpr string): JSAny; extern transitioning macro GetProperty(implicit context: Context)( JSAny, JSAny): JSAny; +extern macro GetInterestingProperty(Context, JSReceiver, Symbol): JSAny + labels NotFound; extern transitioning builtin SetProperty(implicit context: Context)( JSAny, JSAny, JSAny): JSAny; extern transitioning builtin SetPropertyIgnoreAttributes( @@ -767,11 +781,20 @@ extern macro SpeciesConstructor(implicit context: Context)( extern macro 
ConstructorBuiltinsAssembler::IsDictionaryMap(Map): bool; extern macro CodeStubAssembler::AllocateNameDictionary(constexpr int32): NameDictionary; +extern macro CodeStubAssembler::AllocateNameDictionary(intptr): NameDictionary; extern macro CodeStubAssembler::AllocateOrderedNameDictionary(constexpr int32): OrderedNameDictionary; extern macro CodeStubAssembler::AllocateSwissNameDictionary(constexpr int32): SwissNameDictionary; +extern macro CodeStubAssembler::AddToDictionary( + NameDictionary, Name, Object): void labels Bailout; +extern macro CodeStubAssembler::AddToDictionary( + SwissNameDictionary, Name, Object): void labels Bailout; + +extern macro AllocateOrderedHashSet(): OrderedHashSet; +extern macro AllocateOrderedHashMap(): OrderedHashMap; + extern builtin ToObject(Context, JSAny): JSReceiver; extern macro ToObject_Inline(Context, JSAny): JSReceiver; extern macro IsUndefined(Object): bool; @@ -798,6 +821,8 @@ extern transitioning runtime NormalizeElements(Context, JSObject): void; extern transitioning runtime TransitionElementsKindWithKind( Context, JSObject, Smi): void; +extern macro ArrayListElements(ArrayList): FixedArray; + extern macro LoadObjectField(HeapObject, constexpr int32): Object; extern macro LoadBufferObject(RawPtr, constexpr int32): Object; @@ -1236,10 +1261,12 @@ extern macro SmiToTaggedIndex(Smi): TaggedIndex; extern macro RoundIntPtrToFloat64(intptr): float64; extern macro IntPtrRoundUpToPowerOfTwo32(intptr): intptr; extern macro ChangeFloat32ToFloat64(float32): float64; +extern macro RoundInt32ToFloat32(int32): float32; extern macro ChangeNumberToFloat64(Number): float64; extern macro ChangeNumberToUint32(Number): uint32; -extern macro ChangeTaggedNonSmiToInt32(implicit context: Context)(JSAnyNotSmi): +extern macro ChangeTaggedNonSmiToInt32(implicit context: Context)(HeapObject): int32; +extern macro ChangeFloat32ToTagged(float32): Number; extern macro ChangeTaggedToFloat64(implicit context: Context)(JSAny): float64; extern macro ChangeFloat64ToTagged(float64): Number; extern macro ChangeFloat64ToUintPtr(float64): uintptr; @@ -1335,6 +1362,8 @@ extern macro IsMockArrayBufferAllocatorFlag(): bool; extern macro HasBuiltinSubclassingFlag(): bool; extern macro IsPrototypeTypedArrayPrototype(implicit context: Context)(Map): bool; +extern macro IsSetIteratorProtectorCellInvalid(): bool; +extern macro IsMapIteratorProtectorCellInvalid(): bool; extern operator '.data_ptr' macro LoadJSTypedArrayDataPtr(JSTypedArray): RawPtr; @@ -1882,6 +1911,8 @@ GetDerivedMap(Context, JSFunction, JSReceiver, JSAny): Map; } extern macro IsDeprecatedMap(Map): bool; +extern macro LoadSlowObjectWithNullPrototypeMap(NativeContext): Map; + transitioning builtin FastCreateDataProperty(implicit context: Context)( receiver: JSReceiver, key: JSAny, value: JSAny): Object { try { @@ -2050,6 +2081,8 @@ const kNoHashSentinel: constexpr int32 generates 'PropertyArray::kNoHashSentinel'; extern macro LoadNameHash(Name): uint32; +extern transitioning builtin ToName(implicit context: Context)(JSAny): AnyName; + extern macro LoadSimd128(intptr): Simd128; extern macro I8x16BitMask(I8X16): int32; extern macro I8x16Eq(I8X16, I8X16): I8X16; diff --git a/v8/src/builtins/builtins-api.cc b/v8/src/builtins/builtins-api.cc index ac4083df5..94120f0b5 100644 --- a/v8/src/builtins/builtins-api.cc +++ b/v8/src/builtins/builtins-api.cc @@ -86,7 +86,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle HandleApiCallHelper( // Proxies never need access checks. 
DCHECK(js_receiver->IsJSObject()); Handle js_object = Handle::cast(js_receiver); - if (!isolate->MayAccess(handle(isolate->context(), isolate), js_object)) { + if (!isolate->MayAccess(isolate->native_context(), js_object)) { isolate->ReportFailedAccessCheck(js_object); RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object); return isolate->factory()->undefined_value(); @@ -132,23 +132,18 @@ V8_WARN_UNUSED_RESULT MaybeHandle HandleApiCallHelper( } // anonymous namespace -BUILTIN(HandleApiCall) { +BUILTIN(HandleApiConstruct) { HandleScope scope(isolate); Handle receiver = args.receiver(); Handle new_target = args.new_target(); + DCHECK(!new_target->IsUndefined(isolate)); Handle fun_data( args.target()->shared().get_api_func_data(), isolate); int argc = args.length() - 1; Address* argv = args.address_of_first_argument(); - if (new_target->IsUndefined()) { - RETURN_RESULT_OR_FAILURE( - isolate, HandleApiCallHelper(isolate, new_target, fun_data, - receiver, argv, argc)); - } else { - RETURN_RESULT_OR_FAILURE( - isolate, HandleApiCallHelper(isolate, new_target, fun_data, - receiver, argv, argc)); - } + RETURN_RESULT_OR_FAILURE( + isolate, HandleApiCallHelper(isolate, new_target, fun_data, + receiver, argv, argc)); } namespace { @@ -210,8 +205,10 @@ MaybeHandle Builtins::InvokeApiFunction( // Helper function to handle calls to non-function objects created through the // API. The object can be called as either a constructor (using new) or just as // a function (without new). -V8_WARN_UNUSED_RESULT static Object HandleApiCallAsFunctionOrConstructor( - Isolate* isolate, bool is_construct_call, BuiltinArguments args) { +V8_WARN_UNUSED_RESULT static Object +HandleApiCallAsFunctionOrConstructorDelegate(Isolate* isolate, + bool is_construct_call, + BuiltinArguments args) { Handle receiver = args.receiver(); // Get the object called. @@ -260,14 +257,14 @@ V8_WARN_UNUSED_RESULT static Object HandleApiCallAsFunctionOrConstructor( // Handle calls to non-function objects created through the API. This delegate // function is used when the call is a normal function call. -BUILTIN(HandleApiCallAsFunction) { - return HandleApiCallAsFunctionOrConstructor(isolate, false, args); +BUILTIN(HandleApiCallAsFunctionDelegate) { + return HandleApiCallAsFunctionOrConstructorDelegate(isolate, false, args); } // Handle calls to non-function objects created through the API. This delegate // function is used when the call is a construct call. -BUILTIN(HandleApiCallAsConstructor) { - return HandleApiCallAsFunctionOrConstructor(isolate, true, args); +BUILTIN(HandleApiCallAsConstructorDelegate) { + return HandleApiCallAsFunctionOrConstructorDelegate(isolate, true, args); } } // namespace internal diff --git a/v8/src/builtins/builtins-array-gen.cc b/v8/src/builtins/builtins-array-gen.cc index 20989b185..05eb990ff 100644 --- a/v8/src/builtins/builtins-array-gen.cc +++ b/v8/src/builtins/builtins-array-gen.cc @@ -38,7 +38,16 @@ void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() { context(), method_name, original_array, len()); // In the Spec and our current implementation, the length check is already // performed in TypedArraySpeciesCreate. 
- CSA_DCHECK(this, UintPtrLessThanOrEqual(len(), LoadJSTypedArrayLength(a))); +#ifdef DEBUG + Label detached_or_out_of_bounds(this), done(this); + CSA_DCHECK(this, UintPtrLessThanOrEqual( + len(), LoadJSTypedArrayLengthAndCheckDetached( + a, &detached_or_out_of_bounds))); + Goto(&done); + BIND(&detached_or_out_of_bounds); + Unreachable(); + BIND(&done); +#endif // DEBUG fast_typed_array_target_ = Word32Equal(LoadElementsKind(original_array), LoadElementsKind(a)); a_ = a; diff --git a/v8/src/builtins/builtins-array.cc b/v8/src/builtins/builtins-array.cc index b9288c740..08ef30875 100644 --- a/v8/src/builtins/builtins-array.cc +++ b/v8/src/builtins/builtins-array.cc @@ -771,10 +771,10 @@ class ArrayConcatVisitor { array, fast_elements() ? HOLEY_ELEMENTS : DICTIONARY_ELEMENTS); { DisallowGarbageCollection no_gc; - auto raw = *array; - raw.set_length(*length); - raw.set_elements(*storage_fixed_array()); - raw.set_map(*map, kReleaseStore); + Tagged raw = *array; + raw->set_length(*length); + raw->set_elements(*storage_fixed_array()); + raw->set_map(*map, kReleaseStore); } return array; } diff --git a/v8/src/builtins/builtins-call-gen.cc b/v8/src/builtins/builtins-call-gen.cc index 72531c88e..6d865b8b5 100644 --- a/v8/src/builtins/builtins-call-gen.cc +++ b/v8/src/builtins/builtins-call-gen.cc @@ -64,6 +64,18 @@ void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) { masm->isolate()->builtins()->CallFunction()); } +void Builtins::Generate_CallApiCallbackGeneric(MacroAssembler* masm) { + Generate_CallApiCallbackImpl(masm, CallApiCallbackMode::kGeneric); +} + +void Builtins::Generate_CallApiCallbackNoSideEffects(MacroAssembler* masm) { + Generate_CallApiCallbackImpl(masm, CallApiCallbackMode::kNoSideEffects); +} + +void Builtins::Generate_CallApiCallbackWithSideEffects(MacroAssembler* masm) { + Generate_CallApiCallbackImpl(masm, CallApiCallbackMode::kWithSideEffects); +} + // TODO(cbruni): Try reusing code between builtin versions to avoid binary // overhead. TF_BUILTIN(Call_ReceiverIsNullOrUndefined_Baseline_Compact, @@ -702,8 +714,11 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate( BIND(&receiver_needs_access_check); { - CallRuntime(Runtime::kAccessCheck, context, receiver); - Goto(&receiver_done); + TNode has_access = + IsTrue(CallRuntime(Runtime::kAccessCheck, context, receiver)); + GotoIf(has_access, &receiver_done); + // Access check failed, return undefined value. + args.PopAndReturn(UndefinedConstant()); } BIND(&receiver_done); @@ -731,15 +746,25 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate( [&]() { return GetCompatibleReceiver(receiver, signature, context); }); } + TNode call_code = CAST(LoadObjectField( + function_template_info, FunctionTemplateInfo::kCallCodeOffset)); + // If the function doesn't have an associated C++ code to execute, just + // return the receiver as would an empty function do (see + // HandleApiCallHelper). + { + Label if_continue(this); + GotoIfNot(IsUndefined(call_code), &if_continue); + args.PopAndReturn(receiver); + + Bind(&if_continue); + } + // Perform the actual API callback invocation via CallApiCallback. 
- TNode call_handler_info = LoadObjectField( - function_template_info, FunctionTemplateInfo::kCallCodeOffset); - TNode callback = LoadCallHandlerInfoJsCallbackPtr(call_handler_info); - TNode call_data = - LoadObjectField(call_handler_info, CallHandlerInfo::kDataOffset); - TailCallStub(CodeFactory::CallApiCallback(isolate()), context, callback, - TruncateIntPtrToInt32(args.GetLengthWithoutReceiver()), - call_data, holder); + TNode call_handler_info = CAST(call_code); + TailCallStub( + Builtins::CallableFor(isolate(), Builtin::kCallApiCallbackGeneric), + context, TruncateIntPtrToInt32(args.GetLengthWithoutReceiver()), + call_handler_info, holder); } TF_BUILTIN(CallFunctionTemplate_CheckAccess, CallOrConstructBuiltinsAssembler) { @@ -772,5 +797,36 @@ TF_BUILTIN(CallFunctionTemplate_CheckAccessAndCompatibleReceiver, function_template_info, argc, context); } +TF_BUILTIN(HandleApiCallOrConstruct, CallOrConstructBuiltinsAssembler) { + auto target = Parameter(Descriptor::kTarget); + auto new_target = Parameter(Descriptor::kNewTarget); + auto context = Parameter(Descriptor::kContext); + auto argc = UncheckedParameter(Descriptor::kActualArgumentsCount); + + Label if_call(this), if_construct(this); + Branch(IsUndefined(new_target), &if_call, &if_construct); + + BIND(&if_call); + { + TNode shared = + LoadJSFunctionSharedFunctionInfo(CAST(target)); + TNode function_template_info = + CAST(LoadSharedFunctionInfoFunctionData(shared)); + + // Tail call to the stub while leaving all the incoming JS arguments on + // the stack. + TailCallBuiltin( + Builtin::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver, + context, function_template_info, ChangeUint32ToWord(argc)); + } + BIND(&if_construct); + { + // Tail call to the stub while leaving all the incoming JS arguments on + // the stack. + TailCallBuiltin(Builtin::kHandleApiConstruct, context, target, new_target, + argc); + } +} + } // namespace internal } // namespace v8 diff --git a/v8/src/builtins/builtins-collections-gen.cc b/v8/src/builtins/builtins-collections-gen.cc index 05b7a7bf8..d22faf86a 100644 --- a/v8/src/builtins/builtins-collections-gen.cc +++ b/v8/src/builtins/builtins-collections-gen.cc @@ -508,174 +508,6 @@ TNode BaseCollectionsAssembler::LoadAndNormalizeFixedDoubleArrayElement( return entry.value(); } -class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { - public: - explicit CollectionsBuiltinsAssembler(compiler::CodeAssemblerState* state) - : BaseCollectionsAssembler(state) {} - - // Check whether |iterable| is a JS_MAP_KEY_ITERATOR_TYPE or - // JS_MAP_VALUE_ITERATOR_TYPE object that is not partially consumed and still - // has original iteration behavior. - void BranchIfIterableWithOriginalKeyOrValueMapIterator(TNode iterable, - TNode context, - Label* if_true, - Label* if_false); - - // Check whether |iterable| is a JS_SET_TYPE or JS_SET_VALUE_ITERATOR_TYPE - // object that still has original iteration behavior. In case of the iterator, - // the iterator also must not have been partially consumed. - void BranchIfIterableWithOriginalValueSetIterator(TNode iterable, - TNode context, - Label* if_true, - Label* if_false); - - protected: - template - TNode AllocateJSCollectionIterator( - const TNode context, int map_index, - const TNode collection); - TNode AllocateTable(Variant variant, - TNode at_least_space_for) override; - TNode GetHash(const TNode key); - TNode CallGetHashRaw(const TNode key); - TNode CallGetOrCreateHashRaw(const TNode key); - - // Transitions the iterator to the non obsolete backing store. 
- // This is a NOP if the [table] is not obsolete. - template - using UpdateInTransition = std::function table, - const TNode index)>; - template - std::pair, TNode> Transition( - const TNode table, const TNode index, - UpdateInTransition const& update_in_transition); - template - std::pair, TNode> TransitionAndUpdate( - const TNode iterator); - template - std::tuple, TNode, TNode> NextSkipHoles( - TNode table, TNode index, Label* if_end); - - // Specialization for Smi. - // The {result} variable will contain the entry index if the key was found, - // or the hash code otherwise. - template - void FindOrderedHashTableEntryForSmiKey(TNode table, - TNode key_tagged, - TVariable* result, - Label* entry_found, Label* not_found); - void SameValueZeroSmi(TNode key_smi, TNode candidate_key, - Label* if_same, Label* if_not_same); - - // Specialization for heap numbers. - // The {result} variable will contain the entry index if the key was found, - // or the hash code otherwise. - void SameValueZeroHeapNumber(TNode key_float, - TNode candidate_key, Label* if_same, - Label* if_not_same); - template - void FindOrderedHashTableEntryForHeapNumberKey( - TNode table, TNode key_heap_number, - TVariable* result, Label* entry_found, Label* not_found); - - // Specialization for bigints. - // The {result} variable will contain the entry index if the key was found, - // or the hash code otherwise. - void SameValueZeroBigInt(TNode key, TNode candidate_key, - Label* if_same, Label* if_not_same); - template - void FindOrderedHashTableEntryForBigIntKey(TNode table, - TNode key_big_int, - TVariable* result, - Label* entry_found, - Label* not_found); - - // Specialization for string. - // The {result} variable will contain the entry index if the key was found, - // or the hash code otherwise. - template - void FindOrderedHashTableEntryForStringKey(TNode table, - TNode key_tagged, - TVariable* result, - Label* entry_found, - Label* not_found); - TNode ComputeStringHash(TNode string_key); - void SameValueZeroString(TNode key_string, - TNode candidate_key, Label* if_same, - Label* if_not_same); - - // Specialization for non-strings, non-numbers. For those we only need - // reference equality to compare the keys. - // The {result} variable will contain the entry index if the key was found, - // or the hash code otherwise. If the hash-code has not been computed, it - // should be Smi -1. - template - void FindOrderedHashTableEntryForOtherKey(TNode table, - TNode key_heap_object, - TVariable* result, - Label* entry_found, - Label* not_found); - - template - void TryLookupOrderedHashTableIndex(const TNode table, - const TNode key, - TVariable* result, - Label* if_entry_found, - Label* if_not_found); - - const TNode NormalizeNumberKey(const TNode key); - void StoreOrderedHashMapNewEntry(const TNode table, - const TNode key, - const TNode value, - const TNode hash, - const TNode number_of_buckets, - const TNode occupancy); - - void StoreOrderedHashSetNewEntry(const TNode table, - const TNode key, - const TNode hash, - const TNode number_of_buckets, - const TNode occupancy); - - // Create a JSArray with PACKED_ELEMENTS kind from a Map.prototype.keys() or - // Map.prototype.values() iterator. The iterator is assumed to satisfy - // IterableWithOriginalKeyOrValueMapIterator. This function will skip the - // iterator and iterate directly on the underlying hash table. In the end it - // will update the state of the iterator to 'exhausted'. 
- TNode MapIteratorToList(TNode context, - TNode iterator); - - // Create a JSArray with PACKED_ELEMENTS kind from a Set.prototype.keys() or - // Set.prototype.values() iterator, or a Set. The |iterable| is assumed to - // satisfy IterableWithOriginalValueSetIterator. This function will skip the - // iterator and iterate directly on the underlying hash table. In the end, if - // |iterable| is an iterator, it will update the state of the iterator to - // 'exhausted'. - TNode SetOrSetIteratorToList(TNode context, - TNode iterable); - - void BranchIfMapIteratorProtectorValid(Label* if_true, Label* if_false); - void BranchIfSetIteratorProtectorValid(Label* if_true, Label* if_false); - - // Builds code that finds OrderedHashTable entry for a key with hash code - // {hash} with using the comparison code generated by {key_compare}. The code - // jumps to {entry_found} if the key is found, or to {not_found} if the key - // was not found. In the {entry_found} branch, the variable - // entry_start_position will be bound to the index of the entry (relative to - // OrderedHashTable::kHashTableStartIndex). - // - // The {CollectionType} template parameter stands for the particular instance - // of OrderedHashTable, it should be OrderedHashMap or OrderedHashSet. - template - void FindOrderedHashTableEntry( - const TNode table, const TNode hash, - const std::function, Label*, Label*)>& key_compare, - TVariable* entry_start_position, Label* entry_found, - Label* not_found); - - TNode ComputeUnseededHash(TNode key); -}; - template void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry( const TNode table, const TNode hash, @@ -724,9 +556,8 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry( number_of_buckets); // Load the key from the entry. - const TNode candidate_key = UnsafeLoadFixedArrayElement( - table, entry_start, - CollectionType::HashTableStartIndex() * kTaggedSize); + const TNode candidate_key = + UnsafeLoadKeyFromOrderedHashTableEntry(table, entry_start); key_compare(candidate_key, &if_key_found, &continue_next_entry); @@ -1061,10 +892,7 @@ TNode CollectionsBuiltinsAssembler::MapIteratorToList( CSA_DCHECK(this, InstanceTypeEqual(LoadInstanceType(iterator), JS_MAP_VALUE_ITERATOR_TYPE)); TNode entry_value = - UnsafeLoadFixedArrayElement(table, entry_start_position, - (OrderedHashMap::HashTableStartIndex() + - OrderedHashMap::kValueOffset) * - kTaggedSize); + UnsafeLoadValueFromOrderedHashMapEntry(table, entry_start_position); Store(elements, var_offset.value(), entry_value); Goto(&continue_loop); @@ -1466,6 +1294,28 @@ CollectionsBuiltinsAssembler::NextSkipHoles(TNode table, TNode used_capacity = Int32Add(number_of_elements, number_of_deleted_elements); + return NextSkipHoles(table, number_of_buckets, used_capacity, index, if_end); +} + +template +std::tuple, TNode, TNode> +CollectionsBuiltinsAssembler::NextSkipHoles(TNode table, + TNode number_of_buckets, + TNode used_capacity, + TNode index, + Label* if_end) { + CSA_DCHECK(this, Word32Equal(number_of_buckets, + LoadAndUntagToWord32ObjectField( + table, TableType::NumberOfBucketsOffset()))); + CSA_DCHECK( + this, + Word32Equal( + used_capacity, + Int32Add(LoadAndUntagToWord32ObjectField( + table, TableType::NumberOfElementsOffset()), + LoadAndUntagToWord32ObjectField( + table, TableType::NumberOfDeletedElementsOffset())))); + TNode entry_key; TNode entry_start_position; TVARIABLE(Int32T, var_index, TruncateIntPtrToInt32(index)); @@ -1477,9 +1327,8 @@ CollectionsBuiltinsAssembler::NextSkipHoles(TNode table, entry_start_position = 
Int32Add( Int32Mul(var_index.value(), Int32Constant(TableType::kEntrySize)), number_of_buckets); - entry_key = UnsafeLoadFixedArrayElement( - table, ChangePositiveInt32ToIntPtr(entry_start_position), - TableType::HashTableStartIndex() * kTaggedSize); + entry_key = UnsafeLoadKeyFromOrderedHashTableEntry( + table, ChangePositiveInt32ToIntPtr(entry_start_position)); var_index = Int32Add(var_index.value(), Int32Constant(1)); Branch(IsTheHole(entry_key), &loop, &done_loop); } @@ -1490,6 +1339,91 @@ CollectionsBuiltinsAssembler::NextSkipHoles(TNode table, ChangePositiveInt32ToIntPtr(var_index.value())}; } +template +TorqueStructKeyIndexPair +CollectionsBuiltinsAssembler::NextKeyIndexPairUnmodifiedTable( + const TNode table, const TNode number_of_buckets, + const TNode used_capacity, const TNode index, + Label* if_end) { + TNode key; + TNode entry_start_position; + TNode next_index; + + std::tie(key, entry_start_position, next_index) = + NextSkipHoles(table, number_of_buckets, used_capacity, index, if_end); + + return TorqueStructKeyIndexPair{key, next_index}; +} + +template TorqueStructKeyIndexPair +CollectionsBuiltinsAssembler::NextKeyIndexPairUnmodifiedTable( + const TNode table, const TNode number_of_buckets, + const TNode used_capacity, const TNode index, + Label* if_end); +template TorqueStructKeyIndexPair +CollectionsBuiltinsAssembler::NextKeyIndexPairUnmodifiedTable( + const TNode table, const TNode number_of_buckets, + const TNode used_capacity, const TNode index, + Label* if_end); + +template +TorqueStructKeyIndexPair CollectionsBuiltinsAssembler::NextKeyIndexPair( + const TNode table, const TNode index, + Label* if_end) { + TNode key; + TNode entry_start_position; + TNode next_index; + + std::tie(key, entry_start_position, next_index) = + NextSkipHoles(table, index, if_end); + + return TorqueStructKeyIndexPair{key, next_index}; +} + +template TorqueStructKeyIndexPair +CollectionsBuiltinsAssembler::NextKeyIndexPair( + const TNode table, const TNode index, + Label* if_end); +template TorqueStructKeyIndexPair +CollectionsBuiltinsAssembler::NextKeyIndexPair( + const TNode table, const TNode index, + Label* if_end); + +TorqueStructKeyValueIndexTuple +CollectionsBuiltinsAssembler::NextKeyValueIndexTupleUnmodifiedTable( + const TNode table, const TNode number_of_buckets, + const TNode used_capacity, const TNode index, + Label* if_end) { + TNode key; + TNode entry_start_position; + TNode next_index; + + std::tie(key, entry_start_position, next_index) = + NextSkipHoles(table, number_of_buckets, used_capacity, index, if_end); + + TNode value = + UnsafeLoadValueFromOrderedHashMapEntry(table, entry_start_position); + + return TorqueStructKeyValueIndexTuple{key, value, next_index}; +} + +TorqueStructKeyValueIndexTuple +CollectionsBuiltinsAssembler::NextKeyValueIndexTuple( + const TNode table, const TNode index, + Label* if_end) { + TNode key; + TNode entry_start_position; + TNode next_index; + + std::tie(key, entry_start_position, next_index) = + NextSkipHoles(table, index, if_end); + + TNode value = + UnsafeLoadValueFromOrderedHashMapEntry(table, entry_start_position); + + return TorqueStructKeyValueIndexTuple{key, value, next_index}; +} + TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) { const auto receiver = Parameter(Descriptor::kReceiver); const auto key = Parameter(Descriptor::kKey); @@ -1507,10 +1441,7 @@ TF_BUILTIN(MapPrototypeGet, CollectionsBuiltinsAssembler) { &if_not_found); BIND(&if_found); - Return(LoadFixedArrayElement( - CAST(table), SmiUntag(index), - 
(OrderedHashMap::HashTableStartIndex() + OrderedHashMap::kValueOffset) * - kTaggedSize)); + Return(LoadValueFromOrderedHashMapEntry(CAST(table), SmiUntag(index))); BIND(&if_not_found); Return(UndefinedConstant()); @@ -1523,14 +1454,11 @@ TF_BUILTIN(MapPrototypeHas, CollectionsBuiltinsAssembler) { ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.has"); - const TNode table = - LoadObjectField(CAST(receiver), JSMap::kTableOffset); - TNode index = - CAST(CallBuiltin(Builtin::kFindOrderedHashMapEntry, context, table, key)); + const TNode table = + CAST(LoadObjectField(CAST(receiver), JSMap::kTableOffset)); Label if_found(this), if_not_found(this); - Branch(SmiGreaterThanOrEqual(index, SmiConstant(0)), &if_found, - &if_not_found); + Branch(TableHasKey(context, table, key), &if_found, &if_not_found); BIND(&if_found); Return(TrueConstant()); @@ -1539,6 +1467,15 @@ TF_BUILTIN(MapPrototypeHas, CollectionsBuiltinsAssembler) { Return(FalseConstant()); } +TNode CollectionsBuiltinsAssembler::TableHasKey( + const TNode context, TNode table, + TNode key) { + TNode index = + CAST(CallBuiltin(Builtin::kFindOrderedHashMapEntry, context, table, key)); + + return SmiGreaterThanOrEqual(index, SmiConstant(0)); +} + const TNode CollectionsBuiltinsAssembler::NormalizeNumberKey( const TNode key) { TVARIABLE(Object, result, key); @@ -1557,32 +1494,25 @@ const TNode CollectionsBuiltinsAssembler::NormalizeNumberKey( return result.value(); } -TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) { - const auto receiver = Parameter(Descriptor::kReceiver); - auto key = Parameter(Descriptor::kKey); - const auto value = Parameter(Descriptor::kValue); - const auto context = Parameter(Descriptor::kContext); - - ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.set"); - - key = NormalizeNumberKey(key); - - const TNode table = - LoadObjectField(CAST(receiver), JSMap::kTableOffset); - +template +TNode CollectionsBuiltinsAssembler::AddToOrderedHashTable( + const TNode table, const TNode key, + const GrowCollection& grow, + const StoreAtEntry& store_at_new_entry, + const StoreAtEntry& store_at_existing_entry) { + TVARIABLE(CollectionType, table_var, table); TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0)); - Label entry_found(this), not_found(this); + Label entry_found(this), not_found(this), done(this); - TryLookupOrderedHashTableIndex( + TryLookupOrderedHashTableIndex( table, key, &entry_start_position_or_hash, &entry_found, ¬_found); BIND(&entry_found); - // If we found the entry, we just store the value there. - StoreFixedArrayElement(table, entry_start_position_or_hash.value(), value, - UPDATE_WRITE_BARRIER, - kTaggedSize * (OrderedHashMap::HashTableStartIndex() + - OrderedHashMap::kValueOffset)); - Return(receiver); + { + // If we found the entry, we just store the value there. + store_at_existing_entry(table, entry_start_position_or_hash.value()); + Goto(&done); + } Label no_hash(this), add_entry(this), store_new_entry(this); BIND(¬_found); @@ -1600,85 +1530,138 @@ TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) { BIND(&add_entry); TVARIABLE(IntPtrT, number_of_buckets); TVARIABLE(IntPtrT, occupancy); - TVARIABLE(OrderedHashMap, table_var, table); { // Check we have enough space for the entry. 
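Note on the capacity check that follows: with CollectionType::kLoadFactor == 2 the table may hold at most twice as many entries (live plus deleted) as it has buckets before it must grow. A minimal plain-C++ restatement of that condition, with illustrative names rather than the CSA types, might be:

#include <cstddef>

// Assumed to mirror CollectionType::kLoadFactor in the code above.
constexpr std::size_t kLoadFactor = 2;

// Occupancy counts live entries plus deleted-but-not-yet-compacted entries;
// the table has to grow once occupancy reaches buckets * kLoadFactor.
bool NeedsToGrow(std::size_t number_of_buckets,
                 std::size_t number_of_elements,
                 std::size_t number_of_deleted) {
  const std::size_t capacity = number_of_buckets * kLoadFactor;  // WordShl(buckets, 1)
  const std::size_t occupancy = number_of_elements + number_of_deleted;
  return occupancy >= capacity;
}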
number_of_buckets = PositiveSmiUntag(CAST(UnsafeLoadFixedArrayElement( - table, OrderedHashMap::NumberOfBucketsIndex()))); + table, CollectionType::NumberOfBucketsIndex()))); - static_assert(OrderedHashMap::kLoadFactor == 2); + static_assert(CollectionType::kLoadFactor == 2); const TNode capacity = WordShl(number_of_buckets.value(), 1); const TNode number_of_elements = LoadAndUntagPositiveSmiObjectField( - table, OrderedHashMap::NumberOfElementsOffset()); + table, CollectionType::NumberOfElementsOffset()); const TNode number_of_deleted = PositiveSmiUntag(CAST(LoadObjectField( - table, OrderedHashMap::NumberOfDeletedElementsOffset()))); + table, CollectionType::NumberOfDeletedElementsOffset()))); occupancy = IntPtrAdd(number_of_elements, number_of_deleted); GotoIf(IntPtrLessThan(occupancy.value(), capacity), &store_new_entry); // We do not have enough space, grow the table and reload the relevant // fields. - CallRuntime(Runtime::kMapGrow, context, receiver); - table_var = - LoadObjectField(CAST(receiver), JSMap::kTableOffset); + table_var = grow(); number_of_buckets = PositiveSmiUntag(CAST(UnsafeLoadFixedArrayElement( - table_var.value(), OrderedHashMap::NumberOfBucketsIndex()))); + table_var.value(), CollectionType::NumberOfBucketsIndex()))); const TNode new_number_of_elements = LoadAndUntagPositiveSmiObjectField( - table_var.value(), OrderedHashMap::NumberOfElementsOffset()); + table_var.value(), CollectionType::NumberOfElementsOffset()); const TNode new_number_of_deleted = PositiveSmiUntag( CAST(LoadObjectField(table_var.value(), - OrderedHashMap::NumberOfDeletedElementsOffset()))); + CollectionType::NumberOfDeletedElementsOffset()))); occupancy = IntPtrAdd(new_number_of_elements, new_number_of_deleted); Goto(&store_new_entry); } + BIND(&store_new_entry); - // Store the key, value and connect the element to the bucket chain. 
- StoreOrderedHashMapNewEntry(table_var.value(), key, value, - entry_start_position_or_hash.value(), - number_of_buckets.value(), occupancy.value()); + { + StoreOrderedHashTableNewEntry( + table_var.value(), entry_start_position_or_hash.value(), + number_of_buckets.value(), occupancy.value(), store_at_new_entry); + Goto(&done); + } + + BIND(&done); + return table_var.value(); +} + +TF_BUILTIN(MapPrototypeSet, CollectionsBuiltinsAssembler) { + const auto receiver = Parameter(Descriptor::kReceiver); + auto key = Parameter(Descriptor::kKey); + const auto value = Parameter(Descriptor::kValue); + const auto context = Parameter(Descriptor::kContext); + + ThrowIfNotInstanceType(context, receiver, JS_MAP_TYPE, "Map.prototype.set"); + + key = NormalizeNumberKey(key); + + GrowCollection grow = [this, context, receiver]() { + CallRuntime(Runtime::kMapGrow, context, receiver); + return LoadObjectField(CAST(receiver), JSMap::kTableOffset); + }; + + StoreAtEntry store_at_new_entry = + [this, key, value](const TNode table, + const TNode entry_start) { + UnsafeStoreKeyValueInOrderedHashMapEntry(table, key, value, + entry_start); + }; + + StoreAtEntry store_at_existing_entry = + [this, value](const TNode table, + const TNode entry_start) { + UnsafeStoreValueInOrderedHashMapEntry(table, value, entry_start); + }; + + const TNode table = + LoadObjectField(CAST(receiver), JSMap::kTableOffset); + AddToOrderedHashTable(table, key, grow, store_at_new_entry, + store_at_existing_entry); Return(receiver); } -void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry( - const TNode table, const TNode key, - const TNode value, const TNode hash, - const TNode number_of_buckets, const TNode occupancy) { +template +void CollectionsBuiltinsAssembler::StoreOrderedHashTableNewEntry( + const TNode table, const TNode hash, + const TNode number_of_buckets, const TNode occupancy, + const StoreAtEntry& store_at_new_entry) { const TNode bucket = WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1))); TNode bucket_entry = CAST(UnsafeLoadFixedArrayElement( - table, bucket, OrderedHashMap::HashTableStartIndex() * kTaggedSize)); + table, bucket, CollectionType::HashTableStartIndex() * kTaggedSize)); // Store the entry elements. const TNode entry_start = IntPtrAdd( - IntPtrMul(occupancy, IntPtrConstant(OrderedHashMap::kEntrySize)), + IntPtrMul(occupancy, IntPtrConstant(CollectionType::kEntrySize)), number_of_buckets); - UnsafeStoreFixedArrayElement( - table, entry_start, key, UPDATE_WRITE_BARRIER, - kTaggedSize * OrderedHashMap::HashTableStartIndex()); - UnsafeStoreFixedArrayElement( - table, entry_start, value, UPDATE_WRITE_BARRIER, - kTaggedSize * (OrderedHashMap::HashTableStartIndex() + - OrderedHashMap::kValueOffset)); + store_at_new_entry(table, entry_start); + + // Connect the element to the bucket chain. UnsafeStoreFixedArrayElement( table, entry_start, bucket_entry, - kTaggedSize * (OrderedHashMap::HashTableStartIndex() + - OrderedHashMap::kChainOffset)); + kTaggedSize * (CollectionType::HashTableStartIndex() + + CollectionType::kChainOffset)); // Update the bucket head. UnsafeStoreFixedArrayElement( table, bucket, SmiTag(occupancy), - OrderedHashMap::HashTableStartIndex() * kTaggedSize); + CollectionType::HashTableStartIndex() * kTaggedSize); // Bump the elements count. 
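The refactor above threads all map- and set-specific work through callbacks: grow returns a table with room for one more entry, store_at_new_entry writes the payload of a fresh entry, and store_at_existing_entry handles a key that is already present. A rough sketch of that shape in ordinary C++ (illustrative types, not V8's CSA machinery; the real code only calls grow when the capacity check fails):

#include <functional>
#include <string>
#include <unordered_map>

using Table = std::unordered_map<std::string, std::string>;
using GrowCollection = std::function<Table&()>;
using StoreAtEntry = std::function<void(Table&, const std::string& key)>;

// One generic insertion routine; callers decide what storing a key means.
Table& AddToTable(Table& table, const std::string& key,
                  const GrowCollection& grow,
                  const StoreAtEntry& store_at_new_entry,
                  const StoreAtEntry& store_at_existing_entry) {
  if (table.count(key) != 0) {
    store_at_existing_entry(table, key);  // Map: overwrite value; Set: no-op
    return table;
  }
  Table& target = grow();                 // ensure there is room for the entry
  store_at_new_entry(target, key);        // Map: key and value; Set: key only
  return target;
}

Map.prototype.set, Set.prototype.add, AddToSetTable and AddValueToKeyedGroup then differ only in the lambdas they pass in.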
const TNode number_of_elements = - CAST(LoadObjectField(table, OrderedHashMap::NumberOfElementsOffset())); + CAST(LoadObjectField(table, CollectionType::NumberOfElementsOffset())); StoreObjectFieldNoWriteBarrier(table, - OrderedHashMap::NumberOfElementsOffset(), + CollectionType::NumberOfElementsOffset(), SmiAdd(number_of_elements, SmiConstant(1))); } +void CollectionsBuiltinsAssembler::StoreValueInOrderedHashMapEntry( + const TNode table, const TNode value, + const TNode entry_start, CheckBounds check_bounds) { + StoreFixedArrayElement(table, entry_start, value, UPDATE_WRITE_BARRIER, + kTaggedSize * (OrderedHashMap::HashTableStartIndex() + + OrderedHashMap::kValueOffset), + check_bounds); +} + +void CollectionsBuiltinsAssembler::StoreKeyValueInOrderedHashMapEntry( + const TNode table, const TNode key, + const TNode value, const TNode entry_start, + CheckBounds check_bounds) { + StoreFixedArrayElement(table, entry_start, key, UPDATE_WRITE_BARRIER, + kTaggedSize * OrderedHashMap::HashTableStartIndex(), + check_bounds); + StoreValueInOrderedHashMapEntry(table, value, entry_start, check_bounds); +} + TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) { const auto receiver = Parameter(Descriptor::kReceiver); const auto key = Parameter(Descriptor::kKey); @@ -1704,13 +1687,9 @@ TF_BUILTIN(MapPrototypeDelete, CollectionsBuiltinsAssembler) { BIND(&entry_found); // If we found the entry, mark the entry as deleted. - StoreFixedArrayElement(table, entry_start_position_or_hash.value(), - TheHoleConstant(), UPDATE_WRITE_BARRIER, - kTaggedSize * OrderedHashMap::HashTableStartIndex()); - StoreFixedArrayElement(table, entry_start_position_or_hash.value(), - TheHoleConstant(), UPDATE_WRITE_BARRIER, - kTaggedSize * (OrderedHashMap::HashTableStartIndex() + - OrderedHashMap::kValueOffset)); + StoreKeyValueInOrderedHashMapEntry(table, TheHoleConstant(), + TheHoleConstant(), + entry_start_position_or_hash.value()); // Decrement the number of elements, increment the number of deleted elements. const TNode number_of_elements = SmiSub( @@ -1750,107 +1729,82 @@ TF_BUILTIN(SetPrototypeAdd, CollectionsBuiltinsAssembler) { key = NormalizeNumberKey(key); - const TNode table = - LoadObjectField(CAST(receiver), JSMap::kTableOffset); + GrowCollection grow = [this, context, receiver]() { + CallRuntime(Runtime::kSetGrow, context, receiver); + return LoadObjectField(CAST(receiver), JSSet::kTableOffset); + }; - TVARIABLE(IntPtrT, entry_start_position_or_hash, IntPtrConstant(0)); - Label entry_found(this), not_found(this); + StoreAtEntry store_at_new_entry = + [this, key](const TNode table, + const TNode entry_start) { + UnsafeStoreKeyInOrderedHashSetEntry(table, key, entry_start); + }; - TryLookupOrderedHashTableIndex( - table, key, &entry_start_position_or_hash, &entry_found, ¬_found); + StoreAtEntry store_at_existing_entry = + [](const TNode, const TNode) { + // If the entry was found, there is nothing to do. + }; - BIND(&entry_found); - // The entry was found, there is nothing to do. + const TNode table = + LoadObjectField(CAST(receiver), JSSet::kTableOffset); + AddToOrderedHashTable(table, key, grow, store_at_new_entry, + store_at_existing_entry); Return(receiver); +} - Label no_hash(this), add_entry(this), store_new_entry(this); - BIND(¬_found); - { - // If we have a hash code, we can start adding the new entry. 
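Deletion above only tombstones the entry: key and value are overwritten with the hole, the element count is decremented, the deleted count is incremented, and iteration (NextSkipHoles) later steps over those holes. A standalone sketch of that skip over the same flat layout, under the assumption that a sentinel value marks deleted keys:

#include <cstdint>
#include <vector>

constexpr int64_t kEntrySize = 3;        // key, value, chain
constexpr int64_t kTheHole = INT64_MIN;  // stand-in for V8's hole sentinel

// Walk entries in insertion order, skipping deleted ones, as NextSkipHoles does.
std::vector<int64_t> LiveKeys(const std::vector<int64_t>& store,
                              int64_t number_of_buckets,
                              int64_t used_capacity) {
  std::vector<int64_t> keys;
  for (int64_t entry = 0; entry < used_capacity; ++entry) {
    const int64_t start = number_of_buckets + entry * kEntrySize;
    const int64_t key = store[start];
    if (key == kTheHole) continue;  // deleted entry
    keys.push_back(key);
  }
  return keys;
}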
- GotoIf(IntPtrGreaterThan(entry_start_position_or_hash.value(), - IntPtrConstant(0)), - &add_entry); +TNode CollectionsBuiltinsAssembler::AddToSetTable( + const TNode context, TNode table, TNode key, + TNode method_name) { + key = NormalizeNumberKey(key); - // Otherwise, go to runtime to compute the hash code. - entry_start_position_or_hash = SmiUntag(CallGetOrCreateHashRaw(CAST(key))); - Goto(&add_entry); - } + GrowCollection grow = [this, context, table, method_name]() { + TNode new_table = Cast(CallRuntime( + Runtime::kOrderedHashSetEnsureGrowable, context, table, method_name)); + // TODO(v8:13556): check if the table is updated and remove pointer to the + // new table. + return new_table; + }; - BIND(&add_entry); - TVARIABLE(IntPtrT, number_of_buckets); - TVARIABLE(IntPtrT, occupancy); - TVARIABLE(OrderedHashSet, table_var, table); - { - // Check we have enough space for the entry. - number_of_buckets = PositiveSmiUntag(CAST(UnsafeLoadFixedArrayElement( - table, OrderedHashSet::NumberOfBucketsIndex()))); + StoreAtEntry store_at_new_entry = + [this, key](const TNode table, + const TNode entry_start) { + UnsafeStoreKeyInOrderedHashSetEntry(table, key, entry_start); + }; - static_assert(OrderedHashSet::kLoadFactor == 2); - const TNode capacity = WordShl(number_of_buckets.value(), 1); - const TNode number_of_elements = - LoadAndUntagPositiveSmiObjectField( - table, OrderedHashSet::NumberOfElementsOffset()); - const TNode number_of_deleted = LoadAndUntagPositiveSmiObjectField( - table, OrderedHashSet::NumberOfDeletedElementsOffset()); - occupancy = IntPtrAdd(number_of_elements, number_of_deleted); - GotoIf(IntPtrLessThan(occupancy.value(), capacity), &store_new_entry); + StoreAtEntry store_at_existing_entry = + [](const TNode, const TNode) { + // If the entry was found, there is nothing to do. + }; - // We do not have enough space, grow the table and reload the relevant - // fields. - CallRuntime(Runtime::kSetGrow, context, receiver); - table_var = - LoadObjectField(CAST(receiver), JSMap::kTableOffset); - number_of_buckets = PositiveSmiUntag(CAST(UnsafeLoadFixedArrayElement( - table_var.value(), OrderedHashSet::NumberOfBucketsIndex()))); - const TNode new_number_of_elements = - LoadAndUntagPositiveSmiObjectField( - table_var.value(), OrderedHashSet::NumberOfElementsOffset()); - const TNode new_number_of_deleted = - LoadAndUntagPositiveSmiObjectField( - table_var.value(), OrderedHashSet::NumberOfDeletedElementsOffset()); - occupancy = IntPtrAdd(new_number_of_elements, new_number_of_deleted); - Goto(&store_new_entry); - } - BIND(&store_new_entry); - // Store the key, value and connect the element to the bucket chain. - StoreOrderedHashSetNewEntry(table_var.value(), key, - entry_start_position_or_hash.value(), - number_of_buckets.value(), occupancy.value()); - Return(receiver); + return AddToOrderedHashTable(table, key, grow, store_at_new_entry, + store_at_existing_entry); } -void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry( +void CollectionsBuiltinsAssembler::StoreKeyInOrderedHashSetEntry( const TNode table, const TNode key, - const TNode hash, const TNode number_of_buckets, - const TNode occupancy) { - const TNode bucket = - WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1))); - TNode bucket_entry = CAST(UnsafeLoadFixedArrayElement( - table, bucket, OrderedHashSet::HashTableStartIndex() * kTaggedSize)); - - // Store the entry elements. 
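Both the removed OrderedHashSet-specific writer and the new templated StoreOrderedHashTableNewEntry target the same flat backing store: bucket heads first, then fixed-size entries, with each entry's chain slot holding the previous head of its bucket. A toy standalone model of that layout (header fields, holes, growth and bounds checks omitted; buckets assumed to be a power of two):

#include <cstdint>
#include <vector>

// store = [bucket heads...][entry 0][entry 1]...; each entry = {key, value, chain}.
// Bucket heads and chain slots hold entry indices (-1 when empty), which is why
// the CSA code stores SmiTag(occupancy) as the new bucket head.
struct ToyOrderedHashMap {
  static constexpr int64_t kEntrySize = 3;
  static constexpr int64_t kNotFound = -1;
  int64_t number_of_buckets;
  int64_t occupancy = 0;  // live + deleted entries written so far
  std::vector<int64_t> store;

  explicit ToyOrderedHashMap(int64_t buckets)
      : number_of_buckets(buckets),
        store(buckets + buckets * 2 * kEntrySize, kNotFound) {}

  int64_t EntryStart(int64_t entry) const {
    return number_of_buckets + entry * kEntrySize;
  }

  void AddNewEntry(int64_t hash, int64_t key, int64_t value) {
    const int64_t bucket = hash & (number_of_buckets - 1);
    const int64_t old_head = store[bucket];  // previous chain head (or -1)
    const int64_t start = EntryStart(occupancy);
    store[start + 0] = key;
    store[start + 1] = value;
    store[start + 2] = old_head;             // connect element to bucket chain
    store[bucket] = occupancy;               // update the bucket head
    ++occupancy;
  }
};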
- const TNode entry_start = IntPtrAdd( - IntPtrMul(occupancy, IntPtrConstant(OrderedHashSet::kEntrySize)), - number_of_buckets); - UnsafeStoreFixedArrayElement( - table, entry_start, key, UPDATE_WRITE_BARRIER, - kTaggedSize * OrderedHashSet::HashTableStartIndex()); - UnsafeStoreFixedArrayElement( - table, entry_start, bucket_entry, - kTaggedSize * (OrderedHashSet::HashTableStartIndex() + - OrderedHashSet::kChainOffset)); - - // Update the bucket head. - UnsafeStoreFixedArrayElement( - table, bucket, SmiTag(occupancy), - OrderedHashSet::HashTableStartIndex() * kTaggedSize); + const TNode entry_start, CheckBounds check_bounds) { + StoreFixedArrayElement(table, entry_start, key, UPDATE_WRITE_BARRIER, + kTaggedSize * OrderedHashSet::HashTableStartIndex(), + check_bounds); +} - // Bump the elements count. - const TNode number_of_elements = - CAST(LoadObjectField(table, OrderedHashSet::NumberOfElementsOffset())); - StoreObjectFieldNoWriteBarrier(table, - OrderedHashSet::NumberOfElementsOffset(), - SmiAdd(number_of_elements, SmiConstant(1))); +template +TNode CollectionsBuiltinsAssembler::LoadKeyFromOrderedHashTableEntry( + const TNode table, const TNode entry, + CheckBounds check_bounds) { + return LoadFixedArrayElement( + table, entry, kTaggedSize * CollectionType::HashTableStartIndex(), + check_bounds); +} + +TNode CollectionsBuiltinsAssembler::LoadValueFromOrderedHashMapEntry( + const TNode table, const TNode entry, + CheckBounds check_bounds) { + return LoadFixedArrayElement( + table, entry, + kTaggedSize * (OrderedHashMap::HashTableStartIndex() + + OrderedHashMap::kValueOffset), + check_bounds); } TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) { @@ -1878,9 +1832,8 @@ TF_BUILTIN(SetPrototypeDelete, CollectionsBuiltinsAssembler) { BIND(&entry_found); // If we found the entry, mark the entry as deleted. - StoreFixedArrayElement(table, entry_start_position_or_hash.value(), - TheHoleConstant(), UPDATE_WRITE_BARRIER, - kTaggedSize * OrderedHashSet::HashTableStartIndex()); + StoreKeyInOrderedHashSetEntry(table, TheHoleConstant(), + entry_start_position_or_hash.value()); // Decrement the number of elements, increment the number of deleted elements. const TNode number_of_elements = SmiSub( @@ -1967,10 +1920,8 @@ TF_BUILTIN(MapPrototypeForEach, CollectionsBuiltinsAssembler) { NextSkipHoles(table, index, &done_loop); // Load the entry value as well. - TNode entry_value = LoadFixedArrayElement( - table, entry_start_position, - (OrderedHashMap::HashTableStartIndex() + OrderedHashMap::kValueOffset) * - kTaggedSize); + TNode entry_value = + LoadValueFromOrderedHashMapEntry(table, entry_start_position); // Invoke the {callback} passing the {entry_key}, {entry_value} and the // {receiver}. @@ -2057,10 +2008,7 @@ TF_BUILTIN(MapIteratorPrototypeNext, CollectionsBuiltinsAssembler) { // Check how to return the {key} (depending on {receiver} type). 
GotoIf(InstanceTypeEqual(receiver_instance_type, JS_MAP_KEY_ITERATOR_TYPE), &return_value); - var_value = LoadFixedArrayElement( - table, entry_start_position, - (OrderedHashMap::HashTableStartIndex() + OrderedHashMap::kValueOffset) * - kTaggedSize); + var_value = LoadValueFromOrderedHashMapEntry(table, entry_start_position); Branch(InstanceTypeEqual(receiver_instance_type, JS_MAP_VALUE_ITERATOR_TYPE), &return_value, &return_entry); @@ -2093,14 +2041,11 @@ TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) { ThrowIfNotInstanceType(context, receiver, JS_SET_TYPE, "Set.prototype.has"); - const TNode table = - LoadObjectField(CAST(receiver), JSMap::kTableOffset); - TNode index = - CAST(CallBuiltin(Builtin::kFindOrderedHashSetEntry, context, table, key)); + const TNode table = + CAST(LoadObjectField(CAST(receiver), JSMap::kTableOffset)); Label if_found(this), if_not_found(this); - Branch(SmiGreaterThanOrEqual(index, SmiConstant(0)), &if_found, - &if_not_found); + Branch(TableHasKey(context, table, key), &if_found, &if_not_found); BIND(&if_found); Return(TrueConstant()); @@ -2109,6 +2054,15 @@ TF_BUILTIN(SetPrototypeHas, CollectionsBuiltinsAssembler) { Return(FalseConstant()); } +TNode CollectionsBuiltinsAssembler::TableHasKey( + const TNode context, TNode table, + TNode key) { + TNode index = + CAST(CallBuiltin(Builtin::kFindOrderedHashSetEntry, context, table, key)); + + return SmiGreaterThanOrEqual(index, SmiConstant(0)); +} + TF_BUILTIN(SetPrototypeEntries, CollectionsBuiltinsAssembler) { const auto receiver = Parameter(Descriptor::kReceiver); const auto context = Parameter(Descriptor::kContext); @@ -2340,6 +2294,50 @@ TF_BUILTIN(FindOrderedHashSetEntry, CollectionsBuiltinsAssembler) { Return(SmiConstant(-1)); } +const TNode CollectionsBuiltinsAssembler::AddValueToKeyedGroup( + const TNode groups, const TNode key, + const TNode value, const TNode methodName) { + GrowCollection grow = [this, groups, methodName]() { + TNode new_groups = + CAST(CallRuntime(Runtime::kOrderedHashMapEnsureGrowable, + NoContextConstant(), groups, methodName)); + // The groups OrderedHashMap is not escaped to user script while grouping + // items, so there can't be live iterators. So we don't need to keep the + // pointer from the old table to the new one. 
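AddValueToKeyedGroup keeps one ordered entry per key: the first value for a key creates a fresh single-element list, later values for the same key are appended to it, and the order in which keys first appear is preserved. A hedged ordinary-C++ stand-in for that behaviour (not the builtin itself):

#include <string>
#include <utility>
#include <vector>

// A vector keeps key insertion order, mirroring what the OrderedHashMap
// gives the groupBy builtins.
template <typename K, typename V>
using KeyedGroups = std::vector<std::pair<K, std::vector<V>>>;

template <typename K, typename V>
void AddValueToKeyedGroup(KeyedGroups<K, V>& groups, const K& key,
                          const V& value) {
  for (auto& group : groups) {
    if (group.first == key) {
      group.second.push_back(value);  // existing key: append
      return;
    }
  }
  groups.emplace_back(key, std::vector<V>{value});  // new key: one-element list
}

Object.groupBy then exposes the finished groups as properties of a fresh object, and Map.groupBy as entries of a new Map.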
+ Label did_grow(this), done(this); + Branch(TaggedEqual(groups, new_groups), &done, &did_grow); + BIND(&did_grow); + { + StoreObjectFieldNoWriteBarrier(groups, OrderedHashMap::NextTableIndex(), + SmiConstant(0)); + Goto(&done); + } + BIND(&done); + return new_groups; + }; + + StoreAtEntry store_at_new_entry = + [this, key, value](const TNode table, + const TNode entry_start) { + TNode array = AllocateArrayList(SmiConstant(1)); + ArrayListSet(array, SmiConstant(0), value); + ArrayListSetLength(array, SmiConstant(1)); + StoreKeyValueInOrderedHashMapEntry(table, key, array, entry_start); + }; + + StoreAtEntry store_at_existing_entry = + [this, key, value](const TNode table, + const TNode entry_start) { + TNode array = + CAST(LoadValueFromOrderedHashMapEntry(table, entry_start)); + TNode new_array = ArrayListAdd(array, value); + StoreKeyValueInOrderedHashMapEntry(table, key, new_array, entry_start); + }; + + return AddToOrderedHashTable(groups, key, grow, store_at_new_entry, + store_at_existing_entry); +} + void WeakCollectionsBuiltinsAssembler::AddEntry( TNode table, TNode key_index, TNode key, TNode value, TNode number_of_elements) { diff --git a/v8/src/builtins/builtins-collections-gen.h b/v8/src/builtins/builtins-collections-gen.h index 723b68071..90eb21df3 100644 --- a/v8/src/builtins/builtins-collections-gen.h +++ b/v8/src/builtins/builtins-collections-gen.h @@ -141,6 +141,296 @@ class BaseCollectionsAssembler : public CodeStubAssembler { TNode elements, TNode index); }; +class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler { + public: + explicit CollectionsBuiltinsAssembler(compiler::CodeAssemblerState* state) + : BaseCollectionsAssembler(state) {} + + // Check whether |iterable| is a JS_MAP_KEY_ITERATOR_TYPE or + // JS_MAP_VALUE_ITERATOR_TYPE object that is not partially consumed and still + // has original iteration behavior. + void BranchIfIterableWithOriginalKeyOrValueMapIterator(TNode iterable, + TNode context, + Label* if_true, + Label* if_false); + + // Check whether |iterable| is a JS_SET_TYPE or JS_SET_VALUE_ITERATOR_TYPE + // object that still has original iteration behavior. In case of the iterator, + // the iterator also must not have been partially consumed. + void BranchIfIterableWithOriginalValueSetIterator(TNode iterable, + TNode context, + Label* if_true, + Label* if_false); + + // Adds an element to a set if the element is not already in the set. + TNode AddToSetTable(TNode context, + TNode table, + TNode key, + TNode method_name); + // Direct iteration helpers. + template + TorqueStructKeyIndexPair NextKeyIndexPairUnmodifiedTable( + const TNode table, const TNode number_of_buckets, + const TNode used_capacity, const TNode index, + Label* if_end); + + template + TorqueStructKeyIndexPair NextKeyIndexPair(const TNode table, + const TNode index, + Label* if_end); + + TorqueStructKeyValueIndexTuple NextKeyValueIndexTupleUnmodifiedTable( + const TNode table, const TNode number_of_buckets, + const TNode used_capacity, const TNode index, + Label* if_end); + + TorqueStructKeyValueIndexTuple NextKeyValueIndexTuple( + const TNode table, const TNode index, + Label* if_end); + + // Checks if the set/map contains a key. + TNode TableHasKey(const TNode context, + TNode table, TNode key); + TNode TableHasKey(const TNode context, + TNode table, TNode key); + + // Adds {value} to a FixedArray keyed by {key} in {groups}. + // + // Utility used by Object.groupBy and Map.groupBy. 
+ const TNode AddValueToKeyedGroup( + const TNode groups, const TNode key, + const TNode value, const TNode methodName); + + // Normalizes -0 to +0. + const TNode NormalizeNumberKey(const TNode key); + + protected: + template + TNode AllocateJSCollectionIterator( + const TNode context, int map_index, + const TNode collection); + TNode AllocateTable(Variant variant, + TNode at_least_space_for) override; + TNode GetHash(const TNode key); + TNode CallGetHashRaw(const TNode key); + TNode CallGetOrCreateHashRaw(const TNode key); + + // Transitions the iterator to the non obsolete backing store. + // This is a NOP if the [table] is not obsolete. + template + using UpdateInTransition = std::function table, + const TNode index)>; + template + std::pair, TNode> Transition( + const TNode table, const TNode index, + UpdateInTransition const& update_in_transition); + template + std::pair, TNode> TransitionAndUpdate( + const TNode iterator); + template + std::tuple, TNode, TNode> NextSkipHoles( + TNode table, TNode index, Label* if_end); + template + std::tuple, TNode, TNode> NextSkipHoles( + TNode table, TNode number_of_buckets, + TNode used_capacity, TNode index, Label* if_end); + + // Specialization for Smi. + // The {result} variable will contain the entry index if the key was found, + // or the hash code otherwise. + template + void FindOrderedHashTableEntryForSmiKey(TNode table, + TNode key_tagged, + TVariable* result, + Label* entry_found, Label* not_found); + void SameValueZeroSmi(TNode key_smi, TNode candidate_key, + Label* if_same, Label* if_not_same); + + // Specialization for heap numbers. + // The {result} variable will contain the entry index if the key was found, + // or the hash code otherwise. + void SameValueZeroHeapNumber(TNode key_float, + TNode candidate_key, Label* if_same, + Label* if_not_same); + template + void FindOrderedHashTableEntryForHeapNumberKey( + TNode table, TNode key_heap_number, + TVariable* result, Label* entry_found, Label* not_found); + + // Specialization for bigints. + // The {result} variable will contain the entry index if the key was found, + // or the hash code otherwise. + void SameValueZeroBigInt(TNode key, TNode candidate_key, + Label* if_same, Label* if_not_same); + template + void FindOrderedHashTableEntryForBigIntKey(TNode table, + TNode key_big_int, + TVariable* result, + Label* entry_found, + Label* not_found); + + // Specialization for string. + // The {result} variable will contain the entry index if the key was found, + // or the hash code otherwise. + template + void FindOrderedHashTableEntryForStringKey(TNode table, + TNode key_tagged, + TVariable* result, + Label* entry_found, + Label* not_found); + TNode ComputeStringHash(TNode string_key); + void SameValueZeroString(TNode key_string, + TNode candidate_key, Label* if_same, + Label* if_not_same); + + // Specialization for non-strings, non-numbers. For those we only need + // reference equality to compare the keys. + // The {result} variable will contain the entry index if the key was found, + // or the hash code otherwise. If the hash-code has not been computed, it + // should be Smi -1. + template + void FindOrderedHashTableEntryForOtherKey(TNode table, + TNode key_heap_object, + TVariable* result, + Label* entry_found, + Label* not_found); + + // Generates code to add an entry keyed by {key} to an instance of + // OrderedHashTable subclass {table}. + // + // Takes 3 functions: + // - {grow} generates code to return a OrderedHashTable subclass instance + // with space to store the entry. 
+ // - {store_new_entry} generates code to store into a new entry, for the + // case when {table} didn't already have an entry keyed by {key}. + // - {store_existing_entry} generates code to store into an existing entry, + // for the case when {table} already has an entry keyed by {key}. + // + // Both {store_new_entry} and {store_existing_entry} take the table and an + // offset to the entry as parameters. + template + using GrowCollection = std::function()>; + template + using StoreAtEntry = std::function table, + const TNode entry_start)>; + template + TNode AddToOrderedHashTable( + const TNode table, const TNode key, + const GrowCollection& grow, + const StoreAtEntry& store_at_new_entry, + const StoreAtEntry& store_at_existing_entry); + + template + void TryLookupOrderedHashTableIndex(const TNode table, + const TNode key, + TVariable* result, + Label* if_entry_found, + Label* if_not_found); + + // Generates code to store a new entry into {table}, connecting to the bucket + // chain, and updating the bucket head. {store_new_entry} is called to + // generate the code to store the payload (e.g., the key and value for + // OrderedHashMap). + template + void StoreOrderedHashTableNewEntry( + const TNode table, const TNode hash, + const TNode number_of_buckets, const TNode occupancy, + const StoreAtEntry& store_at_new_entry); + + // Store payload (key, value, or both) in {table} at {entry}. Does not connect + // the bucket chain and update the bucket head. + void StoreValueInOrderedHashMapEntry( + const TNode table, const TNode value, + const TNode entry, + CheckBounds check_bounds = CheckBounds::kAlways); + void StoreKeyValueInOrderedHashMapEntry( + const TNode table, const TNode key, + const TNode value, const TNode entry, + CheckBounds check_bounds = CheckBounds::kAlways); + void StoreKeyInOrderedHashSetEntry( + const TNode table, const TNode key, + const TNode entry, + CheckBounds check_bounds = CheckBounds::kAlways); + + void UnsafeStoreValueInOrderedHashMapEntry(const TNode table, + const TNode value, + const TNode entry) { + return StoreValueInOrderedHashMapEntry(table, value, entry, + CheckBounds::kDebugOnly); + } + void UnsafeStoreKeyValueInOrderedHashMapEntry( + const TNode table, const TNode key, + const TNode value, const TNode entry) { + return StoreKeyValueInOrderedHashMapEntry(table, key, value, entry, + CheckBounds::kDebugOnly); + } + void UnsafeStoreKeyInOrderedHashSetEntry(const TNode table, + const TNode key, + const TNode entry) { + return StoreKeyInOrderedHashSetEntry(table, key, entry, + CheckBounds::kDebugOnly); + } + + // Load payload (key or value) from {table} at {entry}. + template + TNode LoadKeyFromOrderedHashTableEntry( + const TNode table, const TNode entry, + CheckBounds check_bounds = CheckBounds::kAlways); + TNode LoadValueFromOrderedHashMapEntry( + const TNode table, const TNode entry, + CheckBounds check_bounds = CheckBounds::kAlways); + + template + TNode UnsafeLoadKeyFromOrderedHashTableEntry( + const TNode table, const TNode entry) { + return LoadKeyFromOrderedHashTableEntry(table, entry, + CheckBounds::kDebugOnly); + } + TNode UnsafeLoadValueFromOrderedHashMapEntry( + const TNode table, const TNode entry) { + return LoadValueFromOrderedHashMapEntry(table, entry, + CheckBounds::kDebugOnly); + } + + // Create a JSArray with PACKED_ELEMENTS kind from a Map.prototype.keys() or + // Map.prototype.values() iterator. The iterator is assumed to satisfy + // IterableWithOriginalKeyOrValueMapIterator. 
This function will skip the + // iterator and iterate directly on the underlying hash table. In the end it + // will update the state of the iterator to 'exhausted'. + TNode MapIteratorToList(TNode context, + TNode iterator); + + // Create a JSArray with PACKED_ELEMENTS kind from a Set.prototype.keys() or + // Set.prototype.values() iterator, or a Set. The |iterable| is assumed to + // satisfy IterableWithOriginalValueSetIterator. This function will skip the + // iterator and iterate directly on the underlying hash table. In the end, if + // |iterable| is an iterator, it will update the state of the iterator to + // 'exhausted'. + TNode SetOrSetIteratorToList(TNode context, + TNode iterable); + + void BranchIfMapIteratorProtectorValid(Label* if_true, Label* if_false); + void BranchIfSetIteratorProtectorValid(Label* if_true, Label* if_false); + + // Builds code that finds OrderedHashTable entry for a key with hash code + // {hash} with using the comparison code generated by {key_compare}. The code + // jumps to {entry_found} if the key is found, or to {not_found} if the key + // was not found. In the {entry_found} branch, the variable + // entry_start_position will be bound to the index of the entry (relative to + // OrderedHashTable::kHashTableStartIndex). + // + // The {CollectionType} template parameter stands for the particular instance + // of OrderedHashTable, it should be OrderedHashMap or OrderedHashSet. + template + void FindOrderedHashTableEntry( + const TNode table, const TNode hash, + const std::function, Label*, Label*)>& key_compare, + TVariable* entry_start_position, Label* entry_found, + Label* not_found); + + TNode ComputeUnseededHash(TNode key); +}; + class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler { public: explicit WeakCollectionsBuiltinsAssembler(compiler::CodeAssemblerState* state) @@ -203,6 +493,9 @@ class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler { TNode ValueIndexFromKeyIndex(TNode key_index); }; +// Controls the key coercion behavior for Object.groupBy and Map.groupBy. +enum class GroupByCoercionMode { kZero, kProperty }; + } // namespace internal } // namespace v8 diff --git a/v8/src/builtins/builtins-definitions.h b/v8/src/builtins/builtins-definitions.h index c262aecfb..e6d4b6b1d 100644 --- a/v8/src/builtins/builtins-definitions.h +++ b/v8/src/builtins/builtins-definitions.h @@ -18,7 +18,7 @@ namespace internal { // TFJ: Builtin in Turbofan, with JS linkage (callable as Javascript function). // Args: name, arguments count, explicit argument names... // TFS: Builtin in Turbofan, with CodeStub linkage. -// Args: name, explicit argument names... +// Args: name, needs context, explicit argument names... // TFC: Builtin in Turbofan, with CodeStub linkage and custom descriptor. // Args: name, interface descriptor // TFH: Handlers in Turbofan, with CodeStub linkage. 
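Throughout the builtin list that follows, every TFS entry gains an explicit NeedsContext value. As a sketch of the X-macro pattern involved (a simplified stand-in, not V8's actual macro machinery), the new value is simply one more column that every expansion site receives and may consume or ignore:

#include <iostream>

enum class NeedsContext { kNo, kYes };

// Two entries lifted from the list below; the trailing names are argument names.
#define EXAMPLE_BUILTIN_LIST(TFS)                              \
  TFS(HasProperty, NeedsContext::kYes, kObject, kKey)          \
  TFS(CopyFastSmiOrObjectElements, NeedsContext::kNo, kObject)

// One possible expansion: report whether each builtin expects a context.
#define PRINT_BUILTIN(Name, Context, ...)                      \
  std::cout << #Name << (((Context) == NeedsContext::kYes)     \
                             ? ": needs a context\n"           \
                             : ": no context\n");

int main() { EXAMPLE_BUILTIN_LIST(PRINT_BUILTIN) }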
@@ -144,7 +144,7 @@ namespace internal { ASM(JSConstructStubGeneric, ConstructStub) \ ASM(JSBuiltinsConstructStub, ConstructStub) \ TFC(FastNewObject, FastNewObject) \ - TFS(FastNewClosure, kSharedFunctionInfo, kFeedbackCell) \ + TFS(FastNewClosure, NeedsContext::kYes, kSharedFunctionInfo, kFeedbackCell) \ /* ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget) */ \ TFC(ConstructProxy, JSTrampoline) \ \ @@ -169,7 +169,7 @@ namespace internal { TFC(StringSubstring, StringSubstring) \ \ /* OrderedHashTable helpers */ \ - TFS(OrderedHashTableHealIndex, kTable, kIndex) \ + TFS(OrderedHashTableHealIndex, NeedsContext::kYes, kTable, kIndex) \ \ /* Interpreter */ \ /* InterpreterEntryTrampoline dispatches to the interpreter to run a */ \ @@ -232,11 +232,14 @@ namespace internal { ASM(ContinueToJavaScriptBuiltinWithResult, ContinueToBuiltin) \ \ /* API callback handling */ \ - ASM(CallApiCallback, ApiCallback) \ + ASM(CallApiCallbackGeneric, CallApiCallbackGeneric) \ + ASM(CallApiCallbackNoSideEffects, CallApiCallbackOptimized) \ + ASM(CallApiCallbackWithSideEffects, CallApiCallbackOptimized) \ ASM(CallApiGetter, ApiGetter) \ - CPP(HandleApiCall) \ - CPP(HandleApiCallAsFunction) \ - CPP(HandleApiCallAsConstructor) \ + TFC(HandleApiCallOrConstruct, JSTrampoline) \ + CPP(HandleApiConstruct) \ + CPP(HandleApiCallAsFunctionDelegate) \ + CPP(HandleApiCallAsConstructorDelegate) \ \ /* Adapters for Turbofan into runtime */ \ TFC(AllocateInYoungGeneration, Allocate) \ @@ -247,7 +250,7 @@ namespace internal { TFC(NewHeapNumber, NewHeapNumber) \ \ /* TurboFan support builtins */ \ - TFS(CopyFastSmiOrObjectElements, kObject) \ + TFS(CopyFastSmiOrObjectElements, NeedsContext::kNo, kObject) \ TFC(GrowFastDoubleElements, GrowArrayElements) \ TFC(GrowFastSmiOrObjectElements, GrowArrayElements) \ \ @@ -306,16 +309,16 @@ namespace internal { TFH(HasIndexedInterceptorIC, LoadWithVector) \ \ /* Microtask helpers */ \ - TFS(EnqueueMicrotask, kMicrotask) \ + TFS(EnqueueMicrotask, NeedsContext::kYes, kMicrotask) \ ASM(RunMicrotasksTrampoline, RunMicrotasksEntry) \ TFC(RunMicrotasks, RunMicrotasks) \ \ /* Object property helpers */ \ - TFS(HasProperty, kObject, kKey) \ - TFS(DeleteProperty, kObject, kKey, kLanguageMode) \ + TFS(HasProperty, NeedsContext::kYes, kObject, kKey) \ + TFS(DeleteProperty, NeedsContext::kYes, kObject, kKey, kLanguageMode) \ /* ES #sec-copydataproperties */ \ - TFS(CopyDataProperties, kTarget, kSource) \ - TFS(SetDataProperties, kTarget, kSource) \ + TFS(CopyDataProperties, NeedsContext::kYes, kTarget, kSource) \ + TFS(SetDataProperties, NeedsContext::kYes, kTarget, kSource) \ TFC(CopyDataPropertiesWithExcludedPropertiesOnStack, \ CopyDataPropertiesWithExcludedPropertiesOnStack) \ TFC(CopyDataPropertiesWithExcludedProperties, \ @@ -373,21 +376,24 @@ namespace internal { /* ES6 #sec-array.prototype.fill */ \ CPP(ArrayPrototypeFill) \ /* ES7 #sec-array.prototype.includes */ \ - TFS(ArrayIncludesSmi, kElements, kSearchElement, kLength, kFromIndex) \ - TFS(ArrayIncludesSmiOrObject, kElements, kSearchElement, kLength, \ - kFromIndex) \ - TFS(ArrayIncludesPackedDoubles, kElements, kSearchElement, kLength, \ - kFromIndex) \ - TFS(ArrayIncludesHoleyDoubles, kElements, kSearchElement, kLength, \ - kFromIndex) \ + TFS(ArrayIncludesSmi, NeedsContext::kYes, kElements, kSearchElement, \ + kLength, kFromIndex) \ + TFS(ArrayIncludesSmiOrObject, NeedsContext::kYes, kElements, kSearchElement, \ + kLength, kFromIndex) \ + TFS(ArrayIncludesPackedDoubles, NeedsContext::kYes, kElements, \ + kSearchElement, 
kLength, kFromIndex) \ + TFS(ArrayIncludesHoleyDoubles, NeedsContext::kYes, kElements, \ + kSearchElement, kLength, kFromIndex) \ TFJ(ArrayIncludes, kDontAdaptArgumentsSentinel) \ /* ES6 #sec-array.prototype.indexof */ \ - TFS(ArrayIndexOfSmi, kElements, kSearchElement, kLength, kFromIndex) \ - TFS(ArrayIndexOfSmiOrObject, kElements, kSearchElement, kLength, kFromIndex) \ - TFS(ArrayIndexOfPackedDoubles, kElements, kSearchElement, kLength, \ - kFromIndex) \ - TFS(ArrayIndexOfHoleyDoubles, kElements, kSearchElement, kLength, \ + TFS(ArrayIndexOfSmi, NeedsContext::kYes, kElements, kSearchElement, kLength, \ kFromIndex) \ + TFS(ArrayIndexOfSmiOrObject, NeedsContext::kYes, kElements, kSearchElement, \ + kLength, kFromIndex) \ + TFS(ArrayIndexOfPackedDoubles, NeedsContext::kYes, kElements, \ + kSearchElement, kLength, kFromIndex) \ + TFS(ArrayIndexOfHoleyDoubles, NeedsContext::kYes, kElements, kSearchElement, \ + kLength, kFromIndex) \ TFJ(ArrayIndexOf, kDontAdaptArgumentsSentinel) \ /* ES6 #sec-array.prototype.pop */ \ CPP(ArrayPop) \ @@ -404,9 +410,9 @@ namespace internal { CPP(ArrayUnshift) \ CPP(ArrayFromAsync) \ /* Support for Array.from and other array-copying idioms */ \ - TFS(CloneFastJSArray, kSource) \ - TFS(CloneFastJSArrayFillingHoles, kSource) \ - TFS(ExtractFastJSArray, kSource, kBegin, kCount) \ + TFS(CloneFastJSArray, NeedsContext::kYes, kSource) \ + TFS(CloneFastJSArrayFillingHoles, NeedsContext::kYes, kSource) \ + TFS(ExtractFastJSArray, NeedsContext::kYes, kSource, kBegin, kCount) \ /* ES6 #sec-array.prototype.entries */ \ TFJ(ArrayPrototypeEntries, kJSArgcReceiverSlots, kReceiver) \ /* ES6 #sec-array.prototype.keys */ \ @@ -416,9 +422,10 @@ namespace internal { /* ES6 #sec-%arrayiteratorprototype%.next */ \ TFJ(ArrayIteratorPrototypeNext, kJSArgcReceiverSlots, kReceiver) \ /* https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray */ \ - TFS(FlattenIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth) \ - TFS(FlatMapIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth, \ - kMapperFunction, kThisArg) \ + TFS(FlattenIntoArray, NeedsContext::kYes, kTarget, kSource, kSourceLength, \ + kStart, kDepth) \ + TFS(FlatMapIntoArray, NeedsContext::kYes, kTarget, kSource, kSourceLength, \ + kStart, kDepth, kMapperFunction, kThisArg) \ /* https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flat */ \ TFJ(ArrayPrototypeFlat, kDontAdaptArgumentsSentinel) \ /* https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap */ \ @@ -436,12 +443,14 @@ namespace internal { CPP(ArrayBufferPrototypeTransferToFixedLength) \ \ /* AsyncFunction */ \ - TFS(AsyncFunctionEnter, kClosure, kReceiver) \ - TFS(AsyncFunctionReject, kAsyncFunctionObject, kReason) \ - TFS(AsyncFunctionResolve, kAsyncFunctionObject, kValue) \ + TFS(AsyncFunctionEnter, NeedsContext::kYes, kClosure, kReceiver) \ + TFS(AsyncFunctionReject, NeedsContext::kYes, kAsyncFunctionObject, kReason) \ + TFS(AsyncFunctionResolve, NeedsContext::kYes, kAsyncFunctionObject, kValue) \ TFC(AsyncFunctionLazyDeoptContinuation, AsyncFunctionStackParameter) \ - TFS(AsyncFunctionAwaitCaught, kAsyncFunctionObject, kValue) \ - TFS(AsyncFunctionAwaitUncaught, kAsyncFunctionObject, kValue) \ + TFS(AsyncFunctionAwaitCaught, NeedsContext::kYes, kAsyncFunctionObject, \ + kValue) \ + TFS(AsyncFunctionAwaitUncaught, NeedsContext::kYes, kAsyncFunctionObject, \ + kValue) \ TFJ(AsyncFunctionAwaitRejectClosure, kJSArgcReceiverSlots + 1, kReceiver, \ kSentError) \ TFJ(AsyncFunctionAwaitResolveClosure, kJSArgcReceiverSlots + 1, 
kReceiver, \ @@ -592,10 +601,10 @@ namespace internal { CPP(FunctionPrototypeToString) \ \ /* Belongs to Objects but is a dependency of GeneratorPrototypeResume */ \ - TFS(CreateIterResultObject, kValue, kDone) \ + TFS(CreateIterResultObject, NeedsContext::kYes, kValue, kDone) \ \ /* Generator and Async */ \ - TFS(CreateGeneratorObject, kClosure, kReceiver) \ + TFS(CreateGeneratorObject, NeedsContext::kYes, kClosure, kReceiver) \ CPP(GeneratorFunctionConstructor) \ /* ES6 #sec-generator.prototype.next */ \ TFJ(GeneratorPrototypeNext, kDontAdaptArgumentsSentinel) \ @@ -688,18 +697,20 @@ namespace internal { \ /* IterableToList */ \ /* ES #sec-iterabletolist */ \ - TFS(IterableToList, kIterable, kIteratorFn) \ - TFS(IterableToFixedArray, kIterable, kIteratorFn) \ - TFS(IterableToListWithSymbolLookup, kIterable) \ - TFS(IterableToFixedArrayWithSymbolLookupSlow, kIterable) \ - TFS(IterableToListMayPreserveHoles, kIterable, kIteratorFn) \ - IF_WASM(TFS, IterableToFixedArrayForWasm, kIterable, kExpectedLength) \ + TFS(IterableToList, NeedsContext::kYes, kIterable, kIteratorFn) \ + TFS(IterableToFixedArray, NeedsContext::kYes, kIterable, kIteratorFn) \ + TFS(IterableToListWithSymbolLookup, NeedsContext::kYes, kIterable) \ + TFS(IterableToFixedArrayWithSymbolLookupSlow, NeedsContext::kYes, kIterable) \ + TFS(IterableToListMayPreserveHoles, NeedsContext::kYes, kIterable, \ + kIteratorFn) \ + IF_WASM(TFS, IterableToFixedArrayForWasm, NeedsContext::kYes, kIterable, \ + kExpectedLength) \ \ /* #sec-createstringlistfromiterable */ \ - TFS(StringListFromIterable, kIterable) \ + TFS(StringListFromIterable, NeedsContext::kYes, kIterable) \ \ /* Map */ \ - TFS(FindOrderedHashMapEntry, kTable, kKey) \ + TFS(FindOrderedHashMapEntry, NeedsContext::kYes, kTable, kKey) \ TFJ(MapConstructor, kDontAdaptArgumentsSentinel) \ TFJ(MapPrototypeSet, kJSArgcReceiverSlots + 2, kReceiver, kKey, kValue) \ TFJ(MapPrototypeDelete, kJSArgcReceiverSlots + 1, kReceiver, kKey) \ @@ -718,7 +729,7 @@ namespace internal { TFJ(MapPrototypeValues, kJSArgcReceiverSlots, kReceiver) \ /* ES #sec-%mapiteratorprototype%.next */ \ TFJ(MapIteratorPrototypeNext, kJSArgcReceiverSlots, kReceiver) \ - TFS(MapIteratorToList, kSource) \ + TFS(MapIteratorToList, NeedsContext::kYes, kSource) \ \ /* ES #sec-number-constructor */ \ CPP(NumberPrototypeToExponential) \ @@ -823,7 +834,7 @@ namespace internal { CPP(ObjectPrototypeGetProto) \ CPP(ObjectPrototypeSetProto) \ CPP(ObjectSeal) \ - TFS(ObjectToString, kReceiver) \ + TFS(ObjectToString, NeedsContext::kYes, kReceiver) \ TFJ(ObjectValues, kJSArgcReceiverSlots + 1, kReceiver, kObject) \ \ /* instanceof */ \ @@ -833,9 +844,9 @@ namespace internal { TFC(InstanceOf_Baseline, Compare_Baseline) \ \ /* for-in */ \ - TFS(ForInEnumerate, kReceiver) \ + TFS(ForInEnumerate, NeedsContext::kYes, kReceiver) \ TFC(ForInPrepare, ForInPrepare) \ - TFS(ForInFilter, kKey, kObject) \ + TFS(ForInFilter, NeedsContext::kYes, kKey, kObject) \ \ /* Reflect */ \ ASM(ReflectApply, JSTrampoline) \ @@ -869,13 +880,15 @@ namespace internal { CPP(RegExpRightContextGetter) \ \ /* RegExp helpers */ \ - TFS(RegExpExecAtom, kRegExp, kString, kLastIndex, kMatchInfo) \ - TFS(RegExpExecInternal, kRegExp, kString, kLastIndex, kMatchInfo) \ + TFS(RegExpExecAtom, NeedsContext::kYes, kRegExp, kString, kLastIndex, \ + kMatchInfo) \ + TFS(RegExpExecInternal, NeedsContext::kYes, kRegExp, kString, kLastIndex, \ + kMatchInfo) \ ASM(RegExpInterpreterTrampoline, CCall) \ ASM(RegExpExperimentalTrampoline, CCall) \ \ /* Set */ \ - 
TFS(FindOrderedHashSetEntry, kTable, kKey) \ + TFS(FindOrderedHashSetEntry, NeedsContext::kYes, kTable, kKey) \ TFJ(SetConstructor, kDontAdaptArgumentsSentinel) \ TFJ(SetPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \ TFJ(SetPrototypeAdd, kJSArgcReceiverSlots + 1, kReceiver, kKey) \ @@ -891,11 +904,12 @@ namespace internal { TFJ(SetPrototypeValues, kJSArgcReceiverSlots, kReceiver) \ /* ES #sec-%setiteratorprototype%.next */ \ TFJ(SetIteratorPrototypeNext, kJSArgcReceiverSlots, kReceiver) \ - TFS(SetOrSetIteratorToList, kSource) \ + TFS(SetOrSetIteratorToList, NeedsContext::kYes, kSource) \ \ /* ShadowRealm */ \ CPP(ShadowRealmConstructor) \ - TFS(ShadowRealmGetWrappedValue, kCreationContext, kTargetContext, kValue) \ + TFS(ShadowRealmGetWrappedValue, NeedsContext::kYes, kCreationContext, \ + kTargetContext, kValue) \ CPP(ShadowRealmPrototypeEvaluate) \ TFJ(ShadowRealmPrototypeImportValue, kJSArgcReceiverSlots + 2, kReceiver, \ kSpecifier, kExportName) \ @@ -985,6 +999,7 @@ namespace internal { \ /* Wasm */ \ IF_WASM(ASM, GenericJSToWasmWrapper, WasmDummy) \ + IF_WASM(ASM, NewGenericJSToWasmWrapper, WasmNewJSToWasmWrapper) \ IF_WASM(ASM, WasmReturnPromiseOnSuspend, WasmDummy) \ IF_WASM(ASM, WasmSuspend, WasmSuspend) \ IF_WASM(ASM, WasmResume, WasmDummy) \ @@ -1000,7 +1015,7 @@ namespace internal { \ /* WeakMap */ \ TFJ(WeakMapConstructor, kDontAdaptArgumentsSentinel) \ - TFS(WeakMapLookupHashIndex, kTable, kKey) \ + TFS(WeakMapLookupHashIndex, NeedsContext::kYes, kTable, kKey) \ TFJ(WeakMapGet, kJSArgcReceiverSlots + 1, kReceiver, kKey) \ TFJ(WeakMapPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \ TFJ(WeakMapPrototypeSet, kJSArgcReceiverSlots + 2, kReceiver, kKey, kValue) \ @@ -1013,8 +1028,8 @@ namespace internal { TFJ(WeakSetPrototypeDelete, kJSArgcReceiverSlots + 1, kReceiver, kValue) \ \ /* WeakSet / WeakMap Helpers */ \ - TFS(WeakCollectionDelete, kCollection, kKey) \ - TFS(WeakCollectionSet, kCollection, kKey, kValue) \ + TFS(WeakCollectionDelete, NeedsContext::kYes, kCollection, kKey) \ + TFS(WeakCollectionSet, NeedsContext::kYes, kCollection, kKey, kValue) \ \ /* JS Structs and friends */ \ CPP(SharedSpaceJSObjectHasInstance) \ @@ -1034,11 +1049,12 @@ namespace internal { \ /* AsyncGenerator */ \ \ - TFS(AsyncGeneratorResolve, kGenerator, kValue, kDone) \ - TFS(AsyncGeneratorReject, kGenerator, kValue) \ - TFS(AsyncGeneratorYieldWithAwait, kGenerator, kValue, kIsCaught) \ - TFS(AsyncGeneratorReturn, kGenerator, kValue, kIsCaught) \ - TFS(AsyncGeneratorResumeNext, kGenerator) \ + TFS(AsyncGeneratorResolve, NeedsContext::kYes, kGenerator, kValue, kDone) \ + TFS(AsyncGeneratorReject, NeedsContext::kYes, kGenerator, kValue) \ + TFS(AsyncGeneratorYieldWithAwait, NeedsContext::kYes, kGenerator, kValue, \ + kIsCaught) \ + TFS(AsyncGeneratorReturn, NeedsContext::kYes, kGenerator, kValue, kIsCaught) \ + TFS(AsyncGeneratorResumeNext, NeedsContext::kYes, kGenerator) \ \ /* AsyncGeneratorFunction( p1, p2, ... pn, body ) */ \ /* proposal-async-iteration/#sec-asyncgeneratorfunction-constructor */ \ @@ -1055,8 +1071,10 @@ namespace internal { \ /* Await (proposal-async-iteration/#await), with resume behaviour */ \ /* specific to Async Generators. Internal / Not exposed to JS code. 
*/ \ - TFS(AsyncGeneratorAwaitCaught, kAsyncGeneratorObject, kValue) \ - TFS(AsyncGeneratorAwaitUncaught, kAsyncGeneratorObject, kValue) \ + TFS(AsyncGeneratorAwaitCaught, NeedsContext::kYes, kAsyncGeneratorObject, \ + kValue) \ + TFS(AsyncGeneratorAwaitUncaught, NeedsContext::kYes, kAsyncGeneratorObject, \ + kValue) \ TFJ(AsyncGeneratorAwaitResolveClosure, kJSArgcReceiverSlots + 1, kReceiver, \ kValue) \ TFJ(AsyncGeneratorAwaitRejectClosure, kJSArgcReceiverSlots + 1, kReceiver, \ @@ -1093,21 +1111,22 @@ namespace internal { ASM(DirectCEntry, CEntryDummy) \ \ /* String helpers */ \ - TFS(StringAdd_CheckNone, kLeft, kRight) \ - TFS(SubString, kString, kFrom, kTo) \ + TFS(StringAdd_CheckNone, NeedsContext::kYes, kLeft, kRight) \ + TFS(SubString, NeedsContext::kYes, kString, kFrom, kTo) \ \ /* Miscellaneous */ \ ASM(DoubleToI, Void) \ TFC(GetProperty, GetProperty) \ - TFS(GetPropertyWithReceiver, kObject, kKey, kReceiver, kOnNonExistent) \ - TFS(SetProperty, kReceiver, kKey, kValue) \ - TFS(CreateDataProperty, kReceiver, kKey, kValue) \ - TFS(GetOwnPropertyDescriptor, kReceiver, kKey) \ + TFS(GetPropertyWithReceiver, NeedsContext::kYes, kObject, kKey, kReceiver, \ + kOnNonExistent) \ + TFS(SetProperty, NeedsContext::kYes, kReceiver, kKey, kValue) \ + TFS(CreateDataProperty, NeedsContext::kYes, kReceiver, kKey, kValue) \ + TFS(GetOwnPropertyDescriptor, NeedsContext::kYes, kReceiver, kKey) \ ASM(MemCopyUint8Uint8, CCall) \ ASM(MemMove, CCall) \ TFC(FindNonDefaultConstructorOrConstruct, \ FindNonDefaultConstructorOrConstruct) \ - TFS(OrdinaryGetOwnPropertyDescriptor, kReceiver, kKey) \ + TFS(OrdinaryGetOwnPropertyDescriptor, NeedsContext::kYes, kReceiver, kKey) \ \ /* Trace */ \ CPP(IsTraceCategoryEnabled) \ @@ -1903,7 +1922,7 @@ namespace internal { TFJ(StringPrototypeToLowerCaseIntl, kJSArgcReceiverSlots, kReceiver) \ /* ES #sec-string.prototype.touppercase */ \ CPP(StringPrototypeToUpperCaseIntl) \ - TFS(StringToLowerCaseIntl, kString) \ + TFS(StringToLowerCaseIntl, NeedsContext::kYes, kString) \ \ /* Temporal */ \ /* Temporal #sec-temporal.calendar.prototype.era */ \ diff --git a/v8/src/builtins/builtins-intl-gen.cc b/v8/src/builtins/builtins-intl-gen.cc index 890266240..81b86b8d8 100644 --- a/v8/src/builtins/builtins-intl-gen.cc +++ b/v8/src/builtins/builtins-intl-gen.cc @@ -123,8 +123,10 @@ void IntlBuiltinsAssembler::ToLowerCaseImpl( Label fast(this), check_locale(this); // Check for fast locales. GotoIf(IsUndefined(maybe_locales), &fast); - // Passing a smi here is equivalent to passing an empty list of locales. - GotoIf(TaggedIsSmi(maybe_locales), &fast); + // Passing a Smi as locales requires performing a ToObject conversion + // followed by reading the length property and the "indexed" properties of + // it until a valid locale is found. + GotoIf(TaggedIsSmi(maybe_locales), &runtime); GotoIfNot(IsString(CAST(maybe_locales)), &runtime); GotoIfNot(IsSeqOneByteString(CAST(maybe_locales)), &runtime); TNode locale = CAST(maybe_locales); diff --git a/v8/src/builtins/builtins-object-gen.cc b/v8/src/builtins/builtins-object-gen.cc index f0d21bba5..9f491b61b 100644 --- a/v8/src/builtins/builtins-object-gen.cc +++ b/v8/src/builtins/builtins-object-gen.cc @@ -954,59 +954,13 @@ TF_BUILTIN(ObjectToString, ObjectBuiltinsAssembler) { BIND(&checkstringtag); { - // Check if all relevant maps (including the prototype maps) don't - // have any interesting symbols (i.e. that none of them have the - // @@toStringTag property). 
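The hand-written loop being removed just below from ObjectToString walked the receiver's prototype chain and fell back to the generic path as soon as any map (or its backing dictionary) might carry an interesting symbol such as @@toStringTag; GetInterestingProperty now encapsulates that walk. A very rough standalone sketch of the idea, with invented names:

// Each object in the chain is reduced to the one bit the fast path cares about.
struct ProtoNode {
  bool may_have_interesting_symbols;
  const ProtoNode* prototype;  // nullptr terminates the chain
};

// True when no object on the chain can define @@toStringTag, so the default
// "[object ...]" result can be returned without a property lookup.
bool CanUseDefaultTag(const ProtoNode* holder) {
  for (; holder != nullptr; holder = holder->prototype) {
    if (holder->may_have_interesting_symbols) return false;
  }
  return true;
}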
- Label loop(this, {&var_holder, &var_holder_map}), return_default(this), - return_generic(this, Label::kDeferred); - Goto(&loop); - BIND(&loop); - { - Label interesting_symbols(this); - TNode holder = var_holder.value(); - TNode holder_map = var_holder_map.value(); - GotoIf(IsNull(holder), &return_default); - TNode holder_bit_field3 = LoadMapBitField3(holder_map); - GotoIf(IsSetWord32( - holder_bit_field3), - &interesting_symbols); - var_holder = LoadMapPrototype(holder_map); - var_holder_map = LoadMap(var_holder.value()); - Goto(&loop); - BIND(&interesting_symbols); - { - // Check flags for dictionary objects. - GotoIf(IsClearWord32(holder_bit_field3), - &return_generic); - GotoIf( - InstanceTypeEqual(LoadMapInstanceType(holder_map), JS_PROXY_TYPE), - &if_proxy); - TNode properties = - LoadObjectField(holder, JSObject::kPropertiesOrHashOffset); - CSA_DCHECK(this, TaggedIsNotSmi(properties)); - // TODO(pthier): Support swiss dictionaries. - if constexpr (!V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) { - CSA_DCHECK(this, IsNameDictionary(CAST(properties))); - TNode flags = - GetNameDictionaryFlags(CAST(properties)); - GotoIf(IsSetSmi(flags, - NameDictionary::MayHaveInterestingSymbolsBit::kMask), - &return_generic); - var_holder = LoadMapPrototype(holder_map); - var_holder_map = LoadMap(var_holder.value()); - } - Goto(&loop); - } - } - - BIND(&return_generic); - { - TNode tag = GetProperty(context, ToObject(context, receiver), - ToStringTagSymbolConstant()); - GotoIf(TaggedIsSmi(tag), &return_default); - GotoIfNot(IsString(CAST(tag)), &return_default); - ReturnToStringFormat(context, CAST(tag)); - } + Label return_default(this); + TNode tag = GetInterestingProperty( + context, receiver, &var_holder, &var_holder_map, + ToStringTagSymbolConstant(), &return_default, &if_proxy); + GotoIf(TaggedIsSmi(tag), &return_default); + GotoIfNot(IsString(CAST(tag)), &return_default); + ReturnToStringFormat(context, CAST(tag)); BIND(&return_default); Return(var_default.value()); @@ -1403,8 +1357,8 @@ void ObjectBuiltinsAssembler::AddToDictionaryIf( Label done(this); GotoIfNot(condition, &done); - Add(CAST(name_dictionary), HeapConstant(name), value, - bailout); + AddToDictionary(CAST(name_dictionary), HeapConstant(name), + value, bailout); Goto(&done); BIND(&done); diff --git a/v8/src/builtins/builtins-regexp-gen.cc b/v8/src/builtins/builtins-regexp-gen.cc index 28790510c..14f8044ac 100644 --- a/v8/src/builtins/builtins-regexp-gen.cc +++ b/v8/src/builtins/builtins-regexp-gen.cc @@ -331,8 +331,8 @@ TNode RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo( // - Receiver is extensible // - Receiver has no interceptors Label add_dictionary_property_slow(this, Label::kDeferred); - Add(CAST(properties), name, capture, - &add_dictionary_property_slow); + AddToDictionary(CAST(properties), name, capture, + &add_dictionary_property_slow); var_i = i_plus_2; Branch(IntPtrGreaterThanOrEqual(var_i.value(), names_length), diff --git a/v8/src/builtins/builtins-regexp.cc b/v8/src/builtins/builtins-regexp.cc index e758782a9..931d33c56 100644 --- a/v8/src/builtins/builtins-regexp.cc +++ b/v8/src/builtins/builtins-regexp.cc @@ -83,7 +83,7 @@ BUILTIN(RegExpInputGetter) { HandleScope scope(isolate); Handle obj(isolate->regexp_last_match_info()->LastInput(), isolate); return obj->IsUndefined(isolate) ? 
ReadOnlyRoots(isolate).empty_string() - : String::cast(*obj); + : Tagged::cast(*obj); } BUILTIN(RegExpInputSetter) { diff --git a/v8/src/builtins/builtins-sharedarraybuffer.cc b/v8/src/builtins/builtins-sharedarraybuffer.cc index ea2fba9bb..c833a676e 100644 --- a/v8/src/builtins/builtins-sharedarraybuffer.cc +++ b/v8/src/builtins/builtins-sharedarraybuffer.cc @@ -214,13 +214,13 @@ Object DoWait(Isolate* isolate, FutexEmulation::WaitMode mode, // 8. If q is NaN, let t be +∞, else let t be max(q, 0). double timeout_number; if (timeout->IsUndefined(isolate)) { - timeout_number = ReadOnlyRoots(isolate).infinity_value().Number(); + timeout_number = ReadOnlyRoots(isolate).infinity_value()->Number(); } else { ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, timeout, Object::ToNumber(isolate, timeout)); timeout_number = timeout->Number(); if (std::isnan(timeout_number)) - timeout_number = ReadOnlyRoots(isolate).infinity_value().Number(); + timeout_number = ReadOnlyRoots(isolate).infinity_value()->Number(); else if (timeout_number < 0) timeout_number = 0; } diff --git a/v8/src/builtins/builtins-string-gen.h b/v8/src/builtins/builtins-string-gen.h index 267dc9d9b..ad4cbe41d 100644 --- a/v8/src/builtins/builtins-string-gen.h +++ b/v8/src/builtins/builtins-string-gen.h @@ -122,9 +122,6 @@ class StringBuiltinsAssembler : public CodeStubAssembler { TNode right, StringComparison op); - using StringAtAccessor = std::function( - TNode receiver, TNode length, TNode index)>; - const TNode IndexOfDollarChar(const TNode context, const TNode string); diff --git a/v8/src/builtins/builtins-struct.cc b/v8/src/builtins/builtins-struct.cc index 1876068f6..0124dbad1 100644 --- a/v8/src/builtins/builtins-struct.cc +++ b/v8/src/builtins/builtins-struct.cc @@ -189,17 +189,19 @@ BUILTIN(SharedStructTypeConstructor) { Handle instance_map = factory->NewMap(JS_SHARED_STRUCT_TYPE, instance_size, DICTIONARY_ELEMENTS, in_object_properties, AllocationType::kSharedMap); + if (num_fields == 0) { + AlwaysSharedSpaceJSObject::PrepareMapNoEnumerableProperties(*instance_map); + } else { + AlwaysSharedSpaceJSObject::PrepareMapWithEnumerableProperties( + isolate, instance_map, maybe_descriptors, num_fields); + } // Structs have fixed layout ahead of time, so there's no slack. int out_of_object_properties = num_fields - in_object_properties; - if (out_of_object_properties == 0) { - instance_map->SetInObjectUnusedPropertyFields(0); - } else { + if (out_of_object_properties != 0) { instance_map->SetOutOfObjectUnusedPropertyFields(0); } - instance_map->set_is_extensible(false); - JSFunction::SetInitialMap(isolate, constructor, instance_map, - factory->null_value(), factory->null_value()); + constructor->set_prototype_or_initial_map(*instance_map, kReleaseStore); // Create a new {constructor, non-instance_prototype} tuple and store it // in Map::constructor field. @@ -210,17 +212,6 @@ BUILTIN(SharedStructTypeConstructor) { constructor->map().set_has_non_instance_prototype(true); constructor->map().SetConstructor(*non_instance_prototype_constructor_tuple); - // Pre-create the enum cache in the shared space, as otherwise for-in - // enumeration will incorrectly create an enum cache in the per-thread heap. 
- if (num_fields == 0) { - instance_map->SetEnumLength(0); - } else { - instance_map->InitializeDescriptors(isolate, *maybe_descriptors); - FastKeyAccumulator::InitializeFastPropertyEnumCache( - isolate, instance_map, num_fields, AllocationType::kSharedOld); - DCHECK_EQ(num_fields, instance_map->EnumLength()); - } - int num_elements = num_properties - num_fields; if (num_elements != 0) { DCHECK(elements_template->InAnySharedSpace()); diff --git a/v8/src/builtins/builtins.cc b/v8/src/builtins/builtins.cc index 0f14e9ada..120c68e47 100644 --- a/v8/src/builtins/builtins.cc +++ b/v8/src/builtins/builtins.cc @@ -401,7 +401,7 @@ Handle Builtins::CreateInterpreterEntryTrampolineForProfiling( Builtin::kInterpreterEntryTrampolineForProfiling); CodeDesc desc; - desc.buffer = reinterpret_cast(code.instruction_start()); + desc.buffer = reinterpret_cast(code.instruction_start()); int instruction_size = code.instruction_size(); desc.buffer_size = instruction_size; @@ -464,7 +464,8 @@ bool Builtins::AllowDynamicFunction(Isolate* isolate, Handle target, Handle target_global_proxy) { if (v8_flags.allow_unsafe_function_constructor) return true; HandleScopeImplementer* impl = isolate->handle_scope_implementer(); - Handle responsible_context = impl->LastEnteredOrMicrotaskContext(); + Handle responsible_context = + impl->LastEnteredOrMicrotaskContext(); // TODO(verwaest): Remove this. if (responsible_context.is_null()) { return true; diff --git a/v8/src/builtins/builtins.h b/v8/src/builtins/builtins.h index fdf4d468a..6083a1b9b 100644 --- a/v8/src/builtins/builtins.h +++ b/v8/src/builtins/builtins.h @@ -252,6 +252,11 @@ class Builtins { return js_entry_handler_offset_; } + int jspi_prompt_handler_offset() const { + DCHECK_NE(jspi_prompt_handler_offset_, 0); + return jspi_prompt_handler_offset_; + } + void SetJSEntryHandlerOffset(int offset) { // Check the stored offset is either uninitialized or unchanged (we // generate multiple variants of this builtin but they should all have the @@ -260,6 +265,11 @@ class Builtins { js_entry_handler_offset_ = offset; } + void SetJSPIPromptHandlerOffset(int offset) { + CHECK_EQ(jspi_prompt_handler_offset_, 0); + jspi_prompt_handler_offset_ = offset; + } + // Returns given builtin's slot in the main builtin table. FullObjectSlot builtin_slot(Builtin builtin); // Returns given builtin's slot in the tier0 builtin table. @@ -299,6 +309,9 @@ class Builtins { static void Generate_InterpreterPushArgsThenConstructImpl( MacroAssembler* masm, InterpreterPushArgsMode mode); + static void Generate_CallApiCallbackImpl(MacroAssembler* masm, + CallApiCallbackMode mode); + #define DECLARE_ASM(Name, ...) \ static void Generate_##Name(MacroAssembler* masm); #define DECLARE_TF(Name, ...) \ @@ -317,6 +330,9 @@ class Builtins { // label) in JSEntry and its variants. It's used to generate the handler table // during codegen (mksnapshot-only). int js_entry_handler_offset_ = 0; + // Do the same for the JSPI prompt, which catches uncaught exceptions and + // rejects the corresponding promise. 
+ int jspi_prompt_handler_offset_ = 0; friend class SetupIsolateDelegate; }; diff --git a/v8/src/builtins/cast.tq b/v8/src/builtins/cast.tq index 20eecf735..09127f03d 100644 --- a/v8/src/builtins/cast.tq +++ b/v8/src/builtins/cast.tq @@ -576,6 +576,22 @@ Cast(implicit context: Context)( return %RawDownCast(a); } +Cast(implicit context: Context)(o: HeapObject): + JSSetWithNoCustomIteration + labels CastError { + if (IsSetIteratorProtectorCellInvalid()) goto CastError; + const a = Cast(o) otherwise CastError; + return %RawDownCast(a); +} + +Cast(implicit context: Context)(o: HeapObject): + JSMapWithNoCustomIteration + labels CastError { + if (IsMapIteratorProtectorCellInvalid()) goto CastError; + const a = Cast(o) otherwise CastError; + return %RawDownCast(a); +} + macro Cast(o: String): T labels CastError; Cast(o: HeapObject): SeqOneByteString labels CastError { @@ -825,6 +841,11 @@ UnsafeCast(implicit context: Context)(o: Object): return %RawDownCast(o); } +UnsafeCast(implicit context: Context)(o: Object): ArrayList { + dcheck(Is(o)); + return %RawDownCast(o); +} + macro UnsafeCast(o: A|Object): A { dcheck(IsWeakOrCleared(o)); return %RawDownCast(o); diff --git a/v8/src/builtins/collections.tq b/v8/src/builtins/collections.tq index 30444ddad..6ec6923bb 100644 --- a/v8/src/builtins/collections.tq +++ b/v8/src/builtins/collections.tq @@ -5,6 +5,174 @@ #include 'src/builtins/builtins-collections-gen.h' namespace collections { + +const kSetPrototypeValues: constexpr BuiltinsName + generates 'Builtin::kSetPrototypeValues'; +const kSetPrototypeHas: constexpr BuiltinsName + generates 'Builtin::kSetPrototypeHas'; +const kMapPrototypeKeys: constexpr BuiltinsName + generates 'Builtin::kMapPrototypeKeys'; +const kMapPrototypeHas: constexpr BuiltinsName + generates 'Builtin::kMapPrototypeHas'; + +@export +struct SetRecord { + // SetRecord.[[Set]] + object: JSReceiver; + + // SetRecord.[[Size]] + // a non-negative integer or +∞ + size: Number; + + // SetRecord.[[Has]] + has: JSAny; + + // SetRecord.[[Keys]] + keys: JSAny; +} + +extern macro CodeStubAssembler::CloneFixedArray( + FixedArrayBase, constexpr ExtractFixedArrayFlag): FixedArrayBase; + +extern macro CollectionsBuiltinsAssembler::AddToSetTable( + implicit context: Context)(OrderedHashSet, Object, String): OrderedHashSet; + +extern macro CollectionsBuiltinsAssembler::TableHasKey( + implicit context: Context)(OrderedHashSet, Object): bool; +extern macro CollectionsBuiltinsAssembler::TableHasKey( + implicit context: Context)(OrderedHashMap, Object): bool; + +// Direct iteration helpers. +@export +struct KeyIndexPair { + key: JSAny; + index: intptr; +} + +extern macro CollectionsBuiltinsAssembler::NextKeyIndexPairUnmodifiedTable( + OrderedHashSet, int32, int32, intptr): KeyIndexPair labels Done; + +// The underlying table must not be resized during iteration! +struct UnmodifiedOrderedHashSetIterator { + macro Next(): JSAny labels Done { + this.current = NextKeyIndexPairUnmodifiedTable( + this.table, this.numBuckets, this.usedCapacity, this.current.index) + otherwise Done; + return this.current.key; + } + const table: OrderedHashSet; + const numBuckets: int32; + const usedCapacity: int32; + current: KeyIndexPair; +} + +extern macro CollectionsBuiltinsAssembler::NextKeyIndexPair( + OrderedHashSet, intptr): KeyIndexPair labels Done; + +// The underlying table can be resized during iteration. 
+struct OrderedHashSetIterator { + macro Next(): JSAny labels Done { + this.current = NextKeyIndexPair(this.table, this.current.index) + otherwise Done; + return this.current.key; + } + const table: OrderedHashSet; + current: KeyIndexPair; +} + +macro LoadOrderedHashTableMetadata( + table: OrderedHashMap|OrderedHashSet, fieldIndex: constexpr int32): int32 { + return Convert(UnsafeCast(table.objects[fieldIndex])); +} + +const kOrderedHashSetNumberOfBucketsIndex: + constexpr int32 generates 'OrderedHashSet::NumberOfBucketsIndex()'; +const kOrderedHashSetNumberOfElementsIndex: + constexpr int32 generates 'OrderedHashSet::NumberOfElementsIndex()'; +const kOrderedHashSetNumberOfDeletedElementsIndex: constexpr int32 + generates 'OrderedHashSet::NumberOfDeletedElementsIndex()'; + +macro NewUnmodifiedOrderedHashSetIterator(table: OrderedHashSet): + UnmodifiedOrderedHashSetIterator { + const numBuckets = + LoadOrderedHashTableMetadata(table, kOrderedHashSetNumberOfBucketsIndex); + const numElements = + LoadOrderedHashTableMetadata(table, kOrderedHashSetNumberOfElementsIndex); + const numDeleted = LoadOrderedHashTableMetadata( + table, kOrderedHashSetNumberOfDeletedElementsIndex); + const usedCapacity = numElements + numDeleted; + return UnmodifiedOrderedHashSetIterator{ + table: table, + numBuckets: numBuckets, + usedCapacity: usedCapacity, + current: KeyIndexPair { + key: Undefined, index: 0 + } + }; +} + +macro NewOrderedHashSetIterator(table: OrderedHashSet): OrderedHashSetIterator { + return OrderedHashSetIterator{ + table: table, + current: KeyIndexPair { + key: Undefined, index: 0 + } + }; +} + +@export +struct KeyValueIndexTuple { + key: JSAny; + value: JSAny; + index: intptr; +} +extern macro +CollectionsBuiltinsAssembler::NextKeyValueIndexTupleUnmodifiedTable( + OrderedHashMap, int32, int32, intptr): KeyValueIndexTuple labels Done; + +extern macro CollectionsBuiltinsAssembler::NextKeyValueIndexTuple( + OrderedHashMap, intptr): KeyValueIndexTuple labels Done; + +// The underlying table must not be resized during iteration! 
+struct UnmodifiedOrderedHashMapIterator { + macro Next(): KeyValuePair labels Done { + this.current = NextKeyValueIndexTupleUnmodifiedTable( + this.table, this.numBuckets, this.usedCapacity, this.current.index) + otherwise Done; + return KeyValuePair{key: this.current.key, value: this.current.value}; + } + const table: OrderedHashMap; + const numBuckets: int32; + const usedCapacity: int32; + current: KeyValueIndexTuple; +} + +const kOrderedHashMapNumberOfBucketsIndex: + constexpr int32 generates 'OrderedHashMap::NumberOfBucketsIndex()'; +const kOrderedHashMapNumberOfElementsIndex: + constexpr int32 generates 'OrderedHashMap::NumberOfElementsIndex()'; +const kOrderedHashMapNumberOfDeletedElementsIndex: constexpr int32 + generates 'OrderedHashMap::NumberOfDeletedElementsIndex()'; + +macro NewUnmodifiedOrderedHashMapIterator(table: OrderedHashMap): + UnmodifiedOrderedHashMapIterator { + const numBuckets = + LoadOrderedHashTableMetadata(table, kOrderedHashMapNumberOfBucketsIndex); + const numElements = + LoadOrderedHashTableMetadata(table, kOrderedHashMapNumberOfElementsIndex); + const numDeleted = LoadOrderedHashTableMetadata( + table, kOrderedHashMapNumberOfDeletedElementsIndex); + const usedCapacity = numElements + numDeleted; + return UnmodifiedOrderedHashMapIterator{ + table: table, + numBuckets: numBuckets, + usedCapacity: usedCapacity, + current: KeyValueIndexTuple { + key: Undefined, value: Undefined, index: 0 + } + }; +} + @export macro LoadKeyValuePairNoSideEffects(implicit context: Context)(o: JSAny): KeyValuePair labels MayHaveSideEffects { @@ -53,4 +221,117 @@ transitioning macro LoadKeyValuePair(implicit context: Context)(o: JSAny): }; } } + +// https://tc39.es/proposal-set-methods/#sec-getsetrecord +transitioning macro GetSetRecord(implicit context: Context)( + obj: JSAny, methodName: constexpr string): SetRecord { + // 1. If obj is not an Object, throw a TypeError exception. + const obj = Cast(obj) + otherwise ThrowTypeError(MessageTemplate::kArgumentIsNonObject, methodName); + + // 2. Let rawSize be ? Get(obj, "size"). + const rawSize = GetProperty(obj, kSizeString); + + // 3. Let numSize be ? ToNumber(rawSize). + const numSize = ToNumber_Inline(rawSize); + if (NumberIsNaN(numSize)) { + // 4. NOTE: If rawSize is undefined, then numSize will be NaN. + // 5. If numSize is NaN, throw a TypeError exception. + ThrowTypeError(MessageTemplate::kSizeIsNaN); + } + + // 6. Let intSize be ! ToIntegerOrInfinity(numSize). + const intSize = ToInteger_Inline(numSize); + + // 7. Let has be ? Get(obj, "has"). + let has = GetProperty(obj, kHasString); + + // 8. If IsCallable(has) is false, throw a TypeError exception. + has = Cast(has) + otherwise ThrowCalledNonCallable(kHasString); + + // 9. Let keys be ? Get(obj, "keys"). + let keys = GetProperty(obj, kKeysString); + + // 10. If IsCallable(keys) is false, throw a TypeError exception. + keys = Cast(keys) + otherwise ThrowCalledNonCallable(kKeysString); + + // 11. Return a new Set Record { [[Set]]: obj, [[Size]]: intSize, [[Has]]: + // has, [[Keys]]: keys }. + return SetRecord{object: obj, size: intSize, has: has, keys: keys}; +} + +// https://tc39.es/proposal-set-methods/#sec-getkeysiterator +transitioning macro GetKeysIterator(implicit context: Context)( + set: JSReceiver, keys: Callable): iterator::IteratorRecord { + // 1. Let keysIter be ? Call(setRec.[[Keys]], setRec.[[Set]]). + const keysIter = Call(context, keys, set); + + // 2. If keysIter is not an Object, throw a TypeError exception. 
+ const keysIterObj = Cast(keysIter) + otherwise ThrowTypeError(MessageTemplate::kKeysMethodInvalid); + + // 3. Let nextMethod be ? Get(keysIter, "next"). + const nextMethod = GetProperty(keysIter, kNextString); + + // 4. If IsCallable(nextMethod) is false, throw a TypeError exception. + Cast(nextMethod) + otherwise ThrowCalledNonCallable(kNextString); + + // 5. Return a new Iterator Record { [[Iterator]]: keysIter, [[NextMethod]]: + // nextMethod, [[Done]]: false }. + return iterator::IteratorRecord{object: keysIterObj, next: nextMethod}; +} + +macro CheckSetRecordHasJSSetMethods(setRecord: SetRecord): + void labels HasUserProvidedMethods { + const keys = + Cast(setRecord.keys) otherwise HasUserProvidedMethods; + const has = Cast(setRecord.has) otherwise HasUserProvidedMethods; + if (!(TaggedEqual( + keys.shared_function_info.function_data, + SmiConstant(kSetPrototypeValues)) && + TaggedEqual( + has.shared_function_info.function_data, + SmiConstant(kSetPrototypeHas)))) + goto HasUserProvidedMethods; +} + +macro CheckSetRecordHasJSMapMethods(setRecord: SetRecord): + void labels HasUserProvidedMethods { + const keys = + Cast(setRecord.keys) otherwise HasUserProvidedMethods; + const has = Cast(setRecord.has) otherwise HasUserProvidedMethods; + if (!(TaggedEqual( + keys.shared_function_info.function_data, + SmiConstant(kMapPrototypeKeys)) && + TaggedEqual( + has.shared_function_info.function_data, + SmiConstant(kMapPrototypeHas)))) + goto HasUserProvidedMethods; +} + +macro FastIntersect(implicit context: Context)( + collectionToIterate: OrderedHashSet, table: T, methodName: String, + resultSetData: OrderedHashSet): OrderedHashSet { + let result = Cast( + CloneFixedArray(resultSetData, ExtractFixedArrayFlag::kFixedArrays)) + otherwise unreachable; + + let iter = + collections::NewUnmodifiedOrderedHashSetIterator(collectionToIterate); + try { + while (true) { + const nextValue = iter.Next() otherwise Done; + + if (TableHasKey(table, nextValue)) { + result = AddToSetTable(result, nextValue, methodName); + } + } + } label Done { + return result; + } + unreachable; +} } diff --git a/v8/src/builtins/conversion.tq b/v8/src/builtins/conversion.tq index 076b787fe..a770d4f71 100644 --- a/v8/src/builtins/conversion.tq +++ b/v8/src/builtins/conversion.tq @@ -158,10 +158,11 @@ transitioning builtin ToObject(implicit context: Context)(input: JSAny): // ES6 section 7.1.1 ToPrimitive ( input [ , PreferredType ] ) transitioning macro TryGetExoticToPrimitive(implicit context: Context)( - input: JSAny): JSAny labels OrdinaryToPrimitive { + input: JSReceiver): JSAny labels OrdinaryToPrimitive { // Look up the @@toPrimitive property. 
const exoticToPrimitive: JSAny = - GetProperty(input, ToPrimitiveSymbolConstant()); + GetInterestingProperty(context, input, ToPrimitiveSymbolConstant()) + otherwise OrdinaryToPrimitive; if (IsNullOrUndefined(exoticToPrimitive)) goto OrdinaryToPrimitive; return exoticToPrimitive; } diff --git a/v8/src/builtins/convert.tq b/v8/src/builtins/convert.tq index c5d235e18..36eda98f1 100644 --- a/v8/src/builtins/convert.tq +++ b/v8/src/builtins/convert.tq @@ -361,6 +361,15 @@ Convert(f: float64): float32 { Convert(n: Number): float32 { return Convert(ChangeNumberToFloat64(n)); } +Convert(n: int32): float32 { + return RoundInt32ToFloat32(n); +} +Convert(h: HeapNumber): float32 { + return Convert(LoadHeapNumberValue(h)); +} +Convert(d: float32): Number { + return ChangeFloat32ToTagged(d); +} Convert(d: float64): Number { return ChangeFloat64ToTagged(d); } @@ -391,6 +400,7 @@ Convert(n: Number): intptr { Convert(v: int32): bint { return IntPtrToBInt(Convert(v)); } + FromConstexpr(v: constexpr IntegerLiteral): float64 { return ConstexprIntegerLiteralToFloat64(v); diff --git a/v8/src/builtins/frames.tq b/v8/src/builtins/frames.tq index 121c3bb3e..d7dc20000 100644 --- a/v8/src/builtins/frames.tq +++ b/v8/src/builtins/frames.tq @@ -35,6 +35,7 @@ type Frame = FrameWithArguments|StubFrame; extern macro LoadFramePointer(): Frame; extern macro LoadParentFramePointer(): Frame; +extern macro StackSlotPtr(constexpr int32, constexpr int32): RawPtr; // Load values from a specified frame by given offset in bytes. macro LoadObjectFromFrame(f: Frame, o: constexpr int32): Object { diff --git a/v8/src/builtins/ia32/builtins-ia32.cc b/v8/src/builtins/ia32/builtins-ia32.cc index 48f791aa6..c44861de8 100644 --- a/v8/src/builtins/ia32/builtins-ia32.cc +++ b/v8/src/builtins/ia32/builtins-ia32.cc @@ -864,9 +864,16 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, namespace { -void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array) { - __ mov_w(FieldOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset), - Immediate(0)); +void ResetSharedFunctionInfoAge(MacroAssembler* masm, Register sfi) { + __ mov_w(FieldOperand(sfi, SharedFunctionInfo::kAgeOffset), Immediate(0)); +} + +void ResetJSFunctionAge(MacroAssembler* masm, Register js_function, + Register scratch) { + const Register shared_function_info(scratch); + __ Move(shared_function_info, + FieldOperand(js_function, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, shared_function_info); } void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm, @@ -967,6 +974,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Get the bytecode array from the function object and load it into // kInterpreterBytecodeArrayRegister. __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, eax); __ mov(kInterpreterBytecodeArrayRegister, FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset)); GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, eax); @@ -981,8 +989,6 @@ void Builtins::Generate_InterpreterEntryTrampoline( AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry); } - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister); - // Push bytecode array. __ push(kInterpreterBytecodeArrayRegister); // Push Smi tagged initial bytecode array offset. 
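// [Editor's note: illustrative sketch, not part of the upstream change.] The
// convert.tq hunk above adds Convert<float32> overloads for int32 and HeapNumber
// inputs plus a float32 -> Number conversion. A minimal Torque usage sketch,
// assuming a HeapNumber argument (the macro name is hypothetical):
//
//   macro RoundTripThroughFloat32(h: HeapNumber): Number {
//     const f: float32 = Convert<float32>(h);  // new HeapNumber -> float32 overload
//     return Convert<Number>(f);               // new float32 -> tagged Number overload
//   }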
@@ -1604,6 +1610,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { BaselineOutOfLinePrologueDescriptor::kClosure); DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister); DCHECK_EQ(callee_js_function, kJSFunctionRegister); + ResetJSFunctionAge(masm, callee_js_function, scratch); __ Push(callee_js_function); // Callee's JS function. __ Push(saved_arg_count, scratch); // Push actual argument count. @@ -1611,7 +1618,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // the frame, so load it into a register. Register bytecode_array = scratch; __ movd(bytecode_array, saved_bytecode_array); - ResetBytecodeAge(masm, bytecode_array); __ Push(bytecode_array); // Baseline code frames store the feedback vector where interpreter would @@ -2993,6 +2999,129 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { __ Trap(); } +void Builtins::Generate_NewGenericJSToWasmWrapper(MacroAssembler* masm) { + __ EnterFrame(StackFrame::JS_TO_WASM); + Register wrapper_buffer = + WasmNewJSToWasmWrapperDescriptor::WrapperBufferRegister(); + // Push the wrapper_buffer stack, it's needed later for the results. + __ push(wrapper_buffer); + Register result_size = eax; + __ mov(result_size, + MemOperand( + wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferStackReturnBufferSize)); + __ shl(result_size, kSystemPointerSizeLog2); + __ sub(esp, result_size); + __ mov(MemOperand( + wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferStackReturnBufferStart), + esp); + Register params_start = eax; + __ mov(params_start, + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferParamStart)); + Register params_end = esi; + __ mov(params_end, + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferParamEnd)); + Register call_target = edi; + __ mov(call_target, + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferCallTarget)); + + Register last_stack_param = ecx; + + // The first GP parameter is the instance, which we handle specially. + int stack_params_offset = + (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize + + arraysize(wasm::kFpParamRegisters) * kDoubleSize; + int param_padding = stack_params_offset & kSystemPointerSize; + stack_params_offset += param_padding; + __ lea(last_stack_param, MemOperand(params_start, stack_params_offset)); + + Label loop_start; + __ bind(&loop_start); + + Label finish_stack_params; + __ cmp(last_stack_param, params_end); + __ j(greater_equal, &finish_stack_params); + + // Push parameter + __ sub(params_end, Immediate(kSystemPointerSize)); + __ push(MemOperand(params_end, 0)); + __ jmp(&loop_start); + + __ bind(&finish_stack_params); + + int next_offset = stack_params_offset; + for (size_t i = arraysize(wasm::kFpParamRegisters) - 1; + i < arraysize(wasm::kFpParamRegisters); --i) { + next_offset -= kDoubleSize; + __ Movsd(wasm::kFpParamRegisters[i], MemOperand(params_start, next_offset)); + } + + // Set the flag-in-wasm flag before loading the parameter registers. There are + // not so many registers, so we use one of the parameter registers before it + // is blocked. 
+ Register thread_in_wasm_flag_addr = ecx; + __ mov( + thread_in_wasm_flag_addr, + MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset())); + __ mov(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(1)); + + next_offset -= param_padding; + for (size_t i = arraysize(wasm::kGpParamRegisters) - 1; i > 0; --i) { + next_offset -= kSystemPointerSize; + __ mov(wasm::kGpParamRegisters[i], MemOperand(params_start, next_offset)); + } + DCHECK_EQ(next_offset, 0); + // Since there are so few registers, {params_start} overlaps with one of the + // parameter registers. Make sure it overlaps with the last one we fill. + DCHECK_EQ(params_start, wasm::kGpParamRegisters[1]); + + __ mov(kWasmInstanceRegister, + MemOperand(ebp, JSToWasmWrapperConstants::kInstanceOffset)); + + __ call(call_target); + + __ mov( + thread_in_wasm_flag_addr, + MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset())); + __ mov(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(0)); + thread_in_wasm_flag_addr = no_reg; + + wrapper_buffer = esi; + __ mov(wrapper_buffer, MemOperand(ebp, -2 * kSystemPointerSize)); + + __ Movsd( + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferFPReturnRegister1), + wasm::kFpReturnRegisters[0]); + __ Movsd( + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferFPReturnRegister2), + wasm::kFpReturnRegisters[1]); + __ mov(MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferGPReturnRegister1), + wasm::kGpReturnRegisters[0]); + __ mov(MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferGPReturnRegister2), + wasm::kGpReturnRegisters[1]); + + // Call the return value builtin with + // eax: wasm instance. + // ecx: the result JSArray for multi-return. + // edx: pointer to the wrapper buffer which contains all parameters. + __ mov(eax, MemOperand(ebp, JSToWasmWrapperConstants::kInstanceOffset)); + __ mov(ecx, MemOperand(ebp, JSToWasmWrapperConstants::kResultArrayOffset)); + __ mov(edx, wrapper_buffer); + __ Call(BUILTIN_CODE(masm->isolate(), JSToWasmHandleReturns), + RelocInfo::CODE_TARGET); + + __ LeaveFrame(StackFrame::JS_TO_WASM); + __ ret(0); +} + void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { // TODO(v8:12191): Implement for this platform. __ Trap(); @@ -3261,7 +3390,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { namespace { -// Generates an Operand for saving parameters after PrepareCallApiFunction. +// Generates an Operand for saving parameters after EnterExitFrame. Operand ApiParameterOperand(int index) { return Operand(esp, index * kSystemPointerSize); } @@ -3272,20 +3401,6 @@ Operand ExitFrameCallerStackSlotOperand(int index) { kSystemPointerSize); } -// Prepares stack to put arguments (aligns and so on). Reserves -// space for return value if needed (assumes the return value is a handle). -// Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1) -// etc. Saves context (esi). If space was reserved for return value then -// stores the pointer to the reserved slot into esi. -void PrepareCallApiFunction(MacroAssembler* masm, int extra_slots, - Register c_function) { - ASM_CODE_COMMENT(masm); - __ EnterExitFrame(extra_slots, StackFrame::EXIT, c_function); - if (v8_flags.debug_code) { - __ mov(esi, Immediate(base::bit_cast(kZapValue))); - } -} - // Calls an API function. Allocates HandleScope, extracts returned value // from handle and propagates exceptions. Clobbers esi, edi and caller-saved // registers. Restores context. 
On return removes @@ -3447,13 +3562,20 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, } // namespace -void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { +void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, + CallApiCallbackMode mode) { // ----------- S t a t e ------------- - // -- esi : context + // CallApiCallbackMode::kGeneric mode: + // -- ecx : arguments count (not including the receiver) + // -- edx : call handler info + // -- edi : holder + // CallApiCallbackMode::kNoSideEffects/kWithSideEffectsSideEffects modes: // -- eax : api function address // -- ecx : arguments count (not including the receiver) // -- edx : call data // -- edi : holder + // Both modes: + // -- esi : context // -- esp[0] : return address // -- esp[8] : argument 0 (receiver) // -- esp[16] : argument 1 @@ -3462,12 +3584,30 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // -- esp[(argc + 1) * 8] : argument argc // ----------------------------------- - Register api_function_address = eax; - Register argc = ecx; - Register call_data = edx; - Register holder = edi; - - DCHECK(!AreAliased(api_function_address, argc, call_data, holder)); + Register api_function_address = no_reg; + Register argc = no_reg; + Register call_data = no_reg; + Register callback = no_reg; + Register holder = no_reg; + + switch (mode) { + case CallApiCallbackMode::kGeneric: + api_function_address = eax; + argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); + callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + holder = CallApiCallbackGenericDescriptor::HolderRegister(); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + api_function_address = + CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister(); + argc = CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister(); + call_data = CallApiCallbackOptimizedDescriptor::CallDataRegister(); + holder = CallApiCallbackOptimizedDescriptor::HolderRegister(); + break; + } + DCHECK(!AreAliased(api_function_address, argc, call_data, callback, holder)); using FCI = FunctionCallbackInfo; using FCA = FunctionCallbackArguments; @@ -3501,7 +3641,16 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { __ PopReturnAddressTo(argc); __ PushRoot(RootIndex::kUndefinedValue); // kNewTarget - __ Push(call_data); + switch (mode) { + case CallApiCallbackMode::kGeneric: + __ push(FieldOperand(callback, CallHandlerInfo::kDataOffset)); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + __ Push(call_data); + break; + } __ PushRoot(RootIndex::kUndefinedValue); // kReturnValue __ Push(Smi::zero()); // kUnused __ Push(Immediate(ExternalReference::isolate_address(masm->isolate()))); @@ -3510,11 +3659,6 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // We use it below to set up the FunctionCallbackInfo object. __ mov(holder, esp); - __ PushReturnAddressFrom(argc); - - // Reload argc from xmm0. - __ movd(argc, xmm0); - // The API function takes v8::FunctionCallbackInfo reference, allocate it // in non-GCed space of the exit frame. static constexpr int kApiArgc = 1; @@ -3522,8 +3666,45 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // from the stack after the callback in non-GCed space of the exit frame. 
static constexpr int kApiStackSpace = 4; static_assert((kApiStackSpace - 1) * kSystemPointerSize == sizeof(FCI)); + const int exit_frame_params_size = + mode == CallApiCallbackMode::kGeneric ? 2 : 0; + + if (mode == CallApiCallbackMode::kGeneric) { + ASM_CODE_COMMENT_STRING(masm, "Push API_CALLBACK_EXIT frame arguments"); + // Reload argc from xmm0. + __ movd(api_function_address, xmm0); + + // Argc parameter as a Smi. + static_assert(ApiCallbackExitFrameConstants::kArgcOffset == + 3 * kSystemPointerSize); + __ SmiTag(api_function_address); + __ Push(api_function_address); + + // Target parameter. + static_assert(ApiCallbackExitFrameConstants::kTargetOffset == + 2 * kSystemPointerSize); + __ push(FieldOperand(callback, CallHandlerInfo::kOwnerTemplateOffset)); + + __ PushReturnAddressFrom(argc); + + __ mov(api_function_address, + FieldOperand(callback, + CallHandlerInfo::kMaybeRedirectedCallbackOffset)); + + __ EnterExitFrame(kApiArgc + kApiStackSpace, StackFrame::API_CALLBACK_EXIT, + api_function_address); + } else { + __ PushReturnAddressFrom(argc); + __ EnterExitFrame(kApiArgc + kApiStackSpace, StackFrame::EXIT, + api_function_address); + } - PrepareCallApiFunction(masm, kApiArgc + kApiStackSpace, api_function_address); + if (v8_flags.debug_code) { + __ mov(esi, Immediate(base::bit_cast(kZapValue))); + } + + // Reload argc from xmm0. + __ movd(argc, xmm0); { ASM_CODE_COMMENT_STRING(masm, "Initialize FunctionCallbackInfo"); @@ -3543,22 +3724,24 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // We also store the number of bytes to drop from the stack after returning // from the API function here. - __ lea(scratch, - Operand(argc, times_system_pointer_size, - (FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize)); + __ lea(scratch, Operand(argc, times_system_pointer_size, + (FCA::kArgsLength + 1 /* receiver */ + + exit_frame_params_size) * + kSystemPointerSize)); __ mov(ApiParameterOperand(kApiArgc + 3), scratch); __ RecordComment("v8::FunctionCallback's argument."); __ lea(scratch, ApiParameterOperand(kApiArgc + 0)); __ mov(ApiParameterOperand(0), scratch); - ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(mode); // Pass api function address to thunk wrapper in case profiler or side-effect // checking is enabled. Register thunk_arg = api_function_address; - Operand return_value_operand = - ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex); + Operand return_value_operand = ExitFrameCallerStackSlotOperand( + FCA::kReturnValueIndex + exit_frame_params_size); static constexpr int kUseStackSpaceOperand = 0; Operand stack_space_operand = ApiParameterOperand(kApiArgc + 3); @@ -3611,7 +3794,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { // esp[7 * kSystemPointerSize]: kDataIndex // esp[8 * kSystemPointerSize]: kThisIndex / receiver - __ pop(scratch); // Pop return address to extend the frame. + __ PopReturnAddressTo(scratch); __ push(receiver); __ push(FieldOperand(callback, AccessorInfo::kDataOffset)); __ PushRoot(RootIndex::kUndefinedValue); // ReturnValue @@ -3625,7 +3808,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { __ mov(args_array, esp); __ push(FieldOperand(callback, AccessorInfo::kNameOffset)); - __ push(scratch); // Restore return address. 
+ __ PushReturnAddressFrom(scratch); Register api_function_address = ReassignRegister(receiver); __ RecordComment("Load function_address"); @@ -3644,7 +3827,11 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { static constexpr int kApiStackSpace = 1; static_assert(kApiStackSpace * kSystemPointerSize == sizeof(PCI)); - PrepareCallApiFunction(masm, kApiArgc + kApiStackSpace, api_function_address); + __ EnterExitFrame(kApiArgc + kApiStackSpace, StackFrame::EXIT, + api_function_address); + if (v8_flags.debug_code) { + __ mov(esi, Immediate(base::bit_cast(kZapValue))); + } __ RecordComment("Create v8::PropertyCallbackInfo object on the stack."); // Initialize its args_ field. @@ -4377,7 +4564,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ pop(kInterpreterAccumulatorRegister); if (is_osr) { - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister); + __ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset)); + ResetJSFunctionAge(masm, closure, closure); Generate_OSREntry(masm, code_obj); } else { __ jmp(code_obj); diff --git a/v8/src/builtins/iterator-helpers.tq b/v8/src/builtins/iterator-helpers.tq index 4b8ecbfc1..30254a6ca 100644 --- a/v8/src/builtins/iterator-helpers.tq +++ b/v8/src/builtins/iterator-helpers.tq @@ -143,7 +143,7 @@ transitioning javascript builtin IteratorPrototypeMap( // 3. If IsCallable(mapper) is false, throw a TypeError exception. const mapper = Cast(mapper) - otherwise ThrowTypeError(MessageTemplate::kCalledNonCallable, methodName); + otherwise ThrowCalledNonCallable(methodName); // 4. Let iterated be ? GetIteratorDirect(O). const iterated = GetIteratorDirect(o); @@ -239,7 +239,7 @@ transitioning javascript builtin IteratorPrototypeFilter( // 3. If IsCallable(predicate) is false, throw a TypeError exception. const predicate = Cast(predicate) - otherwise ThrowTypeError(MessageTemplate::kCalledNonCallable, methodName); + otherwise ThrowCalledNonCallable(methodName); // 4. Let iterated be ? GetIteratorDirect(O). const iterated = GetIteratorDirect(o); @@ -679,7 +679,7 @@ transitioning javascript builtin IteratorPrototypeReduce( // 3. If IsCallable(reducer) is false, throw a TypeError exception. const reducer = Cast(arguments[0]) - otherwise ThrowTypeError(MessageTemplate::kCalledNonCallable, methodName); + otherwise ThrowCalledNonCallable(methodName); // 4. Let iterated be ? GetIteratorDirect(O). const iterated = GetIteratorDirect(o); @@ -798,7 +798,7 @@ transitioning javascript builtin IteratorPrototypeForEach( // 3. If IsCallable(fn) is false, throw a TypeError exception. const fn = Cast(fn) - otherwise ThrowTypeError(MessageTemplate::kCalledNonCallable, methodName); + otherwise ThrowCalledNonCallable(methodName); // 4. Let iterated be ? GetIteratorDirect(O). const iterated = GetIteratorDirect(o); @@ -854,7 +854,7 @@ transitioning javascript builtin IteratorPrototypeSome( // 3. If IsCallable(predicate) is false, throw a TypeError exception. const predicate = Cast(predicate) - otherwise ThrowTypeError(MessageTemplate::kCalledNonCallable, methodName); + otherwise ThrowCalledNonCallable(methodName); // 4. Let iterated be ? GetIteratorDirect(O). const iterated = GetIteratorDirect(o); @@ -918,7 +918,7 @@ transitioning javascript builtin IteratorPrototypeEvery( // 3. If IsCallable(predicate) is false, throw a TypeError exception. const predicate = Cast(predicate) - otherwise ThrowTypeError(MessageTemplate::kCalledNonCallable, methodName); + otherwise ThrowCalledNonCallable(methodName); // 4. Let iterated be ? 
GetIteratorDirect(O). const iterated = GetIteratorDirect(o); @@ -982,7 +982,7 @@ transitioning javascript builtin IteratorPrototypeFind( // 3. If IsCallable(predicate) is false, throw a TypeError exception. const predicate = Cast(predicate) - otherwise ThrowTypeError(MessageTemplate::kCalledNonCallable, methodName); + otherwise ThrowCalledNonCallable(methodName); // 4. Let iterated be ? GetIteratorDirect(O). const iterated = GetIteratorDirect(o); diff --git a/v8/src/builtins/js-to-wasm.tq b/v8/src/builtins/js-to-wasm.tq new file mode 100644 index 000000000..4a28e9f07 --- /dev/null +++ b/v8/src/builtins/js-to-wasm.tq @@ -0,0 +1,830 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/wasm/wasm-linkage.h' + +namespace runtime { +extern runtime WasmGenericJSToWasmObject( + Context, WasmInstanceObject, JSAny, Smi): JSAny; +extern runtime WasmGenericWasmToJSObject(Context, Object): JSAny; +extern runtime WasmCompileWrapper(NoContext, WasmExportedFunctionData): JSAny; +} // namespace runtime + +namespace wasm { +extern builtin NewGenericJSToWasmWrapper( + RawPtr, WasmInstanceObject, JSAny): JSAny; + +extern enum ExternalPointerTag extends uint64 { + kWasmExportedFunctionDataSignatureTag, + ... +} + +extern macro UniqueIntPtrConstant(constexpr intptr): intptr; + +extern macro LoadExternalPointerFromObject( + HeapObject, constexpr int32, constexpr ExternalPointerTag): RawPtr; + +const kWasmExportedFunctionDataSignatureOffset: + constexpr int32 generates 'WasmExportedFunctionData::kSigOffset'; + +const kWasmReturnCountOffset: + constexpr intptr generates 'wasm::FunctionSig::kReturnCountOffset'; + +const kWasmParameterCountOffset: constexpr intptr + generates 'wasm::FunctionSig::kParameterCountOffset'; + +const kWasmSigTypesOffset: + constexpr intptr generates 'wasm::FunctionSig::kRepsOffset'; + +// This constant should only be loaded as a `UniqueIntPtrConstant` to avoid +// problems with PGO. +// `- 1` because of the instance parameter. +const kNumGPRegisterParameters: + constexpr intptr generates 'arraysize(wasm::kGpParamRegisters) - 1'; + +// This constant should only be loaded as a `UniqueIntPtrConstant` to avoid +// problems with PGO. +const kNumFPRegisterParameters: + constexpr intptr generates 'arraysize(wasm::kFpParamRegisters)'; + +const kNumGPRegisterReturns: + constexpr intptr generates 'arraysize(wasm::kGpReturnRegisters)'; + +const kNumFPRegisterReturns: + constexpr intptr generates 'arraysize(wasm::kFpReturnRegisters)'; + +const kWasmI32Type: + constexpr int32 generates 'wasm::kWasmI32.raw_bit_field()'; +const kWasmI64Type: + constexpr int32 generates 'wasm::kWasmI64.raw_bit_field()'; +const kWasmF32Type: + constexpr int32 generates 'wasm::kWasmF32.raw_bit_field()'; +const kWasmF64Type: + constexpr int32 generates 'wasm::kWasmF64.raw_bit_field()'; + +extern enum ValueKind extends int32 constexpr 'wasm::ValueKind' { + kRef, + kRefNull, + ... +} + +extern enum HeapType extends int32 +constexpr 'wasm::HeapType::Representation' { + kExtern, + kNoExtern, + kString, + kEq, + kI31, + kStruct, + kArray, + kAny, + kNone, + kNoFunc, + ... 
+} + +const kWrapperBufferReturnCount: constexpr intptr + generates 'JSToWasmWrapperConstants::kWrapperBufferReturnCount'; +const kWrapperBufferRefReturnCount: constexpr intptr + generates 'JSToWasmWrapperConstants::kWrapperBufferRefReturnCount'; +const kWrapperBufferSigRepresentationArray: constexpr intptr + generates 'JSToWasmWrapperConstants::kWrapperBufferSigRepresentationArray' + ; +const kWrapperBufferStackReturnBufferSize: constexpr intptr + generates 'JSToWasmWrapperConstants::kWrapperBufferStackReturnBufferSize' + ; +const kWrapperBufferCallTarget: constexpr intptr + generates 'JSToWasmWrapperConstants::kWrapperBufferCallTarget'; +const kWrapperBufferParamStart: constexpr intptr + generates 'JSToWasmWrapperConstants::kWrapperBufferParamStart'; +const kWrapperBufferParamEnd: constexpr intptr + generates 'JSToWasmWrapperConstants::kWrapperBufferParamEnd'; +const kWrapperBufferStackReturnBufferStart: constexpr intptr + generates 'JSToWasmWrapperConstants::kWrapperBufferStackReturnBufferStart' + ; +const kWrapperBufferFPReturnRegister1: constexpr intptr + generates 'JSToWasmWrapperConstants::kWrapperBufferFPReturnRegister1'; +const kWrapperBufferFPReturnRegister2: constexpr intptr + generates 'JSToWasmWrapperConstants::kWrapperBufferFPReturnRegister2'; +const kWrapperBufferGPReturnRegister1: constexpr intptr + generates 'JSToWasmWrapperConstants::kWrapperBufferGPReturnRegister1'; +const kWrapperBufferGPReturnRegister2: constexpr intptr + generates 'JSToWasmWrapperConstants::kWrapperBufferGPReturnRegister2'; +const kWrapperBufferSize: constexpr int32 + generates 'JSToWasmWrapperConstants::kWrapperBufferSize'; + +const kValueTypeKindBits: constexpr int32 + generates 'wasm::ValueType::kKindBits'; +const kValueTypeKindBitsMask: constexpr int32 + generates 'wasm::kWasmValueKindBitsMask'; +const kValueTypeHeapTypeMask: constexpr int32 + generates 'wasm::kWasmHeapTypeBitsMask'; + +macro Bitcast(i: From): To { + return i; +} + +extern macro BitcastFloat32ToInt32(float32): uint32; + +Bitcast(v: float32): uint32 { + return BitcastFloat32ToInt32(v); +} + +macro RefCast(i: &intptr): + &To { + return torque_internal::unsafe::NewReference(i.object, i.offset); +} + +macro TrucateBigIntToI64(context: Context, input: JSAny): intptr { + // This is only safe to use on 64-bit platforms. + dcheck(Is64()); + const bigint = ToBigInt(context, input); + + if (bigint::ReadBigIntLength(bigint) == 0) { + return 0; + } + + const digit = bigint::LoadBigIntDigit(bigint, 0); + if (bigint::ReadBigIntSign(bigint) == bigint::kPositiveSign) { + // Note that even though the bigint is positive according to its sign, the + // result of `Signed(digit)` can be negative if the most significant bit is + // set. This is intentional and follows the specification of `ToBigInt64()`. + return Signed(digit); + } + return 0 - Signed(digit); +} + +@export +struct Int64AsInt32Pair { + low: uintptr; + high: uintptr; +} + +// This is only safe to use on 32-bit platforms. +extern macro BigIntToRawBytes(BigInt): Int64AsInt32Pair; + +extern macro PopAndReturn(intptr, JSAny): never; + +// The ReturnSlotAllocator calculates the size of the space needed on the stack +// for return values. 
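// [Editor's note: worked example, not part of the upstream change.] On a 32-bit
// target, once the return registers are exhausted, a return sequence (i32, i32, f64)
// is laid out as follows: the first i32 reserves a full 8-byte slot (stackSlots += 2)
// and records its unused half as the small slot, the second i32 reuses that small
// slot, and the f64 takes its own aligned 8-byte slot. Because the small slot is no
// longer last, GetSize() returns the full 4 (4-byte) stack slots.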
+struct ReturnSlotAllocator { + macro AllocStack(): void { + if constexpr (Is64()) { + this.stackSlots++; + } else { + if (this.hasSmallSlot) { + this.hasSmallSlot = false; + this.smallSlotLast = false; + } else { + this.stackSlots += 2; + this.hasSmallSlot = true; + this.smallSlotLast = true; + } + } + return; + } + + macro AllocGP(): void { + if (this.remainingGPRegs > 0) { + this.remainingGPRegs--; + return; + } + this.AllocStack(); + } + + macro AllocFP32(): void { + if (this.remainingFPRegs > 0) { + this.remainingFPRegs--; + return; + } + this.AllocStack(); + } + + macro AllocFP64(): void { + if (this.remainingFPRegs > 0) { + this.remainingFPRegs--; + return; + } + if constexpr (Is64()) { + this.stackSlots++; + } else { + this.stackSlots += 2; + this.smallSlotLast = false; + } + } + + macro GetSize(): intptr { + if (this.smallSlotLast) { + return this.stackSlots - 1; + } else { + return this.stackSlots; + } + } + + remainingGPRegs: intptr; + remainingFPRegs: intptr; + // Even on 32-bit platforms we always allocate 64-bit stack space at a time to + // preserve alignment. If we allocate a 64-bit slot for a 32-bit type, then we + // remember the second half of the 64-bit slot as `smallSlot` so that it can + // be used for the next 32-bit type. + hasSmallSlot: bool; + // If the {smallSlot} is in the middle of the whole allocated stack space, + // then it is part of the overall stack space size. However, if the hole is at + // the border of the whole allocated stack space, then we have to subtract it + // from the overall stack space size. This flag keeps track of whether the + // hole is in the middle (false) or at the border (true). + smallSlotLast: bool; + stackSlots: intptr; +} + +macro NewReturnSlotAllocator(): ReturnSlotAllocator { + let result: ReturnSlotAllocator; + result.remainingGPRegs = kNumGPRegisterReturns; + result.remainingFPRegs = kNumFPRegisterReturns; + result.stackSlots = 0; + result.hasSmallSlot = false; + result.smallSlotLast = false; + return result; +} + +struct LocationAllocator { + macro GetStackSlot(): &intptr { + if constexpr (Is64()) { + const result = torque_internal::unsafe::NewReference( + this.object, this.nextStack); + this.nextStack += torque_internal::SizeOf(); + return result; + } else { + if (this.smallSlot != 0) { + const result = torque_internal::unsafe::NewReference( + this.object, this.smallSlot); + this.smallSlot = 0; + this.smallSlotLast = false; + return result; + } + const result = torque_internal::unsafe::NewReference( + this.object, this.nextStack); + this.smallSlot = this.nextStack + torque_internal::SizeOf(); + this.nextStack = this.smallSlot + torque_internal::SizeOf(); + this.smallSlotLast = true; + return result; + } + } + + macro GetGPSlot(): &intptr { + if (this.remainingGPRegs-- > 0) { + const result = torque_internal::unsafe::NewReference( + this.object, this.nextGPReg); + this.nextGPReg += torque_internal::SizeOf(); + return result; + } + return this.GetStackSlot(); + } + + macro GetFP32Slot(): &intptr { + if (this.remainingFPRegs-- > 0) { + const result = torque_internal::unsafe::NewReference( + this.object, this.nextFPReg); + this.nextFPReg += torque_internal::SizeOf(); + return result; + } + return this.GetStackSlot(); + } + + macro GetFP64Slot(): &intptr { + if (this.remainingFPRegs-- > 0) { + const result = torque_internal::unsafe::NewReference( + this.object, this.nextFPReg); + this.nextFPReg += torque_internal::SizeOf(); + return result; + } + if constexpr (Is64()) { + return this.GetStackSlot(); + } else { + const result = 
torque_internal::unsafe::NewReference( + this.object, this.nextStack); + this.nextStack = this.nextStack + 2 * torque_internal::SizeOf(); + this.smallSlotLast = false; + return result; + } + } + + // For references we start a new section on the stack, no old slots are + // filled. + macro StartRefs(): void { + if (!this.smallSlotLast) { + this.smallSlot = 0; + } + } + + macro GetStackEnd(): RawPtr { + let offset = this.nextStack; + if (this.smallSlotLast) { + offset -= torque_internal::SizeOf(); + } + return torque_internal::unsafe::GCUnsafeReferenceToRawPtr( + this.object, offset); + } + + object: HeapObject|TaggedZeroPattern; + remainingGPRegs: intptr; + remainingFPRegs: intptr; + nextGPReg: intptr; + nextFPReg: intptr; + nextStack: intptr; + // Even on 32-bit platforms we always allocate 64-bit stack space at a time to + // preserve alignment. If we allocate a 64-bit slot for a 32-bit type, then we + // remember the second half of the 64-bit slot as `smallSlot` so that it can + // be used for the next 32-bit type. + smallSlot: intptr; + // If the {smallSlot} is in the middle of the whole allocated stack space, + // then it is part of the overall stack space size. However, if the hole is at + // the border of the whole allocated stack space, then we have to subtract it + // from the overall stack space size. This flag keeps track of whether the + // hole is in the middle (false) or at the border (true). + smallSlotLast: bool; +} + +macro LocationAllocatorForParams(paramBuffer: MutableSlice): + LocationAllocator { + let slotsPerFloat: intptr; + if constexpr (Is64()) { + slotsPerFloat = 1; + } else { + slotsPerFloat = 2; + } + let result: LocationAllocator; + result.object = paramBuffer.object; + result.remainingGPRegs = UniqueIntPtrConstant(kNumGPRegisterParameters); + result.remainingFPRegs = UniqueIntPtrConstant(kNumFPRegisterParameters); + result.nextGPReg = paramBuffer.offset; + result.nextFPReg = result.remainingGPRegs * torque_internal::SizeOf(); + if constexpr (!Is64()) { + // Add padding to provide 8-byte alignment for float64 values. + result.nextFPReg += (result.nextFPReg & torque_internal::SizeOf()); + } + dcheck(result.nextFPReg % 8 == 0); + result.nextFPReg += paramBuffer.offset; + result.nextStack = result.nextFPReg + + result.remainingFPRegs * slotsPerFloat * + torque_internal::SizeOf(); + result.smallSlot = 0; + result.smallSlotLast = false; + return result; +} + +macro LocationAllocatorForReturns( + gpRegs: RawPtr, fpRegs: RawPtr, stack: RawPtr): LocationAllocator { + let result: LocationAllocator; + result.object = kZeroBitPattern; + result.remainingGPRegs = kNumGPRegisterReturns; + result.remainingFPRegs = kNumFPRegisterReturns; + result.nextGPReg = Convert(gpRegs) + kHeapObjectTag; + result.nextFPReg = Convert(fpRegs) + kHeapObjectTag; + result.nextStack = Convert(stack) + kHeapObjectTag; + result.smallSlot = 0; + result.smallSlotLast = false; + return result; +} + +transitioning javascript builtin +JSToWasmWrapper( + js-implicit context: NativeContext, receiver: JSAny, + target: JSFunction)(...arguments): JSAny { + const functionData = UnsafeCast( + target.shared_function_info.function_data); + + // Trigger a wrapper tier-up when this function got called often enough. + functionData.wrapper_budget = functionData.wrapper_budget - 1; + if (functionData.wrapper_budget == 0) { + runtime::WasmCompileWrapper(kNoContext, functionData); + } + // TODO(ahaas): Define an `extern operator` for the signature the same as it + // exists for `call_target_ptr`. 
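// [Editor's note: a hypothetical shape for the accessor mentioned in the TODO
// above, mirroring the existing call_target_ptr pattern; the names sig_ptr and
// LoadWasmExportedFunctionDataSig are assumptions, not part of this change.]
//
//   extern operator '.sig_ptr' macro LoadWasmExportedFunctionDataSig(
//       WasmExportedFunctionData): RawPtr;
//
// With such an operator the wrapper could read functionData.sig_ptr instead of
// calling LoadExternalPointerFromObject with the offset and tag constants.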
+ const sig = LoadExternalPointerFromObject( + functionData, kWasmExportedFunctionDataSignatureOffset, + ExternalPointerTag::kWasmExportedFunctionDataSignatureTag); + + const paramCount = *GetRefAt(sig, kWasmParameterCountOffset); + + const returnCount = *GetRefAt(sig, kWasmReturnCountOffset); + + const reps = *GetRefAt(sig, kWasmSigTypesOffset); + + const sigTypes = torque_internal::unsafe::NewOffHeapConstSlice( + %RawDownCast>(reps), + Convert(paramCount + returnCount)); + + // If the return count is greater than 1, then the return values are returned + // as a JSArray. After returning from the call to wasm, the return values are + // stored on an area of the stack the GC does not know about. To avoid a GC + // while references are still stored in this area of the stack, we allocate + // the result JSArray already now before the call to wasm. + let resultArray: JSAny = Undefined; + let returnSize: intptr = 0; + let hasRefReturns: bool = false; + if (returnCount > 1) { + resultArray = WasmAllocateJSArray(Convert(returnCount)); + + // We have to calculate the size of the stack area where the wasm function + // will store the return values for multi-return. + const returnTypes = + Subslice(sigTypes, Convert(0), Convert(returnCount)) + otherwise unreachable; + let retIt = returnTypes.Iterator(); + let allocator = NewReturnSlotAllocator(); + + while (!retIt.Empty()) { + const retType = retIt.NextNotEmpty(); + if (retType == kWasmI32Type) { + allocator.AllocGP(); + } else if (retType == kWasmI64Type) { + allocator.AllocGP(); + if constexpr (!Is64()) { + // On 32-bit platforms I64 values are stored as two I32 values. + allocator.AllocGP(); + } + } else if (retType == kWasmF32Type) { + allocator.AllocFP32(); + } else if (retType == kWasmF64Type) { + allocator.AllocFP64(); + } else { + // Also check if there are any reference return values, as this allows + // us to skip code when we process return values. + hasRefReturns = true; + allocator.AllocGP(); + } + } + returnSize = allocator.GetSize(); + } + + const paramTypes = Subslice( + sigTypes, Convert(returnCount), Convert(paramCount)) + otherwise unreachable; + + let paramBuffer: MutableSlice; + + // 10 here is an arbitrary number. The analysis of signatures of exported + // functions of big modules showed that most signatures have a low number of + // I32 parameters. We picked a cutoff point where for most signatures the + // pre-allocated stack slots are sufficient without making these stack slots + // overly big. + if (paramCount <= 10) { + // Performance optimization: we pre-allocate a stack area with 18 + // 8-byte slots, and use this area when it is sufficient for all + // parameters. If the stack area is too small, we allocate a byte array + // below. The stack area is big enough for 10 parameters. The 10 parameters + // need 18 * 8 bytes because some segments of the stack area are reserved + // for register parameters, and there may e.g. be no FP parameters passed + // by register, so all 8 FP register slots would remain empty. + const stackSlots = %RawDownCast>( + StackSlotPtr(144, torque_internal::SizeOf())); + // The size of the slice is set to 36, which is correct on 32-bit platforms. + // It does not matter though, because the size field never gets read + // afterwards. + paramBuffer = + torque_internal::unsafe::NewOffHeapMutableSlice(stackSlots, 36); + } else { + // We have to estimate the size of the byte array such that it can store + // all converted parameters. 
The size is the sum of sizes of the segments + // for the gp registers, fp registers, and stack slots. The sizes of + // the register segments are fixed, but for the size of the stack segment + // we have to guess the number of parameters on the stack. On ia32 it can + // happen that only a single parameter fits completely into a register, and + // all other parameters end up at least partially on the stack (e.g. for a + // signature with only I64 parameters). To make the calculation simpler, we + // just assume that all parameters are on the stack. + const kSlotSize: intptr = torque_internal::SizeOf(); + const bufferSize = UniqueIntPtrConstant(kNumGPRegisterParameters) * + Convert(torque_internal::SizeOf()) + + UniqueIntPtrConstant(kNumFPRegisterParameters) * kSlotSize + + Convert(paramCount) * kSlotSize; + const slice = &AllocateByteArray(Convert(bufferSize)).bytes; + paramBuffer = torque_internal::unsafe::NewMutableSlice( + slice.object, slice.offset, slice.length); + } + + let locationAllocator = LocationAllocatorForParams(paramBuffer); + let hasRefParam: bool = false; + + const items: Arguments = arguments; + for (let k: int32 = 0; k < paramCount; k++) { + const param = items[Convert(k)]; + const paramType = *paramTypes.UncheckedAtIndex(Convert(k)); + if (paramType == kWasmI32Type) { + let toRef = locationAllocator.GetGPSlot(); + typeswitch (param) { + case (smiParam: Smi): { + *toRef = Convert(Unsigned(SmiToInt32(smiParam))); + } + case (heapParam: JSAnyNotSmi): { + *toRef = + Convert(Unsigned(WasmTaggedNonSmiToInt32(heapParam))); + } + } + } else if (paramType == kWasmF32Type) { + let toRef = locationAllocator.GetFP32Slot(); + *toRef = Convert(Bitcast(WasmTaggedToFloat32(param))); + } else if (paramType == kWasmF64Type) { + let toRef = locationAllocator.GetFP64Slot(); + *RefCast(toRef) = ChangeTaggedToFloat64(param); + } else if (paramType == kWasmI64Type) { + if constexpr (Is64()) { + let toRef = locationAllocator.GetGPSlot(); + const v = TrucateBigIntToI64(context, param); + *toRef = v; + } else { + let toLowRef = locationAllocator.GetGPSlot(); + let toHighRef = locationAllocator.GetGPSlot(); + const bigIntVal = ToBigInt(context, param); + const pair = BigIntToRawBytes(bigIntVal); + *toLowRef = Signed(pair.low); + *toHighRef = Signed(pair.high); + } + } else { + // The byte array where we store converted parameters is not GC-safe. + // Therefore we can only copy references into this array once no GC can + // happen anymore. Any conversion of a primitive type can execute + // arbitrary JavaScript code and therefore also trigger GC. Therefore + // references get copied into the array only after all parameters of + // primitive types are finished. + hasRefParam = true; + } + } + if (hasRefParam) { + // Iterate over all parameters again and handle all those with ref types. + const items: Arguments = arguments; + let k: int32 = 0; + locationAllocator.StartRefs(); + // We are not using a `for` loop here because Torque does not support + // `continue` in `for` loops. 
+ while (k < paramCount) { + const paramType = *paramTypes.UncheckedAtIndex(Convert(k)); + const paramKind = paramType & kValueTypeKindBitsMask; + if (paramKind != ValueKind::kRef && paramKind != ValueKind::kRefNull) { + k++; + continue; + } + let toRef = locationAllocator.GetGPSlot(); + + const param = items[Convert(k)]; + const heapType = + (paramType >> kValueTypeKindBits) & kValueTypeHeapTypeMask; + if (heapType == HeapType::kExtern || heapType == HeapType::kNoExtern) { + if (paramKind == ValueKind::kRef && param == Null) { + ThrowTypeError(MessageTemplate::kWasmTrapJSTypeError); + } + *toRef = BitcastTaggedToWord(UnsafeCast(param)); + } else if (heapType == HeapType::kString) { + if (TaggedIsSmi(param)) { + ThrowTypeError(MessageTemplate::kWasmTrapJSTypeError); + } else if (param == Null) { + if (paramKind == ValueKind::kRef) { + ThrowTypeError(MessageTemplate::kWasmTrapJSTypeError); + } else { + *toRef = BitcastTaggedToWord(kWasmNull); + } + } else { + if (IsString(UnsafeCast(param))) { + *toRef = BitcastTaggedToWord(UnsafeCast(param)); + } else { + ThrowTypeError(MessageTemplate::kWasmTrapJSTypeError); + } + } + } else { + const convertedParam = runtime::WasmGenericJSToWasmObject( + context, functionData.instance, param, Convert(paramType)); + *toRef = BitcastTaggedToWord(UnsafeCast(convertedParam)); + } + k++; + } + } + const paramStart = paramBuffer.GCUnsafeStartPointer(); + const paramEnd = locationAllocator.GetStackEnd(); + + const internal: WasmInternalFunction = functionData.internal; + const callTarget = internal.call_target_ptr; + const instance: WasmInstanceObject = functionData.instance; + + // We construct a state that will be passed to `NewGenericJSToWasmWrapper` + // and `JSToWasmHandleReturns`. There are too many parameters to pass + // everything through registers. The stack area also contains slots for values + // that get passed from `NewGenericJSToWasmWrapper` to + // `JSToWasmHandleReturns`. + const wrapperBuffer = %RawDownCast>( + StackSlotPtr(kWrapperBufferSize, torque_internal::SizeOf())); + + *GetRefAt(wrapperBuffer, kWrapperBufferReturnCount) = returnCount; + *GetRefAt(wrapperBuffer, kWrapperBufferRefReturnCount) = hasRefReturns; + *GetRefAt(wrapperBuffer, kWrapperBufferSigRepresentationArray) = reps; + *GetRefAt(wrapperBuffer, kWrapperBufferStackReturnBufferSize) = + returnSize; + *GetRefAt(wrapperBuffer, kWrapperBufferCallTarget) = callTarget; + *GetRefAt>(wrapperBuffer, kWrapperBufferParamStart) = + paramStart; + *GetRefAt(wrapperBuffer, kWrapperBufferParamEnd) = paramEnd; + + // Both `instance` and `resultArray` get passed separately as parameters to + // make them GC-safe. They get passed over the stack so that they get scanned + // by the GC as part of the outgoing parameters of this Torque builtin. + const result = + NewGenericJSToWasmWrapper(wrapperBuffer, instance, resultArray); + // The normal return sequence of Torque-generated JavaScript builtins does not + // consider the case where the caller may push additional "undefined" + // parameters on the stack, and therefore does not generate code to pop these + // additional parameters. Here we calculate the actual number of parameters on + // the stack. This number is the number of actual parameters provided by the + // caller, which is `arguments.length`, or the number of declared arguments, + // if not enough actual parameters were provided, i.e. + // `SharedFunctionInfo::length`. 
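Editor's note: the comment above ends by defining the number of stack slots to pop, namely the larger of the actual argument count and the declared parameter count, plus one for the receiver. A tiny C++ model of that computation (the argument counts in main are made-up examples):

#include <algorithm>
#include <cstdio>

// Pops max(actual, declared) arguments plus the receiver, because callers may
// have pushed padding "undefined" values up to the declared parameter count.
int SlotsToPop(int actual_argc, int declared_argc) {
  return std::max(actual_argc, declared_argc) + 1;  // +1 for the receiver
}

int main() {
  std::printf("%d\n", SlotsToPop(2, 5));  // under-application: 6 slots
  std::printf("%d\n", SlotsToPop(7, 5));  // over-application: 8 slots
  return 0;
}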
+ let popCount = arguments.length; + const declaredArgCount = + Convert(Convert(target.shared_function_info.length)); + if (declaredArgCount > popCount) { + popCount = declaredArgCount; + } + // Also pop the receiver. + PopAndReturn(popCount + 1, result); +} + +macro WasmToJSObject( + instance: WasmInstanceObject, value: Object, retType: int32): JSAny { + const paramKind = retType & kValueTypeKindBitsMask; + const heapType = (retType >> kValueTypeKindBits) & kValueTypeHeapTypeMask; + if (paramKind == ValueKind::kRef) { + if (heapType == HeapType::kEq || heapType == HeapType::kI31 || + heapType == HeapType::kStruct || heapType == HeapType::kArray || + heapType == HeapType::kAny || heapType == HeapType::kExtern || + heapType == HeapType::kString || heapType == HeapType::kNone || + heapType == HeapType::kNoFunc || heapType == HeapType::kNoExtern) { + return UnsafeCast(value); + } + // TODO(ahaas): This is overly pessimistic: all module-defined struct and + // array types can be passed to JS as-is as well; and for function types we + // could at least support the fast path where the WasmExternalFunction has + // already been created. + return runtime::WasmGenericWasmToJSObject( + LoadContextFromInstance(instance), value); + } else { + dcheck(paramKind == ValueKind::kRefNull); + if (heapType == HeapType::kExtern || heapType == HeapType::kNoExtern) { + return UnsafeCast(value); + } + if (value == kWasmNull) { + return Null; + } + if (heapType == HeapType::kEq || heapType == HeapType::kStruct || + heapType == HeapType::kArray || heapType == HeapType::kString || + heapType == HeapType::kI31 || heapType == HeapType::kAny) { + return UnsafeCast(value); + } + // TODO(ahaas): This is overly pessimistic: all module-defined struct and + // array types can be passed to JS as-is as well; and for function types we + // could at least support the fast path where the WasmExternalFunction has + // already been created. 
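Editor's note: both the parameter loop and the WasmToJSObject macro above decode a wasm value type the same way: the low bits give the value kind (ref, ref-null, or numeric) and the next field gives the heap type. A hedged C++ sketch of that decoding follows; the bit widths and constants are placeholders, and only the shift-and-mask shape mirrors the Torque code.

#include <cstdint>
#include <cstdio>

// Placeholder field layout: low kValueTypeKindBits bits = kind, next bits =
// heap type. The real constants live in V8's value-type definitions.
constexpr uint32_t kValueTypeKindBits = 3;
constexpr uint32_t kValueTypeKindBitsMask = (1u << kValueTypeKindBits) - 1;
constexpr uint32_t kValueTypeHeapTypeMask = 0xFFFu;

struct DecodedValueType {
  uint32_t kind;
  uint32_t heap_type;
};

DecodedValueType Decode(uint32_t value_type) {
  return {value_type & kValueTypeKindBitsMask,
          (value_type >> kValueTypeKindBits) & kValueTypeHeapTypeMask};
}

int main() {
  const uint32_t encoded = (42u << kValueTypeKindBits) | 2u;  // heap type 42, kind 2
  const DecodedValueType d = Decode(encoded);
  std::printf("kind=%u heap=%u\n", d.kind, d.heap_type);
  return 0;
}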
+ return runtime::WasmGenericWasmToJSObject( + LoadContextFromInstance(instance), value); + } +} + +macro GetRefAt(base: From, offset: intptr): &T { + return torque_internal::unsafe::NewOffHeapReference( + %RawDownCast>(base + offset)); +} + +builtin JSToWasmHandleReturns( + instance: WasmInstanceObject, resultArray: JSArray, + wrapperBuffer: RawPtr): JSAny { + const returnCount = *GetRefAt( + wrapperBuffer, kWrapperBufferReturnCount); + if (returnCount == 0) { + return Undefined; + } + if (returnCount == 1) { + const reps = *GetRefAt( + wrapperBuffer, kWrapperBufferSigRepresentationArray); + const retType = *GetRefAt(reps, 0); + if (retType == kWasmI32Type) { + const ret = *GetRefAt( + wrapperBuffer, kWrapperBufferGPReturnRegister1); + const result = Convert(ret); + return result; + } else if (retType == kWasmF32Type) { + const resultRef = + GetRefAt(wrapperBuffer, kWrapperBufferFPReturnRegister1); + return Convert(*resultRef); + } else if (retType == kWasmF64Type) { + const resultRef = + GetRefAt(wrapperBuffer, kWrapperBufferFPReturnRegister1); + return Convert(*resultRef); + } else if (retType == kWasmI64Type) { + if constexpr (Is64()) { + const ret = *GetRefAt( + wrapperBuffer, kWrapperBufferGPReturnRegister1); + return I64ToBigInt(ret); + } else { + const lowWord = *GetRefAt( + wrapperBuffer, kWrapperBufferGPReturnRegister1); + const highWord = *GetRefAt( + wrapperBuffer, kWrapperBufferGPReturnRegister2); + return I32PairToBigInt(lowWord, highWord); + } + } else { + const ptr = %RawDownCast>( + wrapperBuffer + kWrapperBufferGPReturnRegister1); + const rawRef = *GetRefAt(ptr, 0); + const value = BitcastWordToTagged(rawRef); + return WasmToJSObject(instance, value, retType); + } + } + + // Multi return; + const fixedArray: FixedArray = UnsafeCast(resultArray.elements); + const returnBuffer = *GetRefAt( + wrapperBuffer, kWrapperBufferStackReturnBufferStart); + let locationAllocator = LocationAllocatorForReturns( + wrapperBuffer + kWrapperBufferGPReturnRegister1, + wrapperBuffer + kWrapperBufferFPReturnRegister1, returnBuffer); + + const reps = *GetRefAt( + wrapperBuffer, kWrapperBufferSigRepresentationArray); + + const retTypes = torque_internal::unsafe::NewOffHeapConstSlice( + %RawDownCast>(reps), Convert(returnCount)); + + const hasRefReturns = *GetRefAt( + wrapperBuffer, kWrapperBufferRefReturnCount); + + if (hasRefReturns) { + // We first process all references and copy them in the the result array to + // put them into a location that is known to the GC. The processing of + // references does not trigger a GC, but the allocation of HeapNumbers and + // BigInts for primitive types may trigger a GC. + + for (let k: intptr = 0; k < Convert(returnCount); k++) { + const retType = *retTypes.UncheckedAtIndex(Convert(k)); + if (retType == kWasmI32Type) { + locationAllocator.GetGPSlot(); + } else if (retType == kWasmF32Type) { + locationAllocator.GetFP32Slot(); + } else if (retType == kWasmI64Type) { + locationAllocator.GetGPSlot(); + if constexpr (!Is64()) { + locationAllocator.GetGPSlot(); + } + } else if (retType == kWasmF64Type) { + locationAllocator.GetFP64Slot(); + } else { + let value: Object; + const slot = locationAllocator.GetGPSlot(); + const rawRef = *slot; + value = BitcastWordToTagged(rawRef); + // Store the wasm object in the JSArray to make it GC safe. The + // transformation will happen later in a second loop. 
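Editor's note: on 32-bit targets the wrapper code in this file moves each wasm i64 as two 32-bit GP values (BigIntToRawBytes on the way in, I32PairToBigInt on the way out). The arithmetic is just a split into low and high halves; here it is as standalone C++, a sketch rather than V8's implementation.

#include <cstdint>
#include <cstdio>

struct I64Pair {
  uint32_t low;
  uint32_t high;
};

// Split a 64-bit value into the two 32-bit halves that fit in GP registers.
I64Pair Split(int64_t v) {
  const uint64_t u = static_cast<uint64_t>(v);
  return {static_cast<uint32_t>(u), static_cast<uint32_t>(u >> 32)};
}

// Reassemble the original 64-bit value from the two halves.
int64_t Join(I64Pair p) {
  return static_cast<int64_t>((static_cast<uint64_t>(p.high) << 32) | p.low);
}

int main() {
  const int64_t v = -123456789012345LL;
  std::printf("roundtrip ok: %d\n", Join(Split(v)) == v);
  return 0;
}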
+ fixedArray.objects[k] = value; + } + } + } + + locationAllocator = LocationAllocatorForReturns( + wrapperBuffer + kWrapperBufferGPReturnRegister1, + wrapperBuffer + kWrapperBufferFPReturnRegister1, returnBuffer); + + for (let k: intptr = 0; k < Convert(returnCount); k++) { + const retType = *retTypes.UncheckedAtIndex(Convert(k)); + if (retType == kWasmI32Type) { + const slot = locationAllocator.GetGPSlot(); + const val = *RefCast(slot); + fixedArray.objects[k] = Convert(val); + } else if (retType == kWasmF32Type) { + const slot = locationAllocator.GetFP32Slot(); + const val = *RefCast(slot); + fixedArray.objects[k] = Convert(val); + } else if (retType == kWasmI64Type) { + if constexpr (Is64()) { + const slot = locationAllocator.GetGPSlot(); + const val = *slot; + fixedArray.objects[k] = I64ToBigInt(val); + } else { + const lowWordSlot = locationAllocator.GetGPSlot(); + const highWordSlot = locationAllocator.GetGPSlot(); + const lowWord = *lowWordSlot; + const highWord = *highWordSlot; + fixedArray.objects[k] = I32PairToBigInt(lowWord, highWord); + } + } else if (retType == kWasmF64Type) { + const slot = locationAllocator.GetFP64Slot(); + const val = *RefCast(slot); + fixedArray.objects[k] = Convert(val); + } else { + locationAllocator.GetGPSlot(); + const value = fixedArray.objects[k]; + fixedArray.objects[k] = WasmToJSObject(instance, value, retType); + } + } + + return resultArray; +} + +} // namespace wasm diff --git a/v8/src/builtins/loong64/builtins-loong64.cc b/v8/src/builtins/loong64/builtins-loong64.cc index c3d8508de..f4b5f601d 100644 --- a/v8/src/builtins/loong64/builtins-loong64.cc +++ b/v8/src/builtins/loong64/builtins-loong64.cc @@ -908,9 +908,17 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, } namespace { -void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array) { - __ St_h(zero_reg, - FieldMemOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset)); + +void ResetSharedFunctionInfoAge(MacroAssembler* masm, Register sfi) { + __ St_h(zero_reg, FieldMemOperand(sfi, SharedFunctionInfo::kAgeOffset)); +} + +void ResetJSFunctionAge(MacroAssembler* masm, Register js_function, + Register scratch) { + __ LoadTaggedField( + scratch, + FieldMemOperand(js_function, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, scratch); } void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm, @@ -984,6 +992,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { BaselineOutOfLinePrologueDescriptor::kCalleeContext); Register callee_js_function = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); + { + UseScratchRegisterScope temps(masm); + ResetJSFunctionAge(masm, callee_js_function, temps.Acquire()); + } __ Push(callee_context, callee_js_function); DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister); DCHECK_EQ(callee_js_function, kJSFunctionRegister); @@ -994,7 +1006,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // the frame, so load it into a register. 
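Editor's note: the new ResetSharedFunctionInfoAge and ResetJSFunctionAge helpers replace the old per-BytecodeArray age reset: entering the function now zeroes a 16-bit age counter on the SharedFunctionInfo reached through the closure. A simplified C++ model of that bookkeeping follows; the struct layout is invented for illustration and is not V8's object layout.

#include <cstdint>
#include <cstdio>

// Hypothetical records standing in for the real heap objects.
struct SharedFunctionInfo {
  uint16_t age = 7;  // bumped by the GC in the real system; 7 is arbitrary here
};

struct JSFunction {
  SharedFunctionInfo* shared_function_info;
};

void ResetSharedFunctionInfoAge(SharedFunctionInfo* sfi) { sfi->age = 0; }

void ResetJSFunctionAge(JSFunction* closure) {
  // Mirrors the tagged-field load followed by the 16-bit store above.
  ResetSharedFunctionInfoAge(closure->shared_function_info);
}

int main() {
  SharedFunctionInfo sfi;
  JSFunction fn{&sfi};
  ResetJSFunctionAge(&fn);
  std::printf("age after entry: %u\n", sfi.age);  // 0
  return 0;
}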
Register bytecode_array = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); - ResetBytecodeAge(masm, bytecode_array); __ Push(argc, bytecode_array); // Baseline code frames store the feedback vector where interpreter would @@ -1108,6 +1119,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ LoadTaggedField( kScratchReg, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, kScratchReg); __ LoadTaggedField( kInterpreterBytecodeArrayRegister, FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset)); @@ -1173,8 +1185,6 @@ void Builtins::Generate_InterpreterEntryTrampoline( FrameScope frame_scope(masm, StackFrame::MANUAL); __ PushStandardFrame(closure); - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister); - // Load initial bytecode offset. __ li(kInterpreterBytecodeOffsetRegister, Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); @@ -2811,6 +2821,10 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { __ Trap(); } +void Builtins::Generate_NewGenericJSToWasmWrapper(MacroAssembler* masm) { + __ Trap(); +} + #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, @@ -3189,13 +3203,20 @@ MemOperand ExitFrameCallerStackSlotOperand(int index) { } // namespace -void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { +void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, + CallApiCallbackMode mode) { // ----------- S t a t e ------------- - // -- cp : context + // CallApiCallbackMode::kGeneric mode: + // -- a2 : arguments count (not including the receiver) + // -- a3 : call handler info + // -- a0 : holder + // CallApiCallbackMode::kNoSideEffects/kWithSideEffectsSideEffects modes: // -- a1 : api function address // -- a2 : arguments count // -- a3 : call data // -- a0 : holder + // Both modes: + // -- cp : context // -- sp[0] : receiver // -- sp[8] : first argument // -- ... @@ -3204,15 +3225,34 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { Register function_callback_info_arg = arg_reg_1; - Register api_function_address = a1; - Register argc = a2; - Register call_data = a3; - Register holder = a0; + Register api_function_address = no_reg; + Register argc = no_reg; + Register call_data = no_reg; + Register callback = no_reg; + Register holder = no_reg; Register scratch = t0; - Register base = t1; // For addressing MemOperands on the stack. - - DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch, - base)); + Register scratch2 = t1; + Register base = t2; // For addressing MemOperands on the stack. 
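Editor's note: the register comments above distinguish the generic mode, which receives a CallHandlerInfo and must load the callback address and the call data from it, from the two optimized modes, which receive the function address and the call data directly. A rough C++ sketch of that split; the CallHandlerInfo struct and the callback signature are stand-ins, not the real API.

#include <cstdio>

enum class CallApiCallbackMode { kGeneric, kNoSideEffects, kWithSideEffects };

// Stand-in for the real CallHandlerInfo object.
struct CallHandlerInfo {
  void (*callback)(const char*);
  const char* data;
};

void CallApiCallback(CallApiCallbackMode mode, const CallHandlerInfo* handler,
                     void (*api_function)(const char*), const char* call_data) {
  switch (mode) {
    case CallApiCallbackMode::kGeneric:
      // Generic mode: both the target address and the data come from the
      // handler object.
      handler->callback(handler->data);
      break;
    case CallApiCallbackMode::kNoSideEffects:
    case CallApiCallbackMode::kWithSideEffects:
      // Optimized modes: address and data were already materialized by the
      // caller.
      api_function(call_data);
      break;
  }
}

void Print(const char* s) { std::printf("%s\n", s); }

int main() {
  const CallHandlerInfo handler{Print, "via CallHandlerInfo"};
  CallApiCallback(CallApiCallbackMode::kGeneric, &handler, nullptr, nullptr);
  CallApiCallback(CallApiCallbackMode::kWithSideEffects, nullptr, Print, "direct");
  return 0;
}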
+ + switch (mode) { + case CallApiCallbackMode::kGeneric: + api_function_address = a1; + argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); + callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + holder = CallApiCallbackGenericDescriptor::HolderRegister(); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + api_function_address = + CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister(); + argc = CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister(); + call_data = CallApiCallbackOptimizedDescriptor::CallDataRegister(); + holder = CallApiCallbackOptimizedDescriptor::HolderRegister(); + break; + } + DCHECK(!AreAliased(api_function_address, argc, holder, call_data, callback, + scratch, scratch2, base)); using FCI = FunctionCallbackInfo; using FCA = FunctionCallbackArguments; @@ -3259,7 +3299,18 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { __ St_d(scratch, MemOperand(sp, FCA::kReturnValueIndex * kSystemPointerSize)); // kData. - __ St_d(call_data, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + switch (mode) { + case CallApiCallbackMode::kGeneric: + __ LoadTaggedField( + scratch2, FieldMemOperand(callback, CallHandlerInfo::kDataOffset)); + __ St_d(scratch2, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + __ St_d(call_data, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + break; + } // kNewTarget. __ St_d(scratch, MemOperand(sp, FCA::kNewTargetIndex * kSystemPointerSize)); @@ -3277,9 +3328,38 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { static_assert(FCI::kImplicitArgsOffset == 0); static_assert(FCI::kValuesOffset == 1 * kSystemPointerSize); static_assert(FCI::kLengthOffset == 2 * kSystemPointerSize); + const int exit_frame_params_size = + mode == CallApiCallbackMode::kGeneric ? 2 : 0; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); + if (mode == CallApiCallbackMode::kGeneric) { + ASM_CODE_COMMENT_STRING(masm, "Push API_CALLBACK_EXIT frame arguments"); + __ AllocateStackSpace(exit_frame_params_size * kSystemPointerSize); + + // Argc parameter as a Smi. + static_assert(ApiCallbackExitFrameConstants::kArgcOffset == + 3 * kSystemPointerSize); + __ SmiTag(scratch, argc); + __ St_d(scratch, MemOperand(sp, 1 * kSystemPointerSize)); + + // Target parameter. + static_assert(ApiCallbackExitFrameConstants::kTargetOffset == + 2 * kSystemPointerSize); + __ LoadTaggedField( + scratch, + FieldMemOperand(callback, CallHandlerInfo::kOwnerTemplateOffset)); + __ St_d(scratch, MemOperand(sp, 0 * kSystemPointerSize)); + + __ LoadExternalPointerField( + api_function_address, + FieldMemOperand(callback, + CallHandlerInfo::kMaybeRedirectedCallbackOffset), + kCallHandlerInfoCallbackTag); + + __ EnterExitFrame(kApiStackSpace, StackFrame::API_CALLBACK_EXIT); + } else { + __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); + } { ASM_CODE_COMMENT_STRING(masm, "Initialize FunctionCallbackInfo"); @@ -3306,7 +3386,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // drop, not the number of bytes. 
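Editor's note: the comment above stresses that the value written next is a number of stack slots, not bytes: the implicit FunctionCallbackArguments slots, the receiver, the extra API_CALLBACK_EXIT parameters (two in generic mode, zero otherwise), and the JS arguments. A small C++ model of that sum; kImplicitSlots is a placeholder value, only the formula follows the code.

#include <cstdio>

constexpr int kImplicitSlots = 6;  // placeholder standing in for FCA::kArgsLength
constexpr int kReceiver = 1;

int SlotsToDrop(int argc, bool generic_mode) {
  const int exit_frame_params = generic_mode ? 2 : 0;  // Smi argc + target
  return kImplicitSlots + kReceiver + exit_frame_params + argc;
}

int main() {
  std::printf("%d\n", SlotsToDrop(3, true));   // generic mode
  std::printf("%d\n", SlotsToDrop(3, false));  // optimized modes
  return 0;
}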
MemOperand stack_space_operand = ExitFrameStackSlotOperand(FCI::kLengthOffset + kSlotsToDropOnStackSize); - __ Add_d(scratch, argc, Operand(FCA::kArgsLengthWithReceiver)); + __ Add_d(scratch, argc, + Operand(FCA::kArgsLengthWithReceiver + exit_frame_params_size)); __ St_d(scratch, stack_space_operand); __ RecordComment("v8::FunctionCallback's argument."); @@ -3316,13 +3397,14 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { DCHECK( !AreAliased(api_function_address, scratch, function_callback_info_arg)); - ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(mode); // Pass api function address to thunk wrapper in case profiler or side-effect // checking is enabled. Register thunk_arg = api_function_address; - MemOperand return_value_operand = - ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex); + MemOperand return_value_operand = ExitFrameCallerStackSlotOperand( + FCA::kReturnValueIndex + exit_frame_params_size); static constexpr int kUseStackSpaceOperand = 0; @@ -3424,9 +3506,10 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { __ Add_d(property_callback_info_arg, sp, Operand(1 * kSystemPointerSize)); __ RecordComment("Load api_function_address"); - __ Ld_d( + __ LoadExternalPointerField( api_function_address, - FieldMemOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset)); + FieldMemOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset), + kAccessorInfoGetterTag); DCHECK( !AreAliased(api_function_address, property_callback_info_arg, name_arg)); @@ -3696,6 +3779,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ LoadTaggedField( code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + + if (is_osr) { + ResetSharedFunctionInfoAge(masm, code_obj); + } + __ LoadTaggedField( code_obj, FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); @@ -3796,7 +3884,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, // TODO(liuyu): Remove Ld as arm64 after register reallocation. __ Ld_d(kInterpreterBytecodeArrayRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister); Generate_OSREntry(masm, code_obj); } else { __ Jump(code_obj); diff --git a/v8/src/builtins/map-groupby.tq b/v8/src/builtins/map-groupby.tq new file mode 100644 index 000000000..df4ba1281 --- /dev/null +++ b/v8/src/builtins/map-groupby.tq @@ -0,0 +1,10 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +transitioning javascript builtin +MapGroupBy(js-implicit context: NativeContext, receiver: JSAny)( + _items: JSAny, _callback: JSAny): JSAny { + // TODO(v8:12499): Implement. 
+ return Undefined; +} diff --git a/v8/src/builtins/mips64/builtins-mips64.cc b/v8/src/builtins/mips64/builtins-mips64.cc index 52b1b1fdd..0aa0d42bf 100644 --- a/v8/src/builtins/mips64/builtins-mips64.cc +++ b/v8/src/builtins/mips64/builtins-mips64.cc @@ -891,9 +891,16 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, } namespace { -void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array) { - __ Sh(zero_reg, - FieldMemOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset)); + +void ResetSharedFunctionInfoAge(MacroAssembler* masm, Register sfi) { + __ Sh(zero_reg, FieldMemOperand(sfi, SharedFunctionInfo::kAgeOffset)); +} + +void ResetJSFunctionAge(MacroAssembler* masm, Register js_function, + Register scratch) { + __ Ld(scratch, + FieldMemOperand(js_function, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, scratch); } void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm, @@ -964,6 +971,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { BaselineOutOfLinePrologueDescriptor::kCalleeContext); Register callee_js_function = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); + { + UseScratchRegisterScope temps(masm); + ResetJSFunctionAge(masm, callee_js_function, temps.Acquire()); + } __ Push(callee_context, callee_js_function); DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister); DCHECK_EQ(callee_js_function, kJSFunctionRegister); @@ -974,7 +985,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // the frame, so load it into a register. Register bytecode_array = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); - ResetBytecodeAge(masm, bytecode_array); __ Push(argc, bytecode_array); // Baseline code frames store the feedback vector where interpreter would @@ -1087,6 +1097,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( // kInterpreterBytecodeArrayRegister. __ Ld(kScratchReg, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, kScratchReg); __ Ld(kInterpreterBytecodeArrayRegister, FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset)); Label is_baseline; @@ -1148,8 +1159,6 @@ void Builtins::Generate_InterpreterEntryTrampoline( FrameScope frame_scope(masm, StackFrame::MANUAL); __ PushStandardFrame(closure); - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister); - // Load initial bytecode offset. 
__ li(kInterpreterBytecodeOffsetRegister, Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); @@ -2785,6 +2794,10 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { __ Trap(); } +void Builtins::Generate_NewGenericJSToWasmWrapper(MacroAssembler* masm) { + __ Trap(); +} + #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, @@ -3228,13 +3241,20 @@ MemOperand ExitFrameCallerStackSlotOperand(int index) { } // namespace -void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { +void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, + CallApiCallbackMode mode) { // ----------- S t a t e ------------- - // -- cp : context + // CallApiCallbackMode::kGeneric mode: + // -- a2 : arguments count (not including the receiver) + // -- a3 : call handler info + // -- a0 : holder + // CallApiCallbackMode::kNoSideEffects/kWithSideEffectsSideEffects modes: // -- a1 : api function address // -- a2 : arguments count // -- a3 : call data // -- a0 : holder + // Both modes: + // -- cp : context // -- sp[0] : receiver // -- sp[8] : first argument // -- ... @@ -3243,15 +3263,34 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { Register function_callback_info_arg = arg_reg_1; - Register api_function_address = a1; - Register argc = a2; - Register call_data = a3; - Register holder = a0; + Register api_function_address = no_reg; + Register argc = no_reg; + Register call_data = no_reg; + Register callback = no_reg; + Register holder = no_reg; Register scratch = t0; - Register base = t1; // For addressing MemOperands on the stack. - - DCHECK(!AreAliased(api_function_address, argc, call_data, - holder, scratch, base)); + Register scratch2 = t1; + Register base = t2; // For addressing MemOperands on the stack. + + switch (mode) { + case CallApiCallbackMode::kGeneric: + api_function_address = a1; + argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); + callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + holder = CallApiCallbackGenericDescriptor::HolderRegister(); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + api_function_address = + CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister(); + argc = CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister(); + call_data = CallApiCallbackOptimizedDescriptor::CallDataRegister(); + holder = CallApiCallbackOptimizedDescriptor::HolderRegister(); + break; + } + DCHECK(!AreAliased(api_function_address, argc, holder, call_data, callback, + scratch, scratch2, base)); using FCI = FunctionCallbackInfo; using FCA = FunctionCallbackArguments; @@ -3298,7 +3337,17 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { __ Sd(scratch, MemOperand(sp, FCA::kReturnValueIndex * kSystemPointerSize)); // kData. - __ Sd(call_data, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + switch (mode) { + case CallApiCallbackMode::kGeneric: + __ Ld(scratch2, FieldMemOperand(callback, CallHandlerInfo::kDataOffset)); + __ Sd(scratch2, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + __ Sd(call_data, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + break; + } // kNewTarget. 
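Editor's note: the generic-mode frame setup in these CallApiCallbackImpl hunks stores argc as a Smi (the SmiTag(scratch, argc) steps). As a reminder of what that means, here is a simplified C++ model of Smi tagging: small integers are kept shifted left with a zero tag bit so they can share a word with heap-object pointers. The one-bit shift below is illustrative; the actual shift width depends on the build configuration.

#include <cstdint>
#include <cstdio>

constexpr int kSmiTagSize = 1;  // illustrative; wider on some configurations

intptr_t SmiTag(intptr_t value) { return value << kSmiTagSize; }
intptr_t SmiUntag(intptr_t tagged) { return tagged >> kSmiTagSize; }
bool IsSmi(intptr_t tagged) { return (tagged & 1) == 0; }  // heap objects keep bit 0 set

int main() {
  const intptr_t tagged = SmiTag(123);
  std::printf("is_smi=%d value=%ld\n", IsSmi(tagged),
              static_cast<long>(SmiUntag(tagged)));
  return 0;
}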
__ Sd(scratch, MemOperand(sp, FCA::kNewTargetIndex * kSystemPointerSize)); @@ -3316,9 +3365,35 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { static_assert(FCI::kImplicitArgsOffset == 0); static_assert(FCI::kValuesOffset == 1 * kSystemPointerSize); static_assert(FCI::kLengthOffset == 2 * kSystemPointerSize); + const int exit_frame_params_size = + mode == CallApiCallbackMode::kGeneric ? 2 : 0; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); + if (mode == CallApiCallbackMode::kGeneric) { + ASM_CODE_COMMENT_STRING(masm, "Push API_CALLBACK_EXIT frame arguments"); + __ AllocateStackSpace(exit_frame_params_size * kSystemPointerSize); + + // Argc parameter as a Smi. + static_assert(ApiCallbackExitFrameConstants::kArgcOffset == + 3 * kSystemPointerSize); + __ SmiTag(scratch, argc); + __ Sd(scratch, MemOperand(sp, 1 * kSystemPointerSize)); + + // Target parameter. + static_assert(ApiCallbackExitFrameConstants::kTargetOffset == + 2 * kSystemPointerSize); + __ Ld(scratch, + FieldMemOperand(callback, CallHandlerInfo::kOwnerTemplateOffset)); + __ Sd(scratch, MemOperand(sp, 0 * kSystemPointerSize)); + + __ Ld(api_function_address, + FieldMemOperand(callback, + CallHandlerInfo::kMaybeRedirectedCallbackOffset)); + + __ EnterExitFrame(kApiStackSpace, StackFrame::API_CALLBACK_EXIT); + } else { + __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); + } { ASM_CODE_COMMENT_STRING(masm, "Initialize FunctionCallbackInfo"); @@ -3345,7 +3420,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // drop, not the number of bytes. MemOperand stack_space_operand = ExitFrameStackSlotOperand(FCI::kLengthOffset + kSlotsToDropOnStackSize); - __ Daddu(scratch, argc, Operand(FCA::kArgsLengthWithReceiver)); + __ Daddu(scratch, argc, + Operand(FCA::kArgsLengthWithReceiver + exit_frame_params_size)); __ Sd(scratch, stack_space_operand); __ RecordComment("v8::FunctionCallback's argument."); @@ -3357,13 +3433,14 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { DCHECK( !AreAliased(api_function_address, scratch, function_callback_info_arg)); - ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(mode); // Pass api function address to thunk wrapper in case profiler or side-effect // checking is enabled. Register thunk_arg = api_function_address; - MemOperand return_value_operand = - ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex); + MemOperand return_value_operand = ExitFrameCallerStackSlotOperand( + FCA::kReturnValueIndex + exit_frame_params_size); static constexpr int kUseStackSpaceOperand = 0; @@ -3738,6 +3815,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, Register code_obj = s1; __ Ld(code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + + if (is_osr) { + ResetSharedFunctionInfoAge(masm, code_obj); + } + __ Ld(code_obj, FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); @@ -3837,7 +3919,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, // TODO(liuyu): Remove Ld as arm64 after register reallocation. 
__ Ld(kInterpreterBytecodeArrayRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister); Generate_OSREntry(masm, code_obj); } else { __ Jump(code_obj); diff --git a/v8/src/builtins/object-groupby.tq b/v8/src/builtins/object-groupby.tq new file mode 100644 index 000000000..9c0b53d34 --- /dev/null +++ b/v8/src/builtins/object-groupby.tq @@ -0,0 +1,143 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +namespace collections { + +extern macro CollectionsBuiltinsAssembler::AddValueToKeyedGroup( + OrderedHashMap, Object, Object, String): OrderedHashMap; + +extern macro CollectionsBuiltinsAssembler::NormalizeNumberKey(JSAny): JSAny; + +} // namespace collections + +// https://tc39.es/proposal-array-grouping/#sec-group-by +transitioning builtin GroupByGeneric(implicit context: Context)( + items: JSAny, callbackfn: Callable, coerceToProperty: Boolean, + methodName: String): OrderedHashMap { + // 3. Let groups be a new empty List. + let groups = AllocateOrderedHashMap(); + + // 4. Let iteratorRecord be ? GetIterator(items, sync). + const fastIteratorResultMap = GetIteratorResultMap(); + const iteratorRecord = iterator::GetIterator(items); + + // 5. Let k be 0. + let k: Number = 0; + + // 6. Repeat, + while (true) { + // a. If k ≥ 2^53 - 1, then + // i. Let error be ThrowCompletion(a newly created TypeError object). + // ii. Return ? IteratorClose(iteratorRecord, error). + // + // The spec requires that we throw an exception if index reaches 2^53-1, + // but an empty loop would take >100 days to do this many iterations. To + // actually run for that long would require an iterator that never set + // done to true and a target array which somehow never ran out of + // memory, e.g. a proxy that discarded the values. Ignoring this case + // just means we would call the callback with 2^53. + dcheck(k < kMaxSafeInteger); + + // b. Let next be ? IteratorStep(iteratorRecord). + let next: JSReceiver; + try { + next = iterator::IteratorStep(iteratorRecord, fastIteratorResultMap) + otherwise NextIsFalse; + } + // c. If next is false, then + label NextIsFalse { + // i. Return groups. + return groups; + } + + // d. Let value be ? IteratorValue(next). + const value = iterator::IteratorValue(next, fastIteratorResultMap); + + // e. Let key be Completion(Call(callbackfn, undefined, « value, 𝔽(k) »)). + let key: JSAny; + try { + key = Call(context, callbackfn, Undefined, value, k); + + // g. If coercion is property, then + if (coerceToProperty == True) { + // i. Set key to Completion(ToPropertyKey(key)). + key = ToName(key); + } + // h. Else, + else { + // i. Assert: coercion is zero. + // ii. If key is -0𝔽, set key to +0𝔽. + key = collections::NormalizeNumberKey(key); + } + } catch (e, message) { + // f. and g.ii. + // IfAbruptCloseIterator(key, iteratorRecord). + iterator::IteratorCloseOnException(iteratorRecord); + ReThrowWithMessage(context, e, message); + } + + // i. Perform AddValueToKeyedGroup(groups, key, value). + groups = collections::AddValueToKeyedGroup(groups, key, value, methodName); + + // j. Set k to k + 1. + k += 1; + } + + unreachable; +} + +// https://tc39.es/proposal-array-grouping/#sec-group-by +transitioning macro GroupByImpl(implicit context: Context)( + items: JSAny, callback: JSAny, + methodName: constexpr string): OrderedHashMap { + // 1. Perform ? RequireObjectCoercible(items). 
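Editor's note on one detail of GroupByGeneric above: when keys are not coerced to property keys, a -0 key is normalized to +0 before grouping (spec step h.ii, implemented by NormalizeNumberKey). In plain C++ the same normalization is a one-liner, because -0.0 compares equal to 0.0.

#include <cmath>
#include <cstdio>

double NormalizeNumberKey(double key) {
  if (key == 0.0) return 0.0;  // folds -0.0 into +0.0; every other value is unchanged
  return key;
}

int main() {
  std::printf("sign bit of normalized -0: %d\n",
              std::signbit(NormalizeNumberKey(-0.0)));  // 0
  return 0;
}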
+ RequireObjectCoercible(items, methodName); + + // 2. If IsCallable(callbackfn) is false, throw a TypeError exception. + const callbackfn = Cast(callback) + otherwise ThrowTypeError(MessageTemplate::kCalledNonCallable, callback); + + // TODO(v8:12499): Array fast path. + return GroupByGeneric( + items, callbackfn, /* coerceToProperty */ True, methodName); +} + +transitioning javascript builtin +ObjectGroupBy(js-implicit context: NativeContext, receiver: JSAny)( + items: JSAny, callback: JSAny): JSAny { + // 1. Let groups be ? GroupBy(items, callbackfn, property). + const groups: OrderedHashMap = GroupByImpl(items, callback, 'Object.groupBy'); + + let iter = collections::NewUnmodifiedOrderedHashMapIterator(groups); + + // 2. Let obj be OrdinaryObjectCreate(null). + // 3. For each Record { [[Key]], [[Elements]] } g of groups, do + // a. Let elements be CreateArrayFromList(g.[[Elements]]). + // b. Perform ! CreateDataPropertyOrThrow(obj, g.[[Key]], elements). + let properties: NameDictionary|SwissNameDictionary; + + @if(V8_ENABLE_SWISS_NAME_DICTIONARY) { + properties = + AllocateSwissNameDictionary(Convert(iter.usedCapacity)); + } + @ifnot(V8_ENABLE_SWISS_NAME_DICTIONARY) { + properties = AllocateNameDictionary(Convert(iter.usedCapacity)); + } + const nullProtoMap = LoadSlowObjectWithNullPrototypeMap(context); + const obj = AllocateJSObjectFromMap(nullProtoMap, properties); + + // TODO(v8:12499): Determine more specific elements map if worth it. + try { + const arrayMap = GetFastPackedElementsJSArrayMap(); + while (true) { + const entry = iter.Next() otherwise Done; + const elements = ArrayListElements(UnsafeCast(entry.value)); + const array = NewJSArray(arrayMap, elements); + CreateDataProperty(obj, entry.key, array); + } + } label Done {} + + // 4. Return obj. 
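Editor's note: GroupByGeneric and ObjectGroupBy above implement the proposal's GroupBy: walk the items, call the callback to obtain a key, and append the value to that key's group, preserving the order in which keys are first seen. Here is a compact C++ sketch of that loop with the iterator protocol, key coercion, and error handling stripped away; it illustrates the algorithm, not V8's data structures.

#include <algorithm>
#include <cstdio>
#include <functional>
#include <string>
#include <utility>
#include <vector>

// Groups preserve first-seen key order, like the spec's ordered "groups" list.
template <typename T>
using Groups = std::vector<std::pair<std::string, std::vector<T>>>;

template <typename T>
Groups<T> GroupBy(const std::vector<T>& items,
                  const std::function<std::string(const T&, size_t)>& key_of) {
  Groups<T> groups;
  for (size_t k = 0; k < items.size(); ++k) {
    const std::string key = key_of(items[k], k);
    auto it = std::find_if(groups.begin(), groups.end(),
                           [&key](const auto& g) { return g.first == key; });
    if (it == groups.end()) {
      groups.push_back({key, {items[k]}});
    } else {
      it->second.push_back(items[k]);
    }
  }
  return groups;
}

int main() {
  const Groups<int> groups = GroupBy<int>(
      {1, 2, 3, 4, 5},
      [](const int& v, size_t) { return v % 2 ? "odd" : "even"; });
  for (const auto& group : groups) {
    std::printf("%s: %zu values\n", group.first.c_str(), group.second.size());
  }
  return 0;
}

Object.groupBy then copies these groups onto a null-prototype object, as the Torque above does, while Map.groupBy (the stub earlier in this patch) is specified to return a Map and to normalize -0 keys instead of stringifying them.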
+ return obj; +} diff --git a/v8/src/builtins/ppc/builtins-ppc.cc b/v8/src/builtins/ppc/builtins-ppc.cc index b6d97f543..8f6d228c3 100644 --- a/v8/src/builtins/ppc/builtins-ppc.cc +++ b/v8/src/builtins/ppc/builtins-ppc.cc @@ -87,13 +87,21 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address, __ Ret(); } -void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array, - Register scratch) { - DCHECK(!AreAliased(bytecode_array, scratch)); +void ResetSharedFunctionInfoAge(MacroAssembler* masm, Register sfi, + Register scratch) { + DCHECK(!AreAliased(sfi, scratch)); __ mov(scratch, Operand(0)); - __ StoreU16( - scratch, - FieldMemOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset), r0); + __ StoreU16(scratch, FieldMemOperand(sfi, SharedFunctionInfo::kAgeOffset), + no_reg); +} + +void ResetJSFunctionAge(MacroAssembler* masm, Register js_function, + Register scratch1, Register scratch2) { + __ LoadTaggedField( + scratch1, + FieldMemOperand(js_function, JSFunction::kSharedFunctionInfoOffset), + scratch2); + ResetSharedFunctionInfoAge(masm, scratch1, scratch2); } void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm, @@ -130,6 +138,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ LoadTaggedField( code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset), r0); + + if (is_osr) { + ResetSharedFunctionInfoAge(masm, code_obj, r6); + } + __ LoadTaggedField( code_obj, FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset), r0); @@ -233,8 +246,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ Pop(kInterpreterAccumulatorRegister); if (is_osr) { - Register scratch = ip; - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, scratch); Generate_OSREntry(masm, code_obj, 0); } else { __ Jump(code_obj); @@ -1247,6 +1258,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { BaselineOutOfLinePrologueDescriptor::kCalleeContext); Register callee_js_function = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); + ResetJSFunctionAge(masm, callee_js_function, r11, r0); __ Push(callee_context, callee_js_function); DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister); DCHECK_EQ(callee_js_function, kJSFunctionRegister); @@ -1257,7 +1269,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // the frame, so load it into a register. Register bytecodeArray = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); - ResetBytecodeAge(masm, bytecodeArray, r11); __ Push(argc, bytecodeArray); @@ -1371,6 +1382,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( // kInterpreterBytecodeArrayRegister. __ LoadTaggedField( r7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset), r0); + ResetSharedFunctionInfoAge(masm, r7, ip); // Load original bytecode array or the debug copy. __ LoadTaggedField( kInterpreterBytecodeArrayRegister, @@ -1445,8 +1457,6 @@ void Builtins::Generate_InterpreterEntryTrampoline( FrameScope frame_scope(masm, StackFrame::MANUAL); __ PushStandardFrame(closure); - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, r8); - // Load initial bytecode offset. __ mov(kInterpreterBytecodeOffsetRegister, Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); @@ -2960,6 +2970,10 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { // Only needed on x64. 
__ Trap(); } + +void Builtins::Generate_NewGenericJSToWasmWrapper(MacroAssembler* masm) { + __ Trap(); +} #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, @@ -3434,13 +3448,20 @@ MemOperand ExitFrameCallerStackSlotOperand(int index) { } // namespace -void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { +void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, + CallApiCallbackMode mode) { // ----------- S t a t e ------------- - // -- cp : context + // CallApiCallbackMode::kGeneric mode: + // -- r5 : arguments count (not including the receiver) + // -- r6 : call handler info + // -- r3 : holder + // CallApiCallbackMode::kNoSideEffects/kWithSideEffectsSideEffects modes: // -- r4 : api function address // -- r5 : arguments count (not including the receiver) // -- r6 : call data // -- r3 : holder + // Both modes: + // -- cp : context // -- sp[0] : receiver // -- sp[8] : first argument // -- ... @@ -3449,12 +3470,33 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { Register function_callback_info_arg = arg_reg_1; - Register api_function_address = r4; - Register argc = r5; - Register call_data = r6; - Register holder = r3; + Register api_function_address = no_reg; + Register argc = no_reg; + Register call_data = no_reg; + Register callback = no_reg; + Register holder = no_reg; Register scratch = r7; - DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch)); + Register scratch2 = r8; + + switch (mode) { + case CallApiCallbackMode::kGeneric: + api_function_address = r4; + argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); + callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + holder = CallApiCallbackGenericDescriptor::HolderRegister(); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + api_function_address = + CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister(); + argc = CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister(); + call_data = CallApiCallbackOptimizedDescriptor::CallDataRegister(); + holder = CallApiCallbackOptimizedDescriptor::HolderRegister(); + break; + } + DCHECK(!AreAliased(api_function_address, argc, holder, call_data, callback, + scratch, scratch2)); using FCI = FunctionCallbackInfo; using FCA = FunctionCallbackArguments; @@ -3499,7 +3541,21 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { MemOperand(sp, FCA::kReturnValueIndex * kSystemPointerSize)); // kData. - __ StoreU64(call_data, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + switch (mode) { + case CallApiCallbackMode::kGeneric: + __ LoadTaggedField( + scratch2, FieldMemOperand(callback, CallHandlerInfo::kDataOffset), + r0); + __ StoreU64(scratch2, + MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + __ StoreU64(call_data, + MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + break; + } // kNewTarget. __ StoreU64(scratch, @@ -3522,9 +3578,37 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { static_assert(FCI::kImplicitArgsOffset == 0); static_assert(FCI::kValuesOffset == 1 * kSystemPointerSize); static_assert(FCI::kLengthOffset == 2 * kSystemPointerSize); + const int exit_frame_params_size = + mode == CallApiCallbackMode::kGeneric ? 
2 : 0; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); + if (mode == CallApiCallbackMode::kGeneric) { + ASM_CODE_COMMENT_STRING(masm, "Push API_CALLBACK_EXIT frame arguments"); + __ AllocateStackSpace(exit_frame_params_size * kSystemPointerSize); + + // Argc parameter as a Smi. + static_assert(ApiCallbackExitFrameConstants::kArgcOffset == + 3 * kSystemPointerSize); + __ SmiTag(scratch, argc); + __ StoreU64(scratch, MemOperand(sp, 1 * kSystemPointerSize)); + + // Target parameter. + static_assert(ApiCallbackExitFrameConstants::kTargetOffset == + 2 * kSystemPointerSize); + __ LoadTaggedField( + scratch, + FieldMemOperand(callback, CallHandlerInfo::kOwnerTemplateOffset), r0); + __ StoreU64(scratch, MemOperand(sp, 0 * kSystemPointerSize)); + + __ LoadU64(api_function_address, + FieldMemOperand(callback, + CallHandlerInfo::kMaybeRedirectedCallbackOffset), + r0); + + __ EnterExitFrame(kApiStackSpace, StackFrame::API_CALLBACK_EXIT); + } else { + __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); + } { ASM_CODE_COMMENT_STRING(masm, "Initialize FunctionCallbackInfo"); @@ -3546,8 +3630,9 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // from the API function here. MemOperand stack_space_operand = ExitFrameStackSlotOperand(FCI::kLengthOffset + kSlotsToDropOnStackSize); - __ mov(scratch, - Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize)); + __ mov(scratch, Operand((FCA::kArgsLength + 1 /* receiver */ + + exit_frame_params_size) * + kSystemPointerSize)); __ ShiftLeftU64(ip, argc, Operand(kSystemPointerSizeLog2)); __ add(scratch, scratch, ip); __ StoreU64(scratch, stack_space_operand); @@ -3558,13 +3643,14 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { DCHECK(!AreAliased(api_function_address, function_callback_info_arg)); - ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(mode); // Pass api function address to thunk wrapper in case profiler or side-effect // checking is enabled. 
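Editor's note: the static_asserts in these hunks (kImplicitArgsOffset == 0, kValuesOffset == one pointer, kLengthOffset == two pointers) pin down the three-word FunctionCallbackInfo block that the builtin writes on the stack before the call. Below is a toy struct that satisfies the same asserts, purely for illustration; it is not V8's real FunctionCallbackInfo class.

#include <cstddef>
#include <cstdint>

struct FunctionCallbackInfoLayout {
  void** implicit_args;  // offset 0
  void** values;         // offset 1 * kSystemPointerSize
  intptr_t length;       // offset 2 * kSystemPointerSize
};

static_assert(offsetof(FunctionCallbackInfoLayout, implicit_args) == 0,
              "implicit args come first");
static_assert(offsetof(FunctionCallbackInfoLayout, values) == sizeof(void*),
              "values pointer follows");
static_assert(offsetof(FunctionCallbackInfoLayout, length) == 2 * sizeof(void*),
              "length is the third word");

int main() { return 0; }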
Register thunk_arg = api_function_address; - MemOperand return_value_operand = - ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex); + MemOperand return_value_operand = ExitFrameCallerStackSlotOperand( + FCA::kReturnValueIndex + exit_frame_params_size); static constexpr int kUseStackSpaceOperand = 0; CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, thunk_arg, @@ -3572,7 +3658,6 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { return_value_operand); } - void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- cp : context diff --git a/v8/src/builtins/reflect.tq b/v8/src/builtins/reflect.tq index 5ed70c81c..d7290f6b8 100644 --- a/v8/src/builtins/reflect.tq +++ b/v8/src/builtins/reflect.tq @@ -55,7 +55,6 @@ transitioning javascript builtin ReflectSetPrototypeOf( } } -extern transitioning builtin ToName(implicit context: Context)(JSAny): AnyName; type OnNonExistent constexpr 'OnNonExistent'; const kReturnUndefined: constexpr OnNonExistent generates 'OnNonExistent::kReturnUndefined'; diff --git a/v8/src/builtins/riscv/builtins-riscv.cc b/v8/src/builtins/riscv/builtins-riscv.cc index 0f5b3bf9f..81509a577 100644 --- a/v8/src/builtins/riscv/builtins-riscv.cc +++ b/v8/src/builtins/riscv/builtins-riscv.cc @@ -259,9 +259,6 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset( masm->pc_offset()); - // Restore the context from the frame. - __ LoadWord(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); - // If the result is an object (in the ECMA sense), we should get rid // of the receiver and use the result; see ECMA-262 section 13.2.2-7 // on page 74. @@ -370,7 +367,6 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // -- a1 : the JSGeneratorObject to resume // -- ra : return address // ----------------------------------- - // Store input value into generator object. __ StoreTaggedField( a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset)); @@ -804,6 +800,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, #endif // s6 holds the root address. Do not clobber. // s7 is cp. Do not init. + // s11 is pointer cage base register (kPointerCageBaseRegister). // Invoke the code. Handle builtin = is_construct @@ -945,9 +942,17 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, } namespace { -void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array) { - __ Sh(zero_reg, - FieldMemOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset)); +void ResetSharedFunctionInfoAge(MacroAssembler* masm, Register sfi) { + __ Sh(zero_reg, FieldMemOperand(sfi, SharedFunctionInfo::kAgeOffset)); +} + +void ResetJSFunctionAge(MacroAssembler* masm, Register js_function, + Register scratch) { + const Register shared_function_info(scratch); + __ LoadTaggedField( + shared_function_info, + FieldMemOperand(js_function, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, shared_function_info); } void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm, @@ -992,10 +997,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { BaselineOutOfLinePrologueDescriptor::kClosure); // Load the feedback vector from the closure. 
Register feedback_vector = temps.Acquire(); - __ LoadWord(feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadWord(feedback_vector, - FieldMemOperand(feedback_vector, Cell::kValueOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(feedback_vector, Cell::kValueOffset)); { UseScratchRegisterScope temp(masm); Register type = temps.Acquire(); @@ -1035,6 +1040,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { BaselineOutOfLinePrologueDescriptor::kCalleeContext); Register callee_js_function = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); + { + UseScratchRegisterScope temps(masm); + ResetJSFunctionAge(masm, callee_js_function, temps.Acquire()); + } __ Push(callee_context, callee_js_function); DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister); DCHECK_EQ(callee_js_function, kJSFunctionRegister); @@ -1045,7 +1054,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // the frame, so load it into a register. Register bytecode_array = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); - ResetBytecodeAge(masm, bytecode_array); __ Push(argc, bytecode_array); // Baseline code frames store the feedback vector where interpreter would // store the bytecode offset. @@ -1134,6 +1142,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ LoadTaggedField( kScratchReg, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, kScratchReg); __ LoadTaggedField( kInterpreterBytecodeArrayRegister, FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset)); @@ -1197,8 +1206,6 @@ void Builtins::Generate_InterpreterEntryTrampoline( FrameScope frame_scope(masm, StackFrame::MANUAL); __ PushStandardFrame(closure); - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister); - // Load initial bytecode offset. __ li(kInterpreterBytecodeOffsetRegister, Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); @@ -1748,8 +1755,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, // If maybe_target_code is not null, no need to call into runtime. A // precondition here is: if maybe_target_code is a InstructionStream object, // it must NOT be marked_for_deoptimization (callers must ensure this). - __ Branch(&jump_to_optimized_code, ne, maybe_target_code, - Operand(Smi::zero())); + __ CompareTaggedAndBranch(&jump_to_optimized_code, ne, maybe_target_code, + Operand(Smi::zero())); } ASM_CODE_COMMENT(masm); { @@ -1758,7 +1765,12 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, } // If the code object is null, just return to the caller. - __ Ret(eq, a0, Operand(Smi::zero())); + // If the code object is null, just return to the caller. + __ CompareTaggedAndBranch(&jump_to_optimized_code, ne, maybe_target_code, + Operand(Smi::zero())); + __ Ret(); + DCHECK_EQ(maybe_target_code, a0); // Already in the right spot. + __ bind(&jump_to_optimized_code); // OSR entry tracing. @@ -1792,9 +1804,10 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, // Load the OSR entrypoint offset from the deoptimization data. 
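Editor's note: several riscv changes in this file replace plain Branch/CmpTagged sequences with CompareTaggedAndBranch. The reason is pointer compression: a tagged value held in a 64-bit register may only be meaningful in its low 32 bits, so equality has to be decided on the compressed part. A hedged sketch of that idea; the bit layout here is invented for illustration.

#include <cstdint>
#include <cstdio>

// Compare two tagged values. Under pointer compression only the low 32 bits
// carry the (compressed) tagged value, so the upper bits must be ignored.
bool TaggedEqual(uint64_t a, uint64_t b, bool pointer_compression) {
  if (pointer_compression) {
    return static_cast<uint32_t>(a) == static_cast<uint32_t>(b);
  }
  return a == b;
}

int main() {
  const uint64_t x = 0x0000123400000042ULL;  // same low half,
  const uint64_t y = 0x0000567800000042ULL;  // different (stale) upper half
  std::printf("%d %d\n", TaggedEqual(x, y, true), TaggedEqual(x, y, false));  // 1 0
  return 0;
}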
// = [#header_size + #osr_pc_offset] - __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt( - DeoptimizationData::kOsrPcOffsetIndex) - - kHeapObjectTag)); + __ SmiUntagField(a1, + MemOperand(a1, FixedArray::OffsetOfElementAt( + DeoptimizationData::kOsrPcOffsetIndex) - + kHeapObjectTag)); __ LoadCodeInstructionStart(a0, a0); @@ -1835,6 +1848,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { Register receiver = a1; Register this_arg = a5; Register undefined_value = a3; + Register scratch = a4; __ LoadRoot(undefined_value, RootIndex::kUndefinedValue); @@ -1880,9 +1894,10 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // 3. Tail call with no arguments if argArray is null or undefined. Label no_arguments; - __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments); - __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value), - Label::Distance::kNear); + __ LoadRoot(scratch, RootIndex::kNullValue); + __ CompareTaggedAndBranch(&no_arguments, eq, arg_array, Operand(scratch)); + __ CompareTaggedAndBranch(&no_arguments, eq, arg_array, + Operand(undefined_value)); // 4a. Apply the receiver to the given argArray. __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike), @@ -2165,11 +2180,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ bind(&loop); __ LoadTaggedField(a5, MemOperand(src)); __ AddWord(src, src, kTaggedSize); -#if V8_STATIC_ROOTS_BOOL - __ Branch(&push, ne, a5, RootIndex::kTheHoleValue); -#else - __ Branch(&push, ne, a5, Operand(hole_value)); -#endif + __ CompareTaggedAndBranch(&push, ne, a5, Operand(hole_value)); __ LoadRoot(a5, RootIndex::kUndefinedValue); __ bind(&push); __ StoreWord(a5, MemOperand(a7, 0)); @@ -2280,15 +2291,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // -- a0 : the number of arguments // -- a1 : the function to call (checked to be a JSFunction) // ----------------------------------- - __ AssertCallableFunction(a1); + __ AssertFunction(a1); - Label class_constructor; __ LoadTaggedField( a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); - __ Load32U(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset)); - __ And(kScratchReg, a3, - Operand(SharedFunctionInfo::IsClassConstructorBit::kMask)); - __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg)); // Enter the context of the function; ToObject has to run in the function // context, and we also need to take the global proxy from the function @@ -2316,10 +2322,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, Label convert_to_object, convert_receiver; __ LoadReceiver(a3, a0); __ JumpIfSmi(a3, &convert_to_object); - static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE); - __ GetObjectType(a3, a4, a4); - __ Branch(&done_convert, Ugreater_equal, a4, - Operand(FIRST_JS_RECEIVER_TYPE)); + __ JumpIfJSAnyIsNotPrimitive(a3, a4, &done_convert); if (mode != ConvertReceiverMode::kNotNullOrUndefined) { Label convert_global_proxy; __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy); @@ -2366,14 +2369,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ Lhu(a2, FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset)); __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump); - - // The function is a "classConstructor", need to raise an exception. 
- __ bind(&class_constructor); - { - FrameScope frame(masm, StackFrame::INTERNAL); - __ Push(a1); - __ CallRuntime(Runtime::kThrowConstructorNonCallableError); - } } namespace { @@ -2482,46 +2477,55 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { // -- a1 : the target to call (can be any Object). // ----------------------------------- + Register argc = a0; + Register target = a1; + Register map = t1; + Register instance_type = t2; + Register scratch = t6; + DCHECK(!AreAliased(argc, target, map, instance_type, scratch)); + Label non_callable, class_constructor; - UseScratchRegisterScope temps(masm); - temps.Include(t1, t2); - temps.Include(t4); - Register map = temps.Acquire(), type = temps.Acquire(), - range = temps.Acquire(); - __ JumpIfSmi(a1, &non_callable); - __ LoadMap(map, a1); - __ GetInstanceTypeRange(map, type, FIRST_CALLABLE_JS_FUNCTION_TYPE, range); + __ JumpIfSmi(target, &non_callable); + __ LoadMap(map, target); + __ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE, + scratch); __ Jump(masm->isolate()->builtins()->CallFunction(mode), - RelocInfo::CODE_TARGET, Uless_equal, range, + RelocInfo::CODE_TARGET, ule, scratch, Operand(LAST_CALLABLE_JS_FUNCTION_TYPE - FIRST_CALLABLE_JS_FUNCTION_TYPE)); __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction), - RelocInfo::CODE_TARGET, eq, type, Operand(JS_BOUND_FUNCTION_TYPE)); - Register scratch = map; + RelocInfo::CODE_TARGET, eq, instance_type, + Operand(JS_BOUND_FUNCTION_TYPE)); + // Check if target has a [[Call]] internal method. - __ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); - __ And(scratch, scratch, Operand(Map::Bits1::IsCallableBit::kMask)); - __ Branch(&non_callable, eq, scratch, Operand(zero_reg), - Label::Distance::kNear); + { + Register flags = t1; + __ Lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset)); + map = no_reg; + __ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask)); + __ Branch(&non_callable, eq, flags, Operand(zero_reg)); + } __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq, - type, Operand(JS_PROXY_TYPE)); + instance_type, Operand(JS_PROXY_TYPE)); // Check if target is a wrapped function and call CallWrappedFunction external // builtin __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction), - RelocInfo::CODE_TARGET, eq, type, Operand(JS_WRAPPED_FUNCTION_TYPE)); + RelocInfo::CODE_TARGET, eq, instance_type, + Operand(JS_WRAPPED_FUNCTION_TYPE)); // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) // Check that the function is not a "classConstructor". - __ Branch(&class_constructor, eq, type, Operand(JS_CLASS_CONSTRUCTOR_TYPE)); + __ Branch(&class_constructor, eq, instance_type, + Operand(JS_CLASS_CONSTRUCTOR_TYPE)); // 2. Call to something else, which might have a [[Call]] internal method (if // not we raise an exception). // Overwrite the original receiver with the (original) target. - __ StoreReceiver(a1, a0, kScratchReg); + __ StoreReceiver(target, argc, kScratchReg); // Let the "call_as_function_delegate" take care of the rest. 
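Editor's note: the rewritten dispatch above (GetInstanceTypeRange followed by a single unsigned less-or-equal branch) relies on the usual trick for checking membership in a contiguous range with one comparison: subtract the first type of the range and compare unsigned against the range width. In standalone C++, with placeholder type values:

#include <cstdint>
#include <cstdio>

// Placeholder instance-type values; only the comparison shape matters.
constexpr uint16_t kFirstCallableType = 100;
constexpr uint16_t kLastCallableType = 120;

bool IsCallableJSFunctionType(uint16_t instance_type) {
  // Underflow wraps around for types below the range, so one unsigned
  // comparison rejects both "too small" and "too large".
  return static_cast<uint16_t>(instance_type - kFirstCallableType) <=
         kLastCallableType - kFirstCallableType;
}

int main() {
  std::printf("%d %d %d\n", IsCallableJSFunctionType(99),
              IsCallableJSFunctionType(110), IsCallableJSFunctionType(121));  // 0 1 0
  return 0;
}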
- __ LoadNativeContextSlot(a1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX); + __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX); __ Jump(masm->isolate()->builtins()->CallFunction( ConvertReceiverMode::kNotNullOrUndefined), RelocInfo::CODE_TARGET); @@ -2530,16 +2534,16 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { __ bind(&non_callable); { FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(a1); + __ Push(target); __ CallRuntime(Runtime::kThrowCalledNonCallable); } + // 4. The function is a "classConstructor", need to raise an exception. __ bind(&class_constructor); { FrameScope frame(masm, StackFrame::INTERNAL); - __ Push(a1); + __ Push(target); __ CallRuntime(Runtime::kThrowConstructorNonCallableError); - __ ebreak(); } } @@ -2588,16 +2592,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { // Patch new.target to [[BoundTargetFunction]] if new.target equals target. Label skip; - { -#ifdef V8_COMPRESS_POINTERS - UseScratchRegisterScope temps(masm); - Register scratch = temps.Acquire(); - __ CmpTagged(scratch, a1, a3); - __ Branch(&skip, ne, scratch, Operand(zero_reg), Label::Distance::kNear); -#else - __ Branch(&skip, ne, a1, Operand(a3), Label::Distance::kNear); -#endif - } + __ CompareTaggedAndBranch(&skip, ne, a1, Operand(a3)); __ LoadTaggedField( a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); __ bind(&skip); @@ -2608,7 +2603,6 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); } -// static void Builtins::Generate_Construct(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- a0 : the number of arguments @@ -2617,34 +2611,40 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { // the JSFunction on which new was invoked initially) // ----------------------------------- + Register argc = a0; + Register target = a1; + Register map = t1; + Register instance_type = t2; + Register scratch = t6; + DCHECK(!AreAliased(argc, target, map, instance_type, scratch)); + // Check if target is a Smi. Label non_constructor, non_proxy; - __ JumpIfSmi(a1, &non_constructor); + __ JumpIfSmi(target, &non_constructor); // Check if target has a [[Construct]] internal method. - UseScratchRegisterScope temps(masm); - temps.Include(t0, t1); - Register map = temps.Acquire(); - Register scratch = temps.Acquire(); - __ LoadTaggedField(map, FieldMemOperand(a1, HeapObject::kMapOffset)); - __ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); - __ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask)); - __ Branch(&non_constructor, eq, scratch, Operand(zero_reg)); - Register range = temps.Acquire(); + __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset)); + { + Register flags = t3; + __ Lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset)); + __ And(flags, flags, Operand(Map::Bits1::IsConstructorBit::kMask)); + __ Branch(&non_constructor, eq, flags, Operand(zero_reg)); + } + // Dispatch based on instance type. - __ GetInstanceTypeRange(map, scratch, FIRST_JS_FUNCTION_TYPE, range); + __ GetInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE, scratch); __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction), - RelocInfo::CODE_TARGET, Uless_equal, range, + RelocInfo::CODE_TARGET, Uless_equal, scratch, Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE)); // Only dispatch to bound functions after checking whether they are // constructors. 
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction), - RelocInfo::CODE_TARGET, eq, scratch, Operand(JS_BOUND_FUNCTION_TYPE)); + RelocInfo::CODE_TARGET, eq, instance_type, + Operand(JS_BOUND_FUNCTION_TYPE)); // Only dispatch to proxies after checking whether they are constructors. - __ Branch(&non_proxy, ne, scratch, Operand(JS_PROXY_TYPE), - Label::Distance::kNear); + __ Branch(&non_proxy, ne, instance_type, Operand(JS_PROXY_TYPE)); __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy), RelocInfo::CODE_TARGET); @@ -2652,9 +2652,10 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { __ bind(&non_proxy); { // Overwrite the original receiver with the (original) target. - __ StoreReceiver(a1, a0, kScratchReg); + __ StoreReceiver(target, argc, kScratchReg); // Let the "call_as_constructor_delegate" take care of the rest. - __ LoadNativeContextSlot(a1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX); + __ LoadNativeContextSlot(target, + Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX); __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET); } @@ -2716,11 +2717,11 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Register scratch = t2; Label allocate_vector, done; - __ LoadWord(vector, + __ LoadTaggedField(vector, FieldMemOperand(kWasmInstanceRegister, WasmInstanceObject::kFeedbackVectorsOffset)); __ CalcScaledAddress(vector, vector, func_index, kTaggedSizeLog2); - __ LoadWord(vector, FieldMemOperand(vector, FixedArray::kHeaderSize)); + __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize)); __ JumpIfSmi(vector, &allocate_vector); __ bind(&done); __ Push(vector); @@ -2870,8 +2871,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Check result for exception sentinel. Label exception_returned; - __ LoadRoot(a4, RootIndex::kException); - __ Branch(&exception_returned, eq, a4, Operand(a0)); + __ Branch(&exception_returned, eq, a0, RootIndex::kException); // Check that there is no pending exception, otherwise we // should have returned the exception sentinel. @@ -2881,9 +2881,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, IsolateAddressId::kPendingExceptionAddress, masm->isolate()); __ li(a2, pending_exception_address); __ LoadWord(a2, MemOperand(a2)); - __ LoadRoot(a4, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. - __ Branch(&okay, eq, a4, Operand(a2), Label::Distance::kNear); + __ Branch(&okay, eq, a2, RootIndex::kTheHoleValue); __ stop(); __ bind(&okay); } @@ -2940,6 +2939,15 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ StoreWord(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ bind(&zero); + // Clear c_entry_fp, like we do in `LeaveExitFrame`. + { + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ li(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, + masm->isolate())); + __ StoreWord(zero_reg, MemOperand(scratch, 0)); + } + // Compute the handler entry address and jump to it. UseScratchRegisterScope temp(masm); Register scratch = temp.Acquire(); @@ -3100,6 +3108,10 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { // Only needed on x64. __ Trap(); } + +void Builtins::Generate_NewGenericJSToWasmWrapper(MacroAssembler* masm) { + __ Trap(); +} namespace { // Calls an API function. 
Allocates HandleScope, extracts returned value @@ -3123,8 +3135,8 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, ER::handle_scope_level_address(isolate), no_reg); Register return_value = a0; - Register scratch = kScratchReg2; - Register scratch2 = s11; + Register scratch = a4; + Register scratch2 = a5; // Allocate HandleScope in callee-saved registers. // We will need to restore the HandleScope after the call to the API function, @@ -3227,7 +3239,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, { ASM_CODE_COMMENT_STRING(masm, "Convert return value"); Label finish_return; - __ JumpIfNotRoot(return_value, RootIndex::kTheHoleValue, &finish_return); + __ Branch(&finish_return, ne, return_value, RootIndex::kTheHoleValue); __ LoadRoot(return_value, RootIndex::kUndefinedValue); __ bind(&finish_return); } @@ -3280,14 +3292,20 @@ MemOperand ExitFrameCallerStackSlotOperand(int index) { } } // namespace -void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { +void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, + CallApiCallbackMode mode) { // ----------- S t a t e ------------- - // -- cp : context + // CallApiCallbackMode::kGeneric mode: + // -- a2 : arguments count (not including the receiver) + // -- a3 : call handler info + // -- a0 : holder + // CallApiCallbackMode::kNoSideEffects/kWithSideEffectsSideEffects modes: // -- a1 : api function address // -- a2 : arguments count // -- a3 : call data // -- a0 : holder - // -- + // Both modes: + // -- cp : context // -- sp[0] : receiver // -- sp[8] : first argument // -- ... @@ -3295,13 +3313,34 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // ----------------------------------- Register function_callback_info_arg = arg_reg_1; - Register api_function_address = a1; - Register argc = a2; - Register call_data = a3; - Register holder = a0; - Register scratch = kScratchReg2; + Register api_function_address = no_reg; + Register argc = no_reg; + Register call_data = no_reg; + Register callback = no_reg; + Register holder = no_reg; + Register scratch = t0; + Register scratch2 = t1; + + switch (mode) { + case CallApiCallbackMode::kGeneric: + api_function_address = a1; + argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); + callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + holder = CallApiCallbackGenericDescriptor::HolderRegister(); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + api_function_address = + CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister(); + argc = CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister(); + call_data = CallApiCallbackOptimizedDescriptor::CallDataRegister(); + holder = CallApiCallbackOptimizedDescriptor::HolderRegister(); + break; + } + DCHECK(!AreAliased(api_function_address, argc, holder, call_data, callback, + scratch, scratch2)); - DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch)); using FCI = FunctionCallbackInfo; using FCA = FunctionCallbackArguments; @@ -3347,7 +3386,20 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { MemOperand(sp, FCA::kReturnValueIndex * kSystemPointerSize)); // kData. 
- __ StoreWord(call_data, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + switch (mode) { + case CallApiCallbackMode::kGeneric: + __ LoadTaggedField( + scratch2, FieldMemOperand(callback, CallHandlerInfo::kDataOffset)); + __ StoreWord(scratch2, + MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + __ StoreWord(call_data, + MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + break; + } // kNewTarget. __ StoreWord(scratch, @@ -3366,9 +3418,38 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { static_assert(FCI::kImplicitArgsOffset == 0); static_assert(FCI::kValuesOffset == 1 * kSystemPointerSize); static_assert(FCI::kLengthOffset == 2 * kSystemPointerSize); + const int exit_frame_params_size = + mode == CallApiCallbackMode::kGeneric ? 2 : 0; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(kApiStackSpace); + if (mode == CallApiCallbackMode::kGeneric) { + ASM_CODE_COMMENT_STRING(masm, "Push API_CALLBACK_EXIT frame arguments"); + __ AllocateStackSpace(exit_frame_params_size * kSystemPointerSize); + + // Argc parameter as a Smi. + static_assert(ApiCallbackExitFrameConstants::kArgcOffset == + 3 * kSystemPointerSize); + __ SmiTag(scratch, argc); + __ StoreWord(scratch, MemOperand(sp, 1 * kSystemPointerSize)); + + // Target parameter. + static_assert(ApiCallbackExitFrameConstants::kTargetOffset == + 2 * kSystemPointerSize); + __ LoadTaggedField( + scratch, + FieldMemOperand(callback, CallHandlerInfo::kOwnerTemplateOffset)); + __ StoreWord(scratch, MemOperand(sp, 0 * kSystemPointerSize)); + + __ LoadExternalPointerField( + api_function_address, + FieldMemOperand(callback, + CallHandlerInfo::kMaybeRedirectedCallbackOffset), + kCallHandlerInfoCallbackTag); + + __ EnterExitFrame(kApiStackSpace, StackFrame::API_CALLBACK_EXIT); + } else { + __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); + } // EnterExitFrame may align the sp. { @@ -3394,7 +3475,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // drop, not the number of bytes. MemOperand stack_space_operand = ExitFrameStackSlotOperand(FCI::kLengthOffset + kSlotsToDropOnStackSize); - __ AddWord(scratch, argc, Operand(FCA::kArgsLengthWithReceiver)); + __ AddWord(scratch, argc, + Operand(FCA::kArgsLengthWithReceiver + exit_frame_params_size)); __ StoreWord(scratch, stack_space_operand); __ RecordComment("v8::FunctionCallback's argument."); @@ -3403,10 +3485,11 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { DCHECK(!AreAliased(api_function_address, function_callback_info_arg)); - ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(mode); - MemOperand return_value_operand = - ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex); + MemOperand return_value_operand = ExitFrameCallerStackSlotOperand( + FCA::kReturnValueIndex + exit_frame_params_size); // Pass api function address to thunk wrapper in case profiler or side-effect // checking is enabled. 
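Note on the riscv64 Generate_CallApiCallbackImpl hunk above: the frame slots it fills (kData, kNewTarget, the return-value slot, the argument count) are exactly what the embedder-visible v8::FunctionCallbackInfo exposes. As a hedged, embedder-side illustration only (not part of this patch; MyCallback and the registration line are made-up names), the kind of callback this builtin ultimately dispatches to looks roughly like:

    // Embedder-side sketch; compiles against the public V8 headers and must be
    // linked with V8. Not taken from this patch.
    #include <v8.h>

    void MyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
      v8::Isolate* isolate = info.GetIsolate();
      // info.Data() reads the kData slot the builtin stored above: call_data in
      // the optimized modes, CallHandlerInfo::kDataOffset in kGeneric mode.
      v8::Local<v8::Value> data = info.Data();
      (void)data;
      // info.Length() is the argument count the builtin passed through.
      info.GetReturnValue().Set(v8::Integer::New(isolate, info.Length()));
    }
    // Typically registered via a FunctionTemplate, e.g.
    //   v8::FunctionTemplate::New(isolate, MyCallback, data);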
@@ -3798,6 +3881,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ LoadTaggedField( code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + + if (is_osr) { + ResetSharedFunctionInfoAge(masm, code_obj); + } + __ LoadTaggedField( code_obj, FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); @@ -3907,7 +3995,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ LoadWord( kInterpreterBytecodeArrayRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister); Generate_OSREntry(masm, code_obj); } else { __ Jump(code_obj); diff --git a/v8/src/builtins/s390/builtins-s390.cc b/v8/src/builtins/s390/builtins-s390.cc index a70189b70..b5be3c258 100644 --- a/v8/src/builtins/s390/builtins-s390.cc +++ b/v8/src/builtins/s390/builtins-s390.cc @@ -92,13 +92,20 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address, __ Ret(); } -void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array, - Register scratch) { - DCHECK(!AreAliased(bytecode_array, scratch)); - __ mov(r0, Operand(0)); - __ StoreU16( - r0, FieldMemOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset), - scratch); +void ResetSharedFunctionInfoAge(MacroAssembler* masm, Register sfi, + Register scratch) { + DCHECK(!AreAliased(sfi, scratch)); + __ mov(scratch, Operand(0)); + __ StoreU16(scratch, FieldMemOperand(sfi, SharedFunctionInfo::kAgeOffset), + no_reg); +} + +void ResetJSFunctionAge(MacroAssembler* masm, Register js_function, + Register scratch1, Register scratch2) { + __ LoadTaggedField( + scratch1, + FieldMemOperand(js_function, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, scratch1, scratch2); } void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm, @@ -131,6 +138,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ LoadTaggedField( code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + + if (is_osr) { + ResetSharedFunctionInfoAge(masm, code_obj, r5); + } + __ LoadTaggedField( code_obj, FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); @@ -232,7 +244,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, if (is_osr) { // TODO(pthier): Separate baseline Sparkplug from TF arming and don't // disarm Sparkplug here. - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, r1); Generate_OSREntry(masm, code_obj, Operand(0)); } else { __ Jump(code_obj); @@ -1283,6 +1294,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { BaselineOutOfLinePrologueDescriptor::kCalleeContext); Register callee_js_function = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); + ResetJSFunctionAge(masm, callee_js_function, r1, r0); __ Push(callee_context, callee_js_function); DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister); DCHECK_EQ(callee_js_function, kJSFunctionRegister); @@ -1293,7 +1305,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // the frame, so load it into a register. Register bytecodeArray = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); - ResetBytecodeAge(masm, bytecodeArray, r1); __ Push(argc, bytecodeArray); @@ -1402,6 +1413,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( // kInterpreterBytecodeArrayRegister. 
__ LoadTaggedField( r6, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, r6, ip); // Load original bytecode array or the debug copy. __ LoadTaggedField( kInterpreterBytecodeArrayRegister, @@ -1469,8 +1481,6 @@ void Builtins::Generate_InterpreterEntryTrampoline( FrameScope frame_scope(masm, StackFrame::MANUAL); __ PushStandardFrame(closure); - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, r1); - // Load the initial bytecode offset. __ mov(kInterpreterBytecodeOffsetRegister, Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); @@ -2944,6 +2954,10 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { // Only needed on x64. __ Trap(); } + +void Builtins::Generate_NewGenericJSToWasmWrapper(MacroAssembler* masm) { + __ Trap(); +} #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, @@ -3410,13 +3424,20 @@ MemOperand ExitFrameCallerStackSlotOperand(int index) { } // namespace -void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { +void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, + CallApiCallbackMode mode) { // ----------- S t a t e ------------- - // -- cp : context + // CallApiCallbackMode::kGeneric mode: + // -- r4 : arguments count (not including the receiver) + // -- r5 : call handler info + // -- r2 : holder + // CallApiCallbackMode::kNoSideEffects/kWithSideEffectsSideEffects modes: // -- r4 : api function address // -- r4 : arguments count (not including the receiver) // -- r5 : call data // -- r2 : holder + // Both modes: + // -- cp // -- sp[0] : receiver // -- sp[8] : first argument // -- ... @@ -3425,12 +3446,33 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { Register function_callback_info_arg = arg_reg_1; - Register api_function_address = r3; - Register argc = r4; - Register call_data = r5; - Register holder = r2; + Register api_function_address = no_reg; + Register argc = no_reg; + Register call_data = no_reg; + Register holder = no_reg; + Register callback = no_reg; Register scratch = r6; - DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch)); + Register scratch2 = r7; + + switch (mode) { + case CallApiCallbackMode::kGeneric: + api_function_address = r3; + argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); + callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + holder = CallApiCallbackGenericDescriptor::HolderRegister(); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + api_function_address = + CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister(); + argc = CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister(); + call_data = CallApiCallbackOptimizedDescriptor::CallDataRegister(); + holder = CallApiCallbackOptimizedDescriptor::HolderRegister(); + break; + } + DCHECK(!AreAliased(api_function_address, argc, holder, call_data, callback, + scratch, scratch2)); using FCI = FunctionCallbackInfo; using FCA = FunctionCallbackArguments; @@ -3475,7 +3517,20 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { MemOperand(sp, FCA::kReturnValueIndex * kSystemPointerSize)); // kData. 
- __ StoreU64(call_data, MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + switch (mode) { + case CallApiCallbackMode::kGeneric: + __ LoadTaggedField( + scratch2, FieldMemOperand(callback, CallHandlerInfo::kDataOffset)); + __ StoreU64(scratch2, + MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + __ StoreU64(call_data, + MemOperand(sp, FCA::kDataIndex * kSystemPointerSize)); + break; + } // kNewTarget. __ StoreU64(scratch, @@ -3499,9 +3554,36 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { static_assert(FCI::kImplicitArgsOffset == 0); static_assert(FCI::kValuesOffset == 1 * kSystemPointerSize); static_assert(FCI::kLengthOffset == 2 * kSystemPointerSize); + const int exit_frame_params_size = + mode == CallApiCallbackMode::kGeneric ? 2 : 0; FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); + if (mode == CallApiCallbackMode::kGeneric) { + ASM_CODE_COMMENT_STRING(masm, "Push API_CALLBACK_EXIT frame arguments"); + __ AllocateStackSpace(exit_frame_params_size * kSystemPointerSize); + + // Argc parameter as a Smi. + static_assert(ApiCallbackExitFrameConstants::kArgcOffset == + 3 * kSystemPointerSize); + __ SmiTag(scratch, argc); + __ StoreU64(scratch, MemOperand(sp, 1 * kSystemPointerSize)); + + // Target parameter. + static_assert(ApiCallbackExitFrameConstants::kTargetOffset == + 2 * kSystemPointerSize); + __ LoadTaggedField( + scratch, + FieldMemOperand(callback, CallHandlerInfo::kOwnerTemplateOffset)); + __ StoreU64(scratch, MemOperand(sp, 0 * kSystemPointerSize)); + + __ LoadU64(api_function_address, + FieldMemOperand( + callback, CallHandlerInfo::kMaybeRedirectedCallbackOffset)); + + __ EnterExitFrame(kApiStackSpace, StackFrame::API_CALLBACK_EXIT); + } else { + __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT); + } { ASM_CODE_COMMENT_STRING(masm, "Initialize FunctionCallbackInfo"); @@ -3523,8 +3605,9 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // from the API function here. MemOperand stack_space_operand = ExitFrameStackSlotOperand(FCI::kLengthOffset + kSlotsToDropOnStackSize); - __ mov(scratch, - Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize)); + __ mov(scratch, Operand((FCA::kArgsLength + 1 /* receiver */ + + exit_frame_params_size) * + kSystemPointerSize)); __ ShiftLeftU64(r1, argc, Operand(kSystemPointerSizeLog2)); __ AddS64(scratch, r1); __ StoreU64(scratch, stack_space_operand); @@ -3535,13 +3618,14 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { DCHECK(!AreAliased(api_function_address, function_callback_info_arg)); - ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(mode); // Pass api function address to thunk wrapper in case profiler or side-effect // checking is enabled. 
Register thunk_arg = api_function_address; - MemOperand return_value_operand = - ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex); + MemOperand return_value_operand = ExitFrameCallerStackSlotOperand( + FCA::kReturnValueIndex + exit_frame_params_size); static constexpr int kUseStackSpaceOperand = 0; CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, thunk_arg, diff --git a/v8/src/builtins/set-intersection.tq b/v8/src/builtins/set-intersection.tq new file mode 100644 index 000000000..ca602d4ed --- /dev/null +++ b/v8/src/builtins/set-intersection.tq @@ -0,0 +1,165 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/objects/ordered-hash-table.h' + +namespace collections { + +// https://tc39.es/proposal-set-methods/#sec-set.prototype.intersection +transitioning javascript builtin SetPrototypeIntersection( + js-implicit context: NativeContext, receiver: JSAny)(other: JSAny): JSSet { + const methodName: constexpr string = 'Set.prototype.intersection'; + const fastIteratorResultMap = GetIteratorResultMap(); + + // 1. Let O be the this value. + // 2. Perform ? RequireInternalSlot(O, [[SetData]]). + const o = Cast(receiver) otherwise + ThrowTypeError( + MessageTemplate::kIncompatibleMethodReceiver, methodName, receiver); + + const table = Cast(o.table) otherwise unreachable; + + // 3. Let otherRec be ? GetSetRecord(other). + let otherRec = GetSetRecord(other, methodName); + + // 4. Let resultSetData be a new empty List. + let resultSetData = AllocateOrderedHashSet(); + + // 5. Let thisSize be the number of elements in O.[[SetData]]. + const thisSize = + LoadOrderedHashTableMetadata(table, kOrderedHashSetNumberOfElementsIndex); + + try { + typeswitch (other) { + case (otherSet: JSSetWithNoCustomIteration): { + CheckSetRecordHasJSSetMethods(otherRec) otherwise SlowPath; + + const otherTable = + Cast(otherSet.table) otherwise unreachable; + + const otherSize = LoadOrderedHashTableMetadata( + otherTable, kOrderedHashSetNumberOfElementsIndex); + + if (thisSize <= otherSize) { + resultSetData = FastIntersect( + table, otherTable, methodName, resultSetData); + goto Done; + + } else { + resultSetData = FastIntersect( + otherTable, table, methodName, resultSetData); + goto Done; + } + } + case (otherMap: JSMapWithNoCustomIteration): { + CheckSetRecordHasJSMapMethods(otherRec) otherwise SlowPath; + + const otherTable = + Cast(otherMap.table) otherwise unreachable; + + const otherSize = LoadOrderedHashTableMetadata( + otherTable, kOrderedHashSetNumberOfElementsIndex); + + if (thisSize <= otherSize) { + resultSetData = FastIntersect( + table, otherTable, methodName, resultSetData); + goto Done; + + } else { + let otherIterator = + collections::NewUnmodifiedOrderedHashMapIterator(otherTable); + + while (true) { + const nextValue = otherIterator.Next() otherwise Done; + + if (TableHasKey(table, nextValue.key)) { + resultSetData = + AddToSetTable(resultSetData, nextValue.key, methodName); + } + } + } + } + case (JSAny): { + goto SlowPath; + } + } + } label SlowPath { + // 6. If thisSize ≤ otherRec.[[Size]], then + if (thisSize <= Convert(otherRec.size)) { + // a. Let index be 0. + let thisIter = collections::NewOrderedHashSetIterator(table); + + // b. Repeat, while index < thisSize, + while (true) { + // i. Let e be O.[[SetData]][index]. + const key = thisIter.Next() otherwise Done; + + // ii. Set index to index + 1. + // iii. If e is not empty, then + // 1. 
Let inOther be ToBoolean(? Call(otherRec.[[Has]], + // otherRec.[[Set]], « e »)). + const inOther = + ToBoolean(Call(context, otherRec.has, otherRec.object, key)); + + // 2. If inOther is true, then + if (inOther) { + // a. NOTE: It is possible for earlier calls to otherRec.[[Has]] to + // remove and re-add an element of O.[[SetData]], which can cause the + // same element to be visited twice during this iteration. + // We used `OrderedHashSetIterator` that works when underlying table + // is changed. + // b. Let alreadyInResult be SetDataHas(resultSetData, e). + // c. If alreadyInResult is false, then + // i. Append e to resultSetData. + resultSetData = AddToSetTable(resultSetData, key, methodName); + } + + // 3. NOTE: The number of elements in O.[[SetData]] may have increased + // during execution of otherRec.[[Has]]. + // 4. Set thisSize to the number of elements of O.[[SetData]]. + // We used iterator so we do not need to update thisSize and index. + } + } else { + // a. Let keysIter be ? GetKeysIterator(otherRec). + let keysIter = + GetKeysIterator(otherRec.object, UnsafeCast(otherRec.keys)); + + // b. Let next be true. + let nextRecord: JSReceiver; + + // c. Repeat, while next is not false, + while (true) { + // i. Set next to ? IteratorStep(keysIter). + nextRecord = iterator::IteratorStep(keysIter, fastIteratorResultMap) + otherwise Done; + + // ii. If next is not false, then + // 1. Let nextValue be ? IteratorValue(next). + const nextValue = + iterator::IteratorValue(nextRecord, fastIteratorResultMap); + + // 2. If nextValue is -0𝔽, set nextValue to +0𝔽. + // 3. NOTE: Because other is an arbitrary object, it is possible for its + // "keys" iterator to produce the same value more than once. + // 4. Let alreadyInResult be SetDataHas(resultSetData, nextValue). + // 5. Let inThis be SetDataHas(O.[[SetData]], nextValue). + + if (TableHasKey(table, nextValue)) { + // 6. If alreadyInResult is false and inThis is true, then + // a. Append nextValue to resultSetData. + resultSetData = AddToSetTable(resultSetData, nextValue, methodName); + } + } + } + } label Done { + return new JSSet{ + map: *NativeContextSlot(ContextSlot::JS_SET_MAP_INDEX), + properties_or_hash: kEmptyFixedArray, + elements: kEmptyFixedArray, + table: resultSetData + }; + } + unreachable; +} +} diff --git a/v8/src/builtins/set-union.tq b/v8/src/builtins/set-union.tq new file mode 100644 index 000000000..c20cd37f3 --- /dev/null +++ b/v8/src/builtins/set-union.tq @@ -0,0 +1,100 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +namespace collections { + +// https://tc39.es/proposal-set-methods/#sec-set.prototype.union +transitioning javascript builtin SetPrototypeUnion( + js-implicit context: NativeContext, receiver: JSAny)(other: JSAny): JSSet { + const methodName: constexpr string = 'Set.prototype.union'; + const fastIteratorResultMap = GetIteratorResultMap(); + + // 1. Let O be the this value. + // 2. Perform ? RequireInternalSlot(O, [[SetData]]). + const o = Cast(receiver) otherwise + ThrowTypeError( + MessageTemplate::kIncompatibleMethodReceiver, methodName, receiver); + + const table = Cast(o.table) otherwise unreachable; + + // 3. Let otherRec be ? GetSetRecord(other). + let otherRec = GetSetRecord(other, methodName); + + // 5. Let resultSetData be a copy of O.[[SetData]]. 
+ let resultSetData = Cast(CloneFixedArray( + table, ExtractFixedArrayFlag::kFixedArrays)) otherwise unreachable; + + try { + typeswitch (other) { + case (otherSet: JSSetWithNoCustomIteration): { + CheckSetRecordHasJSSetMethods(otherRec) otherwise SlowPath; + + const otherTable = + Cast(otherSet.table) otherwise unreachable; + + let otherIterator = + collections::NewUnmodifiedOrderedHashSetIterator(otherTable); + + while (true) { + const nextValue = otherIterator.Next() otherwise Done; + resultSetData = AddToSetTable(resultSetData, nextValue, methodName); + } + } + case (otherMap: JSMapWithNoCustomIteration): { + CheckSetRecordHasJSMapMethods(otherRec) otherwise SlowPath; + + const otherTable = + Cast(otherMap.table) otherwise unreachable; + + let otherIterator = + collections::NewUnmodifiedOrderedHashMapIterator(otherTable); + + while (true) { + const nextValue = otherIterator.Next() otherwise Done; + resultSetData = + AddToSetTable(resultSetData, nextValue.key, methodName); + } + } + case (JSAny): { + goto SlowPath; + } + } + } label SlowPath { + // 4. Let keysIter be ? GetKeysIterator(otherRec). + let keysIter = + GetKeysIterator(otherRec.object, UnsafeCast(otherRec.keys)); + + // 6. Let next be true. + let nextRecord: JSReceiver; + // 7. Repeat, while next is not false, + while (true) { + // a. Set next to ? IteratorStep(keysIter). + nextRecord = iterator::IteratorStep(keysIter, fastIteratorResultMap) + otherwise Done; + + // b. If next is not false, then + // i. Let nextValue be ? IteratorValue(next). + const nextValue = + iterator::IteratorValue(nextRecord, fastIteratorResultMap); + + // ii. If nextValue is -0𝔽, set nextValue to +0𝔽. + // iii. If SetDataHas(resultSetData, nextValue) is false, then + // 1. Append nextValue to resultSetData. + resultSetData = AddToSetTable(resultSetData, nextValue, methodName); + } + } label Done { + // 8. Let result be + // OrdinaryObjectCreate(%Set.prototype%, « [[SetData]]»). + // 9. Set result.[[SetData]] to resultSetData. + // 10. Return result. + return new JSSet{ + map: *NativeContextSlot(ContextSlot::JS_SET_MAP_INDEX), + properties_or_hash: kEmptyFixedArray, + elements: kEmptyFixedArray, + table: resultSetData + }; + } + unreachable; +} +} diff --git a/v8/src/builtins/setup-builtins-internal.cc b/v8/src/builtins/setup-builtins-internal.cc index 3dc08bf40..cbf64c42e 100644 --- a/v8/src/builtins/setup-builtins-internal.cc +++ b/v8/src/builtins/setup-builtins-internal.cc @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
+#include + #include "src/builtins/builtins.h" #include "src/builtins/profile-data-reader.h" #include "src/codegen/assembler-inl.h" @@ -79,7 +81,7 @@ using CodeAssemblerGenerator = void (*)(compiler::CodeAssemblerState*); Handle BuildPlaceholder(Isolate* isolate, Builtin builtin) { HandleScope scope(isolate); - byte buffer[kBufferSize]; + uint8_t buffer[kBufferSize]; MacroAssembler masm(isolate, CodeObjectRequired::kYes, ExternalAssemblerBuffer(buffer, kBufferSize)); DCHECK(!masm.has_frame()); @@ -103,7 +105,7 @@ Code BuildWithMacroAssembler(Isolate* isolate, Builtin builtin, MacroAssemblerGenerator generator, const char* s_name) { HandleScope scope(isolate); - byte buffer[kBufferSize]; + uint8_t buffer[kBufferSize]; MacroAssembler masm(isolate, BuiltinAssemblerOptions(isolate, builtin), CodeObjectRequired::kYes, @@ -124,6 +126,14 @@ Code BuildWithMacroAssembler(Isolate* isolate, Builtin builtin, HandlerTable::EmitReturnEntry( &masm, 0, isolate->builtins()->js_entry_handler_offset()); } +#if V8_ENABLE_WEBASSEMBLY && (V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64) + // TODO(v8:12191): Enable on all platforms once the builtin has been ported. + if (builtin == Builtin::kWasmReturnPromiseOnSuspend) { + handler_table_offset = HandlerTable::EmitReturnTableStart(&masm); + HandlerTable::EmitReturnEntry( + &masm, 0, isolate->builtins()->jspi_prompt_handler_offset()); + } +#endif CodeDesc desc; masm.GetCode(isolate, &desc, MacroAssembler::kNoSafepointTable, @@ -142,7 +152,7 @@ Code BuildWithMacroAssembler(Isolate* isolate, Builtin builtin, Code BuildAdaptor(Isolate* isolate, Builtin builtin, Address builtin_address, const char* name) { HandleScope scope(isolate); - byte buffer[kBufferSize]; + uint8_t buffer[kBufferSize]; MacroAssembler masm(isolate, BuiltinAssemblerOptions(isolate, builtin), CodeObjectRequired::kYes, ExternalAssemblerBuffer(buffer, kBufferSize)); @@ -283,6 +293,11 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) { Builtins* builtins = isolate->builtins(); DCHECK(!builtins->initialized_); + if (v8_flags.dump_builtins_hashes_to_file) { + // Create an empty file. + std::ofstream(v8_flags.dump_builtins_hashes_to_file, std::ios_base::trunc); + } + PopulateWithPlaceholders(isolate); // Create a scope for the handles in the builtins. diff --git a/v8/src/builtins/torque-internal.tq b/v8/src/builtins/torque-internal.tq index 5baa32cd2..c97a4edab 100644 --- a/v8/src/builtins/torque-internal.tq +++ b/v8/src/builtins/torque-internal.tq @@ -48,6 +48,11 @@ struct Unsafe {} // of the pointer, not of the instance. intrinsic %SizeOf(): constexpr int31; +// `SizeOf` without the `%` to allow uses outside of `torque_internal`. 
+macro SizeOf(): constexpr int31 { + return %SizeOf(); +} + macro TimesSizeOf(i: intptr): intptr { return i * %SizeOf(); } @@ -184,6 +189,16 @@ macro NewConstSlice( }); } +macro NewOffHeapMutableSlice( + startPointer: RawPtr, length: intptr): MutableSlice { + return %RawDownCast>(Slice{ + object: kZeroBitPattern, + offset: Convert(Convert(startPointer)) + kHeapObjectTag, + length: length, + unsafeMarker: Unsafe {} + }); +} + macro NewOffHeapConstSlice( startPointer: RawPtr, length: intptr): ConstSlice { return %RawDownCast>(Slice{ diff --git a/v8/src/builtins/typed-array-createtypedarray.tq b/v8/src/builtins/typed-array-createtypedarray.tq index c2fee1a51..a917d701a 100644 --- a/v8/src/builtins/typed-array-createtypedarray.tq +++ b/v8/src/builtins/typed-array-createtypedarray.tq @@ -10,7 +10,6 @@ extern builtin IterableToListMayPreserveHoles( extern macro TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer( implicit context: Context)(): JSArrayBuffer; -extern macro CodeStubAssembler::AllocateByteArray(uintptr): ByteArray; extern macro TypedArrayBuiltinsAssembler::GetDefaultConstructor( implicit context: Context)(JSTypedArray): JSFunction; extern macro TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields( diff --git a/v8/src/builtins/typed-array-every.tq b/v8/src/builtins/typed-array-every.tq index fa1b7ab61..2bd3089ab 100644 --- a/v8/src/builtins/typed-array-every.tq +++ b/v8/src/builtins/typed-array-every.tq @@ -71,7 +71,7 @@ TypedArrayPrototypeEvery( } label IsDetachedOrOutOfBounds deferred { ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameEvery); } label NotCallable deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } } } diff --git a/v8/src/builtins/typed-array-filter.tq b/v8/src/builtins/typed-array-filter.tq index 736dff0af..8fcd7f8e0 100644 --- a/v8/src/builtins/typed-array-filter.tq +++ b/v8/src/builtins/typed-array-filter.tq @@ -22,7 +22,7 @@ transitioning javascript builtin TypedArrayPrototypeFilter( otherwise IsDetachedOrOutOfBounds; // 4. If IsCallable(callbackfn) is false, throw a TypeError exception. const callbackfn = Cast(arguments[0]) - otherwise ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + otherwise ThrowCalledNonCallable(arguments[0]); // 5. If thisArg is present, let T be thisArg; else let T be undefined. 
const thisArg: JSAny = arguments[1]; diff --git a/v8/src/builtins/typed-array-find.tq b/v8/src/builtins/typed-array-find.tq index c2456f926..2804eb372 100644 --- a/v8/src/builtins/typed-array-find.tq +++ b/v8/src/builtins/typed-array-find.tq @@ -69,7 +69,7 @@ TypedArrayPrototypeFind( const thisArg = arguments[1]; return FindAllElements(attachedArrayAndLength, predicate, thisArg); } label NotCallable deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } label NotTypedArray deferred { ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFind); } label IsDetachedOrOutOfBounds deferred { diff --git a/v8/src/builtins/typed-array-findindex.tq b/v8/src/builtins/typed-array-findindex.tq index 6a6300814..96d1f97ef 100644 --- a/v8/src/builtins/typed-array-findindex.tq +++ b/v8/src/builtins/typed-array-findindex.tq @@ -63,7 +63,7 @@ TypedArrayPrototypeFindIndex( const thisArg = arguments[1]; return FindIndexAllElements(attachedArrayAndLength, predicate, thisArg); } label NotCallable deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } label NotTypedArray deferred { ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFindIndex); } label IsDetachedOrOutOfBounds deferred { diff --git a/v8/src/builtins/typed-array-findlast.tq b/v8/src/builtins/typed-array-findlast.tq index 45695f83a..b6220603c 100644 --- a/v8/src/builtins/typed-array-findlast.tq +++ b/v8/src/builtins/typed-array-findlast.tq @@ -68,7 +68,7 @@ TypedArrayPrototypeFindLast( const thisArg = arguments[1]; return FindLastAllElements(attachedArrayAndLength, predicate, thisArg); } label NotCallable deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } label NotTypedArray deferred { ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFindLast); } label IsDetachedOrOutOfBounds deferred { diff --git a/v8/src/builtins/typed-array-findlastindex.tq b/v8/src/builtins/typed-array-findlastindex.tq index 1edee5444..e7736d22b 100644 --- a/v8/src/builtins/typed-array-findlastindex.tq +++ b/v8/src/builtins/typed-array-findlastindex.tq @@ -68,7 +68,7 @@ TypedArrayPrototypeFindLastIndex( const thisArg = arguments[1]; return FindLastIndexAllElements(attachedArrayAndLength, predicate, thisArg); } label NotCallable deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } label NotTypedArray deferred { ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameFindLastIndex); } label IsDetachedOrOutOfBounds deferred { diff --git a/v8/src/builtins/typed-array-foreach.tq b/v8/src/builtins/typed-array-foreach.tq index 45b949b4e..5dee6accc 100644 --- a/v8/src/builtins/typed-array-foreach.tq +++ b/v8/src/builtins/typed-array-foreach.tq @@ -63,7 +63,7 @@ TypedArrayPrototypeForEach(js-implicit context: NativeContext, receiver: JSAny)( const thisArg = arguments[1]; return ForEachAllElements(attachedArrayAndLength, callbackfn, thisArg); } label NotCallable deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } label NotTypedArray deferred { ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameForEach); } label IsDetachedOrOutOfBounds deferred { diff --git a/v8/src/builtins/typed-array-from.tq b/v8/src/builtins/typed-array-from.tq index eba7c87ea..55fee2ad0 100644 --- a/v8/src/builtins/typed-array-from.tq +++ 
b/v8/src/builtins/typed-array-from.tq @@ -7,7 +7,6 @@ namespace typed_array { const kBuiltinNameFrom: constexpr string = '%TypedArray%.from'; -type BuiltinsName extends int31 constexpr 'Builtin'; const kTypedArrayPrototypeValues: constexpr BuiltinsName generates 'Builtin::kTypedArrayPrototypeValues'; const kArrayPrototypeValues: constexpr BuiltinsName @@ -35,7 +34,7 @@ TypedArrayFrom(js-implicit context: NativeContext, receiver: JSAny)( // b. Let mapping be true. const mapping: bool = mapfnObj != Undefined; if (mapping && !Is(mapfnObj)) deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, mapfnObj); + ThrowCalledNonCallable(mapfnObj); } // We split up this builtin differently to the way it is written in the diff --git a/v8/src/builtins/typed-array-reduce.tq b/v8/src/builtins/typed-array-reduce.tq index b231b1ff9..b429c137c 100644 --- a/v8/src/builtins/typed-array-reduce.tq +++ b/v8/src/builtins/typed-array-reduce.tq @@ -65,7 +65,7 @@ TypedArrayPrototypeReduce( const initialValue = arguments.length >= 2 ? arguments[1] : TheHole; return ReduceAllElements(attachedArrayAndLength, callbackfn, initialValue); } label NotCallable deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } label NotTypedArray deferred { ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameReduce); } label IsDetachedOrOutOfBounds deferred { diff --git a/v8/src/builtins/typed-array-reduceright.tq b/v8/src/builtins/typed-array-reduceright.tq index 36f14a1b6..9ab569977 100644 --- a/v8/src/builtins/typed-array-reduceright.tq +++ b/v8/src/builtins/typed-array-reduceright.tq @@ -69,7 +69,7 @@ TypedArrayPrototypeReduceRight( return ReduceRightAllElements( attachedArrayAndLength, callbackfn, initialValue); } label NotCallable deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } label NotTypedArray deferred { ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameReduceRight); } label IsDetachedOrOutOfBounds deferred { diff --git a/v8/src/builtins/typed-array-some.tq b/v8/src/builtins/typed-array-some.tq index bb2d951ec..b9822542c 100644 --- a/v8/src/builtins/typed-array-some.tq +++ b/v8/src/builtins/typed-array-some.tq @@ -73,7 +73,7 @@ TypedArrayPrototypeSome( } label IsDetachedOrOutOfBounds deferred { ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameSome); } label NotCallable deferred { - ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]); + ThrowCalledNonCallable(arguments[0]); } } } diff --git a/v8/src/builtins/wasm.tq b/v8/src/builtins/wasm.tq index 81550b586..d20f66ac3 100644 --- a/v8/src/builtins/wasm.tq +++ b/v8/src/builtins/wasm.tq @@ -71,7 +71,6 @@ extern runtime WasmJSToWasmObject(Context, JSAny, Smi): JSAny; } namespace unsafe { -extern macro TimesTaggedSize(intptr): intptr; extern macro Allocate(intptr, constexpr AllocationFlag): HeapObject; } @@ -122,7 +121,7 @@ builtin WasmFuncRefToJS(implicit context: Context)(val: WasmInternalFunction| } } -builtin WasmTaggedNonSmiToInt32(implicit context: Context)(val: JSAnyNotSmi): +builtin WasmTaggedNonSmiToInt32(implicit context: Context)(val: HeapObject): int32 { return ChangeTaggedNonSmiToInt32(val); } @@ -131,6 +130,10 @@ builtin WasmTaggedToFloat64(implicit context: Context)(val: JSAny): float64 { return ChangeTaggedToFloat64(val); } +builtin WasmTaggedToFloat32(implicit context: Context)(val: JSAny): float32 { + return TruncateFloat64ToFloat32(ChangeTaggedToFloat64(val)); +} + 
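The WasmTaggedToFloat32 builtin added above narrows the float64 value of a tagged JS number to float32 (TruncateFloat64ToFloat32 applied to ChangeTaggedToFloat64); routing through the existing float64 conversion avoids duplicating the tagged-value handling for the f32 case. A minimal standalone sketch of the same numeric step in plain C++ (not V8 code; the function name is made up, and it assumes the tagged value has already been brought to a C++ double):

    // Sketch of the narrowing only: one float64 -> float32 rounding, as
    // TruncateFloat64ToFloat32 performs after the tagged value is converted.
    #include <cstdio>

    float TaggedNumberToFloat32(double value_as_float64) {
      return static_cast<float>(value_as_float64);
    }

    int main() {
      // Prints the float32-rounded value of 0.1 widened back to double.
      std::printf("%.9g\n", static_cast<double>(TaggedNumberToFloat32(0.1)));
      return 0;
    }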
builtin WasmMemoryGrow(numPages: int32): int32 { if (!IsValidPositiveSmi(ChangeInt32ToIntPtr(numPages))) return Int32Constant(-1); @@ -402,17 +405,21 @@ builtin WasmAllocateArray_Uninitialized( } builtin WasmArrayNewSegment( - segmentIndex: uint32, offset: uint32, length: uint32, rtt: Map): Object { + segmentIndex: uint32, offset: uint32, length: uint32, isElement: Smi, + rtt: Map): Object { const instance = LoadInstanceFromFrame(); try { - const smiOffset = - Convert(offset) otherwise ElementSegmentOutOfBounds; + const smiOffset = Convert(offset) otherwise SegmentOutOfBounds; const smiLength = Convert(length) otherwise ArrayTooLarge; tail runtime::WasmArrayNewSegment( LoadContextFromInstance(instance), instance, SmiFromUint32(segmentIndex), smiOffset, smiLength, rtt); - } label ElementSegmentOutOfBounds { - tail ThrowWasmTrapElementSegmentOutOfBounds(); + } label SegmentOutOfBounds { + if (isElement == SmiConstant(0)) { + tail ThrowWasmTrapDataSegmentOutOfBounds(); + } else { + tail ThrowWasmTrapElementSegmentOutOfBounds(); + } } label ArrayTooLarge { tail ThrowWasmTrapArrayTooLarge(); } @@ -421,7 +428,7 @@ builtin WasmArrayNewSegment( // {segmentIndex} has to be tagged as a possible stack parameter. builtin WasmArrayInitSegment( arrayIndex: uint32, segmentOffset: uint32, length: uint32, - segmentIndex: Smi, arrayRaw: HeapObject): JSAny { + segmentIndex: Smi, isElement: Smi, arrayRaw: HeapObject): JSAny { const instance = LoadInstanceFromFrame(); if (arrayRaw == kWasmNull) { tail ThrowWasmTrapNullDereference(); @@ -431,14 +438,18 @@ builtin WasmArrayInitSegment( const smiArrayIndex = Convert(arrayIndex) otherwise ArrayOutOfBounds; const smiOffset = - Convert(segmentOffset) otherwise ElementSegmentOutOfBounds; + Convert(segmentOffset) otherwise SegmentOutOfBounds; const smiLength = Convert(length) otherwise ArrayOutOfBounds; tail runtime::WasmArrayInitSegment( LoadContextFromInstance(instance), instance, segmentIndex, array, smiArrayIndex, smiOffset, smiLength); - } label ElementSegmentOutOfBounds { - tail ThrowWasmTrapElementSegmentOutOfBounds(); + } label SegmentOutOfBounds { + if (isElement == SmiConstant(0)) { + tail ThrowWasmTrapDataSegmentOutOfBounds(); + } else { + tail ThrowWasmTrapElementSegmentOutOfBounds(); + } } label ArrayOutOfBounds { tail ThrowWasmTrapArrayOutOfBounds(); } @@ -525,6 +536,7 @@ builtin UintPtr53ToNumber(value: uintptr): Number { } extern builtin I64ToBigInt(intptr): BigInt; +extern builtin I32PairToBigInt(/*low*/ intptr, /*high*/ intptr): BigInt; builtin WasmAtomicNotify(offset: uintptr, count: uint32): uint32 { const instance: WasmInstanceObject = LoadInstanceFromFrame(); @@ -1367,4 +1379,5 @@ builtin WasmExternInternalize(externObject: JSAny): JSAny { tail runtime::WasmJSToWasmObject( context, externObject, SmiConstant(kAnyType)); } -} + +} // namespace wasm diff --git a/v8/src/builtins/x64/builtins-x64.cc b/v8/src/builtins/x64/builtins-x64.cc index d9a2359f4..00b91156c 100644 --- a/v8/src/builtins/x64/builtins-x64.cc +++ b/v8/src/builtins/x64/builtins-x64.cc @@ -7,6 +7,7 @@ #include "src/api/api-arguments.h" #include "src/base/bits-iterator.h" #include "src/base/iterator.h" +#include "src/builtins/builtins-descriptors.h" #include "src/codegen/code-factory.h" #include "src/codegen/interface-descriptors-inl.h" // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop. 
@@ -994,9 +995,16 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, namespace { -void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array) { - __ movw(FieldOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset), - Immediate(0)); +void ResetSharedFunctionInfoAge(MacroAssembler* masm, Register sfi) { + __ movw(FieldOperand(sfi, SharedFunctionInfo::kAgeOffset), Immediate(0)); +} + +void ResetJSFunctionAge(MacroAssembler* masm, Register js_function) { + const Register shared_function_info(kScratchRegister); + __ LoadTaggedField( + shared_function_info, + FieldOperand(js_function, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, shared_function_info); } void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm, @@ -1031,10 +1039,11 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Get the bytecode array from the function object and load it into // kInterpreterBytecodeArrayRegister. - const TaggedRegister shared_function_info(kScratchRegister); + const Register shared_function_info(kScratchRegister); __ LoadTaggedField( shared_function_info, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + ResetSharedFunctionInfoAge(masm, shared_function_info); __ LoadTaggedField(kInterpreterBytecodeArrayRegister, FieldOperand(shared_function_info, SharedFunctionInfo::kFunctionDataOffset)); @@ -1095,8 +1104,6 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ Push(kJavaScriptCallTargetRegister); // Callee's JS function. __ Push(kJavaScriptCallArgCountRegister); // Actual argument count. - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister); - // Load initial bytecode offset. __ Move(kInterpreterBytecodeOffsetRegister, BytecodeArray::kHeaderSize - kHeapObjectTag); @@ -1606,6 +1613,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { BaselineOutOfLinePrologueDescriptor::kClosure); DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister); DCHECK_EQ(callee_js_function, kJSFunctionRegister); + ResetJSFunctionAge(masm, callee_js_function); __ Push(callee_js_function); // Callee's JS function. __ Push(descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor:: @@ -1616,7 +1624,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // onto the frame, so load it into a register. Register bytecode_array = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); - ResetBytecodeAge(masm, bytecode_array); __ Push(bytecode_array); // Baseline code frames store the feedback vector where interpreter would @@ -3047,9 +3054,8 @@ void LoadTargetJumpBuffer(MacroAssembler* masm, Register target_continuation) { LoadJumpBuffer(masm, target_jmpbuf, false); } -void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance, - Register return_reg, Register tmp1, - Register tmp2) { +void ReloadParentContinuation(MacroAssembler* masm, Register promise, + Register tmp1, Register tmp2) { Register active_continuation = tmp1; __ LoadRoot(active_continuation, RootIndex::kActiveContinuation); @@ -3080,12 +3086,10 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance, MemOperand GCScanSlotPlace = MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset); __ Move(GCScanSlotPlace, 1); - __ Push(return_reg); - __ Push(wasm_instance); // Spill. 
__ Move(kContextRegister, Smi::zero()); + __ Push(promise); __ CallRuntime(Runtime::kWasmSyncStackLimit); - __ Pop(wasm_instance); - __ Pop(return_reg); + __ Pop(promise); } void RestoreParentSuspender(MacroAssembler* masm, Register tmp1, @@ -3796,8 +3800,31 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { Label return_done; __ bind(&return_done); + + Label return_promise; if (stack_switch) { - ReloadParentContinuation(masm, wasm_instance, return_reg, rbx, rcx); + // The return value of the wasm function becomes the parameter of the + // FulfillPromise builtin, and the promise is the return value of this + // wrapper. + __ movq(rbx, return_reg); + Register promise = rax; + __ LoadRoot(promise, RootIndex::kActiveSuspender); + __ LoadTaggedField( + promise, FieldOperand(promise, WasmSuspenderObject::kPromiseOffset)); + __ movq(kContextRegister, MemOperand(rbp, kFunctionDataOffset)); + __ LoadTaggedField(kContextRegister, + FieldOperand(kContextRegister, + WasmExportedFunctionData::kInstanceOffset)); + __ LoadTaggedField(kContextRegister, + FieldOperand(kContextRegister, + WasmInstanceObject::kNativeContextOffset)); + __ Move(MemOperand(rbp, kGCScanSlotCountOffset), 1); + __ Push(promise); + __ CallBuiltin(Builtin::kFulfillPromise); + __ Pop(promise); + + __ bind(&return_promise); + ReloadParentContinuation(masm, promise, rbx, rcx); RestoreParentSuspender(masm, rbx, rcx); } __ bind(&suspend); @@ -4049,13 +4076,12 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { // Save registers to the stack. __ pushq(wasm_instance); __ pushq(function_data); - // Push the arguments for the runtime call. - __ Push(wasm_instance); // first argument - __ Push(function_data); // second argument + // Push the argument for the runtime call. + __ Push(function_data); // argument // Set up context. __ Move(kContextRegister, Smi::zero()); // Call the runtime function that kicks off compilation. - __ CallRuntime(Runtime::kWasmCompileWrapper, 2); + __ CallRuntime(Runtime::kWasmCompileWrapper, 1); // Pop the result. __ movq(r9, kReturnRegister0); // Restore registers from the stack. @@ -4063,6 +4089,54 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { __ popq(wasm_instance); __ jmp(&compile_wrapper_done); } + + // Catch handler for the stack-switching wrapper: reject the promise with the + // thrown exception. + if (stack_switch) { + int catch_handler = __ pc_offset(); + // Restore rsp to free the reserved stack slots for the sections. + __ leaq(rsp, MemOperand(rbp, kLastSpillOffset)); + + // Unset thread_in_wasm_flag. + thread_in_wasm_flag_addr = r8; + __ movq(thread_in_wasm_flag_addr, + MemOperand(kRootRegister, + Isolate::thread_in_wasm_flag_address_offset())); + __ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(0)); + thread_in_wasm_flag_addr = no_reg; + + // The exception becomes the parameter of the RejectPromise builtin, and the + // promise is the return value of this wrapper. 
+ __ movq(rbx, kReturnRegister0); + Register promise = rax; + __ LoadRoot(promise, RootIndex::kActiveSuspender); + __ LoadTaggedField( + promise, FieldOperand(promise, WasmSuspenderObject::kPromiseOffset)); + __ movq(kContextRegister, MemOperand(rbp, kFunctionDataOffset)); + __ LoadTaggedField(kContextRegister, + FieldOperand(kContextRegister, + WasmExportedFunctionData::kInstanceOffset)); + __ LoadTaggedField(kContextRegister, + FieldOperand(kContextRegister, + WasmInstanceObject::kNativeContextOffset)); + + __ Move(MemOperand(rbp, kGCScanSlotCountOffset), 1); + __ Push(promise); + static const Builtin_RejectPromise_InterfaceDescriptor desc; + static_assert(desc.GetRegisterParameter(0) == rax && // promise + desc.GetRegisterParameter(1) == rbx && // reason + desc.GetRegisterParameter(2) == rcx // debugEvent + ); + __ LoadRoot(rcx, RootIndex::kTrueValue); + __ CallBuiltin(Builtin::kRejectPromise); + __ Pop(promise); + + // Run the rest of the wrapper normally (switch to the old stack, + // deconstruct the frame, ...). + __ jmp(&return_promise); + + masm->isolate()->builtins()->SetJSPIPromptHandlerOffset(catch_handler); + } } } // namespace @@ -4074,12 +4148,132 @@ void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { GenericJSToWasmWrapperHelper(masm, true); } +void Builtins::Generate_NewGenericJSToWasmWrapper(MacroAssembler* masm) { + __ EnterFrame(StackFrame::JS_TO_WASM); + Register wrapper_buffer = + WasmNewJSToWasmWrapperDescriptor::WrapperBufferRegister(); + // Push the wrapper_buffer stack, it's needed later for the results. + __ pushq(wrapper_buffer); + Register result_size = rax; + __ movq(result_size, + MemOperand( + wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferStackReturnBufferSize)); + __ shlq(result_size, Immediate(kSystemPointerSizeLog2)); + __ subq(rsp, result_size); + __ movq(MemOperand( + wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferStackReturnBufferStart), + rsp); + Register call_target = rdi; + // param_start should not alias with any parameter registers. + Register params_start = r11; + __ movq(params_start, + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferParamStart)); + Register params_end = rbx; + __ movq(params_end, + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferParamEnd)); + __ movq(call_target, + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferCallTarget)); + + Register last_stack_param = rcx; + + // The first GP parameter is the instance, which we handle specially. + int stack_params_offset = + (arraysize(wasm::kGpParamRegisters) - 1) * kSystemPointerSize + + arraysize(wasm::kFpParamRegisters) * kDoubleSize; + + __ leaq(last_stack_param, MemOperand(params_start, stack_params_offset)); + + Label loop_start; + __ bind(&loop_start); + + Label finish_stack_params; + __ cmpq(last_stack_param, params_end); + __ j(greater_equal, &finish_stack_params); + + // Push parameter + __ subq(params_end, Immediate(kSystemPointerSize)); + __ pushq(MemOperand(params_end, 0)); + __ jmp(&loop_start); + + __ bind(&finish_stack_params); + + int next_offset = 0; + for (size_t i = 1; i < arraysize(wasm::kGpParamRegisters); ++i) { + // Check that {params_start} does not overlap with any of the parameter + // registers, so that we don't overwrite it by accident with the loads + // below. 
+ DCHECK_NE(params_start, wasm::kGpParamRegisters[i]); + __ movq(wasm::kGpParamRegisters[i], MemOperand(params_start, next_offset)); + next_offset += kSystemPointerSize; + } + + for (size_t i = 0; i < arraysize(wasm::kFpParamRegisters); ++i) { + __ Movsd(wasm::kFpParamRegisters[i], MemOperand(params_start, next_offset)); + next_offset += kDoubleSize; + } + DCHECK_EQ(next_offset, stack_params_offset); + + __ movq(kWasmInstanceRegister, + MemOperand(rbp, JSToWasmWrapperConstants::kInstanceOffset)); + + Register thread_in_wasm_flag_addr = r12; + __ movq( + thread_in_wasm_flag_addr, + MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset())); + __ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(1)); + + __ call(call_target); + + __ movq( + thread_in_wasm_flag_addr, + MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset())); + __ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(0)); + thread_in_wasm_flag_addr = no_reg; + + wrapper_buffer = rcx; + for (size_t i = 0; i < arraysize(wasm::kGpReturnRegisters); ++i) { + DCHECK_NE(wrapper_buffer, wasm::kGpReturnRegisters[i]); + } + __ movq(wrapper_buffer, MemOperand(rbp, -2 * kSystemPointerSize)); + + __ Movsd( + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferFPReturnRegister1), + wasm::kFpReturnRegisters[0]); + __ Movsd( + MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferFPReturnRegister2), + wasm::kFpReturnRegisters[1]); + __ movq(MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferGPReturnRegister1), + wasm::kGpReturnRegisters[0]); + __ movq(MemOperand(wrapper_buffer, + JSToWasmWrapperConstants::kWrapperBufferGPReturnRegister2), + wasm::kGpReturnRegisters[1]); + + // Call the return value builtin with + // rax: wasm instance. + // rbx: the result JSArray for multi-return. + // rcx: pointer to the byte buffer which contains all parameters. + __ movq(rbx, MemOperand(rbp, JSToWasmWrapperConstants::kResultArrayOffset)); + __ movq(rax, MemOperand(rbp, JSToWasmWrapperConstants::kInstanceOffset)); + __ Call(BUILTIN_CODE(masm->isolate(), JSToWasmHandleReturns), + RelocInfo::CODE_TARGET); + + __ LeaveFrame(StackFrame::JS_TO_WASM); + __ ret(0); +} + void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { // Set up the stackframe. 
__ EnterFrame(StackFrame::STACK_SWITCH); - Register promise = rax; - Register suspender = rbx; + Register suspender = rax; __ subq(rsp, Immediate(-(BuiltinWasmWrapperConstants::kGCScanSlotCountOffset - TypedFrameConstants::kFixedFrameSizeFromFp))); @@ -4149,18 +4343,20 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { MemOperand GCScanSlotPlace = MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset); __ Move(GCScanSlotPlace, 2); - __ Push(promise); __ Push(caller); + __ Push(suspender); __ Move(kContextRegister, Smi::zero()); __ CallRuntime(Runtime::kWasmSyncStackLimit); + __ Pop(suspender); __ Pop(caller); - __ Pop(promise); jmpbuf = caller; __ LoadExternalPointerField( jmpbuf, FieldOperand(caller, WasmContinuationObject::kJmpbufOffset), kWasmContinuationJmpbufTag, r8); caller = no_reg; - __ movq(kReturnRegister0, promise); + __ LoadTaggedField( + kReturnRegister0, + FieldOperand(suspender, WasmSuspenderObject::kPromiseOffset)); __ Move(GCScanSlotPlace, 0); LoadJumpBuffer(masm, jmpbuf, true); __ Trap(); @@ -4283,7 +4479,6 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) { MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset); __ Move(GCScanSlotPlace, 1); __ Push(target_continuation); - __ Move(kContextRegister, Smi::zero()); __ CallRuntime(Runtime::kWasmSyncStackLimit); __ Pop(target_continuation); @@ -4301,6 +4496,9 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) { if (on_resume == wasm::OnResume::kThrow) { // Switch to the continuation's stack without restoring the PC. LoadJumpBuffer(masm, target_jmpbuf, false); + // Pop this frame now. The unwinder expects that the first STACK_SWITCH + // frame is the outermost one. + __ LeaveFrame(StackFrame::STACK_SWITCH); // Forward the onRejected value to kThrow. __ pushq(kReturnRegister0); __ CallRuntime(Runtime::kThrow); @@ -4750,13 +4948,20 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, // TODO(jgruber): I suspect that most of CallApiCallback could be implemented // as a C++ trampoline, vastly simplifying the assembly implementation. 
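// --------------------------------------------------------------------------
// The hunk below splits CallApiCallback into a generic and an optimized entry
// point. A plain-C++ sketch of the difference (types and fields are simplified
// stand-ins for CallHandlerInfo and the calling-convention descriptors):
// kGeneric receives only the CallHandlerInfo and derives the callback address
// and the call data from it, while kNoSideEffects/kWithSideEffects receive
// both directly from the caller.
enum class CallApiCallbackMode { kGeneric, kNoSideEffects, kWithSideEffects };

struct CallHandlerInfo {
  void* maybe_redirected_callback;  // cf. kMaybeRedirectedCallbackOffset
  void* data;                       // cf. kDataOffset
  void* owner_template;             // cf. kOwnerTemplateOffset (frame target)
};

struct ResolvedApiCall {
  void* api_function_address;
  void* call_data;
};

ResolvedApiCall ResolveApiCall(CallApiCallbackMode mode,
                               const CallHandlerInfo* info,
                               void* direct_address, void* direct_call_data) {
  if (mode == CallApiCallbackMode::kGeneric) {
    // Generic mode: everything is read out of the CallHandlerInfo object.
    return {info->maybe_redirected_callback, info->data};
  }
  // Optimized modes: the caller already materialized address and call data.
  return {direct_address, direct_call_data};
}
// --------------------------------------------------------------------------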
-void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { +void Builtins::Generate_CallApiCallbackImpl(MacroAssembler* masm, + CallApiCallbackMode mode) { // ----------- S t a t e ------------- - // -- rsi : context + // CallApiCallbackMode::kGeneric mode: + // -- rcx : arguments count (not including the receiver) + // -- rbx : call handler info + // -- r8 : holder + // CallApiCallbackMode::kNoSideEffects/kWithSideEffectsSideEffects modes: // -- rdx : api function address // -- rcx : arguments count (not including the receiver) // -- rbx : call data // -- rdi : holder + // Both modes: + // -- rsi : context // -- rsp[0] : return address // -- rsp[8] : argument 0 (receiver) // -- rsp[16] : argument 1 @@ -4767,13 +4972,34 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { Register function_callback_info_arg = arg_reg_1; - Register api_function_address = rdx; - Register argc = rcx; - Register call_data = rbx; - Register holder = rdi; - - DCHECK(!AreAliased(api_function_address, argc, holder, call_data, - kScratchRegister)); + Register api_function_address = no_reg; + Register argc = no_reg; + Register call_data = no_reg; + Register callback = no_reg; + Register holder = no_reg; + Register scratch = rax; + Register scratch2 = no_reg; + + switch (mode) { + case CallApiCallbackMode::kGeneric: + api_function_address = rdx; + scratch2 = r9; + argc = CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister(); + callback = CallApiCallbackGenericDescriptor::CallHandlerInfoRegister(); + holder = CallApiCallbackGenericDescriptor::HolderRegister(); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + api_function_address = + CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister(); + argc = CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister(); + call_data = CallApiCallbackOptimizedDescriptor::CallDataRegister(); + holder = CallApiCallbackOptimizedDescriptor::HolderRegister(); + break; + } + DCHECK(!AreAliased(api_function_address, argc, holder, call_data, callback, + scratch, scratch2, kScratchRegister)); using FCI = FunctionCallbackInfo; using FCA = FunctionCallbackArguments; @@ -4802,10 +5028,20 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // Existing state: // rsp[7 * kSystemPointerSize]: <= FCA:::values_ - __ PopReturnAddressTo(rax); + __ PopReturnAddressTo(scratch); __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue); __ Push(kScratchRegister); // kNewTarget - __ Push(call_data); + switch (mode) { + case CallApiCallbackMode::kGeneric: + __ PushTaggedField(FieldOperand(callback, CallHandlerInfo::kDataOffset), + scratch2); + break; + + case CallApiCallbackMode::kNoSideEffects: + case CallApiCallbackMode::kWithSideEffects: + __ Push(call_data); + break; + } __ Push(kScratchRegister); // kReturnValue __ Push(kScratchRegister); // kUnused __ PushAddress(ExternalReference::isolate_address(masm->isolate())); @@ -4814,14 +5050,43 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // We use it below to set up the FunctionCallbackInfo object. __ movq(holder, rsp); - __ PushReturnAddressFrom(rax); - // Allocate v8::FunctionCallbackInfo object and a number of bytes to drop // from the stack after the callback in non-GCed space of the exit frame. static constexpr int kApiStackSpace = 4; static_assert((kApiStackSpace - 1) * kSystemPointerSize == sizeof(FCI)); + const int exit_frame_params_size = + mode == CallApiCallbackMode::kGeneric ? 
2 : 0; + + if (mode == CallApiCallbackMode::kGeneric) { + ASM_CODE_COMMENT_STRING(masm, "Push API_CALLBACK_EXIT frame arguments"); + + // Argc parameter as a Smi. + static_assert(ApiCallbackExitFrameConstants::kArgcOffset == + 3 * kSystemPointerSize); + __ Move(kScratchRegister, argc); + __ SmiTag(kScratchRegister); + __ Push(kScratchRegister); // argc as a Smi + + // Target parameter. + static_assert(ApiCallbackExitFrameConstants::kTargetOffset == + 2 * kSystemPointerSize); + __ PushTaggedField( + FieldOperand(callback, CallHandlerInfo::kOwnerTemplateOffset), + scratch2); - __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT, api_function_address); + __ PushReturnAddressFrom(scratch); + + __ LoadExternalPointerField( + api_function_address, + FieldOperand(callback, CallHandlerInfo::kMaybeRedirectedCallbackOffset), + kCallHandlerInfoCallbackTag, kScratchRegister); + + __ EnterExitFrame(kApiStackSpace, StackFrame::API_CALLBACK_EXIT, + api_function_address); + } else { + __ PushReturnAddressFrom(scratch); + __ EnterExitFrame(kApiStackSpace, StackFrame::EXIT, api_function_address); + } constexpr int kImplicitArgsOffset = 0; constexpr int kLengthOffset = 2; { @@ -4841,16 +5106,18 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { // FunctionCallbackInfo::length_. static_assert(kLengthOffset == offsetof(FCI, length_) / kSystemPointerSize); - __ movq(ExitFrameStackSlotOperand(2), argc); + __ movq(ExitFrameStackSlotOperand(kLengthOffset), argc); } // We also store the number of bytes to drop from the stack after returning // from the API function here. constexpr int kBytesToDropOffset = kLengthOffset + 1; static_assert(kBytesToDropOffset == kApiStackSpace - 1); - __ leaq(kScratchRegister, Operand(argc, times_system_pointer_size, - FCA::kArgsLength * kSystemPointerSize + - kReceiverOnStackSize)); + __ leaq( + kScratchRegister, + Operand(argc, times_system_pointer_size, + (FCA::kArgsLength + exit_frame_params_size) * kSystemPointerSize + + kReceiverOnStackSize)); __ movq(ExitFrameStackSlotOperand(kBytesToDropOffset), kScratchRegister); __ RecordComment("v8::FunctionCallback's argument."); @@ -4859,15 +5126,16 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { DCHECK(!AreAliased(api_function_address, function_callback_info_arg)); - ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); + ExternalReference thunk_ref = + ExternalReference::invoke_function_callback(mode); // Pass api function address to thunk wrapper in case profiler or side-effect // checking is enabled. Register thunk_arg = api_function_address; - Operand return_value_operand = - ExitFrameCallerStackSlotOperand(FCA::kReturnValueIndex); + Operand return_value_operand = ExitFrameCallerStackSlotOperand( + FCA::kReturnValueIndex + exit_frame_params_size); static constexpr int kUseExitFrameStackSlotOperand = 0; - Operand stack_space_operand = ExitFrameStackSlotOperand(3); + Operand stack_space_operand = ExitFrameStackSlotOperand(kBytesToDropOffset); CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, thunk_arg, kUseExitFrameStackSlotOperand, &stack_space_operand, @@ -5215,10 +5483,15 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, // Get the InstructionStream object from the shared function info. 
Register code_obj = rbx; - TaggedRegister shared_function_info(code_obj); + Register shared_function_info(code_obj); __ LoadTaggedField( shared_function_info, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + + if (is_osr) { + ResetSharedFunctionInfoAge(masm, shared_function_info); + } + __ LoadTaggedField(code_obj, FieldOperand(shared_function_info, SharedFunctionInfo::kFunctionDataOffset)); @@ -5318,7 +5591,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, __ popq(kInterpreterAccumulatorRegister); if (is_osr) { - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister); Generate_OSREntry(masm, code_obj); } else { __ jmp(code_obj); diff --git a/v8/src/codegen/arm/assembler-arm.cc b/v8/src/codegen/arm/assembler-arm.cc index d5ac947f6..ac48ca4b2 100644 --- a/v8/src/codegen/arm/assembler-arm.cc +++ b/v8/src/codegen/arm/assembler-arm.cc @@ -522,7 +522,8 @@ Assembler::Assembler(const AssemblerOptions& options, std::unique_ptr buffer) : AssemblerBase(options, std::move(buffer)), pending_32_bit_constants_(), - scratch_register_list_({ip}) { + scratch_register_list_(DefaultTmpList()), + scratch_vfp_register_list_(DefaultFPTmpList()) { reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_); constant_pool_deadline_ = kMaxInt; const_pool_blocked_nesting_ = 0; @@ -534,13 +535,6 @@ Assembler::Assembler(const AssemblerOptions& options, // it's awkward to use CpuFeatures::VFP32DREGS with CpuFeatureScope. To make // its use consistent with other features, we always enable it if we can. EnableCpuFeature(VFP32DREGS); - // Make sure we pick two D registers which alias a Q register. This way, we - // can use a Q as a scratch if NEON is supported. - scratch_vfp_register_list_ = d14.ToVfpRegList() | d15.ToVfpRegList(); - } else { - // When VFP32DREGS is not supported, d15 become allocatable. Therefore we - // cannot use it as a scratch. - scratch_vfp_register_list_ = d14.ToVfpRegList(); } } @@ -549,8 +543,24 @@ Assembler::~Assembler() { DCHECK_EQ(first_const_pool_32_use_, -1); } +// static +RegList Assembler::DefaultTmpList() { return {ip}; } + +// static +VfpRegList Assembler::DefaultFPTmpList() { + if (CpuFeatures::IsSupported(VFP32DREGS)) { + // Make sure we pick two D registers which alias a Q register. This way, we + // can use a Q as a scratch if NEON is supported. + return d14.ToVfpRegList() | d15.ToVfpRegList(); + } else { + // When VFP32DREGS is not supported, d15 become allocatable. Therefore we + // cannot use it as a scratch. + return d14.ToVfpRegList(); + } +} + void Assembler::GetCode(Isolate* isolate, CodeDesc* desc, - SafepointTableBuilder* safepoint_table_builder, + SafepointTableBuilderBase* safepoint_table_builder, int handler_table_offset) { // As a crutch to avoid having to add manual Align calls wherever we use a // raw workflow to create InstructionStream objects (mostly in tests), add diff --git a/v8/src/codegen/arm/assembler-arm.h b/v8/src/codegen/arm/assembler-arm.h index 0f51903fb..e34f521e1 100644 --- a/v8/src/codegen/arm/assembler-arm.h +++ b/v8/src/codegen/arm/assembler-arm.h @@ -309,6 +309,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { ~Assembler() override; + static RegList DefaultTmpList(); + static VfpRegList DefaultFPTmpList(); + void AbortedCodeGeneration() override { pending_32_bit_constants_.clear(); first_const_pool_32_use_ = -1; @@ -316,9 +319,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // GetCode emits any pending (non-emitted) code and fills the descriptor desc. 
static constexpr int kNoHandlerTable = 0; - static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr; + static constexpr SafepointTableBuilderBase* kNoSafepointTable = nullptr; void GetCode(Isolate* isolate, CodeDesc* desc, - SafepointTableBuilder* safepoint_table_builder, + SafepointTableBuilderBase* safepoint_table_builder, int handler_table_offset); // Convenience wrapper for code without safepoint or handler tables. @@ -1412,6 +1415,16 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope { bool CanAcquireD() const { return CanAcquireVfp(); } bool CanAcquireQ() const { return CanAcquireVfp(); } + RegList Available() { return *assembler_->GetScratchRegisterList(); } + void SetAvailable(RegList available) { + *assembler_->GetScratchRegisterList() = available; + } + + VfpRegList AvailableVfp() { return *assembler_->GetScratchVfpRegisterList(); } + void SetAvailableVfp(VfpRegList available) { + *assembler_->GetScratchVfpRegisterList() = available; + } + void Include(const Register& reg1, const Register& reg2 = no_reg) { RegList* available = assembler_->GetScratchRegisterList(); DCHECK_NOT_NULL(available); @@ -1420,6 +1433,11 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope { available->set(reg1); available->set(reg2); } + void Include(RegList list) { + RegList* available = assembler_->GetScratchRegisterList(); + DCHECK_NOT_NULL(available); + *available = *available | list; + } void Include(VfpRegList list) { VfpRegList* available = assembler_->GetScratchVfpRegisterList(); DCHECK_NOT_NULL(available); diff --git a/v8/src/codegen/arm/constants-arm.h b/v8/src/codegen/arm/constants-arm.h index 71ff34f30..d8d52f154 100644 --- a/v8/src/codegen/arm/constants-arm.h +++ b/v8/src/codegen/arm/constants-arm.h @@ -78,48 +78,49 @@ constexpr int kRootRegisterBias = 4095; // General constants are in an anonymous enum in class Instr. // Values for the condition field as defined in section A3.2 -using Condition = int; -constexpr Condition kNoCondition = -1; - -constexpr Condition eq = 0 << 28; // Z set Equal. -constexpr Condition ne = 1 << 28; // Z clear Not equal. -constexpr Condition cs = 2 << 28; // C set Unsigned higher or same. -constexpr Condition cc = 3 << 28; // C clear Unsigned lower. -constexpr Condition mi = 4 << 28; // N set Negative. -constexpr Condition pl = 5 << 28; // N clear Positive or zero. -constexpr Condition vs = 6 << 28; // V set Overflow. -constexpr Condition vc = 7 << 28; // V clear No overflow. -constexpr Condition hi = 8 << 28; // C set, Z clear Unsigned higher. -constexpr Condition ls = 9 << 28; // C clear or Z set Unsigned lower or same. -constexpr Condition ge = 10 << 28; // N == V Greater or equal. -constexpr Condition lt = 11 << 28; // N != V Less than. -constexpr Condition gt = 12 << 28; // Z clear, N == V Greater than. -constexpr Condition le = 13 << 28; // Z set or N != V Less then or equal -constexpr Condition al = 14 << 28; // Always. - -// Special condition (refer to section A3.2.1). -constexpr Condition kSpecialCondition = 15 << 28; -constexpr Condition kNumberOfConditions = 16; - -// Aliases. -constexpr Condition hs = cs; // C set Unsigned higher or same. -constexpr Condition lo = cc; // C clear Unsigned lower. - -// Unified cross-platform condition names/aliases. 
-constexpr Condition kEqual = eq; -constexpr Condition kNotEqual = ne; -constexpr Condition kLessThan = lt; -constexpr Condition kGreaterThan = gt; -constexpr Condition kLessThanEqual = le; -constexpr Condition kGreaterThanEqual = ge; -constexpr Condition kUnsignedLessThan = lo; -constexpr Condition kUnsignedGreaterThan = hi; -constexpr Condition kUnsignedLessThanEqual = ls; -constexpr Condition kUnsignedGreaterThanEqual = hs; -constexpr Condition kOverflow = vs; -constexpr Condition kNoOverflow = vc; -constexpr Condition kZero = eq; -constexpr Condition kNotZero = ne; +enum Condition : int { + kNoCondition = -1, + + eq = 0 << 28, // Z set Equal. + ne = 1 << 28, // Z clear Not equal. + cs = 2 << 28, // C set Unsigned higher or same. + cc = 3 << 28, // C clear Unsigned lower. + mi = 4 << 28, // N set Negative. + pl = 5 << 28, // N clear Positive or zero. + vs = 6 << 28, // V set Overflow. + vc = 7 << 28, // V clear No overflow. + hi = 8 << 28, // C set, Z clear Unsigned higher. + ls = 9 << 28, // C clear or Z set Unsigned lower or same. + ge = 10 << 28, // N == V Greater or equal. + lt = 11 << 28, // N != V Less than. + gt = 12 << 28, // Z clear, N == V Greater than. + le = 13 << 28, // Z set or N != V Less then or equal + al = 14 << 28, // Always. + + // Special condition (refer to section A3.2.1). + kSpecialCondition = 15 << 28, + kNumberOfConditions = 16, + + // Aliases. + hs = cs, // C set Unsigned higher or same. + lo = cc, // C clear Unsigned lower. + + // Unified cross-platform condition names/aliases. + kEqual = eq, + kNotEqual = ne, + kLessThan = lt, + kGreaterThan = gt, + kLessThanEqual = le, + kGreaterThanEqual = ge, + kUnsignedLessThan = lo, + kUnsignedGreaterThan = hi, + kUnsignedLessThanEqual = ls, + kUnsignedGreaterThanEqual = hs, + kOverflow = vs, + kNoOverflow = vc, + kZero = eq, + kNotZero = ne, +}; inline Condition NegateCondition(Condition cond) { DCHECK(cond != al); diff --git a/v8/src/codegen/arm/interface-descriptors-arm-inl.h b/v8/src/codegen/arm/interface-descriptors-arm-inl.h index f1affc1be..e36c8dd28 100644 --- a/v8/src/codegen/arm/interface-descriptors-arm-inl.h +++ b/v8/src/codegen/arm/interface-descriptors-arm-inl.h @@ -19,6 +19,15 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() { return registers; } +constexpr auto CallInterfaceDescriptor::DefaultDoubleRegisterArray() { + // Construct the std::array explicitly here because on arm, the registers d0, + // d1, ... are not of type DoubleRegister but only support implicit casting to + // DoubleRegister. For template resolution, however, implicit casting is not + // sufficient. 
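// --------------------------------------------------------------------------
// A small standalone illustration of the deduction problem described above
// (the wrapper type is a stand-in for whatever d0, d1, ... actually are on
// arm): with class template argument deduction, std::array would deduce the
// wrapper type, so the element type has to be spelled out for the implicit
// conversion to DoubleRegister to kick in.
#include <array>

struct DoubleRegister { int code; };
struct DoubleRegisterAlias {         // stand-in for the real type of d0, d1, ...
  int code;
  operator DoubleRegister() const { return DoubleRegister{code}; }
};

const DoubleRegisterAlias d0{0}, d1{1};

// std::array deduced{d0, d1};       // deduces std::array<DoubleRegisterAlias, 2>
const std::array<DoubleRegister, 2> wanted{d0, d1};  // converts element-wise
// --------------------------------------------------------------------------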
+ std::array registers{d0, d1, d2, d3, d4, d5, d6}; + return registers; +} + #if DEBUG template void StaticCallInterfaceDescriptor:: @@ -269,11 +278,36 @@ constexpr auto BinarySmiOp_BaselineDescriptor::registers() { } // static -constexpr auto ApiCallbackDescriptor::registers() { - return RegisterArray(r1, // kApiFunctionAddress - r2, // kArgc - r3, // kCallData - r0); // kHolder +constexpr Register +CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister() { + return r1; +} +// static +constexpr Register +CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister() { + return r2; +} +// static +constexpr Register CallApiCallbackOptimizedDescriptor::CallDataRegister() { + return r3; +} +// static +constexpr Register CallApiCallbackOptimizedDescriptor::HolderRegister() { + return r0; +} + +// static +constexpr Register +CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister() { + return r2; +} +// static +constexpr Register CallApiCallbackGenericDescriptor::CallHandlerInfoRegister() { + return r3; +} +// static +constexpr Register CallApiCallbackGenericDescriptor::HolderRegister() { + return r0; } // static @@ -311,6 +345,10 @@ constexpr auto RunMicrotasksEntryDescriptor::registers() { return RegisterArray(r0, r1); } +constexpr auto WasmNewJSToWasmWrapperDescriptor::registers() { + // Arbitrarily picked register. + return RegisterArray(r8); +} } // namespace internal } // namespace v8 diff --git a/v8/src/codegen/arm/macro-assembler-arm.cc b/v8/src/codegen/arm/macro-assembler-arm.cc index 296c6972d..b26441535 100644 --- a/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/v8/src/codegen/arm/macro-assembler-arm.cc @@ -416,6 +416,11 @@ void MacroAssembler::Push(Smi smi) { push(scratch); } +void MacroAssembler::Push(TaggedIndex index) { + // TaggedIndex is the same as Smi for 32 bit archs. 
+ Push(Smi::FromIntptr(index.value())); +} + void MacroAssembler::PushArray(Register array, Register size, Register scratch, PushArrayOrder order) { ASM_CODE_COMMENT(this); @@ -1422,7 +1427,8 @@ void MacroAssembler::EnterExitFrame(int stack_space, StackFrame::Type frame_type) { ASM_CODE_COMMENT(this); DCHECK(frame_type == StackFrame::EXIT || - frame_type == StackFrame::BUILTIN_EXIT); + frame_type == StackFrame::BUILTIN_EXIT || + frame_type == StackFrame::API_CALLBACK_EXIT); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -2116,20 +2122,32 @@ void MacroAssembler::AssertUnreachable(AbortReason reason) { if (v8_flags.debug_code) Abort(reason); } -void MacroAssembler::AssertNotSmi(Register object) { +void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); tst(object, Operand(kSmiTagMask)); - Check(ne, AbortReason::kOperandIsASmi); + Check(ne, reason); } -void MacroAssembler::AssertSmi(Register object) { +void MacroAssembler::AssertSmi(Register object, AbortReason reason) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); tst(object, Operand(kSmiTagMask)); - Check(eq, AbortReason::kOperandIsNotASmi); + Check(eq, reason); +} + +void MacroAssembler::AssertMap(Register object) { + if (!v8_flags.debug_code) return; + ASM_CODE_COMMENT(this); + AssertNotSmi(object, AbortReason::kOperandIsNotAMap); + + UseScratchRegisterScope temps(this); + Register temp = temps.Acquire(); + + CompareObjectType(object, temp, temp, MAP_TYPE); + Check(eq, AbortReason::kOperandIsNotAMap); } void MacroAssembler::AssertConstructor(Register object) { @@ -2778,6 +2796,24 @@ void MacroAssembler::ComputeCodeStartAddress(Register dst) { sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta)); } +// Check if the code object is marked for deoptimization. If it is, then it +// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need +// to: +// 1. read from memory the word that contains that bit, which can be found in +// the flags in the referenced {Code} object; +// 2. test kMarkedForDeoptimizationBit in those flags; and +// 3. if it is not zero then it jumps to the builtin. +void MacroAssembler::BailoutIfDeoptimized() { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize; + ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset)); + ldr(scratch, FieldMemOperand(scratch, Code::kFlagsOffset)); + tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit)); + Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode), + RelocInfo::CODE_TARGET, ne); +} + void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { @@ -2916,6 +2952,77 @@ void MacroAssembler::F64x2PromoteLowF32x4(QwNeonRegister dst, F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_f32); } +void MacroAssembler::Switch(Register scratch, Register value, + int case_value_base, Label** labels, + int num_labels) { + Label fallthrough; + if (case_value_base != 0) { + sub(value, value, Operand(case_value_base)); + } + // This {cmp} might still emit a constant pool entry. + cmp(value, Operand(num_labels)); + // Ensure to emit the constant pool first if necessary. 
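// --------------------------------------------------------------------------
// A standalone model (addresses and the +8 pc read-ahead are the only
// assumptions) of the jump-table dispatch emitted just below: on arm, reading
// pc yields the address of the current instruction plus 8, so
// `add pc, pc, value, LSL #2` (taken only when value is in range) lands on the
// value-th branch of the table that follows the fallthrough branch.
#include <cassert>
#include <cstdint>

uint32_t SwitchTarget(uint32_t add_instr_addr, uint32_t value) {
  const uint32_t pc_as_read = add_instr_addr + 8;  // arm pc reads ahead by 8
  return pc_as_read + (value << 2);                // add pc, pc, value, LSL #2
}

int main() {
  const uint32_t add_at = 0x1000;
  // 0x1004 holds `b fallthrough`; the per-case branch table starts at 0x1008.
  assert(SwitchTarget(add_at, 0) == 0x1008);       // case 0 -> first table entry
  assert(SwitchTarget(add_at, 3) == 0x1014);       // case 3 -> fourth table entry
  return 0;
}
// --------------------------------------------------------------------------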
+ CheckConstPool(true, true); + BlockConstPoolFor(num_labels + 2); + add(pc, pc, Operand(value, LSL, 2), LeaveCC, lo); + b(&fallthrough); + for (int i = 0; i < num_labels; ++i) { + b(labels[i]); + } + bind(&fallthrough); +} + +void MacroAssembler::JumpIfCodeIsMarkedForDeoptimization( + Register code, Register scratch, Label* if_marked_for_deoptimization) { + ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset)); + tst(scratch, Operand(Code::kMarkedForDeoptimizationBit)); + b(if_marked_for_deoptimization, ne); +} + +void MacroAssembler::JumpIfCodeIsTurbofanned(Register code, Register scratch, + Label* if_turbofanned) { + ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset)); + tst(scratch, Operand(Code::kIsTurbofannedBit)); + b(if_turbofanned, ne); +} + +void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, + CodeKind min_opt_level, + Register feedback_vector, + FeedbackSlot slot, + Label* on_result, + Label::Distance) { + Label fallthrough, clear_slot; + LoadTaggedField( + scratch_and_result, + FieldMemOperand(feedback_vector, + FeedbackVector::OffsetOfElementAt(slot.ToInt()))); + LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough); + + // Is it marked_for_deoptimization? If yes, clear the slot. + { + UseScratchRegisterScope temps(this); + Register temp = temps.Acquire(); + JumpIfCodeIsMarkedForDeoptimization(scratch_and_result, temp, &clear_slot); + if (min_opt_level == CodeKind::TURBOFAN) { + JumpIfCodeIsTurbofanned(scratch_and_result, temp, on_result); + b(&fallthrough); + } else { + b(on_result); + } + } + + bind(&clear_slot); + Move(scratch_and_result, ClearedValue()); + StoreTaggedField( + scratch_and_result, + FieldMemOperand(feedback_vector, + FeedbackVector::OffsetOfElementAt(slot.ToInt()))); + + bind(&fallthrough); + Move(scratch_and_result, Operand(0)); +} + } // namespace internal } // namespace v8 diff --git a/v8/src/codegen/arm/macro-assembler-arm.h b/v8/src/codegen/arm/macro-assembler-arm.h index 05c74093a..ebc662f40 100644 --- a/v8/src/codegen/arm/macro-assembler-arm.h +++ b/v8/src/codegen/arm/macro-assembler-arm.h @@ -94,6 +94,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void Push(Handle handle); void Push(Smi smi); + void Push(TaggedIndex index); // Push two registers. Pushes leftmost register first (to highest address). void Push(Register src1, Register src2, Condition cond = al) { @@ -344,6 +345,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // The return address on the stack is used by frame iteration. void StoreReturnAddressAndCall(Register target); + void BailoutIfDeoptimized(); void CallForDeoptimization(Builtin target, int deopt_id, Label* exit, DeoptimizeKind kind, Label* ret, Label* jump_deoptimization_entry_label); @@ -533,8 +535,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { } void SmiToInt32(Register smi) { SmiUntag(smi); } + void SmiToInt32(Register dst, Register smi) { SmiUntag(dst, smi); } // Load an object from the root table. 
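// --------------------------------------------------------------------------
// A compact model of the decision logic in the TryLoadOptimizedOsrCode
// implementation added above (types are simplified; a cleared weak slot is
// modelled as an empty optional): deoptimized code clears the feedback slot,
// and when TURBOFAN is the minimum requested tier, lower-tier code is ignored.
#include <optional>

enum class CodeKind { MAGLEV, TURBOFAN };

struct Code {
  bool marked_for_deoptimization;
  bool is_turbofanned;
};

struct OsrSlot {
  std::optional<Code> weak_code;  // nullopt == cleared weak reference
};

std::optional<Code> TryLoadOsrCode(OsrSlot& slot, CodeKind min_opt_level) {
  if (!slot.weak_code) return std::nullopt;   // cleared: fall through
  Code code = *slot.weak_code;
  if (code.marked_for_deoptimization) {
    slot.weak_code.reset();                   // clear the stale slot
    return std::nullopt;
  }
  if (min_opt_level == CodeKind::TURBOFAN && !code.is_turbofanned) {
    return std::nullopt;                      // keep waiting for TF code
  }
  return code;                                // jump to on_result
}
// --------------------------------------------------------------------------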
+ void LoadTaggedRoot(Register destination, RootIndex index) { + LoadRoot(destination, index); + } void LoadRoot(Register destination, RootIndex index) final { LoadRoot(destination, index, al); } @@ -548,6 +554,102 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void LoadMap(Register destination, Register object); + void PushAll(RegList registers) { + if (registers.is_empty()) return; + ASM_CODE_COMMENT(this); + stm(db_w, sp, registers); + } + + void PopAll(RegList registers) { + if (registers.is_empty()) return; + ASM_CODE_COMMENT(this); + ldm(ia_w, sp, registers); + } + + void PushAll(DoubleRegList registers, int stack_slot_size = kDoubleSize) { + if (registers.is_empty()) return; + ASM_CODE_COMMENT(this); + // TODO(victorgomes): vstm only works for consecutive double registers. We + // could check if it is the case and optimize here. + for (DoubleRegister reg : registers) { + vpush(reg); + } + } + + void PopAll(DoubleRegList registers, int stack_slot_size = kDoubleSize) { + if (registers.is_empty()) return; + ASM_CODE_COMMENT(this); + // TODO(victorgomes): vldm only works for consecutive double registers. We + // could check if it is the case and optimize here. + for (DoubleRegister reg : base::Reversed(registers)) { + vpop(reg); + } + } + + inline void Cmp(const Register& rn, int imm) { cmp(rn, Operand(imm)); } + + inline void CmpTagged(const Register& r1, const Register& r2) { cmp(r1, r2); } + + // Functions performing a check on a known or potential smi. Returns + // a condition that is satisfied if the check is successful. + Condition CheckSmi(Register src) { + SmiTst(src); + return eq; + } + + void DecompressTagged(const Register& destination, + const MemOperand& field_operand) { + // No pointer compression on arm, we do just a simple load. + LoadTaggedField(destination, field_operand); + } + + void DecompressTagged(const Register& destination, const Register& source) { + // No pointer compression on arm. Do nothing. + } + + void AssertMap(Register object) NOOP_UNLESS_DEBUG_CODE; + + void LoadTaggedField(const Register& destination, + const MemOperand& field_operand) { + ldr(destination, field_operand); + } + + void LoadTaggedFieldWithoutDecompressing(const Register& destination, + const MemOperand& field_operand) { + LoadTaggedField(destination, field_operand); + } + + void StoreTaggedField(const Register& value, + const MemOperand& dst_field_operand) { + str(value, dst_field_operand); + } + + // For compatibility with platform-independent code. + void StoreTaggedField(const MemOperand& dst_field_operand, + const Register& value) { + StoreTaggedField(value, dst_field_operand); + } + + void Switch(Register scratch, Register value, int case_value_base, + Label** labels, int num_labels); + + void JumpIfCodeIsMarkedForDeoptimization(Register code, Register scratch, + Label* if_marked_for_deoptimization); + + void JumpIfCodeIsTurbofanned(Register code, Register scratch, + Label* if_turbofanned); + + // Falls through and sets scratch_and_result to 0 on failure, jumps to + // on_result on success. + void TryLoadOptimizedOsrCode(Register scratch_and_result, + CodeKind min_opt_level, Register feedback_vector, + FeedbackSlot slot, Label* on_result, + Label::Distance distance); + + void AssertZeroExtended(Register int32_register) { + // In arm32, we don't have top 32 bits, so do nothing. + } + // Performs a truncating conversion of a floating point number as used by // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 
Goes to 'done' if it // succeeds, otherwise falls through if result is saturated. On return @@ -812,8 +914,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void JumpIfNotSmi(Register value, Label* not_smi_label); // Abort execution if argument is a smi, enabled via --debug-code. - void AssertNotSmi(Register object) NOOP_UNLESS_DEBUG_CODE; - void AssertSmi(Register object) NOOP_UNLESS_DEBUG_CODE; + void AssertNotSmi(Register object, + AbortReason reason = AbortReason::kOperandIsASmi) + NOOP_UNLESS_DEBUG_CODE; + void AssertSmi(Register object, + AbortReason reason = AbortReason::kOperandIsNotASmi) + NOOP_UNLESS_DEBUG_CODE; // Abort execution if argument is not a Constructor, enabled via --debug-code. void AssertConstructor(Register object) NOOP_UNLESS_DEBUG_CODE; diff --git a/v8/src/codegen/arm/register-arm.h b/v8/src/codegen/arm/register-arm.h index ffa29ddf7..15bc1e88f 100644 --- a/v8/src/codegen/arm/register-arm.h +++ b/v8/src/codegen/arm/register-arm.h @@ -55,11 +55,6 @@ namespace internal { V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15) // clang-format on -// The ARM ABI does not specify the usage of register r9, which may be reserved -// as the static base or thread register on some platforms, in which case we -// leave it alone. Adjust the value of kR9Available accordingly: -const int kR9Available = 1; // 1 if available to us, 0 if reserved - enum RegisterCode { #define REGISTER_CODE(R) kRegCode_##R, GENERAL_REGISTERS(REGISTER_CODE) @@ -334,6 +329,8 @@ constexpr Register kRootRegister = r10; // Roots array pointer. constexpr DoubleRegister kFPReturnRegister0 = d0; +constexpr Register kMaglevExtraScratchRegister = r9; + } // namespace internal } // namespace v8 diff --git a/v8/src/codegen/arm/reglist-arm.h b/v8/src/codegen/arm/reglist-arm.h index c6834a5db..1bdf9efba 100644 --- a/v8/src/codegen/arm/reglist-arm.h +++ b/v8/src/codegen/arm/reglist-arm.h @@ -28,13 +28,13 @@ const RegList kJSCallerSaved = {r0, // r0 a1 const int kNumJSCallerSaved = 4; // Callee-saved registers preserved when switching from C to JavaScript -const RegList kCalleeSaved = {r4, // r4 v1 - r5, // r5 v2 - r6, // r6 v3 - r7, // r7 v4 (cp in JavaScript code) - r8, // r8 v5 (pp in JavaScript code) - kR9Available ? r9 : Register::no_reg(), // r9 v6 - r10, // r10 v7 +const RegList kCalleeSaved = {r4, // r4 v1 + r5, // r5 v2 + r6, // r6 v3 + r7, // r7 v4 (cp in JavaScript code) + r8, // r8 v5 (pp in JavaScript code) + r9, // r9 v6 + r10, // r10 v7 r11}; // r11 v8 (fp in JavaScript code) // When calling into C++ (only for C++ calls that can't cause a GC). @@ -45,7 +45,7 @@ const RegList kCallerSaved = {r0, // r0 r3, // r3 r9}; // r9 -const int kNumCalleeSaved = 7 + kR9Available; +const int kNumCalleeSaved = 8; // Double registers d8 to d15 are callee-saved. const int kNumDoubleCalleeSaved = 8; diff --git a/v8/src/codegen/arm64/assembler-arm64-inl.h b/v8/src/codegen/arm64/assembler-arm64-inl.h index bc8e2edb8..41f74b929 100644 --- a/v8/src/codegen/arm64/assembler-arm64-inl.h +++ b/v8/src/codegen/arm64/assembler-arm64-inl.h @@ -33,7 +33,8 @@ void RelocInfo::apply(intptr_t delta) { Address old_target = reinterpret_cast
(instr->ImmPCOffsetTarget()); Address new_target = old_target - delta; - instr->SetBranchImmTarget(reinterpret_cast(new_target)); + instr->SetBranchImmTarget( + reinterpret_cast(new_target)); } } } @@ -557,7 +558,8 @@ void Assembler::deserialization_set_special_target_at(Address location, // to zero instead. target = location; } - instr->SetBranchImmTarget(reinterpret_cast(target)); + instr->SetBranchImmTarget( + reinterpret_cast(target)); FlushInstructionCache(location, kInstrSize); } else { DCHECK_EQ(instr->InstructionBits(), 0); @@ -593,7 +595,8 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool, // to zero instead. target = pc; } - instr->SetBranchImmTarget(reinterpret_cast(target)); + instr->SetBranchImmTarget( + reinterpret_cast(target)); if (icache_flush_mode != SKIP_ICACHE_FLUSH) { FlushInstructionCache(pc, kInstrSize); } @@ -823,6 +826,20 @@ LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) { } } +inline void Assembler::LoadStoreScaledImmOffset(Instr memop, int offset, + unsigned size) { + Emit(LoadStoreUnsignedOffsetFixed | memop | ImmLSUnsigned(offset >> size)); +} + +inline void Assembler::LoadStoreUnscaledImmOffset(Instr memop, int offset) { + Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset)); +} + +inline void Assembler::LoadStoreWRegOffset(Instr memop, + const Register& regoffset) { + Emit(LoadStoreRegisterOffsetFixed | memop | Rm(regoffset) | ExtendMode(UXTW)); +} + int Assembler::LinkAndGetInstructionOffsetTo(Label* label) { DCHECK_EQ(kStartOfLabelLinkChain, 0); int offset = LinkAndGetByteOffsetTo(label); diff --git a/v8/src/codegen/arm64/assembler-arm64.cc b/v8/src/codegen/arm64/assembler-arm64.cc index 65e7bc6db..c6e6ded4f 100644 --- a/v8/src/codegen/arm64/assembler-arm64.cc +++ b/v8/src/codegen/arm64/assembler-arm64.cc @@ -683,23 +683,24 @@ void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) { while (!end_of_chain) { Instruction* link = InstructionAt(link_offset); - link_pcoffset = static_cast(link->ImmPCOffset()); - - // ADR instructions are not handled by veneers. - if (link->IsImmBranch()) { - int max_reachable_pc = - static_cast(InstructionOffset(link) + - Instruction::ImmBranchRange(link->BranchType())); - using unresolved_info_it = std::multimap::iterator; - std::pair range; - range = unresolved_branches_.equal_range(max_reachable_pc); - unresolved_info_it it; - for (it = range.first; it != range.second; ++it) { - if (it->second.pc_offset_ == link_offset) { - unresolved_branches_.erase(it); - break; - } - } + int max_reachable_pc = static_cast(InstructionOffset(link)); + + // ADR instructions and unconditional branches are not handled by veneers. + if (link->IsCondBranchImm() || link->IsCompareBranch()) { + static_assert(Instruction::ImmBranchRange(CondBranchType) == + Instruction::ImmBranchRange(CompareBranchType)); + max_reachable_pc += Instruction::ImmBranchRange(CondBranchType); + unresolved_branches_.erase(max_reachable_pc); + link_pcoffset = link->ImmCondBranch() * kInstrSize; + } else if (link->IsTestBranch()) { + // Add one to account for branch type tag bit. 
+ max_reachable_pc += Instruction::ImmBranchRange(TestBranchType) + 1; + unresolved_branches_.erase(max_reachable_pc); + link_pcoffset = link->ImmTestBranch() * kInstrSize; + } else if (link->IsUncondBranchImm()) { + link_pcoffset = link->ImmUncondBranch() * kInstrSize; + } else { + link_pcoffset = static_cast(link->ImmPCOffset()); } end_of_chain = (link_pcoffset == 0); @@ -4118,18 +4119,12 @@ void Assembler::LoadStore(const CPURegister& rt, const MemOperand& addr, if (addr.IsImmediateOffset()) { unsigned size = CalcLSDataSize(op); + int offset = static_cast(addr.offset()); if (IsImmLSScaled(addr.offset(), size)) { - int offset = static_cast(addr.offset()); - // Use the scaled addressing mode. - Emit(LoadStoreUnsignedOffsetFixed | memop | - ImmLSUnsigned(offset >> size)); - } else if (IsImmLSUnscaled(addr.offset())) { - int offset = static_cast(addr.offset()); - // Use the unscaled addressing mode. - Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset)); + LoadStoreScaledImmOffset(memop, offset, size); } else { - // This case is handled in the macro assembler. - UNREACHABLE(); + DCHECK(IsImmLSUnscaled(addr.offset())); + LoadStoreUnscaledImmOffset(memop, offset); } } else if (addr.IsRegisterOffset()) { Extend ext = addr.extend(); @@ -4149,31 +4144,18 @@ void Assembler::LoadStore(const CPURegister& rt, const MemOperand& addr, ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0)); } else { // Pre-index and post-index modes. + DCHECK(IsImmLSUnscaled(addr.offset())); DCHECK_NE(rt, addr.base()); - if (IsImmLSUnscaled(addr.offset())) { - int offset = static_cast(addr.offset()); - if (addr.IsPreIndex()) { - Emit(LoadStorePreIndexFixed | memop | ImmLS(offset)); - } else { - DCHECK(addr.IsPostIndex()); - Emit(LoadStorePostIndexFixed | memop | ImmLS(offset)); - } + int offset = static_cast(addr.offset()); + if (addr.IsPreIndex()) { + Emit(LoadStorePreIndexFixed | memop | ImmLS(offset)); } else { - // This case is handled in the macro assembler. - UNREACHABLE(); + DCHECK(addr.IsPostIndex()); + Emit(LoadStorePostIndexFixed | memop | ImmLS(offset)); } } } -bool Assembler::IsImmLSUnscaled(int64_t offset) { return is_int9(offset); } - -bool Assembler::IsImmLSScaled(int64_t offset, unsigned size) { - bool offset_is_size_multiple = - (static_cast(static_cast(offset >> size) << size) == - offset); - return offset_is_size_multiple && is_uint12(offset >> size); -} - bool Assembler::IsImmLSPair(int64_t offset, unsigned size) { bool offset_is_size_multiple = (static_cast(static_cast(offset >> size) << size) == @@ -4688,29 +4670,49 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, const intptr_t max_pc_after_veneers = MaxPCOffsetAfterVeneerPoolIfEmittedNow(margin); - // The `unresolved_branches_` multimap is sorted by max-reachable-pc in - // ascending order. For efficiency reasons, we want to call + // The `unresolved_branches_` map is sorted by max-reachable-pc in ascending + // order. For efficiency reasons, we want to call // RemoveBranchFromLabelLinkChain in descending order. The actual veneers are // then generated in ascending order. // TODO(jgruber): This is still inefficient in multiple ways, thoughts on how // we could improve in the future: - // - Don't erase individual elements from the multimap, erase a range instead. - // - Replace the multimap by a simpler data structure (like a plain vector or - // a circular array). // - Refactor s.t. RemoveBranchFromLabelLinkChain does not need the linear // lookup in the link chain. 
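// --------------------------------------------------------------------------
// A standalone sketch of the new unresolved_branches_ key encoding used above
// and below (the range constants here are illustrative; the real ones come
// from Instruction::ImmBranchRange): the key is the maximum pc offset the
// branch can reach, and since pc offsets are 4-byte aligned, bit 0 is free to
// tag TB[N]Z entries, whose range differs from B.cond/CB[N]Z.
#include <cassert>

constexpr int kCondBranchRange = 1 << 20;  // B.cond / CB[N]Z (illustrative)
constexpr int kTestBranchRange = 1 << 15;  // TB[N]Z (illustrative)

int EncodeKey(int branch_pc_offset, bool is_test_branch) {
  return is_test_branch ? branch_pc_offset + kTestBranchRange + 1
                        : branch_pc_offset + kCondBranchRange;
}

int DecodeBranchPcOffset(int key) {
  return (key & 1) ? key - (kTestBranchRange + 1) : key - kCondBranchRange;
}

int main() {
  assert(DecodeBranchPcOffset(EncodeKey(0x100, false)) == 0x100);  // B.cond
  assert(DecodeBranchPcOffset(EncodeKey(0x200, true)) == 0x200);   // TB[N]Z
  assert((EncodeKey(0x200, true) & 1) == 1);  // tag bit marks test branches
  return 0;
}
// --------------------------------------------------------------------------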
+ class FarBranchInfo { + public: + FarBranchInfo(int offset, Label* label) + : pc_offset_(offset), label_(label) {} + // Offset of the branch in the code generation buffer. + int pc_offset_; + // The label branched to. + Label* label_; + }; + static constexpr int kStaticTasksSize = 16; // Arbitrary. base::SmallVector tasks; { auto it = unresolved_branches_.begin(); while (it != unresolved_branches_.end()) { - const int max_reachable_pc = it->first; + const int max_reachable_pc = it->first & ~1; if (!force_emit && max_reachable_pc > max_pc_after_veneers) break; // Found a task. We'll emit a veneer for this. - tasks.emplace_back(it->second); + + // Calculate the branch location from the maximum reachable PC. Only + // B.cond, CB[N]Z and TB[N]Z are veneered, and the first two branch types + // have the same range. The LSB (branch type tag bit) is set for TB[N]Z, + // clear otherwise. + int pc_offset = it->first; + if (pc_offset & 1) { + pc_offset -= (Instruction::ImmBranchRange(TestBranchType) + 1); + } else { + static_assert(Instruction::ImmBranchRange(CondBranchType) == + Instruction::ImmBranchRange(CompareBranchType)); + pc_offset -= Instruction::ImmBranchRange(CondBranchType); + } + tasks.emplace_back(FarBranchInfo{pc_offset, it->second}); auto eraser_it = it++; unresolved_branches_.erase(eraser_it); } diff --git a/v8/src/codegen/arm64/assembler-arm64.h b/v8/src/codegen/arm64/assembler-arm64.h index 589300ec6..5ec988978 100644 --- a/v8/src/codegen/arm64/assembler-arm64.h +++ b/v8/src/codegen/arm64/assembler-arm64.h @@ -2973,8 +2973,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { return op << NEONModImmOp_offset; } - static bool IsImmLSUnscaled(int64_t offset); - static bool IsImmLSScaled(int64_t offset, unsigned size); + static constexpr bool IsImmLSUnscaled(int64_t offset) { + return is_int9(offset); + } + static constexpr bool IsImmLSScaled(int64_t offset, unsigned size) { + bool offset_is_size_multiple = + (static_cast(static_cast(offset >> size) << size) == + offset); + return offset_is_size_multiple && is_uint12(offset >> size); + } static bool IsImmLLiteral(int64_t offset); // Move immediates encoding. @@ -3070,6 +3077,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const; void LoadStore(const CPURegister& rt, const MemOperand& addr, LoadStoreOp op); + inline void LoadStoreScaledImmOffset(Instr memop, int offset, unsigned size); + inline void LoadStoreUnscaledImmOffset(Instr memop, int offset); + inline void LoadStoreWRegOffset(Instr memop, const Register& regoffset); void LoadStorePair(const CPURegister& rt, const CPURegister& rt2, const MemOperand& addr, LoadStorePairOp op); void LoadStoreStruct(const VRegister& vt, const MemOperand& addr, @@ -3306,30 +3316,28 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { } #endif - class FarBranchInfo { - public: - FarBranchInfo(int offset, Label* label) - : pc_offset_(offset), label_(label) {} - // Offset of the branch in the code generation buffer. - int pc_offset_; - // The label branched to. - Label* label_; - }; - protected: // Information about unresolved (forward) branches. // The Assembler is only allowed to delete out-of-date information from here // after a label is bound. The MacroAssembler uses this information to // generate veneers. // - // The second member gives information about the unresolved branch. The first - // member of the pair is the maximum offset that the branch can reach in the - // buffer. 
The map is sorted according to this reachable offset, allowing to - // easily check when veneers need to be emitted. + // The first member of the pair (max_pc) is the maximum offset that the branch + // can reach in the buffer, with the bottom bit set to indicate a + // test-and-branch instruction. This bit is used to help in calculating the + // address of the branch, ie. + // + // branch_addr = { max_pc - 2^21, if max_pc<0> == 0 (B.cond, CB[N]Z) + // { max_pc - 2^16 - 1, if max_pc<0> == 1 (TB[N]Z) + // + // The second member is a pointer to the Label targetted by the branch. + // + // The map is sorted according to the reachable offset, max_pc, allowing to + // check easily when veneers need to be emitted. // Note that the maximum reachable offset (first member of the pairs) should // always be positive but has the same type as the return value for // pc_offset() for convenience. - std::multimap unresolved_branches_; + std::map unresolved_branches_; // We generate a veneer for a branch if we reach within this distance of the // limit of the range. @@ -3342,8 +3350,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { kVeneerNoProtectionFactor * kVeneerDistanceMargin; int unresolved_branches_first_limit() const { DCHECK(!unresolved_branches_.empty()); - return unresolved_branches_.begin()->first; + + // Mask branch type tag bit. + return unresolved_branches_.begin()->first & ~1; } + // This PC-offset of the next veneer pool check helps reduce the overhead // of checking for veneer pools. // It is maintained to the closest unresolved branch limit minus the maximum diff --git a/v8/src/codegen/arm64/constants-arm64.h b/v8/src/codegen/arm64/constants-arm64.h index 1866842f1..da9621467 100644 --- a/v8/src/codegen/arm64/constants-arm64.h +++ b/v8/src/codegen/arm64/constants-arm64.h @@ -300,7 +300,7 @@ SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING) constexpr int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask; // Condition codes. 
-enum Condition : uint8_t { +enum Condition : int { eq = 0, // Equal ne = 1, // Not equal hs = 2, // Unsigned higher or same (or carry set) diff --git a/v8/src/codegen/arm64/instructions-arm64.cc b/v8/src/codegen/arm64/instructions-arm64.cc index 3369333f2..1a19d0f71 100644 --- a/v8/src/codegen/arm64/instructions-arm64.cc +++ b/v8/src/codegen/arm64/instructions-arm64.cc @@ -210,12 +210,6 @@ Instruction* Instruction::ImmPCOffsetTarget() { return InstructionAtOffset(ImmPCOffset()); } -bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type, - ptrdiff_t offset) { - DCHECK_EQ(offset % kInstrSize, 0); - return is_intn(offset / kInstrSize, ImmBranchRangeBitwidth(branch_type)); -} - bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) { return IsValidImmPCOffset(BranchType(), DistanceTo(target)); } @@ -224,8 +218,14 @@ void Instruction::SetImmPCOffsetTarget(const AssemblerOptions& options, Instruction* target) { if (IsPCRelAddressing()) { SetPCRelImmTarget(options, target); - } else if (BranchType() != UnknownBranchType) { - SetBranchImmTarget(target); + } else if (IsCondBranchImm()) { + SetBranchImmTarget(target); + } else if (IsUncondBranchImm()) { + SetBranchImmTarget(target); + } else if (IsCompareBranch()) { + SetBranchImmTarget(target); + } else if (IsTestBranch()) { + SetBranchImmTarget(target); } else if (IsUnresolvedInternalReference()) { SetUnresolvedInternalReferenceImmTarget(options, target); } else { @@ -251,39 +251,6 @@ void Instruction::SetPCRelImmTarget(const AssemblerOptions& options, } } -void Instruction::SetBranchImmTarget(Instruction* target) { - DCHECK(IsAligned(DistanceTo(target), kInstrSize)); - DCHECK(IsValidImmPCOffset(BranchType(), DistanceTo(target))); - int offset = static_cast(DistanceTo(target) >> kInstrSizeLog2); - Instr branch_imm = 0; - uint32_t imm_mask = 0; - switch (BranchType()) { - case CondBranchType: { - branch_imm = Assembler::ImmCondBranch(offset); - imm_mask = ImmCondBranch_mask; - break; - } - case UncondBranchType: { - branch_imm = Assembler::ImmUncondBranch(offset); - imm_mask = ImmUncondBranch_mask; - break; - } - case CompareBranchType: { - branch_imm = Assembler::ImmCmpBranch(offset); - imm_mask = ImmCmpBranch_mask; - break; - } - case TestBranchType: { - branch_imm = Assembler::ImmTestBranch(offset); - imm_mask = ImmTestBranch_mask; - break; - } - default: - UNREACHABLE(); - } - SetInstructionBits(Mask(~imm_mask) | branch_imm); -} - void Instruction::SetUnresolvedInternalReferenceImmTarget( const AssemblerOptions& options, Instruction* target) { DCHECK(IsUnresolvedInternalReference()); diff --git a/v8/src/codegen/arm64/instructions-arm64.h b/v8/src/codegen/arm64/instructions-arm64.h index 87a245584..b1997d8af 100644 --- a/v8/src/codegen/arm64/instructions-arm64.h +++ b/v8/src/codegen/arm64/instructions-arm64.h @@ -308,7 +308,7 @@ class Instruction { } } - static int ImmBranchRangeBitwidth(ImmBranchType branch_type) { + static constexpr int ImmBranchRangeBitwidth(ImmBranchType branch_type) { switch (branch_type) { case UncondBranchType: return ImmUncondBranch_width; @@ -324,7 +324,7 @@ class Instruction { } // The range of the branch instruction, expressed as 'instr +- range'. - static int32_t ImmBranchRange(ImmBranchType branch_type) { + static constexpr int32_t ImmBranchRange(ImmBranchType branch_type) { return (1 << (ImmBranchRangeBitwidth(branch_type) + kInstrSizeLog2)) / 2 - kInstrSize; } @@ -416,7 +416,12 @@ class Instruction { // Check if the offset is in range of a given branch type. 
The offset is // a byte offset, unscaled. - static bool IsValidImmPCOffset(ImmBranchType branch_type, ptrdiff_t offset); + static constexpr bool IsValidImmPCOffset(ImmBranchType branch_type, + ptrdiff_t offset) { + DCHECK_EQ(offset % kInstrSize, 0); + return is_intn(offset / kInstrSize, ImmBranchRangeBitwidth(branch_type)); + } + bool IsTargetInImmPCOffsetRange(Instruction* target); // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or // a PC-relative addressing instruction. @@ -460,7 +465,35 @@ class Instruction { static const int ImmPCRelRangeBitwidth = 21; static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); } void SetPCRelImmTarget(const AssemblerOptions& options, Instruction* target); - V8_EXPORT_PRIVATE void SetBranchImmTarget(Instruction* target); + + template + void SetBranchImmTarget(Instruction* target) { + DCHECK(IsAligned(DistanceTo(target), kInstrSize)); + DCHECK(IsValidImmPCOffset(branch_type, DistanceTo(target))); + int offset = static_cast(DistanceTo(target) >> kInstrSizeLog2); + Instr branch_imm = 0; + uint32_t imm_mask = 0; + switch (branch_type) { + case CondBranchType: + case CompareBranchType: + static_assert(ImmCondBranch_mask == ImmCmpBranch_mask); + static_assert(ImmCondBranch_offset == ImmCmpBranch_offset); + branch_imm = truncate_to_int19(offset) << ImmCondBranch_offset; + imm_mask = ImmCondBranch_mask; + break; + case UncondBranchType: + branch_imm = truncate_to_int26(offset) << ImmUncondBranch_offset; + imm_mask = ImmUncondBranch_mask; + break; + case TestBranchType: + branch_imm = truncate_to_int14(offset) << ImmTestBranch_offset; + imm_mask = ImmTestBranch_mask; + break; + default: + UNREACHABLE(); + } + SetInstructionBits(Mask(~imm_mask) | branch_imm); + } }; // Simulator/Debugger debug instructions --------------------------------------- diff --git a/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h b/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h index 78027177d..dd2cf186a 100644 --- a/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h +++ b/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h @@ -20,6 +20,11 @@ constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() { return registers; } +constexpr auto CallInterfaceDescriptor::DefaultDoubleRegisterArray() { + auto registers = DoubleRegisterArray(d0, d1, d2, d3, d4, d5, d6); + return registers; +} + #if DEBUG template void StaticCallInterfaceDescriptor:: @@ -276,11 +281,36 @@ constexpr auto BinarySmiOp_BaselineDescriptor::registers() { } // static -constexpr auto ApiCallbackDescriptor::registers() { - return RegisterArray(x1, // kApiFunctionAddress - x2, // kArgc - x3, // kCallData - x0); // kHolder +constexpr Register +CallApiCallbackOptimizedDescriptor::ApiFunctionAddressRegister() { + return x1; +} +// static +constexpr Register +CallApiCallbackOptimizedDescriptor::ActualArgumentsCountRegister() { + return x2; +} +// static +constexpr Register CallApiCallbackOptimizedDescriptor::CallDataRegister() { + return x3; +} +// static +constexpr Register CallApiCallbackOptimizedDescriptor::HolderRegister() { + return x0; +} + +// static +constexpr Register +CallApiCallbackGenericDescriptor::ActualArgumentsCountRegister() { + return x2; +} +// static +constexpr Register CallApiCallbackGenericDescriptor::CallHandlerInfoRegister() { + return x3; +} +// static +constexpr Register CallApiCallbackGenericDescriptor::HolderRegister() { + return x0; } // static @@ -318,6 +348,11 @@ constexpr auto RunMicrotasksEntryDescriptor::registers() { return 
RegisterArray(x0, x1); } +constexpr auto WasmNewJSToWasmWrapperDescriptor::registers() { + // Arbitrarily picked register. + return RegisterArray(x8); +} + } // namespace internal } // namespace v8 diff --git a/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/v8/src/codegen/arm64/macro-assembler-arm64-inl.h index 9694fa77e..f42648e21 100644 --- a/v8/src/codegen/arm64/macro-assembler-arm64-inl.h +++ b/v8/src/codegen/arm64/macro-assembler-arm64-inl.h @@ -1483,10 +1483,6 @@ void MacroAssembler::TestAndBranchIfAllClear(const Register& reg, } } -void MacroAssembler::MoveHeapNumber(Register dst, double value) { - Mov(dst, Operand::EmbeddedHeapNumber(value)); -} - } // namespace internal } // namespace v8 diff --git a/v8/src/codegen/arm64/macro-assembler-arm64.cc b/v8/src/codegen/arm64/macro-assembler-arm64.cc index 000b22ac6..2761c1030 100644 --- a/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -217,9 +217,6 @@ void MacroAssembler::LogicalMacro(const Register& rd, const Register& rn, // Ignore the top 32 bits of an immediate if we're moving to a W register. if (rd.Is32Bits()) { - // Check that the top 32 bits are consistent. - DCHECK(((immediate >> kWRegSizeInBits) == 0) || - ((immediate >> kWRegSizeInBits) == -1)); immediate &= kWRegMask; } @@ -904,31 +901,56 @@ void MacroAssembler::AddSubWithCarryMacro(const Register& rd, void MacroAssembler::LoadStoreMacro(const CPURegister& rt, const MemOperand& addr, LoadStoreOp op) { + // Call the most common addressing modes used by Liftoff directly for improved + // compilation performance: X register + immediate, X register + W register. + Instr memop = op | Rt(rt) | RnSP(addr.base()); + if (addr.IsImmediateOffset()) { + int64_t offset = addr.offset(); + unsigned size = CalcLSDataSize(op); + if (IsImmLSScaled(offset, size)) { + LoadStoreScaledImmOffset(memop, static_cast(offset), size); + return; + } else if (IsImmLSUnscaled(offset)) { + LoadStoreUnscaledImmOffset(memop, static_cast(offset)); + return; + } + } else if (addr.IsRegisterOffset() && (addr.extend() == UXTW) && + (addr.shift_amount() == 0)) { + LoadStoreWRegOffset(memop, addr.regoffset()); + return; + } + + // Remaining complex cases handled in sub-function. + LoadStoreMacroComplex(rt, addr, op); +} + +void MacroAssembler::LoadStoreMacroComplex(const CPURegister& rt, + const MemOperand& addr, + LoadStoreOp op) { int64_t offset = addr.offset(); - unsigned size = CalcLSDataSize(op); - - // Check if an immediate offset fits in the immediate field of the - // appropriate instruction. If not, emit two instructions to perform - // the operation. - if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) && - !IsImmLSUnscaled(offset)) { - // Immediate offset that can't be encoded using unsigned or unscaled - // addressing modes. + bool is_imm_unscaled = IsImmLSUnscaled(offset); + if (addr.IsRegisterOffset() || + (is_imm_unscaled && (addr.IsPostIndex() || addr.IsPreIndex()))) { + // Load/store encodable in one instruction. + LoadStore(rt, addr, op); + } else if (addr.IsImmediateOffset()) { + // Load/stores with immediate offset addressing should have been handled by + // the caller. 
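// --------------------------------------------------------------------------
// A simplified standalone model of the fast path added to LoadStoreMacro
// above: scaled unsigned immediates, small unscaled immediates and zero-shift
// UXTW register offsets are emitted directly, everything else falls back to
// LoadStoreMacroComplex. The bounds mirror IsImmLSScaled (uint12 after
// scaling by the access size) and IsImmLSUnscaled (int9).
#include <cstdint>

enum class AddressingMode { kScaledImm, kUnscaledImm, kRegisterUXTW, kComplex };

bool FitsUnscaled(int64_t offset) { return offset >= -256 && offset <= 255; }
bool FitsScaled(int64_t offset, unsigned size_log2) {
  if (offset < 0 || (offset & ((int64_t{1} << size_log2) - 1)) != 0) return false;
  return (offset >> size_log2) < (int64_t{1} << 12);
}

AddressingMode Classify(bool is_immediate_offset, int64_t offset,
                        unsigned size_log2, bool is_reg_uxtw_no_shift) {
  if (is_immediate_offset) {
    if (FitsScaled(offset, size_log2)) return AddressingMode::kScaledImm;
    if (FitsUnscaled(offset)) return AddressingMode::kUnscaledImm;
  } else if (is_reg_uxtw_no_shift) {
    return AddressingMode::kRegisterUXTW;
  }
  return AddressingMode::kComplex;   // pre/post-index, large offsets, shifts, ...
}
// --------------------------------------------------------------------------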
+ DCHECK(!IsImmLSScaled(offset, CalcLSDataSize(op)) && !is_imm_unscaled); UseScratchRegisterScope temps(this); Register temp = temps.AcquireSameSizeAs(addr.base()); - Mov(temp, addr.offset()); + Mov(temp, offset); LoadStore(rt, MemOperand(addr.base(), temp), op); - } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) { + } else if (addr.IsPostIndex()) { // Post-index beyond unscaled addressing range. + DCHECK(!is_imm_unscaled); LoadStore(rt, MemOperand(addr.base()), op); add(addr.base(), addr.base(), offset); - } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) { + } else { // Pre-index beyond unscaled addressing range. + DCHECK(!is_imm_unscaled && addr.IsPreIndex()); add(addr.base(), addr.base(), offset); LoadStore(rt, MemOperand(addr.base()), op); - } else { - // Encodable in one load/store instruction. - LoadStore(rt, addr, op); } } @@ -971,29 +993,6 @@ void MacroAssembler::LoadStorePairMacro(const CPURegister& rt, } } -bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch( - Label* label, ImmBranchType b_type) { - bool need_longer_range = false; - // There are two situations in which we care about the offset being out of - // range: - // - The label is bound but too far away. - // - The label is not bound but linked, and the previous branch - // instruction in the chain is too far away. - if (label->is_bound() || label->is_linked()) { - need_longer_range = - !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset()); - } - if (!need_longer_range && !label->is_bound()) { - int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type); - unresolved_branches_.insert(std::pair( - max_reachable_pc, FarBranchInfo(pc_offset(), label))); - // Also maintain the next pool check. - next_veneer_pool_check_ = std::min( - next_veneer_pool_check_, max_reachable_pc - kVeneerDistanceCheckMargin); - } - return need_longer_range; -} - void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -1064,7 +1063,7 @@ void MacroAssembler::B(Label* label, Condition cond) { Label done; bool need_extra_instructions = - NeedExtraInstructionsOrRegisterBranch(label, CondBranchType); + NeedExtraInstructionsOrRegisterBranch(label); if (need_extra_instructions) { b(&done, NegateCondition(cond)); @@ -1080,7 +1079,7 @@ void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { Label done; bool need_extra_instructions = - NeedExtraInstructionsOrRegisterBranch(label, TestBranchType); + NeedExtraInstructionsOrRegisterBranch(label); if (need_extra_instructions) { tbz(rt, bit_pos, &done); @@ -1096,7 +1095,7 @@ void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { Label done; bool need_extra_instructions = - NeedExtraInstructionsOrRegisterBranch(label, TestBranchType); + NeedExtraInstructionsOrRegisterBranch(label); if (need_extra_instructions) { tbnz(rt, bit_pos, &done); @@ -1112,7 +1111,7 @@ void MacroAssembler::Cbnz(const Register& rt, Label* label) { Label done; bool need_extra_instructions = - NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType); + NeedExtraInstructionsOrRegisterBranch(label); if (need_extra_instructions) { cbz(rt, &done); @@ -1128,7 +1127,7 @@ void MacroAssembler::Cbz(const Register& rt, Label* label) { Label done; bool need_extra_instructions = - NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType); + NeedExtraInstructionsOrRegisterBranch(label); if (need_extra_instructions) { cbnz(rt, &done); @@ -2416,7 +2415,8 @@ void 
MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { void MacroAssembler::LoadCodeInstructionStart(Register destination, Register code_object) { ASM_CODE_COMMENT(this); - Ldr(destination, FieldMemOperand(code_object, Code::kInstructionStartOffset)); + LoadCodePointerField( + destination, FieldMemOperand(code_object, Code::kInstructionStartOffset)); } void MacroAssembler::CallCodeObject(Register code_object) { @@ -2737,6 +2737,12 @@ void MacroAssembler::JumpIfCodeIsMarkedForDeoptimization( if_marked_for_deoptimization); } +void MacroAssembler::JumpIfCodeIsTurbofanned(Register code, Register scratch, + Label* if_turbofanned) { + Ldr(scratch.W(), FieldMemOperand(code, Code::kFlagsOffset)); + Tbnz(scratch.W(), Code::kIsTurbofannedBit, if_turbofanned); +} + Operand MacroAssembler::ClearedValue() const { return Operand( static_cast(HeapObjectReference::ClearedValue(isolate()).ptr())); @@ -2924,7 +2930,8 @@ void MacroAssembler::EnterExitFrame(const Register& scratch, int extra_space, StackFrame::Type frame_type) { ASM_CODE_COMMENT(this); DCHECK(frame_type == StackFrame::EXIT || - frame_type == StackFrame::BUILTIN_EXIT); + frame_type == StackFrame::BUILTIN_EXIT || + frame_type == StackFrame::API_CALLBACK_EXIT); // Set up the new stack frame. Push(lr, fp); @@ -3422,23 +3429,36 @@ void MacroAssembler::LoadExternalPointerField(Register destination, DCHECK(root_array_available_); isolate_root = kRootRegister; } - Ldr(external_table, - MemOperand(isolate_root, - IsolateData::external_pointer_table_offset() + - Internals::kExternalPointerTableBufferOffset)); - Ldr(destination.W(), field_operand); - // MemOperand doesn't support LSR currently (only LSL), so here we do the - // offset computation separately first. - static_assert(kExternalPointerIndexShift > kSystemPointerSizeLog2); - int shift_amount = kExternalPointerIndexShift - kSystemPointerSizeLog2; - Mov(destination, Operand(destination, LSR, shift_amount)); - Ldr(destination, MemOperand(external_table, destination)); - And(destination, destination, Immediate(~tag)); + Ldr(external_table, + MemOperand(isolate_root, + IsolateData::external_pointer_table_offset() + + Internals::kExternalPointerTableBufferOffset)); + Ldr(destination.W(), field_operand); + Mov(destination, Operand(destination, LSR, kExternalPointerIndexShift)); + Ldr(destination, MemOperand(external_table, destination, LSL, + kExternalPointerTableEntrySizeLog2)); + And(destination, destination, Immediate(~tag)); #else Ldr(destination, field_operand); #endif // V8_ENABLE_SANDBOX } +void MacroAssembler::LoadCodePointerField(Register destination, + MemOperand field_operand) { + ASM_CODE_COMMENT(this); +#ifdef V8_CODE_POINTER_SANDBOXING + UseScratchRegisterScope temps(this); + Register table = temps.AcquireX(); + Mov(table, ExternalReference::code_pointer_table_address()); + Ldr(destination.W(), field_operand); + Mov(destination, Operand(destination, LSR, kCodePointerIndexShift)); + Ldr(destination, + MemOperand(table, destination, LSL, kCodePointerTableEntrySizeLog2)); +#else + Ldr(destination, field_operand); +#endif // V8_CODE_POINTER_SANDBOXING +} + void MacroAssembler::MaybeSaveRegisters(RegList registers) { if (registers.is_empty()) return; ASM_CODE_COMMENT(this); @@ -3677,6 +3697,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { } void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, + CodeKind min_opt_level, Register feedback_vector, FeedbackSlot slot, Label* on_result, @@ -3691,9 +3712,14 @@ void 
MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, // Is it marked_for_deoptimization? If yes, clear the slot. { UseScratchRegisterScope temps(this); - JumpIfCodeIsMarkedForDeoptimization(scratch_and_result, temps.AcquireX(), - &clear_slot); - B(on_result); + Register temp = temps.AcquireX(); + JumpIfCodeIsMarkedForDeoptimization(scratch_and_result, temp, &clear_slot); + if (min_opt_level == CodeKind::TURBOFAN) { + JumpIfCodeIsTurbofanned(scratch_and_result, temp, on_result); + B(&fallthrough); + } else { + B(on_result); + } } bind(&clear_slot); diff --git a/v8/src/codegen/arm64/macro-assembler-arm64.h b/v8/src/codegen/arm64/macro-assembler-arm64.h index c74c562ba..afaba226e 100644 --- a/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -921,8 +921,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode = StubCallMode::kCallBuiltinPointer); - inline void MoveHeapNumber(Register dst, double value); - // For a given |object| and |offset|: // - Move |object| to |dst_object|. // - Compute the address of the slot pointed to by |offset| in |object| and @@ -1560,12 +1558,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void StoreSandboxedPointerField(const Register& value, const MemOperand& dst_field_operand); - // Loads a field containing off-heap pointer and does necessary decoding - // if sandboxed external pointers are enabled. + // Loads a field containing an off-heap ("external") pointer and does + // necessary decoding if the sandbox is enabled. void LoadExternalPointerField(Register destination, MemOperand field_operand, ExternalPointerTag tag, Register isolate_root = Register::no_reg()); + // Loads a field containing a code pointer and does the necessary decoding if + // the sandbox is enabled. + void LoadCodePointerField(Register destination, MemOperand field_operand); + // Instruction set functions ------------------------------------------------ // Logical macros. inline void Bics(const Register& rd, const Register& rn, @@ -1897,6 +1899,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void JumpIfCodeIsMarkedForDeoptimization(Register code, Register scratch, Label* if_marked_for_deoptimization); + void JumpIfCodeIsTurbofanned(Register code, Register scratch, + Label* if_marked_for_deoptimization); Operand ClearedValue() const; Operand ReceiverOperand(const Register arg_count); @@ -2148,8 +2152,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // Falls through and sets scratch_and_result to 0 on failure, jumps to // on_result on success. void TryLoadOptimizedOsrCode(Register scratch_and_result, - Register feedback_vector, FeedbackSlot slot, - Label* on_result, Label::Distance distance); + CodeKind min_opt_level, Register feedback_vector, + FeedbackSlot slot, Label* on_result, + Label::Distance distance); protected: // The actual Push and Pop implementations. These don't generate any code @@ -2203,8 +2208,40 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { // for generating appropriate code. // Otherwise it returns false. // This function also checks wether veneers need to be emitted. 
- bool NeedExtraInstructionsOrRegisterBranch(Label* label, - ImmBranchType branch_type); + template + bool NeedExtraInstructionsOrRegisterBranch(Label* label) { + static_assert((branch_type == CondBranchType) || + (branch_type == CompareBranchType) || + (branch_type == TestBranchType)); + + bool need_longer_range = false; + // There are two situations in which we care about the offset being out of + // range: + // - The label is bound but too far away. + // - The label is not bound but linked, and the previous branch + // instruction in the chain is too far away. + if (label->is_bound() || label->is_linked()) { + need_longer_range = !Instruction::IsValidImmPCOffset( + branch_type, label->pos() - pc_offset()); + } + if (!need_longer_range && !label->is_bound()) { + int max_reachable_pc = + pc_offset() + Instruction::ImmBranchRange(branch_type); + + // Use the LSB of the max_reachable_pc (always four-byte aligned) to + // encode the branch type. We need only distinguish between TB[N]Z and + // CB[N]Z/conditional branch, as the ranges for the latter are the same. + int branch_type_tag = (branch_type == TestBranchType) ? 1 : 0; + + unresolved_branches_.insert( + std::pair(max_reachable_pc + branch_type_tag, label)); + // Also maintain the next pool check. + next_veneer_pool_check_ = + std::min(next_veneer_pool_check_, + max_reachable_pc - kVeneerDistanceCheckMargin); + } + return need_longer_range; + } void Movi16bitHelper(const VRegister& vd, uint64_t imm); void Movi32bitHelper(const VRegister& vd, uint64_t imm); @@ -2212,6 +2249,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr, LoadStoreOp op); + void LoadStoreMacroComplex(const CPURegister& rt, const MemOperand& addr, + LoadStoreOp op); void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2, const MemOperand& addr, LoadStorePairOp op); diff --git a/v8/src/codegen/arm64/utils-arm64.cc b/v8/src/codegen/arm64/utils-arm64.cc index cdc408db1..1f73cded4 100644 --- a/v8/src/codegen/arm64/utils-arm64.cc +++ b/v8/src/codegen/arm64/utils-arm64.cc @@ -73,14 +73,6 @@ int float16classify(float16 value) { return FP_NORMAL; } -int CountLeadingZeros(uint64_t value, int width) { - DCHECK(base::bits::IsPowerOfTwo(width) && (width <= 64)); - if (value == 0) { - return width; - } - return base::bits::CountLeadingZeros64(value << (64 - width)); -} - int CountLeadingSignBits(int64_t value, int width) { DCHECK(base::bits::IsPowerOfTwo(width) && (width <= 64)); if (value >= 0) { @@ -109,12 +101,6 @@ int HighestSetBitPosition(uint64_t value) { return 63 - CountLeadingZeros(value, 64); } -uint64_t LargestPowerOf2Divisor(uint64_t value) { - // Simulate two's complement (instead of casting to signed and negating) to - // avoid undefined behavior on signed overflow. - return value & ((~value) + 1); -} - int MaskToBit(uint64_t mask) { DCHECK_EQ(CountSetBits(mask, 64), 1); return base::bits::CountTrailingZeros(mask); diff --git a/v8/src/codegen/arm64/utils-arm64.h b/v8/src/codegen/arm64/utils-arm64.h index 266cd525c..f6bbc09b9 100644 --- a/v8/src/codegen/arm64/utils-arm64.h +++ b/v8/src/codegen/arm64/utils-arm64.h @@ -31,12 +31,22 @@ double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa); int float16classify(float16 value); // Bit counting. 
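As the comment in the templated NeedExtraInstructionsOrRegisterBranch notes, unresolved_branches_ is now keyed by the last PC the pending branch can still reach, with the branch kind packed into bit 0 of the key: AArch64 instruction PCs are four-byte aligned, so the low bit is free, and only TB(N)Z needs to be distinguished because conditional and compare-and-branch share the same 19-bit range. A short sketch of that encoding and of the byte ranges implied by the immediate widths (14, 19 and 26 bits); the helpers are illustrative:

#include <cstdint>

constexpr int kInstrSize = 4;

// Reachable byte range for a signed immediate of `bits` bits counted in
// instructions: 14 -> 32 KB (TB(N)Z), 19 -> 1 MB (B.cond / CB(N)Z),
// 26 -> 128 MB (B / BL).
constexpr int RangeForBits(int bits) {
  return (1 << (bits - 1)) * kInstrSize;
}

// Keys are instruction-aligned PCs, so bit 0 can carry the branch kind.
int EncodeKey(int max_reachable_pc, bool is_test_branch) {
  return max_reachable_pc + (is_test_branch ? 1 : 0);
}
bool IsTestBranch(int key) { return (key & 1) != 0; }
int MaxReachablePc(int key) { return key & ~1; }

This lets the veneer-pool code recover both pieces of information from a single int key instead of the FarBranchInfo record the deleted out-of-line version stored alongside it.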
-int CountLeadingZeros(uint64_t value, int width); +inline static int CountLeadingZeros(uint64_t value, int width) { + DCHECK(base::bits::IsPowerOfTwo(width) && (width <= 64)); + if (value == 0) { + return width; + } + return base::bits::CountLeadingZeros64(value << (64 - width)); +} int CountLeadingSignBits(int64_t value, int width); V8_EXPORT_PRIVATE int CountSetBits(uint64_t value, int width); int LowestSetBitPosition(uint64_t value); int HighestSetBitPosition(uint64_t value); -uint64_t LargestPowerOf2Divisor(uint64_t value); +inline static uint64_t LargestPowerOf2Divisor(uint64_t value) { + // Simulate two's complement (instead of casting to signed and negating) to + // avoid undefined behavior on signed overflow. + return value & ((~value) + 1); +} int MaskToBit(uint64_t mask); template diff --git a/v8/src/codegen/bailout-reason.h b/v8/src/codegen/bailout-reason.h index b7aa93642..842eac938 100644 --- a/v8/src/codegen/bailout-reason.h +++ b/v8/src/codegen/bailout-reason.h @@ -67,6 +67,7 @@ namespace internal { V(kShouldNotDirectlyEnterOsrFunction, \ "Should not directly enter OSR-compiled function") \ V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \ + V(kOsrUnexpectedStackSize, "Unexpected stack size on OSR entry") \ V(kStackFrameTypesMustMatch, "Stack frame types must match") \ V(kUint32IsNotAInt32, \ "Uint32 cannot be converted to Int32 without loss of precision") \ diff --git a/v8/src/codegen/code-factory.cc b/v8/src/codegen/code-factory.cc index e3f3fe79e..d1603fe03 100644 --- a/v8/src/codegen/code-factory.cc +++ b/v8/src/codegen/code-factory.cc @@ -48,11 +48,6 @@ Callable CodeFactory::ApiGetter(Isolate* isolate) { return Builtins::CallableFor(isolate, Builtin::kCallApiGetter); } -// static -Callable CodeFactory::CallApiCallback(Isolate* isolate) { - return Builtins::CallableFor(isolate, Builtin::kCallApiCallback); -} - // static Callable CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) { return typeof_mode == TypeofMode::kNotInside diff --git a/v8/src/codegen/code-stub-assembler.cc b/v8/src/codegen/code-stub-assembler.cc index d4cc542c1..d6f303c72 100644 --- a/v8/src/codegen/code-stub-assembler.cc +++ b/v8/src/codegen/code-stub-assembler.cc @@ -262,13 +262,13 @@ TNode CodeStubAssembler::NoContextConstant() { HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) #undef HEAP_CONSTANT_ACCESSOR -#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ - TNode().rootAccessorName())>::type>::type> \ - CodeStubAssembler::name##Constant() { \ - return UncheckedCast().rootAccessorName())>::type>::type>( \ - LoadRoot(RootIndex::k##rootIndexName)); \ +#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ + TNode().rootAccessorName())>::type> \ + CodeStubAssembler::name##Constant() { \ + return UncheckedCast().rootAccessorName())>::type>( \ + LoadRoot(RootIndex::k##rootIndexName)); \ } HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) #undef HEAP_CONSTANT_ACCESSOR @@ -1750,8 +1750,6 @@ TNode CodeStubAssembler::LoadExternalPointerFromObject( // constant value (Uint32Constant uses cached nodes). TNode index = Word32Shr(handle, UniqueUint32Constant(kExternalPointerIndexShift)); - // TODO(v8:10391): consider updating ElementOffsetFromIndex to generate code - // that does one shift right instead of two shifts (right and then left). 
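The sandboxed loads above (LoadExternalPointerField in the arm64 macro-assembler and LoadExternalPointerFromObject here) share one shape: the object field holds a 32-bit handle rather than a raw pointer, the handle's upper bits select an entry in an out-of-line pointer table, and the loaded entry is masked with ~tag to strip the type tag. The new code-pointer path is the same minus the tag mask. A standalone model of that decode, with the shift and tag values chosen for illustration only:

#include <cstdint>
#include <vector>

// Hypothetical pointer table; real V8 uses dedicated reservations, but the
// index arithmetic is the same.
struct PointerTable {
  static constexpr int kIndexShift = 6;  // illustrative
  std::vector<uint64_t> entries;

  uint64_t Decode(uint32_t handle, uint64_t tag) const {
    uint32_t index = handle >> kIndexShift;  // handle -> table index
    uint64_t entry = entries[index];         // one dependent load
    return entry & ~tag;                     // strip the type-tag bits
  }
};

In the arm64 version just above, the scaling of the index by the entry size is folded into the load's addressing mode (LSL by kExternalPointerTableEntrySizeLog2) rather than emitted as a separate shift.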
TNode table_offset = ElementOffsetFromIndex( ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0); @@ -1774,7 +1772,6 @@ void CodeStubAssembler::StoreExternalPointerToObject(TNode object, TNode table = UncheckedCast( Load(MachineType::Pointer(), external_pointer_table_address, UintPtrConstant(Internals::kExternalPointerTableBufferOffset))); - TNode handle = LoadObjectField(object, offset); @@ -1783,8 +1780,6 @@ void CodeStubAssembler::StoreExternalPointerToObject(TNode object, // constant value (Uint32Constant uses cached nodes). TNode index = Word32Shr(handle, UniqueUint32Constant(kExternalPointerIndexShift)); - // TODO(v8:10391): consider updating ElementOffsetFromIndex to generate code - // that does one shift right instead of two shifts (right and then left). TNode table_offset = ElementOffsetFromIndex( ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0); @@ -1797,6 +1792,30 @@ void CodeStubAssembler::StoreExternalPointerToObject(TNode object, #endif // V8_ENABLE_SANDBOX } +TNode CodeStubAssembler::LoadCodePointerFromObject( + TNode object, TNode field_offset) { +#ifdef V8_CODE_POINTER_SANDBOXING + TNode table = + ExternalConstant(ExternalReference::code_pointer_table_address()); + TNode handle = + LoadObjectField(object, field_offset); + // Use UniqueUint32Constant instead of Uint32Constant here in order to ensure + // that the graph structure does not depend on the configuration-specific + // constant value (Uint32Constant uses cached nodes). + TNode index = + Word32Shr(handle, UniqueUint32Constant(kCodePointerIndexShift)); + // We're using a 32-bit shift here to reduce code size, but for that we need + // to be sure that the offset will always fit into a 32-bit integer. + static_assert(kCodePointerTableReservationSize <= 4ULL * GB); + TNode offset = ChangeUint32ToWord( + Word32Shl(index, UniqueUint32Constant(kCodePointerTableEntrySizeLog2))); + + return Load(table, offset); +#else + return LoadObjectField(object, field_offset); +#endif // V8_CODE_POINTER_SANDBOXING +} + TNode CodeStubAssembler::LoadFromParentFrame(int offset) { TNode frame_pointer = LoadParentFramePointer(); return LoadFullTagged(frame_pointer, IntPtrConstant(offset)); @@ -5751,7 +5770,7 @@ TNode CodeStubAssembler::TruncateTaggedToWord32(TNode context, TVARIABLE(Word32T, var_result); Label done(this); TaggedToWord32OrBigIntImpl( - context, value, &done, &var_result, IsKnownTaggedPointer::kNo); + context, value, &done, &var_result, IsKnownTaggedPointer::kNo, {}); BIND(&done); return var_result.value(); } @@ -5763,7 +5782,7 @@ void CodeStubAssembler::TaggedToWord32OrBigInt( TVariable* var_word32, Label* if_bigint, Label* if_bigint64, TVariable* var_maybe_bigint) { TaggedToWord32OrBigIntImpl( - context, value, if_number, var_word32, IsKnownTaggedPointer::kNo, + context, value, if_number, var_word32, IsKnownTaggedPointer::kNo, {}, if_bigint, if_bigint64, var_maybe_bigint); } @@ -5773,10 +5792,10 @@ void CodeStubAssembler::TaggedToWord32OrBigInt( void CodeStubAssembler::TaggedToWord32OrBigIntWithFeedback( TNode context, TNode value, Label* if_number, TVariable* var_word32, Label* if_bigint, Label* if_bigint64, - TVariable* var_maybe_bigint, TVariable* var_feedback) { + TVariable* var_maybe_bigint, const FeedbackValues& feedback) { TaggedToWord32OrBigIntImpl( context, value, if_number, var_word32, IsKnownTaggedPointer::kNo, - if_bigint, if_bigint64, var_maybe_bigint, var_feedback); + feedback, if_bigint, if_bigint64, var_maybe_bigint); } // Truncate {pointer} to word32 and jump to {if_number} if it is a Number, @@ 
-5785,31 +5804,36 @@ void CodeStubAssembler::TaggedToWord32OrBigIntWithFeedback( void CodeStubAssembler::TaggedPointerToWord32OrBigIntWithFeedback( TNode context, TNode pointer, Label* if_number, TVariable* var_word32, Label* if_bigint, Label* if_bigint64, - TVariable* var_maybe_bigint, TVariable* var_feedback) { + TVariable* var_maybe_bigint, const FeedbackValues& feedback) { TaggedToWord32OrBigIntImpl( context, pointer, if_number, var_word32, IsKnownTaggedPointer::kYes, - if_bigint, if_bigint64, var_maybe_bigint, var_feedback); + feedback, if_bigint, if_bigint64, var_maybe_bigint); } template void CodeStubAssembler::TaggedToWord32OrBigIntImpl( TNode context, TNode value, Label* if_number, TVariable* var_word32, - IsKnownTaggedPointer is_known_tagged_pointer, Label* if_bigint, - Label* if_bigint64, TVariable* var_maybe_bigint, - TVariable* var_feedback) { + IsKnownTaggedPointer is_known_tagged_pointer, + const FeedbackValues& feedback, Label* if_bigint, Label* if_bigint64, + TVariable* var_maybe_bigint) { // We might need to loop after conversion. TVARIABLE(Object, var_value, value); - OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNone); + TVARIABLE(Object, var_exception); + OverwriteFeedback(feedback.var_feedback, BinaryOperationFeedback::kNone); VariableList loop_vars({&var_value}, zone()); - if (var_feedback != nullptr) loop_vars.push_back(var_feedback); + if (feedback.var_feedback != nullptr) { + loop_vars.push_back(feedback.var_feedback); + } Label loop(this, loop_vars); + Label if_exception(this, Label::kDeferred); if (is_known_tagged_pointer == IsKnownTaggedPointer::kNo) { GotoIf(TaggedIsNotSmi(value), &loop); // {value} is a Smi. *var_word32 = SmiToInt32(CAST(value)); - CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall); + CombineFeedback(feedback.var_feedback, + BinaryOperationFeedback::kSignedSmall); Goto(if_number); } else { Goto(&loop); @@ -5834,11 +5858,11 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( // Not HeapNumber (or BigInt if conversion == kToNumeric). { - if (var_feedback != nullptr) { + if (feedback.var_feedback != nullptr) { // We do not require an Or with earlier feedback here because once we // convert the value to a Numeric, we cannot reach this path. We can // only reach this path on the first pass when the feedback is kNone. - CSA_DCHECK(this, SmiEqual(var_feedback->value(), + CSA_DCHECK(this, SmiEqual(feedback.var_feedback->value(), SmiConstant(BinaryOperationFeedback::kNone))); } GotoIf(InstanceTypeEqual(instance_type, ODDBALL_TYPE), &is_oddball); @@ -5846,20 +5870,36 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( auto builtin = conversion == Object::Conversion::kToNumeric ? 
Builtin::kNonNumberToNumeric : Builtin::kNonNumberToNumber; - var_value = CallBuiltin(builtin, context, value); - OverwriteFeedback(var_feedback, BinaryOperationFeedback::kAny); + if (feedback.var_feedback != nullptr) { + ScopedExceptionHandler handler(this, &if_exception, &var_exception); + var_value = CallBuiltin(builtin, context, value); + } else { + var_value = CallBuiltin(builtin, context, value); + } + OverwriteFeedback(feedback.var_feedback, BinaryOperationFeedback::kAny); Goto(&check_if_smi); + if (feedback.var_feedback != nullptr) { + BIND(&if_exception); + DCHECK(feedback.slot != nullptr); + DCHECK(feedback.maybe_feedback_vector != nullptr); + UpdateFeedback(SmiConstant(BinaryOperationFeedback::kAny), + (*feedback.maybe_feedback_vector)(), *feedback.slot, + feedback.update_mode); + CallRuntime(Runtime::kReThrow, context, var_exception.value()); + Unreachable(); + } + BIND(&is_oddball); var_value = LoadObjectField(value_heap_object, Oddball::kToNumberOffset); - OverwriteFeedback(var_feedback, + OverwriteFeedback(feedback.var_feedback, BinaryOperationFeedback::kNumberOrOddball); Goto(&check_if_smi); } BIND(&is_heap_number); *var_word32 = TruncateHeapNumberValueToWord32(CAST(value)); - CombineFeedback(var_feedback, BinaryOperationFeedback::kNumber); + CombineFeedback(feedback.var_feedback, BinaryOperationFeedback::kNumber); Goto(if_number); if (conversion == Object::Conversion::kToNumeric) { @@ -5869,7 +5909,8 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( if (var_maybe_bigint) { *var_maybe_bigint = CAST(value); } - CombineFeedback(var_feedback, BinaryOperationFeedback::kBigInt64); + CombineFeedback(feedback.var_feedback, + BinaryOperationFeedback::kBigInt64); Goto(if_bigint64); } @@ -5877,7 +5918,7 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( if (var_maybe_bigint) { *var_maybe_bigint = CAST(value); } - CombineFeedback(var_feedback, BinaryOperationFeedback::kBigInt); + CombineFeedback(feedback.var_feedback, BinaryOperationFeedback::kBigInt); Goto(if_bigint); } @@ -5887,7 +5928,8 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( // {value} is a Smi. 
*var_word32 = SmiToInt32(CAST(value)); - CombineFeedback(var_feedback, BinaryOperationFeedback::kSignedSmall); + CombineFeedback(feedback.var_feedback, + BinaryOperationFeedback::kSignedSmall); Goto(if_number); } } @@ -5914,107 +5956,89 @@ TNode CodeStubAssembler::TruncateHeapNumberValueToWord32( return Signed(TruncateFloat64ToWord32(value)); } -void CodeStubAssembler::TryHeapNumberToSmi(TNode number, - TVariable* var_result_smi, - Label* if_smi) { +TNode CodeStubAssembler::TryHeapNumberToSmi(TNode number, + Label* not_smi) { TNode value = LoadHeapNumberValue(number); - TryFloat64ToSmi(value, var_result_smi, if_smi); + return TryFloat64ToSmi(value, not_smi); } -void CodeStubAssembler::TryFloat32ToSmi(TNode value, - TVariable* var_result_smi, - Label* if_smi) { +TNode CodeStubAssembler::TryFloat32ToSmi(TNode value, + Label* not_smi) { TNode ivalue = TruncateFloat32ToInt32(value); TNode fvalue = RoundInt32ToFloat32(ivalue); - Label if_int32(this), if_heap_number(this); + Label if_int32(this); - GotoIfNot(Float32Equal(value, fvalue), &if_heap_number); + GotoIfNot(Float32Equal(value, fvalue), not_smi); GotoIfNot(Word32Equal(ivalue, Int32Constant(0)), &if_int32); + // if (value == -0.0) Branch(Int32LessThan(UncheckedCast(BitcastFloat32ToInt32(value)), Int32Constant(0)), - &if_heap_number, &if_int32); + not_smi, &if_int32); - TVARIABLE(Number, var_result); BIND(&if_int32); - { - if (SmiValuesAre32Bits()) { - *var_result_smi = SmiTag(ChangeInt32ToIntPtr(ivalue)); - } else { - DCHECK(SmiValuesAre31Bits()); - TNode> pair = Int32AddWithOverflow(ivalue, ivalue); - TNode overflow = Projection<1>(pair); - GotoIf(overflow, &if_heap_number); - *var_result_smi = - BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair))); - } - Goto(if_smi); + if (SmiValuesAre32Bits()) { + return SmiTag(ChangeInt32ToIntPtr(ivalue)); + } else { + DCHECK(SmiValuesAre31Bits()); + TNode> pair = Int32AddWithOverflow(ivalue, ivalue); + TNode overflow = Projection<1>(pair); + GotoIf(overflow, not_smi); + return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair))); } - BIND(&if_heap_number); } -void CodeStubAssembler::TryFloat64ToSmi(TNode value, - TVariable* var_result_smi, - Label* if_smi) { +TNode CodeStubAssembler::TryFloat64ToSmi(TNode value, + Label* not_smi) { TNode value32 = RoundFloat64ToInt32(value); TNode value64 = ChangeInt32ToFloat64(value32); - Label if_int32(this), if_heap_number(this, Label::kDeferred); + Label if_int32(this); - GotoIfNot(Float64Equal(value, value64), &if_heap_number); + GotoIfNot(Float64Equal(value, value64), not_smi); GotoIfNot(Word32Equal(value32, Int32Constant(0)), &if_int32); Branch(Int32LessThan(UncheckedCast(Float64ExtractHighWord32(value)), Int32Constant(0)), - &if_heap_number, &if_int32); + not_smi, &if_int32); TVARIABLE(Number, var_result); BIND(&if_int32); - { - if (SmiValuesAre32Bits()) { - *var_result_smi = SmiTag(ChangeInt32ToIntPtr(value32)); - } else { - DCHECK(SmiValuesAre31Bits()); - TNode> pair = Int32AddWithOverflow(value32, value32); - TNode overflow = Projection<1>(pair); - GotoIf(overflow, &if_heap_number); - *var_result_smi = - BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair))); - } - Goto(if_smi); + if (SmiValuesAre32Bits()) { + return SmiTag(ChangeInt32ToIntPtr(value32)); + } else { + DCHECK(SmiValuesAre31Bits()); + TNode> pair = Int32AddWithOverflow(value32, value32); + TNode overflow = Projection<1>(pair); + GotoIf(overflow, not_smi); + return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Projection<0>(pair))); } - 
BIND(&if_heap_number); } TNode CodeStubAssembler::ChangeFloat32ToTagged(TNode value) { - Label if_smi(this), done(this); - TVARIABLE(Smi, var_smi_result); + Label not_smi(this), done(this); TVARIABLE(Number, var_result); - TryFloat32ToSmi(value, &var_smi_result, &if_smi); - - var_result = AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(value)); + var_result = TryFloat32ToSmi(value, ¬_smi); Goto(&done); - BIND(&if_smi); + BIND(¬_smi); { - var_result = var_smi_result.value(); + var_result = AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(value)); Goto(&done); } + BIND(&done); return var_result.value(); } TNode CodeStubAssembler::ChangeFloat64ToTagged(TNode value) { - Label if_smi(this), done(this); - TVARIABLE(Smi, var_smi_result); + Label not_smi(this), done(this); TVARIABLE(Number, var_result); - TryFloat64ToSmi(value, &var_smi_result, &if_smi); - - var_result = AllocateHeapNumberWithValue(value); + var_result = TryFloat64ToSmi(value, ¬_smi); Goto(&done); - BIND(&if_smi); + BIND(¬_smi); { - var_result = var_smi_result.value(); + var_result = AllocateHeapNumberWithValue(value); Goto(&done); } BIND(&done); @@ -6573,6 +6597,20 @@ CodeStubAssembler::IsNumberStringNotRegexpLikeProtectorCellInvalid() { return TaggedEqual(cell_value, invalid); } +TNode CodeStubAssembler::IsSetIteratorProtectorCellInvalid() { + TNode invalid = SmiConstant(Protectors::kProtectorInvalid); + TNode cell = SetIteratorProtectorConstant(); + TNode cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); + return TaggedEqual(cell_value, invalid); +} + +TNode CodeStubAssembler::IsMapIteratorProtectorCellInvalid() { + TNode invalid = SmiConstant(Protectors::kProtectorInvalid); + TNode cell = MapIteratorProtectorConstant(); + TNode cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); + return TaggedEqual(cell_value, invalid); +} + TNode CodeStubAssembler::IsPrototypeInitialArrayPrototype( TNode context, TNode map) { const TNode native_context = LoadNativeContext(context); @@ -7658,7 +7696,7 @@ TNode CodeStubAssembler::NumberToString(TNode input, Label* bailout) { TVARIABLE(String, result); TVARIABLE(Smi, smi_input); - Label if_smi(this), if_heap_number(this), done(this, &result); + Label if_smi(this), not_smi(this), if_heap_number(this), done(this, &result); // Load the number string cache. TNode number_string_cache = NumberStringCacheConstant(); @@ -7676,42 +7714,13 @@ TNode CodeStubAssembler::NumberToString(TNode input, Goto(&if_smi); BIND(&if_heap_number); + TNode heap_number_input = CAST(input); { Comment("NumberToString - HeapNumber"); - TNode heap_number_input = CAST(input); // Try normalizing the HeapNumber. - TryHeapNumberToSmi(heap_number_input, &smi_input, &if_smi); - - // Make a hash from the two 32-bit values of the double. - TNode low = - LoadObjectField(heap_number_input, HeapNumber::kValueOffset); - TNode high = LoadObjectField( - heap_number_input, HeapNumber::kValueOffset + kIntSize); - TNode hash = Word32And(Word32Xor(low, high), mask); - TNode entry_index = - Signed(ChangeUint32ToWord(Int32Add(hash, hash))); - - // Cache entry's key must be a heap number - TNode number_key = - UnsafeLoadFixedArrayElement(number_string_cache, entry_index); - GotoIf(TaggedIsSmi(number_key), bailout); - TNode number_key_heap_object = CAST(number_key); - GotoIfNot(IsHeapNumber(number_key_heap_object), bailout); - - // Cache entry's key must match the heap number value we're looking for. 
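The TryFloat64ToSmi/TryFloat32ToSmi rewrites above keep the same test (the value must round-trip through int32 and must not be -0.0) but now return the Smi directly and jump to not_smi on failure. The tagging step is the interesting part: with 31-bit Smis the tagged value is the integer doubled, so CSA adds the value to itself and uses the overflow flag to reject integers that do not fit, while with 32-bit Smis the shift cannot overflow. A small sketch of the 31-bit path, using a compiler builtin where CSA uses Int32AddWithOverflow:

#include <cstdint>
#include <optional>

// 31-bit Smi tagging: tagged = value * 2 with the low bit left clear;
// overflow of the doubling means the value needs a HeapNumber instead.
std::optional<int32_t> TryTagSmi31(int32_t value) {
  int32_t tagged;
  if (__builtin_add_overflow(value, value, &tagged)) return std::nullopt;
  return tagged;
}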
- TNode low_compare = LoadObjectField( - number_key_heap_object, HeapNumber::kValueOffset); - TNode high_compare = LoadObjectField( - number_key_heap_object, HeapNumber::kValueOffset + kIntSize); - GotoIfNot(Word32Equal(low, low_compare), bailout); - GotoIfNot(Word32Equal(high, high_compare), bailout); - - // Heap number match, return value from cache entry. - result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, entry_index, - kTaggedSize)); - Goto(&done); + smi_input = TryHeapNumberToSmi(heap_number_input, ¬_smi); + Goto(&if_smi); } - BIND(&if_smi); { Comment("NumberToString - Smi"); @@ -7755,6 +7764,38 @@ TNode CodeStubAssembler::NumberToString(TNode input, } } } + + BIND(¬_smi); + { + // Make a hash from the two 32-bit values of the double. + TNode low = + LoadObjectField(heap_number_input, HeapNumber::kValueOffset); + TNode high = LoadObjectField( + heap_number_input, HeapNumber::kValueOffset + kIntSize); + TNode hash = Word32And(Word32Xor(low, high), mask); + TNode entry_index = + Signed(ChangeUint32ToWord(Int32Add(hash, hash))); + + // Cache entry's key must be a heap number + TNode number_key = + UnsafeLoadFixedArrayElement(number_string_cache, entry_index); + GotoIf(TaggedIsSmi(number_key), bailout); + TNode number_key_heap_object = CAST(number_key); + GotoIfNot(IsHeapNumber(number_key_heap_object), bailout); + + // Cache entry's key must match the heap number value we're looking for. + TNode low_compare = LoadObjectField( + number_key_heap_object, HeapNumber::kValueOffset); + TNode high_compare = LoadObjectField( + number_key_heap_object, HeapNumber::kValueOffset + kIntSize); + GotoIfNot(Word32Equal(low, low_compare), bailout); + GotoIfNot(Word32Equal(high, high_compare), bailout); + + // Heap number match, return value from cache entry. 
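The cache probe being moved into this not_smi block works on a flat FixedArray of key/value pairs: the double's two 32-bit halves are XORed and masked to form a hash, the key sits at index 2 * hash with the cached string in the next slot, and both halves must match the stored HeapNumber or the code bails out to the runtime. A compact sketch of the same probe over a plain array; the layout and mask handling are assumptions of this model, not of V8:

#include <cstdint>
#include <cstring>

// Hypothetical flat cache: entries[2*i] holds the key bits of a double,
// entries[2*i + 1] an opaque cached value; mask = (pair count) - 1.
bool ProbeNumberCache(const uint64_t* entries, uint32_t mask, double key,
                      uint64_t* value_out) {
  uint64_t bits;
  std::memcpy(&bits, &key, sizeof bits);
  uint32_t hash = (static_cast<uint32_t>(bits) ^
                   static_cast<uint32_t>(bits >> 32)) & mask;
  if (entries[2 * hash] != bits) return false;  // key mismatch: miss
  *value_out = entries[2 * hash + 1];
  return true;
}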
+ result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, entry_index, + kTaggedSize)); + Goto(&done); + } BIND(&done); return result.value(); } @@ -9228,8 +9269,9 @@ void CodeStubAssembler::InsertEntry( } template -void CodeStubAssembler::Add(TNode dictionary, TNode key, - TNode value, Label* bailout) { +void CodeStubAssembler::AddToDictionary(TNode dictionary, + TNode key, TNode value, + Label* bailout) { CSA_DCHECK(this, Word32BinaryNot(IsEmptyPropertyDictionary(dictionary))); TNode capacity = GetCapacity(dictionary); TNode nof = GetNumberOfElements(dictionary); @@ -9264,9 +9306,9 @@ void CodeStubAssembler::Add(TNode dictionary, TNode key, } template <> -void CodeStubAssembler::Add(TNode dictionary, - TNode key, TNode value, - Label* bailout) { +void CodeStubAssembler::AddToDictionary(TNode dictionary, + TNode key, TNode value, + Label* bailout) { PropertyDetails d(PropertyKind::kData, NONE, PropertyDetails::kConstIfDictConstnessTracking); @@ -9288,9 +9330,8 @@ void CodeStubAssembler::Add(TNode dictionary, SwissNameDictionaryAdd(dictionary, key, value, var_details.value(), bailout); } -template void CodeStubAssembler::Add(TNode, - TNode, TNode, - Label*); +template void CodeStubAssembler::AddToDictionary( + TNode, TNode, TNode, Label*); template TNode CodeStubAssembler::GetNumberOfElements( @@ -10243,9 +10284,7 @@ TNode CodeStubAssembler::CallGetterIfAccessor( BIND(&if_function_template_info); { - Label runtime(this, Label::kDeferred); Label use_cached_property(this); - GotoIf(IsSideEffectFreeDebuggingActive(), &runtime); TNode cached_property_name = LoadObjectField( getter, FunctionTemplateInfo::kCachedPropertyNameOffset); @@ -10282,13 +10321,6 @@ TNode CodeStubAssembler::CallGetterIfAccessor( Goto(&done); } - - BIND(&runtime); - { - var_value = CallRuntime(Runtime::kGetProperty, context, holder, name, - receiver); - Goto(&done); - } } } else { DCHECK_EQ(mode, kReturnAccessorPair); @@ -10563,6 +10595,78 @@ CodeStubAssembler::AllocatePropertyDescriptorObject(TNode context) { return CAST(result); } +TNode CodeStubAssembler::GetInterestingProperty( + TNode context, TNode receiver, TNode symbol, + Label* if_not_found) { + TVARIABLE(HeapObject, var_holder, receiver); + TVARIABLE(Map, var_holder_map, LoadMap(receiver)); + + return GetInterestingProperty(context, receiver, &var_holder, &var_holder_map, + symbol, if_not_found, nullptr); +} + +TNode CodeStubAssembler::GetInterestingProperty( + TNode context, TNode receiver, + TVariable* var_holder, TVariable* var_holder_map, + TNode symbol, Label* if_not_found, Label* if_proxy) { + CSA_DCHECK(this, IsSetWord32( + LoadObjectField(symbol, Symbol::kFlagsOffset))); + // The lookup starts at the var_holder and var_holder_map must contain + // var_holder's map. + CSA_DCHECK(this, TaggedEqual(LoadMap((*var_holder).value()), + (*var_holder_map).value())); + TVARIABLE(Object, var_result, UndefinedConstant()); + + // Check if all relevant maps (including the prototype maps) don't + // have any interesting properties (i.e. that none of them have the + // @@toStringTag or @@toPrimitive property). 
+ Label loop(this, {var_holder, var_holder_map}), + lookup(this, Label::kDeferred); + Goto(&loop); + BIND(&loop); + { + Label interesting_properties(this); + TNode holder = (*var_holder).value(); + TNode holder_map = (*var_holder_map).value(); + GotoIf(IsNull(holder), if_not_found); + TNode holder_bit_field3 = LoadMapBitField3(holder_map); + GotoIf(IsSetWord32( + holder_bit_field3), + &interesting_properties); + *var_holder = LoadMapPrototype(holder_map); + *var_holder_map = LoadMap((*var_holder).value()); + Goto(&loop); + BIND(&interesting_properties); + { + // Check flags for dictionary objects. + GotoIf(IsClearWord32(holder_bit_field3), + &lookup); + GotoIf(InstanceTypeEqual(LoadMapInstanceType(holder_map), JS_PROXY_TYPE), + if_proxy ? if_proxy : &lookup); + TNode properties = + LoadObjectField(holder, JSObject::kPropertiesOrHashOffset); + CSA_DCHECK(this, TaggedIsNotSmi(properties)); + // TODO(pthier): Support swiss dictionaries. + if constexpr (!V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) { + CSA_DCHECK(this, IsNameDictionary(CAST(properties))); + TNode flags = + GetNameDictionaryFlags(CAST(properties)); + GotoIf(IsSetSmi(flags, + NameDictionary::MayHaveInterestingPropertiesBit::kMask), + &lookup); + *var_holder = LoadMapPrototype(holder_map); + *var_holder_map = LoadMap((*var_holder).value()); + } + Goto(&loop); + } + } + + BIND(&lookup); + return CallBuiltin(Builtin::kGetPropertyWithReceiver, context, + (*var_holder).value(), symbol, receiver, + SmiConstant(OnNonExistent::kReturnUndefined)); +} + void CodeStubAssembler::TryLookupElement( TNode object, TNode map, TNode instance_type, TNode intptr_index, Label* if_found, Label* if_absent, @@ -11745,6 +11849,17 @@ TNode CodeStubAssembler::PrepareValueForWriteToTypedArray( return ToBigInt(context, input); } +#if V8_ENABLE_WEBASSEMBLY +TorqueStructInt64AsInt32Pair CodeStubAssembler::BigIntToRawBytes( + TNode value) { + TVARIABLE(UintPtrT, var_low); + // Only used on 32-bit platforms. + TVARIABLE(UintPtrT, var_high); + BigIntToRawBytes(value, &var_low, &var_high); + return {var_low.value(), var_high.value()}; +} +#endif // V8_ENABLE_WEBASSEMBLY + void CodeStubAssembler::BigIntToRawBytes(TNode bigint, TVariable* var_low, TVariable* var_high) { @@ -15533,16 +15648,6 @@ TNode CodeStubAssembler::IsDebugActive() { return Word32NotEqual(is_debug_active, Int32Constant(0)); } -// TODO(v8:13825): remove once CallApiGetter/CallApiAccessor are able to handle -// side effects checking. 
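GetInterestingProperty above is a fast path for the handful of symbols marked as interesting (@@toStringTag, @@toPrimitive): it walks the prototype chain and only falls back to a real property lookup when a map's bit_field3 says the holder may have interesting properties and, for dictionary-mode holders, the NameDictionary's own flag agrees (proxies also force the slow path). A standalone sketch of the walk over a toy object model; the types and flags are stand-ins for Map and NameDictionary state, and proxies are ignored:

// Toy model of the walk; in V8 the flags live in Map::bit_field3 and in the
// NameDictionary flags word.
struct ToyMap;
struct ToyObject {
  const ToyMap* map;
  bool dictionary_may_have_interesting = false;  // NameDictionary flag stand-in
};
struct ToyMap {
  bool may_have_interesting_properties = false;
  bool is_dictionary_map = false;
  const ToyObject* prototype = nullptr;  // nullptr == null prototype
};

// True if a slow lookup is needed; false if the property is guaranteed
// absent on the whole chain (the "not found" exit above).
bool NeedsSlowLookup(const ToyObject* holder) {
  for (; holder != nullptr; holder = holder->map->prototype) {
    if (!holder->map->may_have_interesting_properties) continue;
    if (!holder->map->is_dictionary_map) return true;
    if (holder->dictionary_may_have_interesting) return true;
    // Dictionary holder whose dictionary never saw an interesting name:
    // keep walking the chain.
  }
  return false;
}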
-TNode CodeStubAssembler::IsSideEffectFreeDebuggingActive() { - TNode execution_mode = Load( - ExternalConstant(ExternalReference::execution_mode_address(isolate()))); - int32_t mask = - static_cast(IsolateExecutionModeFlag::kCheckSideEffects); - return IsSetWord32(execution_mode, mask); -} - TNode CodeStubAssembler::HasAsyncEventDelegate() { const TNode async_event_delegate = Load(ExternalConstant( ExternalReference::async_event_delegate_address(isolate()))); @@ -15699,7 +15804,7 @@ TNode CodeStubAssembler::GetSharedFunctionInfoCode( // IsFunctionTemplateInfo: API call BIND(&check_is_function_template_info); - sfi_code = HeapConstant(BUILTIN_CODE(isolate(), HandleApiCall)); + sfi_code = HeapConstant(BUILTIN_CODE(isolate(), HandleApiCallOrConstruct)); Goto(&done); // IsInterpreterData: Interpret bytecode @@ -15737,8 +15842,7 @@ TNode CodeStubAssembler::GetSharedFunctionInfoCode( } TNode CodeStubAssembler::LoadCodeInstructionStart(TNode code) { - return LoadObjectField( - code, IntPtrConstant(Code::kInstructionStartOffset)); + return LoadCodePointerFromObject(code, Code::kInstructionStartOffset); } TNode CodeStubAssembler::IsMarkedForDeoptimization(TNode code) { @@ -15995,22 +16099,6 @@ void CodeStubAssembler::PerformStackCheck(TNode context) { BIND(&ok); } -TNode CodeStubAssembler::CallApiCallback( - TNode context, TNode callback, TNode argc, - TNode data, TNode holder, TNode receiver) { - Callable callable = CodeFactory::CallApiCallback(isolate()); - return CallStub(callable, context, callback, argc, data, holder, receiver); -} - -TNode CodeStubAssembler::CallApiCallback( - TNode context, TNode callback, TNode argc, - TNode data, TNode holder, TNode receiver, - TNode value) { - Callable callable = CodeFactory::CallApiCallback(isolate()); - return CallStub(callable, context, callback, argc, data, holder, receiver, - value); -} - TNode CodeStubAssembler::CallRuntimeNewArray( TNode context, TNode receiver, TNode length, TNode new_target, TNode allocation_site) { @@ -17035,5 +17123,115 @@ void CodeStubAssembler::SharedValueBarrier( BIND(&done); } +TNode CodeStubAssembler::AllocateArrayList(TNode size) { + TVARIABLE(ArrayList, result); + Label empty(this), nonempty(this), done(this); + + Branch(SmiEqual(size, SmiConstant(0)), &empty, &nonempty); + + BIND(&nonempty); + { + TNode fixed_array_size = + SmiAdd(size, SmiConstant(ArrayList::kFirstIndex)); + result = + CAST(AllocateFixedArray(PACKED_ELEMENTS, fixed_array_size, + AllocationFlag::kNone, ArrayListMapConstant())); + ArrayListSetLength(result.value(), SmiConstant(0)); + + Goto(&done); + } + + BIND(&empty); + { + result = EmptyArrayListConstant(); + Goto(&done); + } + + BIND(&done); + return result.value(); +} + +TNode CodeStubAssembler::ArrayListEnsureSpace( + TNode array, TNode requested_size) { + Label overflow(this, Label::kDeferred); + TNode capacity = LoadFixedArrayBaseLength(array); + TNode requested_capacity = + TrySmiAdd(requested_size, SmiConstant(ArrayList::kFirstIndex), &overflow); + + Label grow(this), done(this); + + TVARIABLE(ArrayList, result_array, array); + + GotoIf(SmiLessThan(capacity, requested_capacity), &grow); + Goto(&done); + + BIND(&grow); + { + // new_capacity = new_length; + // new_capacity = capacity + max(capacity / 2, 2); + // + // Ensure calculation matches ArrayList::EnsureSpace. 
+ TNode new_capacity = TrySmiAdd( + requested_capacity, + SmiMax(SmiShr(requested_capacity, 1), SmiConstant(2)), &overflow); + result_array = CAST( + ExtractFixedArray(array, SmiConstant(0), capacity, new_capacity, + ExtractFixedArrayFlag::kFixedArrays)); + Goto(&done); + } + + BIND(&overflow); + CallRuntime(Runtime::kFatalInvalidSize, NoContextConstant()); + Unreachable(); + + BIND(&done); + return result_array.value(); +} + +TNode CodeStubAssembler::ArrayListAdd(TNode array, + TNode object) { + TNode length = ArrayListGetLength(array); + TNode new_length = SmiAdd(length, SmiConstant(1)); + TNode array_with_space = ArrayListEnsureSpace(array, new_length); + + CSA_DCHECK(this, SmiEqual(ArrayListGetLength(array_with_space), length)); + + ArrayListSet(array_with_space, length, object); + ArrayListSetLength(array_with_space, new_length); + + return array_with_space; +} + +void CodeStubAssembler::ArrayListSet(TNode array, TNode index, + TNode object) { + StoreFixedArrayElement( + array, SmiAdd(SmiConstant(ArrayList::kFirstIndex), index), object); +} + +TNode CodeStubAssembler::ArrayListGetLength(TNode array) { + TNode capacity = LoadFixedArrayBaseLength(array); + return Select( + SmiEqual(capacity, SmiConstant(0)), [=]() { return SmiConstant(0); }, + [=]() { + return CAST( + LoadFixedArrayElement(array, SmiConstant(ArrayList::kLengthIndex))); + }); +} + +void CodeStubAssembler::ArrayListSetLength(TNode array, + TNode length) { + StoreFixedArrayElement(array, SmiConstant(ArrayList::kLengthIndex), length); +} + +TNode CodeStubAssembler::ArrayListElements(TNode array) { + // TODO(v8:12499): Consider supporting other ElementsKinds. + constexpr ElementsKind kind = ElementsKind::PACKED_ELEMENTS; + TNode length = PositiveSmiUntag(ArrayListGetLength(array)); + TNode elements = AllocateFixedArray(kind, length); + CopyElements(kind, elements, IntPtrConstant(0), array, + IntPtrConstant(ArrayList::kFirstIndex), length); + return CAST(elements); +} + } // namespace internal } // namespace v8 diff --git a/v8/src/codegen/code-stub-assembler.h b/v8/src/codegen/code-stub-assembler.h index d5078973b..4e6d3c0df 100644 --- a/v8/src/codegen/code-stub-assembler.h +++ b/v8/src/codegen/code-stub-assembler.h @@ -138,6 +138,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; AllocationSiteWithoutWeakNextMap) \ V(AllocationSiteWithWeakNextMap, allocation_site_map, AllocationSiteMap) \ V(arguments_to_string, arguments_to_string, ArgumentsToString) \ + V(ArrayListMap, array_list_map, ArrayListMap) \ V(Array_string, Array_string, ArrayString) \ V(array_to_string, array_to_string, ArrayToString) \ V(BooleanMap, boolean_map, BooleanMap) \ @@ -148,6 +149,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; V(constructor_string, constructor_string, ConstructorString) \ V(date_to_string, date_to_string, DateToString) \ V(default_string, default_string, DefaultString) \ + V(EmptyArrayList, empty_array_list, EmptyArrayList) \ V(EmptyByteArray, empty_byte_array, EmptyByteArray) \ V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \ V(EmptyScopeInfo, empty_scope_info, EmptyScopeInfo) \ @@ -168,10 +170,12 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; V(Function_string, function_string, FunctionString) \ V(function_to_string, function_to_string, FunctionToString) \ V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \ + V(has_string, has_string, HasString) \ V(Infinity_string, Infinity_string, InfinityString) \ V(is_concat_spreadable_symbol, 
is_concat_spreadable_symbol, \ IsConcatSpreadableSymbol) \ V(iterator_symbol, iterator_symbol, IteratorSymbol) \ + V(keys_string, keys_string, KeysString) \ V(length_string, length_string, LengthString) \ V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \ V(match_symbol, match_symbol, MatchSymbol) \ @@ -215,6 +219,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; V(search_symbol, search_symbol, SearchSymbol) \ V(SingleCharacterStringTable, single_character_string_table, \ SingleCharacterStringTable) \ + V(size_string, size_string, SizeString) \ V(species_symbol, species_symbol, SpeciesSymbol) \ V(StaleRegister, stale_register, StaleRegister) \ V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \ @@ -536,16 +541,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode NoContextConstant(); -#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ - TNode().rootAccessorName())>::type>::type> \ +#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ + TNode().rootAccessorName())>::type> \ name##Constant(); HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) #undef HEAP_CONSTANT_ACCESSOR -#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ - TNode().rootAccessorName())>::type>::type> \ +#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ + TNode().rootAccessorName())>::type> \ name##Constant(); HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) #undef HEAP_CONSTANT_ACCESSOR @@ -1121,6 +1125,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode pointer, ExternalPointerTag tag); + // Load a code pointer from an object. + TNode LoadCodePointerFromObject(TNode object, + int offset) { + return LoadCodePointerFromObject(object, IntPtrConstant(offset)); + } + + TNode LoadCodePointerFromObject(TNode object, + TNode offset); + TNode LoadForeignForeignAddressPtr(TNode object) { return LoadExternalPointerFromObject(object, Foreign::kForeignAddressOffset, kForeignForeignAddressTag); @@ -2234,6 +2247,22 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return UncheckedCast(base); } + TNode AllocateArrayList(TNode size); + + TNode ArrayListEnsureSpace(TNode array, + TNode size); + + TNode ArrayListAdd(TNode array, TNode object); + + void ArrayListSet(TNode array, TNode index, + TNode object); + + TNode ArrayListGetLength(TNode array); + + void ArrayListSetLength(TNode array, TNode length); + + TNode ArrayListElements(TNode array); + template bool ClassHasMapConstant() { return false; @@ -2430,28 +2459,31 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Label* if_number, TVariable* var_word32, Label* if_bigint, Label* if_bigint64, TVariable* var_maybe_bigint); + struct FeedbackValues { + TVariable* var_feedback = nullptr; + const LazyNode* maybe_feedback_vector = nullptr; + TNode* slot = nullptr; + UpdateFeedbackMode update_mode = UpdateFeedbackMode::kNoFeedback; + }; void TaggedToWord32OrBigIntWithFeedback(TNode context, TNode value, Label* if_number, TVariable* var_word32, Label* if_bigint, Label* if_bigint64, TVariable* var_maybe_bigint, - TVariable* var_feedback); + const FeedbackValues& feedback); void TaggedPointerToWord32OrBigIntWithFeedback( TNode context, TNode pointer, Label* if_number, TVariable* var_word32, Label* if_bigint, Label* if_bigint64, - TVariable* var_maybe_bigint, TVariable* var_feedback); + TVariable* var_maybe_bigint, const FeedbackValues& feedback); TNode TruncateNumberToWord32(TNode value); // Truncate the floating point value of a HeapNumber to an Int32. 
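The ArrayList helpers declared here (and implemented further up) treat an ArrayList as a FixedArray whose kLengthIndex slot stores the logical length and whose payload starts at kFirstIndex, so a capacity check compares the backing-store length against requested_size + kFirstIndex, and growth over-allocates by half of the requested capacity (at least two slots) to mirror ArrayList::EnsureSpace. A small sketch of that growth computation with an illustrative kFirstIndex:

#include <algorithm>
#include <cstdint>

constexpr int64_t kFirstIndex = 1;  // illustrative; the real value comes from ArrayList

// Mirrors ArrayListEnsureSpace: grow only when the backing store is too
// small, then add max(requested_capacity / 2, 2) slack on top.
int64_t GrowArrayListCapacity(int64_t current_capacity,
                              int64_t requested_size) {
  int64_t requested_capacity = requested_size + kFirstIndex;
  if (current_capacity >= requested_capacity) return current_capacity;
  return requested_capacity + std::max<int64_t>(requested_capacity >> 1, 2);
}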
TNode TruncateHeapNumberValueToWord32(TNode object); // Conversions. - void TryHeapNumberToSmi(TNode number, TVariable* output, - Label* if_smi); - void TryFloat32ToSmi(TNode number, TVariable* output, - Label* if_smi); - void TryFloat64ToSmi(TNode number, TVariable* output, - Label* if_smi); + TNode TryHeapNumberToSmi(TNode number, Label* not_smi); + TNode TryFloat32ToSmi(TNode number, Label* not_smi); + TNode TryFloat64ToSmi(TNode number, Label* not_smi); TNode ChangeFloat32ToTagged(TNode value); TNode ChangeFloat64ToTagged(TNode value); TNode ChangeInt32ToTagged(TNode value); @@ -2697,6 +2729,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsRegExpSpeciesProtectorCellInvalid(); TNode IsPromiseSpeciesProtectorCellInvalid(); TNode IsNumberStringNotRegexpLikeProtectorCellInvalid(); + TNode IsSetIteratorProtectorCellInvalid(); + TNode IsMapIteratorProtectorCellInvalid(); TNode LoadBasicMemoryChunkFlags(TNode object); @@ -3246,8 +3280,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode enum_index); template - void Add(TNode dictionary, TNode key, TNode value, - Label* bailout); + void AddToDictionary(TNode dictionary, TNode key, + TNode value, Label* bailout); // Tries to check if {object} has own {unique_name} property. void TryHasOwnProperty(TNode object, TNode map, @@ -3314,6 +3348,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return CallBuiltin(Builtin::kGetProperty, context, receiver, name); } + TNode GetInterestingProperty(TNode context, + TNode receiver, + TNode symbol, + Label* if_not_found); + TNode GetInterestingProperty(TNode context, + TNode receiver, + TVariable* var_holder, + TVariable* var_holder_map, + TNode symbol, + Label* if_not_found, Label* if_proxy); + TNode SetPropertyStrict(TNode context, TNode receiver, TNode key, TNode value) { @@ -3548,6 +3593,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void BigIntToRawBytes(TNode bigint, TVariable* var_low, TVariable* var_high); +#if V8_ENABLE_WEBASSEMBLY + TorqueStructInt64AsInt32Pair BigIntToRawBytes(TNode value); +#endif // V8_ENABLE_WEBASSEMBLY + void EmitElementStore(TNode object, TNode key, TNode value, ElementsKind elements_kind, KeyedAccessStoreMode store_mode, Label* bailout, @@ -3821,7 +3870,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Debug helpers TNode IsDebugActive(); - TNode IsSideEffectFreeDebuggingActive(); // JSArrayBuffer helpers TNode LoadJSArrayBufferByteLength( @@ -4368,14 +4416,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler enum IsKnownTaggedPointer { kNo, kYes }; template - void TaggedToWord32OrBigIntImpl(TNode context, TNode value, - Label* if_number, - TVariable* var_word32, - IsKnownTaggedPointer is_known_tagged_pointer, - Label* if_bigint = nullptr, - Label* if_bigint64 = nullptr, - TVariable* var_maybe_bigint = nullptr, - TVariable* var_feedback = nullptr); + void TaggedToWord32OrBigIntImpl( + TNode context, TNode value, Label* if_number, + TVariable* var_word32, + IsKnownTaggedPointer is_known_tagged_pointer, + const FeedbackValues& feedback, Label* if_bigint = nullptr, + Label* if_bigint64 = nullptr, + TVariable* var_maybe_bigint = nullptr); // Low-level accessors for Descriptor arrays. 
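The FeedbackValues struct introduced above bundles what used to be a lone var_feedback out-parameter with everything needed to write feedback back unaided: the feedback variable, a lazily computed feedback vector, the slot, and the update mode. That is what lets TaggedToWord32OrBigIntImpl wrap the NonNumberToNumber/NonNumberToNumeric call in an exception handler, record kAny feedback, and rethrow. A plain-C++ sketch of the same optional-feedback-sink pattern; the types are stand-ins for CSA's TVariable and LazyNode:

#include <cstdint>
#include <functional>

struct FeedbackSink {
  uint32_t* var_feedback = nullptr;          // accumulate bits here, if set
  std::function<void(uint32_t)> write_back;  // vector/slot writer, if set
};

void CombineFeedback(const FeedbackSink& sink, uint32_t bits) {
  if (sink.var_feedback) *sink.var_feedback |= bits;
}

void RecordFeedbackOnThrow(const FeedbackSink& sink, uint32_t any_bits) {
  // Persist feedback before rethrowing so the slot is not left stale.
  if (sink.write_back) sink.write_back(any_bits);
}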
template diff --git a/v8/src/codegen/compiler.cc b/v8/src/codegen/compiler.cc index 34b9a492c..19dd1cb14 100644 --- a/v8/src/codegen/compiler.cc +++ b/v8/src/codegen/compiler.cc @@ -42,8 +42,7 @@ #include "src/heap/heap-inl.h" #include "src/heap/local-factory-inl.h" #include "src/heap/local-heap-inl.h" -#include "src/heap/local-heap.h" -#include "src/heap/parked-scope.h" +#include "src/heap/parked-scope-inl.h" #include "src/init/bootstrapper.h" #include "src/interpreter/interpreter.h" #include "src/logging/counters-scopes.h" @@ -103,11 +102,12 @@ class CompilerTracer : public AllStatic { } static void TraceStartMaglevCompile(Isolate* isolate, - Handle function, + Handle function, bool osr, ConcurrencyMode mode) { if (!v8_flags.trace_opt) return; CodeTracer::Scope scope(isolate->GetCodeTracer()); PrintTracePrefix(scope, "compiling method", function, CodeKind::MAGLEV); + if (osr) PrintF(scope.file(), " OSR"); PrintF(scope.file(), ", mode: %s", ToString(mode)); PrintTraceSuffix(scope); } @@ -194,12 +194,13 @@ class CompilerTracer : public AllStatic { } static void TraceFinishMaglevCompile(Isolate* isolate, - Handle function, + Handle function, bool osr, double ms_prepare, double ms_execute, double ms_finalize) { if (!v8_flags.trace_opt) return; CodeTracer::Scope scope(isolate->GetCodeTracer()); PrintTracePrefix(scope, "completed compiling", function, CodeKind::MAGLEV); + if (osr) PrintF(scope.file(), " OSR"); PrintF(scope.file(), " - took %0.3f, %0.3f, %0.3f ms", ms_prepare, ms_execute, ms_finalize); PrintTraceSuffix(scope); @@ -324,7 +325,7 @@ void Compiler::LogFunctionCompilation(Isolate* isolate, int line_num = info.line + 1; int column_num = info.column + 1; Handle script_name(script->name().IsString() - ? String::cast(script->name()) + ? Tagged::cast(script->name()) : ReadOnlyRoots(isolate).empty_string(), isolate); LogEventListener::CodeTag log_tag = @@ -481,7 +482,8 @@ CompilationJob::Status OptimizedCompilationJob::PrepareJob(Isolate* isolate) { CompilationJob::Status OptimizedCompilationJob::ExecuteJob( RuntimeCallStats* stats, LocalIsolate* local_isolate) { - DCHECK_IMPLIES(local_isolate, local_isolate->heap()->IsParked()); + DCHECK_IMPLIES(local_isolate && !local_isolate->is_main_thread(), + local_isolate->heap()->IsParked()); // Delegate to the underlying implementation. DCHECK_EQ(state(), State::kReadyToExecute); ScopedTimer t(&time_taken_to_execute_); @@ -533,8 +535,10 @@ void TurbofanCompilationJob::RecordCompilationStats(ConcurrencyMode mode, compilation_time += (ms_creategraph + ms_optimize + ms_codegen); compiled_functions++; code_size += function->shared().SourceSize(); - PrintF("Compiled: %d functions with %d byte source size in %fms.\n", - compiled_functions, code_size, compilation_time); + PrintF( + "[turbofan] Compiled: %d functions with %d byte source size in " + "%fms.\n", + compiled_functions, code_size, compilation_time); } } // Don't record samples from machines without high-resolution timers, @@ -660,7 +664,7 @@ void InstallInterpreterTrampolineCopy(Isolate* isolate, int line_num = info.line + 1; int column_num = info.column + 1; Handle script_name = - handle(script->name().IsString() ? String::cast(script->name()) + handle(script->name().IsString() ? 
Tagged::cast(script->name()) : ReadOnlyRoots(isolate).empty_string(), isolate); PROFILE(isolate, CodeCreateEvent(log_tag, abstract_code, shared_info, @@ -685,6 +689,7 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info, #endif // V8_ENABLE_WEBASSEMBLY shared_info->set_bytecode_array(*compilation_info->bytecode_array()); + shared_info->set_age(0); Handle feedback_metadata = FeedbackMetadata::New( isolate, compilation_info->feedback_vector_spec()); @@ -817,6 +822,7 @@ bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs( std::vector functions_to_compile; functions_to_compile.push_back(parse_info->literal()); + bool compilation_succeeded = true; bool is_first = true; while (!functions_to_compile.empty()) { FunctionLiteral* literal = functions_to_compile.back(); @@ -841,7 +847,19 @@ bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs( allocator, &functions_to_compile, isolate->AsLocalIsolate()); - if (!job) return false; + if (!job) { + // Compilation failed presumably because of stack overflow, make sure + // the shared function info contains uncompiled data for the next + // compilation attempts. + if (!shared_info->HasUncompiledData()) { + SharedFunctionInfo::CreateAndSetUncompiledData(isolate, shared_info, + literal); + } + compilation_succeeded = false; + // Proceed finalizing other functions in case they don't have uncompiled + // data. + continue; + } UpdateSharedFunctionFlagsAfterCompilation(literal, *shared_info); @@ -859,7 +877,10 @@ bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs( break; case CompilationJob::FAILED: - return false; + compilation_succeeded = false; + // Proceed finalizing other functions in case they don't have uncompiled + // data. + continue; case CompilationJob::RETRY_ON_MAIN_THREAD: // This should not happen on the main thread. @@ -881,7 +902,7 @@ bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs( parse_info->pending_error_handler()->PrepareWarnings(isolate); } - return true; + return compilation_succeeded; } bool FinalizeDeferredUnoptimizedCompilationJobs( @@ -944,7 +965,6 @@ class OptimizedCodeCache : public AllStatic { code = feedback_vector.optimized_code(); } - DCHECK_IMPLIES(!code.is_null(), code.kind() <= code_kind); if (code.is_null() || code.kind() != code_kind) return {}; DCHECK(!code.marked_for_deoptimization()); @@ -972,7 +992,7 @@ class OptimizedCodeCache : public AllStatic { Handle bytecode(shared.GetBytecodeArray(isolate), isolate); interpreter::BytecodeArrayIterator it(bytecode, osr_offset.ToInt()); DCHECK_EQ(it.current_bytecode(), interpreter::Bytecode::kJumpLoop); - feedback_vector.SetOptimizedOsrCode(it.GetSlotOperand(2), code); + feedback_vector.SetOptimizedOsrCode(isolate, it.GetSlotOperand(2), code); return; } @@ -1023,17 +1043,12 @@ bool CompileTurbofan_NotConcurrent(Isolate* isolate, return false; } - { - // Park main thread here to be in the same state as background threads. 
- ParkedScope parked_scope(isolate->main_thread_local_isolate()); - if (job->ExecuteJob(isolate->counters()->runtime_call_stats(), - isolate->main_thread_local_isolate())) { - UnparkedScope unparked_scope(isolate->main_thread_local_isolate()); - CompilerTracer::TraceAbortedJob( - isolate, compilation_info, job->prepare_in_ms(), job->execute_in_ms(), - job->finalize_in_ms()); - return false; - } + if (job->ExecuteJob(isolate->counters()->runtime_call_stats(), + isolate->main_thread_local_isolate())) { + CompilerTracer::TraceAbortedJob(isolate, compilation_info, + job->prepare_in_ms(), job->execute_in_ms(), + job->finalize_in_ms()); + return false; } if (job->FinalizeJob(isolate) != CompilationJob::SUCCEEDED) { @@ -1118,7 +1133,8 @@ bool ShouldOptimize(CodeKind code_kind, Handle shared) { case CodeKind::TURBOFAN: return v8_flags.turbofan && shared->PassesFilter(v8_flags.turbo_filter); case CodeKind::MAGLEV: - return v8_flags.maglev && shared->PassesFilter(v8_flags.maglev_filter); + return maglev::IsMaglevEnabled() && + shared->PassesFilter(v8_flags.maglev_filter); default: UNREACHABLE(); } @@ -1171,10 +1187,9 @@ MaybeHandle CompileTurbofan(Isolate* isolate, Handle function, #ifdef V8_ENABLE_MAGLEV // TODO(v8:7700): Record maglev compilations better. void RecordMaglevFunctionCompilation(Isolate* isolate, - Handle function) { + Handle function, + Handle code) { PtrComprCageBase cage_base(isolate); - Handle abstract_code( - AbstractCode::cast(function->code(cage_base)), isolate); Handle shared(function->shared(cage_base), isolate); Handle